Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -1,103 +1,65 @@
-import os
-import pandas as pd
-from torch.utils.data import Dataset
-from transformers import (
-    MBartTokenizer,
-    MBartForConditionalGeneration,
-    Trainer,
-    TrainingArguments,
-)
-from huggingface_hub import HfFolder
-
-# Save the Hugging Face token (if not already saved)
-token = os.getenv("HF_TOKEN")
-if token:
-    HfFolder.save_token(token)
-    print("Token saved successfully!")
-else:
-    print("HF_TOKEN environment variable not set. Ensure your token is saved for authentication.")
-
-# Dataset class for English-Hindi translation pairs
-class HindiDataset(Dataset):
-    def __init__(self, data_path, tokenizer, max_length=512):
-        """
-        Dataset class for Hindi translation tasks.
-
-        Args:
-            data_path (str): Path to the tab-separated data file.
-            tokenizer (MBartTokenizer): Tokenizer for mBART.
-            max_length (int): Maximum sequence length for tokenization.
-        """
-        self.data = pd.read_csv(data_path, sep="\t")
-        self.tokenizer = tokenizer
-        self.max_length = max_length
-
-    def __len__(self):
-        return len(self.data)
-
-    def __getitem__(self, idx):
-        source = self.data.iloc[idx]["source"]
-        target = self.data.iloc[idx]["target"]
-
-        source_encodings = self.tokenizer(
-            source, max_length=self.max_length, truncation=True, padding="max_length", return_tensors="pt"
-        )
-        target_encodings = self.tokenizer(
-            target, max_length=self.max_length, truncation=True, padding="max_length", return_tensors="pt"
-        )
-
-        return {
-            "input_ids": source_encodings["input_ids"].squeeze(),
-            "attention_mask": source_encodings["attention_mask"].squeeze(),
-            "labels": target_encodings["input_ids"].squeeze(),
-        }
-
-# Load the tokenizer and build the training dataset
-tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-50")
-train_dataset = HindiDataset(data_path, tokenizer)
-
-# Load the pretrained model
-model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50")
-
-# Define training arguments
+from datasets import load_dataset
+from transformers import MarianMTModel, MarianTokenizer, TrainingArguments, Trainer, DataCollatorForSeq2Seq
+
+# Load dataset
+dataset = load_dataset('csv', data_files='dataset.tsv', delimiter='\t')
+
+# Load MarianMT tokenizer for translation task
+tokenizer = MarianTokenizer.from_pretrained('Helsinki-NLP/opus-mt-en-hi')
+
+# Tokenize the English text (source language)
+def tokenize_function(examples):
+    return tokenizer(examples['english'], truncation=True, padding='max_length', max_length=128)
+
+# Tokenize both English and Hindi sentences
+tokenized_datasets = dataset.map(tokenize_function, batched=True)
+
+def tokenize_target_function(examples):
+    return tokenizer(examples['hindi'], truncation=True, padding='max_length', max_length=128)
+
+tokenized_datasets = tokenized_datasets.map(tokenize_target_function, batched=True)
+
+# Data Collator for padding sequences
+data_collator = DataCollatorForSeq2Seq(tokenizer, model=None)
+
+# Load MarianMT model for translation
+model = MarianMTModel.from_pretrained('Helsinki-NLP/opus-mt-en-hi')
+
+# Define training arguments
 training_args = TrainingArguments(
-    output_dir=
-    weight_decay=0.01,  # Weight decay for optimizer
-    report_to="none"  # Disable third-party logging
+    output_dir='./results',
+    evaluation_strategy="epoch",
+    learning_rate=2e-5,
+    per_device_train_batch_size=16,
+    per_device_eval_batch_size=16,
+    num_train_epochs=3,
+    weight_decay=0.01,
+    save_total_limit=2,
+    predict_with_generate=True,
 )

-# Initialize the Trainer
+# Initialize Trainer
 trainer = Trainer(
     model=model,
     args=training_args,
-    train_dataset=train_dataset,
+    train_dataset=tokenized_datasets['train'],
+    eval_dataset=tokenized_datasets['test'],
+    tokenizer=tokenizer,
+    data_collator=data_collator,
 )

-# Train the model
-print("Starting training...")
+# Start training
 trainer.train()

-# Save the fine-tuned model
-print(f"Saving fine-tuned model to {output_dir}...")
-trainer.save_model(output_dir)
+# Save the model
+trainer.save_model('./my_hindi_translation_model')

-# Reload the saved tokenizer
-tokenizer = MBartTokenizer.from_pretrained(output_dir)
+# Evaluate the model
+results = trainer.evaluate()
+print(results)
+
+# Generate a prediction
+model.eval()
+inputs = tokenizer("How are you?", return_tensors="pt")
+outputs = model.generate(inputs["input_ids"], max_length=128)
+print(tokenizer.decode(outputs[0], skip_special_tokens=True))
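
The Space header still shows a runtime error, and a few details in the committed script are likely candidates: predict_with_generate is an option of Seq2SeqTrainingArguments, not of plain TrainingArguments, so constructing the arguments raises a TypeError; the second map call writes the Hindi input_ids over the English ones instead of creating a labels column; and load_dataset('csv', ...) returns only a 'train' split, so tokenized_datasets['test'] does not exist. The sketch below shows one way the same MarianMT fine-tune could be wired so it runs end to end. It is only a suggestion, not part of this commit: the dataset.tsv path and the english/hindi column names are assumptions carried over from the diff, and the switch to Seq2SeqTrainer, Seq2SeqTrainingArguments, and text_target is a suggested alternative rather than the author's code.

from datasets import load_dataset
from transformers import (
    MarianMTModel,
    MarianTokenizer,
    Seq2SeqTrainingArguments,
    Seq2SeqTrainer,
    DataCollatorForSeq2Seq,
)

model_name = 'Helsinki-NLP/opus-mt-en-hi'
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)

# load_dataset('csv', ...) yields only a 'train' split, so carve out a test split explicitly
dataset = load_dataset('csv', data_files='dataset.tsv', delimiter='\t')['train']  # assumed path
dataset = dataset.train_test_split(test_size=0.1)

def preprocess(examples):
    # Tokenize source and target together; text_target populates the 'labels' field
    return tokenizer(examples['english'], text_target=examples['hindi'],  # assumed column names
                     truncation=True, max_length=128)

tokenized = dataset.map(preprocess, batched=True,
                        remove_columns=dataset['train'].column_names)

# predict_with_generate belongs to Seq2SeqTrainingArguments, not TrainingArguments
training_args = Seq2SeqTrainingArguments(
    output_dir='./results',
    evaluation_strategy='epoch',
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    num_train_epochs=3,
    weight_decay=0.01,
    save_total_limit=2,
    predict_with_generate=True,
)

trainer = Seq2SeqTrainer(
    model=model,
    args=training_args,
    train_dataset=tokenized['train'],
    eval_dataset=tokenized['test'],
    tokenizer=tokenizer,
    # Passing the model lets the collator pad labels and prepare decoder_input_ids
    data_collator=DataCollatorForSeq2Seq(tokenizer, model=model),
)

trainer.train()
trainer.save_model('./my_hindi_translation_model')
print(trainer.evaluate())

Dynamic padding through DataCollatorForSeq2Seq also removes the need for padding='max_length' in the tokenization step, so batches stay shorter than a fixed 128 tokens when the sentences allow it.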