Update app.py
app.py CHANGED
@@ -114,7 +114,7 @@ for question in ["What is karma?", "Who is Arjuna?"]:
 from transformers import BertTokenizer, TFBertForQuestionAnswering
 from transformers import AdamW  # Optimizer (optional)
 # from transformers import SquadLoss  # Loss function (optional)
-from transformers.models.squad import SquadLoss
+# from transformers.models.squad import SquadLoss
 
 
 # Load pre-trained model and tokenizer
@@ -170,9 +170,11 @@ train_data = prepare_training_data(qa_pairs)
 # Train the model
 learning_rate = 2e-5
 epochs = 3  # Adjust these values as needed
-model.compile(optimizer=AdamW(learning_rate=learning_rate)
+model.compile(optimizer=AdamW(learning_rate=learning_rate))
 model.fit(train_data, epochs=epochs)
 
+# loss=SquadLoss()
+
 # Save the trained model and tokenizer
 model.save_pretrained("bhagavad_gita_qa_model")
 tokenizer.save_pretrained("bhagavad_gita_qa_model")
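For anyone reproducing the compile/fit step this commit touches, here is a minimal, hedged sketch, not part of the commit itself: the "bert-base-uncased" checkpoint, the switch to tf.keras.optimizers.Adam (the AdamW exported by transformers is a PyTorch optimizer), and the commented-out fit() call are assumptions; train_data would come from the script's own prepare_training_data(qa_pairs).

# Hedged sketch only -- assumes a TF/Keras setup and a "bert-base-uncased" checkpoint.
import tensorflow as tf
from transformers import BertTokenizer, TFBertForQuestionAnswering

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = TFBertForQuestionAnswering.from_pretrained("bert-base-uncased")

learning_rate = 2e-5
epochs = 3  # adjust as needed

# transformers' AdamW targets PyTorch, so a Keras optimizer is assumed here.
# With no loss passed to compile(), the TF model falls back to its built-in QA loss
# whenever start/end position labels are present in the training batches.
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate))

# model.fit(train_data, epochs=epochs)  # train_data: a tf.data.Dataset from prepare_training_data(qa_pairs)

model.save_pretrained("bhagavad_gita_qa_model")
tokenizer.save_pretrained("bhagavad_gita_qa_model")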