add model
- eval/metric.first.answer.paragraph_sentence.answer.lmqg_qg_squad.default.json +1 -0
- eval/metric.first.sentence.paragraph_sentence.answer.lmqg_qg_squad.default.json +1 -0
- eval/samples.test.hyp.paragraph_sentence.answer.lmqg_qg_squad.default.txt +0 -0
- eval/samples.validation.hyp.paragraph_sentence.answer.lmqg_qg_squad.default.txt +0 -0
- tokenizer_config.json +1 -1
- trainer_config.json +1 -0
eval/metric.first.answer.paragraph_sentence.answer.lmqg_qg_squad.default.json
ADDED
@@ -0,0 +1 @@
+{"validation": {"Bleu_1": 0.4715179781551369, "Bleu_2": 0.4277173335594881, "Bleu_3": 0.3849632868643028, "Bleu_4": 0.3468025778279883, "METEOR": 0.3947140501901248, "ROUGE_L": 0.6300447845480465, "BERTScore": 0.9079972292808507, "MoverScore": 0.7760101490203553, "AnswerF1Score": 63.64704337996092, "AnswerExactMatch": 48.978240302743615}, "test": {"Bleu_1": 0.5242348506575876, "Bleu_2": 0.47809312592713743, "Bleu_3": 0.43217541880115024, "Bleu_4": 0.3922979559210909, "METEOR": 0.4249685660312887, "ROUGE_L": 0.6757681079378073, "BERTScore": 0.9120216670838873, "MoverScore": 0.8091604506954679, "AnswerF1Score": 68.06059221814523, "AnswerExactMatch": 56.15054306643092}}
eval/metric.first.sentence.paragraph_sentence.answer.lmqg_qg_squad.default.json
ADDED
@@ -0,0 +1 @@
+{"validation": {"Bleu_1": 0.5381452429149627, "Bleu_2": 0.49012854410499973, "Bleu_3": 0.4434865210921782, "Bleu_4": 0.4022049723682318, "METEOR": 0.42138458895007896, "ROUGE_L": 0.6891710224622246, "BERTScore": 0.9127616033736531, "MoverScore": 0.7844776185754523}, "test": {"Bleu_1": 0.5990032518026085, "Bleu_2": 0.5519010911059491, "Bleu_3": 0.5051279159102922, "Bleu_4": 0.464309419962817, "METEOR": 0.46848630286484544, "ROUGE_L": 0.74949790258691, "BERTScore": 0.9170988952887753, "MoverScore": 0.816619573730592}}
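
Both metric files above share the same layout: a JSON object keyed by split ("validation", "test") and then by metric name; the first file additionally carries the answer-span scores (AnswerF1Score, AnswerExactMatch). A minimal sketch for inspecting them, using the paths as committed under eval/:

```python
import json

# Path as committed in this repo's eval/ directory.
ANSWER_LEVEL = "eval/metric.first.answer.paragraph_sentence.answer.lmqg_qg_squad.default.json"

with open(ANSWER_LEVEL) as f:
    metrics = json.load(f)

for split, scores in metrics.items():
    # N-gram/embedding metrics (BLEU, METEOR, ROUGE-L, BERTScore, MoverScore)
    # are fractions in [0, 1]; the answer-span metrics are on a 0-100 scale.
    print(split, round(scores["Bleu_4"], 3), round(scores["AnswerF1Score"], 2))
# validation 0.347 63.65
# test 0.392 68.06
```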
eval/samples.test.hyp.paragraph_sentence.answer.lmqg_qg_squad.default.txt
ADDED
The diff for this file is too large to render; see the raw file.
eval/samples.validation.hyp.paragraph_sentence.answer.lmqg_qg_squad.default.txt
ADDED
The diff for this file is too large to render; see the raw file.
tokenizer_config.json
CHANGED
@@ -104,7 +104,7 @@
 "eos_token": "</s>",
 "extra_ids": 100,
 "model_max_length": 512,
-"name_or_path": "lmqg_output/t5-small-squad-answer-extraction/
+"name_or_path": "lmqg_output/t5-small-squad-answer-extraction/model_eszyci/epoch_2",
 "pad_token": "<pad>",
 "special_tokens_map_file": null,
 "tokenizer_class": "T5Tokenizer",
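
The updated name_or_path records the local checkpoint the tokenizer was exported from (epoch 2 of run model_eszyci). Once published, the checkpoint loads like any transformers seq2seq model; the sketch below is hedged: the Hub repo id lmqg/t5-small-squad-ae, the "extract answers:" task prefix (suggested by prefix_types: ["ae"] in trainer_config.json), and the <hl> sentence-highlight markup are assumptions, not confirmed by this diff.

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Assumed Hub repo id for this checkpoint; substitute the actual one.
MODEL = "lmqg/t5-small-squad-ae"

tokenizer = AutoTokenizer.from_pretrained(MODEL)  # T5Tokenizer, model_max_length=512
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL)

# Assumed input format: the target sentence is wrapped in <hl> markers inside
# its paragraph, and the "ae" prefix asks the model for an answer span.
text = (
    "extract answers: <hl> Beyonce starred as blues singer Etta James in the "
    "2008 musical biopic, Cadillac Records. <hl> Her performance in the film "
    "received praise from critics."
)

inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
outputs = model.generate(**inputs, max_length=32)  # max_length_output=32 in trainer_config
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```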
trainer_config.json
ADDED
@@ -0,0 +1 @@
+{"dataset_path": "lmqg/qg_squad", "dataset_name": "default", "input_types": ["paragraph_sentence"], "output_types": ["answer"], "prefix_types": ["ae"], "model": "t5-small", "max_length": 512, "max_length_output": 32, "epoch": 7, "batch": 64, "lr": 0.0001, "fp16": false, "random_seed": 1, "gradient_accumulation_steps": 1, "label_smoothing": 0.15}
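
trainer_config.json records the fine-tuning hyperparameters: t5-small on lmqg/qg_squad, highlighted-paragraph inputs (paragraph_sentence) to answer outputs under the "ae" prefix, 7 epochs at batch size 64, lr 1e-4, label smoothing 0.15. As a rough illustration only (lmqg drives training with its own trainer, not shown here), most of these fields map directly onto Hugging Face training arguments:

```python
import json

from transformers import Seq2SeqTrainingArguments

with open("trainer_config.json") as f:
    cfg = json.load(f)

# Illustrative mapping onto HF arguments, not the lmqg implementation itself.
args = Seq2SeqTrainingArguments(
    output_dir="lmqg_output/t5-small-squad-answer-extraction",
    learning_rate=cfg["lr"],                                         # 1e-4
    num_train_epochs=cfg["epoch"],                                   # 7; tokenizer_config points at epoch_2
    per_device_train_batch_size=cfg["batch"],                        # 64
    gradient_accumulation_steps=cfg["gradient_accumulation_steps"],  # 1
    label_smoothing_factor=cfg["label_smoothing"],                   # 0.15
    fp16=cfg["fp16"],                                                # false
    seed=cfg["random_seed"],                                         # 1
)
```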