model update

- README.md +0 -3
- config.json +1 -1
- pytorch_model.bin +2 -2
- tokenizer_config.json +1 -1
README.md CHANGED

@@ -67,7 +67,6 @@ This model is fine-tuned version of [t5-base](https://huggingface.co/t5-base) fo
 ### Usage
 - With [`lmqg`](https://github.com/asahi417/lm-question-generation#lmqg-language-model-for-question-generation-)
 ```python
-
 from lmqg import TransformersQG
 
 # initialize model
@@ -80,7 +79,6 @@ answers = model.answer_q(list_question="What is a person called is practicing he
 
 - With `transformers`
 ```python
-
 from transformers import pipeline
 
 pipe = pipeline("text2text-generation", "lmqg/t5-base-tweetqa-question-answering")
@@ -131,7 +129,6 @@ The full configuration can be found at [fine-tuning config file](https://hugging
 
 ## Citation
 ```
-
 @inproceedings{ushio-etal-2022-generative,
     title = "{G}enerative {L}anguage {M}odels for {P}aragraph-{L}evel {Q}uestion {G}eneration",
     author = "Ushio, Asahi and
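The hunks above only strip blank lines inside the README's code blocks, but they cut through the `lmqg` usage snippet. A minimal end-to-end sketch of that path: the model ID and the `answer_q(list_question=...)` call are taken from the diff, while the constructor keyword, the `list_context` argument, and the example strings are assumptions.

```python
from lmqg import TransformersQG

# initialize model (model ID from the diff; constructor keyword assumed)
model = TransformersQG(model="lmqg/t5-base-tweetqa-question-answering")

# `answer_q` and `list_question` appear in the hunk header above;
# `list_context` and both strings are illustrative assumptions
answers = model.answer_q(
    list_question="Who founded the company?",
    list_context="The company was founded by Jane Doe in 1999.",
)
print(answers)
```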
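The plain `transformers` snippet is cut the same way. A sketch of that route, assuming the `"question: ..., context: ..."` input template that lmqg question-answering models are typically trained on; only the `pipeline(...)` call is verbatim from the diff.

```python
from transformers import pipeline

# task and model ID taken verbatim from the README diff
pipe = pipeline("text2text-generation", "lmqg/t5-base-tweetqa-question-answering")

# the prompt template is an assumption; check the model card for the exact format
output = pipe("question: Who founded the company?, context: The company was founded by Jane Doe in 1999.")
print(output)  # e.g. [{"generated_text": "..."}]
```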
config.json CHANGED

@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "lmqg_output/t5-base-tweetqa-question-answering/
+  "_name_or_path": "lmqg_output/t5-base-tweetqa-question-answering/model_eszyci/epoch_9",
   "add_prefix": false,
   "architectures": [
     "T5ForConditionalGeneration"
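Only the `_name_or_path` bookkeeping field changes here, so loading behavior is unaffected. A quick sanity check, as a sketch, that the published config still resolves to the architecture listed in the context lines:

```python
from transformers import AutoConfig

# fetch the config shown in the diff from the Hub
config = AutoConfig.from_pretrained("lmqg/t5-base-tweetqa-question-answering")
print(config.architectures)  # expected: ["T5ForConditionalGeneration"]
```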
pytorch_model.bin CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:3d9af18f30d344085a0c0eb5fce63dbe9354c80444d241743054f9bc72089d2f
+size 891617855
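`pytorch_model.bin` is stored via Git LFS, so the diff replaces the pointer's object hash and byte size rather than the weights themselves. A sketch for verifying a downloaded checkpoint against the new pointer; the local filename is an assumption.

```python
import hashlib
import os

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file and return its hex SHA-256 digest."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(chunk_size), b""):
            digest.update(block)
    return digest.hexdigest()

# hash and size come from the LFS pointer above; the path is a local assumption
path = "pytorch_model.bin"
assert os.path.getsize(path) == 891617855
assert sha256_of(path) == "3d9af18f30d344085a0c0eb5fce63dbe9354c80444d241743054f9bc72089d2f"
```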
tokenizer_config.json CHANGED

@@ -104,7 +104,7 @@
   "eos_token": "</s>",
   "extra_ids": 100,
   "model_max_length": 512,
-  "name_or_path": "lmqg_output/t5-base-tweetqa-question-answering/
+  "name_or_path": "lmqg_output/t5-base-tweetqa-question-answering/model_eszyci/epoch_9",
   "pad_token": "<pad>",
   "special_tokens_map_file": null,
   "tokenizer_class": "T5Tokenizer",
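As in config.json, only the `name_or_path` bookkeeping field is updated. A minimal sketch confirming the tokenizer loads with the settings shown in the context lines:

```python
from transformers import AutoTokenizer

# loads the tokenizer configured above
tokenizer = AutoTokenizer.from_pretrained("lmqg/t5-base-tweetqa-question-answering")
print(type(tokenizer).__name__)    # T5Tokenizer / T5TokenizerFast, per "tokenizer_class"
print(tokenizer.model_max_length)  # 512, per "model_max_length"
```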