jejun commited on
Commit
6bd5a0c
1 Parent(s): 82ec1be

Upload 11 files

Browse files
.gitattributes CHANGED
@@ -1,34 +1,18 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
  *.h5 filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
9
  *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
  *.model filter=lfs diff=lfs merge=lfs -text
13
  *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
  *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
  *.pt filter=lfs diff=lfs merge=lfs -text
23
  *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tflite filter=lfs diff=lfs merge=lfs -text
29
- *.tgz filter=lfs diff=lfs merge=lfs -text
30
- *.wasm filter=lfs diff=lfs merge=lfs -text
31
- *.xz filter=lfs diff=lfs merge=lfs -text
32
- *.zip filter=lfs diff=lfs merge=lfs -text
33
- *.zst filter=lfs diff=lfs merge=lfs -text
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
1
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
2
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
3
  *.bin filter=lfs diff=lfs merge=lfs -text
 
 
 
 
4
  *.h5 filter=lfs diff=lfs merge=lfs -text
5
+ *.tflite filter=lfs diff=lfs merge=lfs -text
6
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
7
+ *.ot filter=lfs diff=lfs merge=lfs -text
8
+ *.onnx filter=lfs diff=lfs merge=lfs -text
9
+ *.arrow filter=lfs diff=lfs merge=lfs -text
10
+ *.ftz filter=lfs diff=lfs merge=lfs -text
11
  *.joblib filter=lfs diff=lfs merge=lfs -text
 
 
12
  *.model filter=lfs diff=lfs merge=lfs -text
13
  *.msgpack filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
14
  *.pb filter=lfs diff=lfs merge=lfs -text
 
 
15
  *.pt filter=lfs diff=lfs merge=lfs -text
16
  *.pth filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
17
  *tfevents* filter=lfs diff=lfs merge=lfs -text
18
+ *.csv* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,246 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language: en
3
+ tags:
4
+ - seq2seq
5
+ - t5
6
+ - text-generation
7
+ - recipe-generation
8
+ pipeline_tag: text2text-generation
9
+ widget:
10
+ - text: "provolone cheese, bacon, bread, ginger"
11
+ - text: "sugar, crunchy jif peanut butter, cornflakes"
12
+ - text: "sweet butter, confectioners sugar, flaked coconut, condensed milk, nuts, vanilla, dipping chocolate"
13
+ - text: "macaroni, butter, salt, bacon, milk, flour, pepper, cream corn"
14
+ - text: "hamburger, sausage, onion, regular, american cheese, colby cheese"
15
+ - text: "chicken breasts, onion, garlic, great northern beans, black beans, green chilies, broccoli, garlic oil, butter, cajun seasoning, salt, oregano, thyme, black pepper, basil, worcestershire sauce, chicken broth, sour cream, chardonnay wine"
16
+ - text: "serrano peppers, garlic, celery, oregano, canola oil, vinegar, water, kosher salt, salt, black pepper"
17
+ ---
18
+
19
+ ![avatar](chef-transformer.png)
20
+
21
+ # Chef Transformer (T5)
22
+ > This is part of the
23
+ [Flax/Jax Community Week](https://discuss.huggingface.co/t/recipe-generation-model/7475), organized by [HuggingFace](https://huggingface.co/) and TPU usage sponsored by Google.
24
+
25
+ Want to give it a try? Then what are you waiting for? Head over to the Hugging Face Spaces demo [here](https://huggingface.co/spaces/flax-community/chef-transformer).
26
+
27
+
28
+ ## Team Members
29
+ - Mehrdad Farahani ([m3hrdadfi](https://huggingface.co/m3hrdadfi))
30
+ - Kartik Godawat ([dk-crazydiv](https://huggingface.co/dk-crazydiv))
31
+ - Haswanth Aekula ([hassiahk](https://huggingface.co/hassiahk))
32
+ - Deepak Pandian ([rays2pix](https://huggingface.co/rays2pix))
33
+ - Nicholas Broad ([nbroad](https://huggingface.co/nbroad))
34
+
35
+ ## Dataset
36
+
37
+ [RecipeNLG: A Cooking Recipes Dataset for Semi-Structured Text Generation](https://recipenlg.cs.put.poznan.pl/). This dataset contains **2,231,142** cooking recipes (over 2 million) with a total size of **2.14 GB**. It has been processed with particular care.
38
+
39
+ ### Example
40
+
41
+ ```json
42
+ {
43
+ "NER": [
44
+ "oyster crackers",
45
+ "salad dressing",
46
+ "lemon pepper",
47
+ "dill weed",
48
+ "garlic powder",
49
+ "salad oil"
50
+ ],
51
+ "directions": [
52
+ "Combine salad dressing mix and oil.",
53
+ "Add dill weed, garlic powder and lemon pepper.",
54
+ "Pour over crackers; stir to coat.",
55
+ "Place in warm oven.",
56
+ "Use very low temperature for 15 to 20 minutes."
57
+ ],
58
+ "ingredients": [
59
+ "12 to 16 oz. plain oyster crackers",
60
+ "1 pkg. Hidden Valley Ranch salad dressing mix",
61
+ "1/4 tsp. lemon pepper",
62
+ "1/2 to 1 tsp. dill weed",
63
+ "1/4 tsp. garlic powder",
64
+ "3/4 to 1 c. salad oil"
65
+ ],
66
+ "link": "www.cookbooks.com/Recipe-Details.aspx?id=648947",
67
+ "source": "Gathered",
68
+ "title": "Hidden Valley Ranch Oyster Crackers"
69
+ }
70
+ ```
71
+
72
+ ## How To Use
73
+
74
+ ```bash
75
+ # Installing requirements
76
+ pip install transformers
77
+ ```
78
+
79
+ ```python
80
+ from transformers import FlaxAutoModelForSeq2SeqLM
81
+ from transformers import AutoTokenizer
82
+
83
+ MODEL_NAME_OR_PATH = "flax-community/t5-recipe-generation"
84
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME_OR_PATH, use_fast=True)
85
+ model = FlaxAutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME_OR_PATH)
86
+
87
+ prefix = "items: "
88
+ # generation_kwargs = {
89
+ # "max_length": 512,
90
+ # "min_length": 64,
91
+ # "no_repeat_ngram_size": 3,
92
+ # "early_stopping": True,
93
+ # "num_beams": 5,
94
+ # "length_penalty": 1.5,
95
+ # }
96
+ generation_kwargs = {
97
+ "max_length": 512,
98
+ "min_length": 64,
99
+ "no_repeat_ngram_size": 3,
100
+ "do_sample": True,
101
+ "top_k": 60,
102
+ "top_p": 0.95
103
+ }
104
+
105
+
106
+ special_tokens = tokenizer.all_special_tokens
107
+ tokens_map = {
108
+ "<sep>": "--",
109
+ "<section>": "\n"
110
+ }
111
+ def skip_special_tokens(text, special_tokens):
112
+ for token in special_tokens:
113
+ text = text.replace(token, "")
114
+
115
+ return text
116
+
117
+ def target_postprocessing(texts, special_tokens):
118
+ if not isinstance(texts, list):
119
+ texts = [texts]
120
+
121
+ new_texts = []
122
+ for text in texts:
123
+ text = skip_special_tokens(text, special_tokens)
124
+
125
+ for k, v in tokens_map.items():
126
+ text = text.replace(k, v)
127
+
128
+ new_texts.append(text)
129
+
130
+ return new_texts
131
+
132
+ def generation_function(texts):
133
+ _inputs = texts if isinstance(texts, list) else [texts]
134
+ inputs = [prefix + inp for inp in _inputs]
135
+ inputs = tokenizer(
136
+ inputs,
137
+ max_length=256,
138
+ padding="max_length",
139
+ truncation=True,
140
+ return_tensors="jax"
141
+ )
142
+
143
+ input_ids = inputs.input_ids
144
+ attention_mask = inputs.attention_mask
145
+
146
+ output_ids = model.generate(
147
+ input_ids=input_ids,
148
+ attention_mask=attention_mask,
149
+ **generation_kwargs
150
+ )
151
+ generated = output_ids.sequences
152
+ generated_recipe = target_postprocessing(
153
+ tokenizer.batch_decode(generated, skip_special_tokens=False),
154
+ special_tokens
155
+ )
156
+ return generated_recipe
157
+ ```
158
+
159
+ ```python
160
+ items = [
161
+ "macaroni, butter, salt, bacon, milk, flour, pepper, cream corn",
162
+ "provolone cheese, bacon, bread, ginger"
163
+ ]
164
+ generated = generation_function(items)
165
+ for text in generated:
166
+ sections = text.split("\n")
167
+ for section in sections:
168
+ section = section.strip()
169
+ if section.startswith("title:"):
170
+ section = section.replace("title:", "")
171
+ headline = "TITLE"
172
+ elif section.startswith("ingredients:"):
173
+ section = section.replace("ingredients:", "")
174
+ headline = "INGREDIENTS"
175
+ elif section.startswith("directions:"):
176
+ section = section.replace("directions:", "")
177
+ headline = "DIRECTIONS"
178
+
179
+ if headline == "TITLE":
180
+ print(f"[{headline}]: {section.strip().capitalize()}")
181
+ else:
182
+ section_info = [f" - {i+1}: {info.strip().capitalize()}" for i, info in enumerate(section.split("--"))]
183
+ print(f"[{headline}]:")
184
+ print("\n".join(section_info))
185
+
186
+ print("-" * 130)
187
+ ```
188
+
189
+ Output:
190
+ ```text
191
+ [TITLE]: Macaroni and corn
192
+ [INGREDIENTS]:
193
+ - 1: 2 c. macaroni
194
+ - 2: 2 tbsp. butter
195
+ - 3: 1 tsp. salt
196
+ - 4: 4 slices bacon
197
+ - 5: 2 c. milk
198
+ - 6: 2 tbsp. flour
199
+ - 7: 1/4 tsp. pepper
200
+ - 8: 1 can cream corn
201
+ [DIRECTIONS]:
202
+ - 1: Cook macaroni in boiling salted water until tender.
203
+ - 2: Drain.
204
+ - 3: Melt butter in saucepan.
205
+ - 4: Blend in flour, salt and pepper.
206
+ - 5: Add milk all at once.
207
+ - 6: Cook and stir until thickened and bubbly.
208
+ - 7: Stir in corn and bacon.
209
+ - 8: Pour over macaroni and mix well.
210
+ ----------------------------------------------------------------------------------------------------------------------------------
211
+ [TITLE]: Grilled provolone and bacon sandwich
212
+ [INGREDIENTS]:
213
+ - 1: 2 slices provolone cheese
214
+ - 2: 2 slices bacon
215
+ - 3: 2 slices sourdough bread
216
+ - 4: 2 slices pickled ginger
217
+ [DIRECTIONS]:
218
+ - 1: Place a slice of provolone cheese on one slice of bread.
219
+ - 2: Top with a slice of bacon.
220
+ - 3: Top with a slice of pickled ginger.
221
+ - 4: Top with the other slice of bread.
222
+ - 5: Heat a skillet over medium heat.
223
+ - 6: Place the sandwich in the skillet and cook until the cheese is melted and the bread is golden brown.
224
+ ----------------------------------------------------------------------------------------------------------------------------------
225
+ ```
226
+
227
+ ## Evaluation
228
+ Since the test set is not available, we will evaluate the model based on a shared test set. This test set consists of 5% of the whole test set (*= 5,000 records*),
229
+ and we will generate five recipes for each input (*= 25,000 records*).
230
+ The following table summarizes the scores obtained by the **Chef Transformer** and **RecipeNLG** as our baseline.
231
+
232
+ | Model | COSIM | WER | ROUGE-2 | BLEU | GLEU | METEOR |
233
+ |:------------------------------------------------------------------------:|:----------:|:----------:|:----------:|:----------:|:----------:|:----------:|
234
+ | [RecipeNLG](https://huggingface.co/mbien/recipenlg) | 0.5723 | 1.2125 | 0.1354 | 0.1164 | 0.1503 | 0.2309 |
235
+ | [Chef Transformer](https://huggingface.co/flax-community/t5-recipe-generation) * | **0.7282** | **0.7613** | **0.2470** | **0.3245** | **0.2624** | **0.4150** |
236
+
237
+ *From the 5 generated recipes corresponding to each NER (food items), only the highest score was taken into account in the WER, COSIM, and ROUGE metrics. In contrast, BLEU, GLEU, and METEOR were designed to work with multiple possible references.*
238
+
239
+
240
+ ## Copyright
241
+
242
+ Special thanks to those who provided these fantastic materials.
243
+ - [Anatomy](https://www.flaticon.com/free-icon)
244
+ - [Chef Hat](https://www.vecteezy.com/members/jellyfishwater)
245
+ - [Moira Nazzari](https://pixabay.com/photos/food-dessert-cake-eggs-butter-3048440/)
246
+ - [Instagram Post](https://www.freepik.com/free-psd/recipes-ad-social-media-post-template_11520617.htm)
chef-transformer.png ADDED
config.json ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "T5ForConditionalGeneration"
4
+ ],
5
+ "d_ff": 3072,
6
+ "d_kv": 64,
7
+ "d_model": 768,
8
+ "decoder_start_token_id": 0,
9
+ "dropout_rate": 0.1,
10
+ "eos_token_id": 1,
11
+ "feed_forward_proj": "relu",
12
+ "gradient_checkpointing": false,
13
+ "initializer_factor": 1.0,
14
+ "is_encoder_decoder": true,
15
+ "layer_norm_epsilon": 1e-06,
16
+ "model_type": "t5",
17
+ "n_positions": 512,
18
+ "num_decoder_layers": 12,
19
+ "num_heads": 12,
20
+ "num_layers": 12,
21
+ "output_past": true,
22
+ "pad_token_id": 0,
23
+ "relative_attention_num_buckets": 32,
24
+ "task_specific_params": {
25
+ "summarization": {
26
+ "early_stopping": true,
27
+ "length_penalty": 2.0,
28
+ "max_length": 200,
29
+ "min_length": 30,
30
+ "no_repeat_ngram_size": 3,
31
+ "num_beams": 4,
32
+ "prefix": "summarize: "
33
+ },
34
+ "translation_en_to_de": {
35
+ "early_stopping": true,
36
+ "max_length": 300,
37
+ "num_beams": 4,
38
+ "prefix": "translate English to German: "
39
+ },
40
+ "translation_en_to_fr": {
41
+ "early_stopping": true,
42
+ "max_length": 300,
43
+ "num_beams": 4,
44
+ "prefix": "translate English to French: "
45
+ },
46
+ "translation_en_to_ro": {
47
+ "early_stopping": true,
48
+ "max_length": 300,
49
+ "num_beams": 4,
50
+ "prefix": "translate English to Romanian: "
51
+ },
52
+ "text2text-generation": {
53
+ "early_stopping": true,
54
+ "max_length": 512,
55
+ "repetition_penalty": 1.2,
56
+ "length_penalty": 1.2,
57
+ "num_beams": 5,
58
+ "prefix": "items: "
59
+ }
60
+ },
61
+ "transformers_version": "4.9.0.dev0",
62
+ "use_cache": true,
63
+ "vocab_size": 32128
64
+ }
events.out.tfevents.1625725210.t1v-n-a0c138ef-w-0.183348.3.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3ddf08cb1bf3c10e07a056dd0cf7050d92c3835b947812877ef872a76156f460
3
+ size 20265157
flax_model.msgpack ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:effb0d06380db9445a71a59885bb5006bf40eddd781b54a32da1efe0f154fc57
3
+ size 891625348
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:637cc03a65c7ab9b1798e0ea03e6cd3909e278b1e441b2bc85a6eb3380607919
3
+ size 891727295
special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"]}
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce0d107670b05c104f606fe6aca353bf17be0e58c24b683b8e44163336dbf68d
3
+ size 892144216
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "extra_ids": 100, "additional_special_tokens": ["<extra_id_0>", "<extra_id_1>", "<extra_id_2>", "<extra_id_3>", "<extra_id_4>", "<extra_id_5>", "<extra_id_6>", "<extra_id_7>", "<extra_id_8>", "<extra_id_9>", "<extra_id_10>", "<extra_id_11>", "<extra_id_12>", "<extra_id_13>", "<extra_id_14>", "<extra_id_15>", "<extra_id_16>", "<extra_id_17>", "<extra_id_18>", "<extra_id_19>", "<extra_id_20>", "<extra_id_21>", "<extra_id_22>", "<extra_id_23>", "<extra_id_24>", "<extra_id_25>", "<extra_id_26>", "<extra_id_27>", "<extra_id_28>", "<extra_id_29>", "<extra_id_30>", "<extra_id_31>", "<extra_id_32>", "<extra_id_33>", "<extra_id_34>", "<extra_id_35>", "<extra_id_36>", "<extra_id_37>", "<extra_id_38>", "<extra_id_39>", "<extra_id_40>", "<extra_id_41>", "<extra_id_42>", "<extra_id_43>", "<extra_id_44>", "<extra_id_45>", "<extra_id_46>", "<extra_id_47>", "<extra_id_48>", "<extra_id_49>", "<extra_id_50>", "<extra_id_51>", "<extra_id_52>", "<extra_id_53>", "<extra_id_54>", "<extra_id_55>", "<extra_id_56>", "<extra_id_57>", "<extra_id_58>", "<extra_id_59>", "<extra_id_60>", "<extra_id_61>", "<extra_id_62>", "<extra_id_63>", "<extra_id_64>", "<extra_id_65>", "<extra_id_66>", "<extra_id_67>", "<extra_id_68>", "<extra_id_69>", "<extra_id_70>", "<extra_id_71>", "<extra_id_72>", "<extra_id_73>", "<extra_id_74>", "<extra_id_75>", "<extra_id_76>", "<extra_id_77>", "<extra_id_78>", "<extra_id_79>", "<extra_id_80>", "<extra_id_81>", "<extra_id_82>", "<extra_id_83>", "<extra_id_84>", "<extra_id_85>", "<extra_id_86>", "<extra_id_87>", "<extra_id_88>", "<extra_id_89>", "<extra_id_90>", "<extra_id_91>", "<extra_id_92>", "<extra_id_93>", "<extra_id_94>", "<extra_id_95>", "<extra_id_96>", "<extra_id_97>", "<extra_id_98>", "<extra_id_99>"], "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "t5-base", "tokenizer_class": "T5Tokenizer"}