thundaa committed
Commit 312f952
1 Parent(s): d44ae27

End of training

last-checkpoint/config.json DELETED
@@ -1,32 +0,0 @@
- {
-   "_name_or_path": "thundaa/thermo-evotuning-prot_bert",
-   "architectures": [
-     "BertForSequenceClassification"
-   ],
-   "attention_probs_dropout_prob": 0.0,
-   "classifier_dropout": null,
-   "hidden_act": "gelu",
-   "hidden_dropout_prob": 0.0,
-   "hidden_size": 1024,
-   "id2label": {
-     "0": "LABEL_0"
-   },
-   "initializer_range": 0.02,
-   "intermediate_size": 4096,
-   "label2id": {
-     "LABEL_0": 0
-   },
-   "layer_norm_eps": 1e-12,
-   "max_position_embeddings": 40000,
-   "model_type": "bert",
-   "num_attention_heads": 16,
-   "num_hidden_layers": 30,
-   "pad_token_id": 0,
-   "position_embedding_type": "absolute",
-   "problem_type": "regression",
-   "torch_dtype": "float32",
-   "transformers_version": "4.18.0",
-   "type_vocab_size": 2,
-   "use_cache": true,
-   "vocab_size": 30
- }
 
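A quick usage sketch for the configuration above (not part of the commit): it shows how a checkpoint with this config could be re-instantiated, assuming the transformers library (4.18.0 per transformers_version) and that the base repo thundaa/thermo-evotuning-prot_bert named in _name_or_path is still reachable on the Hub.

# Sketch: rebuild the regression model described by the deleted config.json.
# Assumes transformers >= 4.18 is installed; the repo id comes from the config above.
from transformers import BertConfig, BertForSequenceClassification

config = BertConfig.from_pretrained(
    "thundaa/thermo-evotuning-prot_bert",  # _name_or_path from config.json
    num_labels=1,                          # single label (LABEL_0) with problem_type regression
    problem_type="regression",
)
model = BertForSequenceClassification.from_pretrained(
    "thundaa/thermo-evotuning-prot_bert", config=config
)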
last-checkpoint/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a523d3b6334f6de4cfe938b124acc74f6e920e02b978919a5bc10e988425b460
- size 3359741661
 
last-checkpoint/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:26216648098c14e4ba6ffec645eb9ad4a8ac34e91bfcbc10876e53b9fb255fac
- size 1680212909
 
last-checkpoint/rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:1ad6daa8b9e8dabb625aba226dd6a1abad2aa9eb21f2fb53f6b21da32524d629
- size 15523
 
last-checkpoint/scaler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:f3560197ccceeedcb264b77ccdc2c8d3107dd09afe711ecc158f3eaaad21f105
- size 559
 
last-checkpoint/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:4491687887918e0107d5d93321e5d5275629c6050b0b80e61089ad3a3f2e3f3b
- size 623
 
last-checkpoint/special_tokens_map.json DELETED
@@ -1 +0,0 @@
- {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
 
last-checkpoint/tokenizer.json DELETED
@@ -1,180 +0,0 @@
- {
-   "version": "1.0",
-   "truncation": null,
-   "padding": null,
-   "added_tokens": [
-     {
-       "id": 0,
-       "content": "[PAD]",
-       "single_word": false,
-       "lstrip": false,
-       "rstrip": false,
-       "normalized": false,
-       "special": true
-     },
-     {
-       "id": 1,
-       "content": "[UNK]",
-       "single_word": false,
-       "lstrip": false,
-       "rstrip": false,
-       "normalized": false,
-       "special": true
-     },
-     {
-       "id": 2,
-       "content": "[CLS]",
-       "single_word": false,
-       "lstrip": false,
-       "rstrip": false,
-       "normalized": false,
-       "special": true
-     },
-     {
-       "id": 3,
-       "content": "[SEP]",
-       "single_word": false,
-       "lstrip": false,
-       "rstrip": false,
-       "normalized": false,
-       "special": true
-     },
-     {
-       "id": 4,
-       "content": "[MASK]",
-       "single_word": false,
-       "lstrip": false,
-       "rstrip": false,
-       "normalized": false,
-       "special": true
-     }
-   ],
-   "normalizer": {
-     "type": "BertNormalizer",
-     "clean_text": true,
-     "handle_chinese_chars": true,
-     "strip_accents": null,
-     "lowercase": false
-   },
-   "pre_tokenizer": {
-     "type": "BertPreTokenizer"
-   },
-   "post_processor": {
-     "type": "TemplateProcessing",
-     "single": [
-       {
-         "SpecialToken": {
-           "id": "[CLS]",
-           "type_id": 0
-         }
-       },
-       {
-         "Sequence": {
-           "id": "A",
-           "type_id": 0
-         }
-       },
-       {
-         "SpecialToken": {
-           "id": "[SEP]",
-           "type_id": 0
-         }
-       }
-     ],
-     "pair": [
-       {
-         "SpecialToken": {
-           "id": "[CLS]",
-           "type_id": 0
-         }
-       },
-       {
-         "Sequence": {
-           "id": "A",
-           "type_id": 0
-         }
-       },
-       {
-         "SpecialToken": {
-           "id": "[SEP]",
-           "type_id": 0
-         }
-       },
-       {
-         "Sequence": {
-           "id": "B",
-           "type_id": 1
-         }
-       },
-       {
-         "SpecialToken": {
-           "id": "[SEP]",
-           "type_id": 1
-         }
-       }
-     ],
-     "special_tokens": {
-       "[CLS]": {
-         "id": "[CLS]",
-         "ids": [
-           2
-         ],
-         "tokens": [
-           "[CLS]"
-         ]
-       },
-       "[SEP]": {
-         "id": "[SEP]",
-         "ids": [
-           3
-         ],
-         "tokens": [
-           "[SEP]"
-         ]
-       }
-     }
-   },
-   "decoder": {
-     "type": "WordPiece",
-     "prefix": "##",
-     "cleanup": true
-   },
-   "model": {
-     "type": "WordPiece",
-     "unk_token": "[UNK]",
-     "continuing_subword_prefix": "##",
-     "max_input_chars_per_word": 100,
-     "vocab": {
-       "[PAD]": 0,
-       "[UNK]": 1,
-       "[CLS]": 2,
-       "[SEP]": 3,
-       "[MASK]": 4,
-       "L": 5,
-       "A": 6,
-       "G": 7,
-       "V": 8,
-       "E": 9,
-       "S": 10,
-       "I": 11,
-       "K": 12,
-       "R": 13,
-       "D": 14,
-       "T": 15,
-       "P": 16,
-       "N": 17,
-       "Q": 18,
-       "F": 19,
-       "Y": 20,
-       "M": 21,
-       "H": 22,
-       "C": 23,
-       "W": 24,
-       "X": 25,
-       "U": 26,
-       "B": 27,
-       "Z": 28,
-       "O": 29
-     }
-   }
- }
 
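A small usage sketch for the tokenizer above (not part of the commit): with a character-level WordPiece vocabulary and a BertPreTokenizer, residues are expected to be separated by whitespace, ProtBERT style. It assumes transformers is installed and the tokenizer files can still be loaded from the base repo; the example sequence is illustrative.

# Sketch: load the character-level protein tokenizer and encode one sequence.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("thundaa/thermo-evotuning-prot_bert")
sequence = "M K T A Y I A K Q R"   # residues separated by spaces; BertPreTokenizer splits on whitespace
encoded = tokenizer(sequence, return_tensors="pt")
print(encoded["input_ids"])        # [CLS] + one id per residue + [SEP]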
last-checkpoint/tokenizer_config.json DELETED
@@ -1 +0,0 @@
- {"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "special_tokens_map_file": null, "full_tokenizer_file": null, "name_or_path": "thundaa/thermo-evotuning-prot_bert", "do_basic_tokenize": true, "never_split": null, "tokenizer_class": "BertTokenizer"}
 
last-checkpoint/trainer_state.json DELETED
@@ -1,451 +0,0 @@
- {
-   "best_metric": 0.18365982174873352,
-   "best_model_checkpoint": "thermo-predictor-thermo-evotuning-prot_bert/checkpoint-58",
-   "epoch": 28.677248677248677,
-   "global_step": 58,
-   "is_hyper_param_search": false,
-   "is_local_process_zero": true,
-   "is_world_process_zero": true,
-   "log_history": [
-     {
-       "epoch": 0.68,
-       "learning_rate": 1.9600000000000002e-05,
-       "loss": 0.4734,
-       "step": 2
-     },
-     {
-       "epoch": 0.68,
-       "eval_loss": 0.31455937027931213,
-       "eval_runtime": 2.9218,
-       "eval_samples_per_second": 1835.142,
-       "eval_spearmanr": 0.3358911230016707,
-       "eval_steps_per_second": 7.187,
-       "step": 2
-     },
-     {
-       "epoch": 1.68,
-       "learning_rate": 1.9200000000000003e-05,
-       "loss": 0.4392,
-       "step": 4
-     },
-     {
-       "epoch": 1.68,
-       "eval_loss": 0.29363012313842773,
-       "eval_runtime": 2.904,
-       "eval_samples_per_second": 1846.417,
-       "eval_spearmanr": 0.34070735481419273,
-       "eval_steps_per_second": 7.231,
-       "step": 4
-     },
-     {
-       "epoch": 2.68,
-       "learning_rate": 1.88e-05,
-       "loss": 0.4034,
-       "step": 6
-     },
-     {
-       "epoch": 2.68,
-       "eval_loss": 0.2633129358291626,
-       "eval_runtime": 3.0574,
-       "eval_samples_per_second": 1753.755,
-       "eval_spearmanr": 0.36959161893964504,
-       "eval_steps_per_second": 6.868,
-       "step": 6
-     },
-     {
-       "epoch": 3.68,
-       "learning_rate": 1.8400000000000003e-05,
-       "loss": 0.3669,
-       "step": 8
-     },
-     {
-       "epoch": 3.68,
-       "eval_loss": 0.24365590512752533,
-       "eval_runtime": 2.9265,
-       "eval_samples_per_second": 1832.235,
-       "eval_spearmanr": 0.390325257804677,
-       "eval_steps_per_second": 7.176,
-       "step": 8
-     },
-     {
-       "epoch": 4.68,
-       "learning_rate": 1.8e-05,
-       "loss": 0.3496,
-       "step": 10
-     },
-     {
-       "epoch": 4.68,
-       "eval_loss": 0.23773197829723358,
-       "eval_runtime": 2.9224,
-       "eval_samples_per_second": 1834.793,
-       "eval_spearmanr": 0.41020718084494817,
-       "eval_steps_per_second": 7.186,
-       "step": 10
-     },
-     {
-       "epoch": 5.68,
-       "learning_rate": 1.76e-05,
-       "loss": 0.3351,
-       "step": 12
-     },
-     {
-       "epoch": 5.68,
-       "eval_loss": 0.22854185104370117,
-       "eval_runtime": 3.4499,
-       "eval_samples_per_second": 1554.251,
-       "eval_spearmanr": 0.42040641918816646,
-       "eval_steps_per_second": 6.087,
-       "step": 12
-     },
-     {
-       "epoch": 6.68,
-       "learning_rate": 1.72e-05,
-       "loss": 0.3289,
-       "step": 14
-     },
-     {
-       "epoch": 6.68,
-       "eval_loss": 0.2267082780599594,
-       "eval_runtime": 3.0431,
-       "eval_samples_per_second": 1762.044,
-       "eval_spearmanr": 0.41796639021828647,
-       "eval_steps_per_second": 6.901,
-       "step": 14
-     },
-     {
-       "epoch": 7.68,
-       "learning_rate": 1.6800000000000002e-05,
-       "loss": 0.3267,
-       "step": 16
-     },
-     {
-       "epoch": 7.68,
-       "eval_loss": 0.22581231594085693,
-       "eval_runtime": 3.1089,
-       "eval_samples_per_second": 1724.753,
-       "eval_spearmanr": 0.4242454004002921,
-       "eval_steps_per_second": 6.755,
-       "step": 16
-     },
-     {
-       "epoch": 8.68,
-       "learning_rate": 1.64e-05,
-       "loss": 0.3177,
-       "step": 18
-     },
-     {
-       "epoch": 8.68,
-       "eval_loss": 0.2206145077943802,
-       "eval_runtime": 2.9131,
-       "eval_samples_per_second": 1840.623,
-       "eval_spearmanr": 0.42951271566156707,
-       "eval_steps_per_second": 7.209,
-       "step": 18
-     },
-     {
-       "epoch": 9.68,
-       "learning_rate": 1.6000000000000003e-05,
-       "loss": 0.3116,
-       "step": 20
-     },
-     {
-       "epoch": 9.68,
-       "eval_loss": 0.21502549946308136,
-       "eval_runtime": 2.8807,
-       "eval_samples_per_second": 1861.38,
-       "eval_spearmanr": 0.43652182698224073,
-       "eval_steps_per_second": 7.29,
-       "step": 20
-     },
-     {
-       "epoch": 10.68,
-       "learning_rate": 1.5600000000000003e-05,
-       "loss": 0.3039,
-       "step": 22
-     },
-     {
-       "epoch": 10.68,
-       "eval_loss": 0.21152722835540771,
-       "eval_runtime": 2.8687,
-       "eval_samples_per_second": 1869.16,
-       "eval_spearmanr": 0.4364973861642812,
-       "eval_steps_per_second": 7.32,
-       "step": 22
-     },
-     {
-       "epoch": 11.68,
-       "learning_rate": 1.5200000000000002e-05,
-       "loss": 0.2985,
-       "step": 24
-     },
-     {
-       "epoch": 11.68,
-       "eval_loss": 0.20623379945755005,
-       "eval_runtime": 2.9943,
-       "eval_samples_per_second": 1790.717,
-       "eval_spearmanr": 0.4469415528786085,
-       "eval_steps_per_second": 7.013,
-       "step": 24
-     },
-     {
-       "epoch": 12.68,
-       "learning_rate": 1.48e-05,
-       "loss": 0.2927,
-       "step": 26
-     },
-     {
-       "epoch": 12.68,
-       "eval_loss": 0.20452716946601868,
-       "eval_runtime": 3.0404,
-       "eval_samples_per_second": 1763.602,
-       "eval_spearmanr": 0.45307896545734583,
-       "eval_steps_per_second": 6.907,
-       "step": 26
-     },
-     {
-       "epoch": 13.68,
-       "learning_rate": 1.4400000000000001e-05,
-       "loss": 0.2885,
-       "step": 28
-     },
-     {
-       "epoch": 13.68,
-       "eval_loss": 0.2004762440919876,
-       "eval_runtime": 2.8984,
-       "eval_samples_per_second": 1849.957,
-       "eval_spearmanr": 0.4602718574665676,
-       "eval_steps_per_second": 7.245,
-       "step": 28
-     },
-     {
-       "epoch": 14.68,
-       "learning_rate": 1.4e-05,
-       "loss": 0.2838,
-       "step": 30
-     },
-     {
-       "epoch": 14.68,
-       "eval_loss": 0.1986912339925766,
-       "eval_runtime": 3.0267,
-       "eval_samples_per_second": 1771.545,
-       "eval_spearmanr": 0.46902799622360686,
-       "eval_steps_per_second": 6.938,
-       "step": 30
-     },
-     {
-       "epoch": 15.68,
-       "learning_rate": 1.3600000000000002e-05,
-       "loss": 0.2806,
-       "step": 32
-     },
-     {
-       "epoch": 15.68,
-       "eval_loss": 0.19752489030361176,
-       "eval_runtime": 2.9976,
-       "eval_samples_per_second": 1788.761,
-       "eval_spearmanr": 0.47439420407286187,
-       "eval_steps_per_second": 7.006,
-       "step": 32
-     },
-     {
-       "epoch": 16.68,
-       "learning_rate": 1.3200000000000002e-05,
-       "loss": 0.2772,
-       "step": 34
-     },
-     {
-       "epoch": 16.68,
-       "eval_loss": 0.19701921939849854,
-       "eval_runtime": 2.9895,
-       "eval_samples_per_second": 1793.634,
-       "eval_spearmanr": 0.47652315821607377,
-       "eval_steps_per_second": 7.025,
-       "step": 34
-     },
-     {
-       "epoch": 17.68,
-       "learning_rate": 1.2800000000000001e-05,
-       "loss": 0.2728,
-       "step": 36
-     },
-     {
-       "epoch": 17.68,
-       "eval_loss": 0.19386564195156097,
-       "eval_runtime": 3.2955,
-       "eval_samples_per_second": 1627.048,
-       "eval_spearmanr": 0.48447898122969435,
-       "eval_steps_per_second": 6.372,
-       "step": 36
-     },
-     {
-       "epoch": 18.68,
-       "learning_rate": 1.2400000000000002e-05,
-       "loss": 0.2684,
-       "step": 38
-     },
-     {
-       "epoch": 18.68,
-       "eval_loss": 0.19311943650245667,
-       "eval_runtime": 2.9977,
-       "eval_samples_per_second": 1788.676,
-       "eval_spearmanr": 0.4858414916440366,
-       "eval_steps_per_second": 7.005,
-       "step": 38
-     },
-     {
-       "epoch": 19.68,
-       "learning_rate": 1.2e-05,
-       "loss": 0.2641,
-       "step": 40
-     },
-     {
-       "epoch": 19.68,
-       "eval_loss": 0.19249317049980164,
-       "eval_runtime": 3.1202,
-       "eval_samples_per_second": 1718.506,
-       "eval_spearmanr": 0.4935711457073984,
-       "eval_steps_per_second": 6.73,
-       "step": 40
-     },
-     {
-       "epoch": 20.68,
-       "learning_rate": 1.16e-05,
-       "loss": 0.2608,
-       "step": 42
-     },
-     {
-       "epoch": 20.68,
-       "eval_loss": 0.19051019847393036,
-       "eval_runtime": 3.0142,
-       "eval_samples_per_second": 1778.905,
-       "eval_spearmanr": 0.4929029010849903,
-       "eval_steps_per_second": 6.967,
-       "step": 42
-     },
-     {
-       "epoch": 21.68,
-       "learning_rate": 1.1200000000000001e-05,
-       "loss": 0.2566,
-       "step": 44
-     },
-     {
-       "epoch": 21.68,
-       "eval_loss": 0.18856459856033325,
-       "eval_runtime": 2.9879,
-       "eval_samples_per_second": 1794.551,
-       "eval_spearmanr": 0.5048595718357143,
-       "eval_steps_per_second": 7.028,
-       "step": 44
-     },
-     {
-       "epoch": 22.68,
-       "learning_rate": 1.0800000000000002e-05,
-       "loss": 0.2518,
-       "step": 46
-     },
-     {
-       "epoch": 22.68,
-       "eval_loss": 0.18753479421138763,
-       "eval_runtime": 3.2412,
-       "eval_samples_per_second": 1654.33,
-       "eval_spearmanr": 0.5095133606919371,
-       "eval_steps_per_second": 6.479,
-       "step": 46
-     },
-     {
-       "epoch": 23.68,
-       "learning_rate": 1.04e-05,
-       "loss": 0.2467,
-       "step": 48
-     },
-     {
-       "epoch": 23.68,
-       "eval_loss": 0.18694807589054108,
-       "eval_runtime": 3.6439,
-       "eval_samples_per_second": 1471.507,
-       "eval_spearmanr": 0.5140557788640083,
-       "eval_steps_per_second": 5.763,
-       "step": 48
-     },
-     {
-       "epoch": 24.68,
-       "learning_rate": 1e-05,
-       "loss": 0.2424,
-       "step": 50
-     },
-     {
-       "epoch": 24.68,
-       "eval_loss": 0.18594887852668762,
-       "eval_runtime": 3.0425,
-       "eval_samples_per_second": 1762.352,
-       "eval_spearmanr": 0.5161498654457386,
-       "eval_steps_per_second": 6.902,
-       "step": 50
-     },
-     {
-       "epoch": 25.68,
-       "learning_rate": 9.600000000000001e-06,
-       "loss": 0.2375,
-       "step": 52
-     },
-     {
-       "epoch": 25.68,
-       "eval_loss": 0.1849762201309204,
-       "eval_runtime": 2.8745,
-       "eval_samples_per_second": 1865.365,
-       "eval_spearmanr": 0.5223331654258908,
-       "eval_steps_per_second": 7.306,
-       "step": 52
-     },
-     {
-       "epoch": 26.68,
-       "learning_rate": 9.200000000000002e-06,
-       "loss": 0.2329,
-       "step": 54
-     },
-     {
-       "epoch": 26.68,
-       "eval_loss": 0.18514755368232727,
-       "eval_runtime": 2.8748,
-       "eval_samples_per_second": 1865.157,
-       "eval_spearmanr": 0.5210178115492534,
-       "eval_steps_per_second": 7.305,
-       "step": 54
-     },
-     {
-       "epoch": 27.68,
-       "learning_rate": 8.8e-06,
-       "loss": 0.2279,
-       "step": 56
-     },
-     {
-       "epoch": 27.68,
-       "eval_loss": 0.18495012819766998,
-       "eval_runtime": 2.9179,
-       "eval_samples_per_second": 1837.612,
-       "eval_spearmanr": 0.5294199372441676,
-       "eval_steps_per_second": 7.197,
-       "step": 56
-     },
-     {
-       "epoch": 28.68,
-       "learning_rate": 8.400000000000001e-06,
-       "loss": 0.2226,
-       "step": 58
-     },
-     {
-       "epoch": 28.68,
-       "eval_loss": 0.18365982174873352,
-       "eval_runtime": 2.8753,
-       "eval_samples_per_second": 1864.829,
-       "eval_spearmanr": 0.5309868099654046,
-       "eval_steps_per_second": 7.304,
-       "step": 58
-     }
-   ],
-   "max_steps": 100,
-   "num_train_epochs": 50,
-   "total_flos": 1.6543891233169373e+17,
-   "trial_name": null,
-   "trial_params": null
- }
 
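A short sketch (not part of the commit) of how a trainer_state.json like the one deleted above can be inspected, for example to recover the best Spearman correlation reached during training; the file path is illustrative and only the Python standard library is assumed.

# Sketch: pull the eval metrics out of a trainer_state.json with the layout shown above.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

evals = [e for e in state["log_history"] if "eval_spearmanr" in e]
best = max(evals, key=lambda e: e["eval_spearmanr"])
print(f"best_metric (eval_loss): {state['best_metric']}")
print(f"best eval_spearmanr: {best['eval_spearmanr']:.4f} at step {best['step']}")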
last-checkpoint/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:6e615ca7b54e69a7dd39cd3cf92a7ee56fe4d94a86e23ee9f3a0e5c9429720f0
- size 3247
 
last-checkpoint/vocab.txt DELETED
@@ -1,30 +0,0 @@
- [PAD]
- [UNK]
- [CLS]
- [SEP]
- [MASK]
- L
- A
- G
- V
- E
- S
- I
- K
- R
- D
- T
- P
- N
- Q
- F
- Y
- M
- H
- C
- W
- X
- U
- B
- Z
- O
 
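The 30-line vocab.txt above matches vocab_size: 30 in the deleted config: 5 special tokens followed by 25 residue symbols (the 20 standard amino acids plus the extended codes X, U, B, Z and O). A tiny sketch (not part of the commit) of rebuilding the token-to-id map, with an illustrative path:

# Sketch: vocab.txt assigns ids by line order, matching the WordPiece vocab in tokenizer.json.
with open("last-checkpoint/vocab.txt") as f:
    vocab = {token.strip(): idx for idx, token in enumerate(f) if token.strip()}

assert vocab["[PAD]"] == 0 and vocab["L"] == 5 and vocab["O"] == 29
print(len(vocab))  # 30, matching vocab_size in config.json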
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6e615ca7b54e69a7dd39cd3cf92a7ee56fe4d94a86e23ee9f3a0e5c9429720f0
+ oid sha256:b91fae7104f8fef1b3bee3166d71b11dbfa4de40dec2f545c0bf43c14bf4bcf8
  size 3247
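The blobs in this commit are Git LFS pointer files (version / oid / size); the updated training_args.bin swaps only the sha256 oid while the size stays 3247 bytes. A sketch (not part of the commit) of checking a fetched blob against its pointer, assuming the real binary has been pulled (e.g. with `git lfs pull`) and using an illustrative path:

# Sketch: verify that a fetched LFS object matches the sha256 oid recorded in its pointer.
import hashlib

EXPECTED_OID = "b91fae7104f8fef1b3bee3166d71b11dbfa4de40dec2f545c0bf43c14bf4bcf8"

h = hashlib.sha256()
with open("training_args.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

print("match" if h.hexdigest() == EXPECTED_OID else "mismatch")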