Heaplax committed
Commit 3d2efdc · verified · 1 Parent(s): b43df00

Upload folder using huggingface_hub

checkpoint-120/adapter_model/README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+ ### Framework versions
+
+
+ - PEFT 0.4.0
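The adapter card above records only the PEFT version. As context, here is a minimal sketch of how a LoRA adapter saved in this layout is typically attached to its base model with peft 0.4.x. The base-model path is an assumption (the path in the adapter_config.json below is specific to the original training machine), so substitute a local copy of the VILA1.5-3b language model.

```python
# Hypothetical usage sketch: attach the saved LoRA adapter to its base LLM.
# base_path is a placeholder; adapter_path points at the folder added in this commit.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base_path = "/path/to/VILA1.5-3b/llm"                      # assumption: local base-model copy
adapter_path = "checkpoint-120/adapter_model/lora_default"  # contains adapter_config.json + adapter_model.bin

base = AutoModelForCausalLM.from_pretrained(base_path, torch_dtype="auto")
model = PeftModel.from_pretrained(base, adapter_path)       # loads the LoRA weights on top of the base model
model.eval()
```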
checkpoint-120/adapter_model/lora_default/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "/nobackup/users/zfchen/cdl/LLaVA-RLHF/VILA1.5-3b/llm",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 16,
+   "lora_dropout": 0.0,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 64,
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "k_proj",
+     "v_proj",
+     "o_proj",
+     "gate_proj",
+     "up_proj",
+     "down_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
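For reference, a minimal sketch of the peft LoraConfig that corresponds to the values above (rank 64, alpha 16, no dropout, all attention and MLP projections targeted). This reconstructs the saved config for illustration; it is not the original training script.

```python
# Hypothetical reconstruction of the LoRA configuration recorded in adapter_config.json.
from peft import LoraConfig

lora_config = LoraConfig(
    r=64,              # LoRA rank
    lora_alpha=16,     # scaling factor
    lora_dropout=0.0,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",  # attention projections
        "gate_proj", "up_proj", "down_proj",     # MLP projections
    ],
)
```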
checkpoint-120/adapter_model/lora_default/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:091c4020c80e9d9fc315c44e8bf529be50c3430db06b6f8c68428f5a7c3ada74
+ size 224393877
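This entry is a Git LFS pointer rather than the weights themselves. After fetching the real file (e.g. with `git lfs pull`), it can be checked against the recorded oid; a small sketch, with the path assumed relative to the repo root:

```python
# Verify a Git LFS-tracked file against the sha256 recorded in its pointer.
import hashlib

path = "checkpoint-120/adapter_model/lora_default/adapter_model.bin"
expected = "091c4020c80e9d9fc315c44e8bf529be50c3430db06b6f8c68428f5a7c3ada74"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == expected, "checksum mismatch"
print("adapter_model.bin OK:", h.hexdigest())
```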
checkpoint-120/config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "architectures": [
+     "RewardModel"
+   ],
+   "backbone_model_name_or_path": "/nobackup/users/zfchen/cdl/LLaVA-RLHF/VILA1.5-3b/llm",
+   "mm_use_im_start_end": false,
+   "model_type": "reward_model",
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.30.2"
+ }
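The model_type "reward_model" is a custom architecture from the LLaVA-RLHF codebase, not a stock transformers class, so it is not loadable with AutoModel out of the box. A minimal sketch for inspecting the config programmatically (path assumed relative to the repo root):

```python
# Inspect the custom reward-model config; plain json is the safest way to read it
# since "reward_model" is not registered with transformers' auto classes.
import json

with open("checkpoint-120/config.json") as f:
    cfg = json.load(f)

print(cfg["architectures"])                # ["RewardModel"]
print(cfg["backbone_model_name_or_path"])  # machine-local path to the VILA1.5-3b LLM backbone
print(cfg["torch_dtype"])                  # "bfloat16"
```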
checkpoint-120/reward_head ADDED
Binary file (11.3 kB).
 
checkpoint-120/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9108380eb8e76c1064593403bcea3798532ffc999f60aa83d77f1986c3111852
+ size 21687
checkpoint-120/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23ccbfcfaa29f8718318d2ca568c15317eeb460c340c72d82fecc183d56d17ba
+ size 21687
checkpoint-120/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02dd6b8354207613ffddba9ad82b40cd300d39a17a5b5bc7461d9a0e9dbbfb1e
+ size 21687
checkpoint-120/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28db48b3220dbbb391220d436cc3aa0a9dc31b7f44552c0ab3cb87b13905d62a
+ size 21687
checkpoint-120/rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b76df961261b4c1808f6de8fd12ddfb9adaa71836e4192a0e5e0588b4d5011f
+ size 21687
checkpoint-120/rng_state_5.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fcaabcf235f388013089a0731f3185918b9bcf3efc3be8cafd3048d57e5229e2
+ size 21687
checkpoint-120/rng_state_6.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc00b4d49ee413bc73f995a9ce3c0ecd7fd6c1628c392094668f46d0635029d8
+ size 21687
checkpoint-120/rng_state_7.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b8b14a8a29425691a14fc71d787608a6554eded1f16c908c18ada686cc97ec42
+ size 21687
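The eight rng_state_*.pth files are the per-process random-number-generator snapshots the Hugging Face Trainer writes for an 8-GPU run, so that data shuffling and dropout can be resumed deterministically. A minimal sketch for peeking at one of them after the LFS objects have been fetched; the exact keys depend on the transformers version used for training:

```python
# Peek at one per-rank RNG snapshot saved by the Trainer (rank 0 here).
import torch

# weights_only=False is needed on newer torch, since the snapshot is a plain
# pickled dict, not a tensor-only file; it is a trusted local checkpoint file.
state = torch.load("checkpoint-120/rng_state_0.pth", map_location="cpu", weights_only=False)
print(type(state), list(state.keys()) if isinstance(state, dict) else state)
```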
checkpoint-120/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f735eb293f032dfeb9e458f35501d617ae5cf159a29a9b6ac465d72d0667b44e
+ size 627
checkpoint-120/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<unk>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
checkpoint-120/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7aedb3582ecda9fa99ee9242c17a9658f6744db083ee6ebdc8fb14857f84d220
+ size 499723
checkpoint-120/tokenizer_config.json ADDED
@@ -0,0 +1,72 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": true,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": false,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "legacy": false,
+   "model_max_length": 4096,
+   "pad_token": {
+     "__type": "AddedToken",
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "padding_side": "left",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "truncation_side": "right",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "use_default_system_prompt": false
+ }
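The checkpoint ships a complete SentencePiece tokenizer (tokenizer.model plus the two JSON files above), so it can be loaded directly from the checkpoint directory. A minimal sketch, assuming the files have been fetched via git lfs:

```python
# Load the bundled Llama tokenizer and confirm the key settings recorded above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("checkpoint-120", use_fast=False)
print(tok.__class__.__name__)   # LlamaTokenizer
print(tok.model_max_length)     # 4096
print(tok.padding_side)         # "left"
print(tok.pad_token)            # "<unk>"
```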
checkpoint-120/trainer_state.json ADDED
@@ -0,0 +1,442 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 4.304932735426009,
+   "global_step": 120,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.07,
+       "learning_rate": 3.7037037037037036e-07,
+       "loss": 0.6848,
+       "step": 2
+     },
+     {
+       "epoch": 0.14,
+       "learning_rate": 7.407407407407407e-07,
+       "loss": 0.6805,
+       "step": 4
+     },
+     {
+       "epoch": 0.22,
+       "learning_rate": 1.111111111111111e-06,
+       "loss": 0.6842,
+       "step": 6
+     },
+     {
+       "epoch": 0.29,
+       "learning_rate": 1.4814814814814815e-06,
+       "loss": 0.6893,
+       "step": 8
+     },
+     {
+       "epoch": 0.36,
+       "learning_rate": 1.8518518518518519e-06,
+       "loss": 0.6787,
+       "step": 10
+     },
+     {
+       "epoch": 0.43,
+       "learning_rate": 2.222222222222222e-06,
+       "loss": 0.6789,
+       "step": 12
+     },
+     {
+       "epoch": 0.5,
+       "learning_rate": 2.5925925925925925e-06,
+       "loss": 0.685,
+       "step": 14
+     },
+     {
+       "epoch": 0.57,
+       "learning_rate": 2.962962962962963e-06,
+       "loss": 0.6823,
+       "step": 16
+     },
+     {
+       "epoch": 0.65,
+       "learning_rate": 3.3333333333333333e-06,
+       "loss": 0.6836,
+       "step": 18
+     },
+     {
+       "epoch": 0.72,
+       "learning_rate": 3.7037037037037037e-06,
+       "loss": 0.6756,
+       "step": 20
+     },
+     {
+       "epoch": 0.72,
+       "eval_accuracy": 0.5419999957084656,
+       "eval_average_score": 0.5289427638053894,
+       "eval_label_positive_rate": 0.47600001096725464,
+       "eval_loss": 0.6793817281723022,
+       "eval_runtime": 8.697,
+       "eval_samples_per_second": 57.491,
+       "eval_steps_per_second": 7.244,
+       "step": 20
+     },
+     {
+       "epoch": 0.79,
+       "learning_rate": 4.074074074074074e-06,
+       "loss": 0.6845,
+       "step": 22
+     },
+     {
+       "epoch": 0.86,
+       "learning_rate": 4.444444444444444e-06,
+       "loss": 0.6837,
+       "step": 24
+     },
+     {
+       "epoch": 0.93,
+       "learning_rate": 4.814814814814815e-06,
+       "loss": 0.6743,
+       "step": 26
+     },
+     {
+       "epoch": 1.0,
+       "learning_rate": 5.185185185185185e-06,
+       "loss": 0.6718,
+       "step": 28
+     },
+     {
+       "epoch": 1.08,
+       "learning_rate": 5.555555555555557e-06,
+       "loss": 0.6777,
+       "step": 30
+     },
+     {
+       "epoch": 1.15,
+       "learning_rate": 5.925925925925926e-06,
+       "loss": 0.6702,
+       "step": 32
+     },
+     {
+       "epoch": 1.22,
+       "learning_rate": 6.296296296296297e-06,
+       "loss": 0.6743,
+       "step": 34
+     },
+     {
+       "epoch": 1.29,
+       "learning_rate": 6.666666666666667e-06,
+       "loss": 0.6722,
+       "step": 36
+     },
+     {
+       "epoch": 1.36,
+       "learning_rate": 7.0370370370370375e-06,
+       "loss": 0.6697,
+       "step": 38
+     },
+     {
+       "epoch": 1.43,
+       "learning_rate": 7.4074074074074075e-06,
+       "loss": 0.6654,
+       "step": 40
+     },
+     {
+       "epoch": 1.43,
+       "eval_accuracy": 0.7080000042915344,
+       "eval_average_score": 0.4319504201412201,
+       "eval_label_positive_rate": 0.47600001096725464,
+       "eval_loss": 0.6608079075813293,
+       "eval_runtime": 8.6968,
+       "eval_samples_per_second": 57.492,
+       "eval_steps_per_second": 7.244,
+       "step": 40
+     },
+     {
+       "epoch": 1.51,
+       "learning_rate": 7.77777777777778e-06,
+       "loss": 0.6572,
+       "step": 42
+     },
+     {
+       "epoch": 1.58,
+       "learning_rate": 8.148148148148148e-06,
+       "loss": 0.6621,
+       "step": 44
+     },
+     {
+       "epoch": 1.65,
+       "learning_rate": 8.518518518518519e-06,
+       "loss": 0.6575,
+       "step": 46
+     },
+     {
+       "epoch": 1.72,
+       "learning_rate": 8.888888888888888e-06,
+       "loss": 0.6455,
+       "step": 48
+     },
+     {
+       "epoch": 1.79,
+       "learning_rate": 9.25925925925926e-06,
+       "loss": 0.6367,
+       "step": 50
+     },
+     {
+       "epoch": 1.87,
+       "learning_rate": 9.62962962962963e-06,
+       "loss": 0.64,
+       "step": 52
+     },
+     {
+       "epoch": 1.94,
+       "learning_rate": 1e-05,
+       "loss": 0.6316,
+       "step": 54
+     },
+     {
+       "epoch": 2.01,
+       "learning_rate": 1e-05,
+       "loss": 0.6121,
+       "step": 56
+     },
+     {
+       "epoch": 2.08,
+       "learning_rate": 1e-05,
+       "loss": 0.6104,
+       "step": 58
+     },
+     {
+       "epoch": 2.15,
+       "learning_rate": 1e-05,
+       "loss": 0.5916,
+       "step": 60
+     },
+     {
+       "epoch": 2.15,
+       "eval_accuracy": 0.9419999718666077,
+       "eval_average_score": 0.13356663286685944,
+       "eval_label_positive_rate": 0.47600001096725464,
+       "eval_loss": 0.5838245153427124,
+       "eval_runtime": 8.6962,
+       "eval_samples_per_second": 57.496,
+       "eval_steps_per_second": 7.245,
+       "step": 60
+     },
+     {
+       "epoch": 2.22,
+       "learning_rate": 1e-05,
+       "loss": 0.5801,
+       "step": 62
+     },
+     {
+       "epoch": 2.3,
+       "learning_rate": 1e-05,
+       "loss": 0.5677,
+       "step": 64
+     },
+     {
+       "epoch": 2.37,
+       "learning_rate": 1e-05,
+       "loss": 0.5472,
+       "step": 66
+     },
+     {
+       "epoch": 2.44,
+       "learning_rate": 1e-05,
+       "loss": 0.5267,
+       "step": 68
+     },
+     {
+       "epoch": 2.51,
+       "learning_rate": 1e-05,
+       "loss": 0.5224,
+       "step": 70
+     },
+     {
+       "epoch": 2.58,
+       "learning_rate": 1e-05,
+       "loss": 0.49,
+       "step": 72
+     },
+     {
+       "epoch": 2.65,
+       "learning_rate": 1e-05,
+       "loss": 0.4607,
+       "step": 74
+     },
+     {
+       "epoch": 2.73,
+       "learning_rate": 1e-05,
+       "loss": 0.4295,
+       "step": 76
+     },
+     {
+       "epoch": 2.8,
+       "learning_rate": 1e-05,
+       "loss": 0.393,
+       "step": 78
+     },
+     {
+       "epoch": 2.87,
+       "learning_rate": 1e-05,
+       "loss": 0.3463,
+       "step": 80
+     },
+     {
+       "epoch": 2.87,
+       "eval_accuracy": 0.9800000190734863,
+       "eval_average_score": -0.13676369190216064,
+       "eval_label_positive_rate": 0.47600001096725464,
+       "eval_loss": 0.30906087160110474,
+       "eval_runtime": 8.6996,
+       "eval_samples_per_second": 57.474,
+       "eval_steps_per_second": 7.242,
+       "step": 80
+     },
+     {
+       "epoch": 2.94,
+       "learning_rate": 1e-05,
+       "loss": 0.3167,
+       "step": 82
+     },
+     {
+       "epoch": 3.01,
+       "learning_rate": 1e-05,
+       "loss": 0.2559,
+       "step": 84
+     },
+     {
+       "epoch": 3.09,
+       "learning_rate": 1e-05,
+       "loss": 0.2119,
+       "step": 86
+     },
+     {
+       "epoch": 3.16,
+       "learning_rate": 1e-05,
+       "loss": 0.1874,
+       "step": 88
+     },
+     {
+       "epoch": 3.23,
+       "learning_rate": 1e-05,
+       "loss": 0.1407,
+       "step": 90
+     },
+     {
+       "epoch": 3.3,
+       "learning_rate": 1e-05,
+       "loss": 0.1037,
+       "step": 92
+     },
+     {
+       "epoch": 3.37,
+       "learning_rate": 1e-05,
+       "loss": 0.0717,
+       "step": 94
+     },
+     {
+       "epoch": 3.44,
+       "learning_rate": 1e-05,
+       "loss": 0.0692,
+       "step": 96
+     },
+     {
+       "epoch": 3.52,
+       "learning_rate": 1e-05,
+       "loss": 0.0708,
+       "step": 98
+     },
+     {
+       "epoch": 3.59,
+       "learning_rate": 1e-05,
+       "loss": 0.0426,
+       "step": 100
+     },
+     {
+       "epoch": 3.59,
+       "eval_accuracy": 0.9879999756813049,
+       "eval_average_score": -0.7325789332389832,
+       "eval_label_positive_rate": 0.47600001096725464,
+       "eval_loss": 0.03151561692357063,
+       "eval_runtime": 8.708,
+       "eval_samples_per_second": 57.418,
+       "eval_steps_per_second": 7.235,
+       "step": 100
+     },
+     {
+       "epoch": 3.66,
+       "learning_rate": 1e-05,
+       "loss": 0.0315,
+       "step": 102
+     },
+     {
+       "epoch": 3.73,
+       "learning_rate": 1e-05,
+       "loss": 0.0227,
+       "step": 104
+     },
+     {
+       "epoch": 3.8,
+       "learning_rate": 1e-05,
+       "loss": 0.0272,
+       "step": 106
+     },
+     {
+       "epoch": 3.87,
+       "learning_rate": 1e-05,
+       "loss": 0.0203,
+       "step": 108
+     },
+     {
+       "epoch": 3.95,
+       "learning_rate": 1e-05,
+       "loss": 0.0151,
+       "step": 110
+     },
+     {
+       "epoch": 4.02,
+       "learning_rate": 1e-05,
+       "loss": 0.0199,
+       "step": 112
+     },
+     {
+       "epoch": 4.09,
+       "learning_rate": 1e-05,
+       "loss": 0.0171,
+       "step": 114
+     },
+     {
+       "epoch": 4.16,
+       "learning_rate": 1e-05,
+       "loss": 0.0094,
+       "step": 116
+     },
+     {
+       "epoch": 4.23,
+       "learning_rate": 1e-05,
+       "loss": 0.0166,
+       "step": 118
+     },
+     {
+       "epoch": 4.3,
+       "learning_rate": 1e-05,
+       "loss": 0.0079,
+       "step": 120
+     },
+     {
+       "epoch": 4.3,
+       "eval_accuracy": 1.0,
+       "eval_average_score": -1.2080471515655518,
+       "eval_label_positive_rate": 0.47600001096725464,
+       "eval_loss": 0.009560000151395798,
+       "eval_runtime": 8.7049,
+       "eval_samples_per_second": 57.439,
+       "eval_steps_per_second": 7.237,
+       "step": 120
+     }
+   ],
+   "max_steps": 270,
+   "num_train_epochs": 10,
+   "total_flos": 3.914291024788193e+17,
+   "trial_name": null,
+   "trial_params": null
+ }
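trainer_state.json records 120 optimizer steps (about 4.3 of the 10 scheduled epochs, 270 max steps); eval_accuracy climbs from about 0.54 at step 20 to 1.0 at step 120 while eval_loss falls from roughly 0.68 to 0.0096. A minimal sketch for pulling those curves out of the file, with the path assumed relative to the repo root:

```python
# Extract training-loss and eval-accuracy curves from trainer_state.json.
import json

with open("checkpoint-120/trainer_state.json") as f:
    state = json.load(f)

train_curve = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_curve = [(e["step"], e["eval_accuracy"]) for e in state["log_history"] if "eval_accuracy" in e]

print("last train loss:", train_curve[-1])  # (120, 0.0079)
print("eval accuracy:", eval_curve)         # rises from ~0.54 at step 20 to 1.0 at step 120
```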
checkpoint-120/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:57b146ab149226a1406ae4dbc206a2fc5340b734501c6a044f3f0daf846ec023
+ size 4795