chansung committed
Commit a812c8c · verified · 1 parent: 94b20fc

Model save

Files changed (4)
  1. README.md +69 -0
  2. all_results.json +9 -0
  3. train_results.json +9 -0
  4. trainer_state.json +414 -0
README.md ADDED
@@ -0,0 +1,69 @@
+ ---
+ base_model: meta-llama/Meta-Llama-3-8B
+ datasets:
+ - generator
+ library_name: peft
+ license: llama3
+ tags:
+ - trl
+ - sft
+ - generated_from_trainer
+ model-index:
+ - name: llama3.1-8b-gpt4o_100k_closedqa-lora
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # llama3.1-8b-gpt4o_100k_closedqa-lora
+
+ This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on the generator dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.9340
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0003
+ - train_batch_size: 4
+ - eval_batch_size: 4
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 64
+ - total_eval_batch_size: 32
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 0.8262 | 1.0 | 256 | 1.9340 |
+
+
+ ### Framework versions
+
+ - PEFT 0.13.0
+ - Transformers 4.45.1
+ - Pytorch 2.4.1+cu121
+ - Datasets 3.0.1
+ - Tokenizers 0.20.0
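
The auto-generated card stops at the framework versions and has no usage section. As a reference note (not part of the commit), a PEFT LoRA adapter like this one is typically attached on top of its base model at load time. A minimal sketch, assuming the adapter is published as `chansung/llama3.1-8b-gpt4o_100k_closedqa-lora` (a guess from the committer and model name; substitute the actual repo id):

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "meta-llama/Meta-Llama-3-8B"
adapter_id = "chansung/llama3.1-8b-gpt4o_100k_closedqa-lora"  # hypothetical repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(
    base_id, torch_dtype=torch.bfloat16, device_map="auto"
)
# Attach the LoRA weights to the frozen base model.
model = PeftModel.from_pretrained(base, adapter_id)

prompt = "Answer the question using only the given context.\n\nContext: ...\nQuestion: ..."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```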
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 1.0,
+     "total_flos": 7.558142991332803e+17,
+     "train_loss": 0.9300689545925707,
+     "train_runtime": 2745.9778,
+     "train_samples": 111440,
+     "train_samples_per_second": 5.956,
+     "train_steps_per_second": 0.093
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 1.0,
+     "total_flos": 7.558142991332803e+17,
+     "train_loss": 0.9300689545925707,
+     "train_runtime": 2745.9778,
+     "train_samples": 111440,
+     "train_samples_per_second": 5.956,
+     "train_steps_per_second": 0.093
+ }
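
Both results files report the same run. One detail worth checking: `train_samples` (111440) counts raw dataset rows before the SFT sequence packing step, so it does not reproduce the throughput figures; the optimizer-level numbers do. A quick arithmetic check, using the batch-size figures from the README above:

```python
# Cross-check the reported throughput from the other fields in this file.
steps = 256          # global_step from trainer_state.json
batch = 64           # total_train_batch_size: 4 per device x 8 GPUs x 2 accumulation
runtime = 2745.9778  # train_runtime in seconds

print(steps / runtime)          # ~0.0932 -> matches "train_steps_per_second": 0.093
print(steps * batch / runtime)  # ~5.97   -> close to "train_samples_per_second": 5.956
                                # (small gap: the final batch is partial)
```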
trainer_state.json ADDED
@@ -0,0 +1,414 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.0,
+   "eval_steps": 500,
+   "global_step": 256,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.00390625,
+       "grad_norm": 1.2192516326904297,
+       "learning_rate": 1.1538461538461538e-05,
+       "loss": 1.9628,
+       "step": 1
+     },
+     {
+       "epoch": 0.01953125,
+       "grad_norm": 1.938663125038147,
+       "learning_rate": 5.769230769230769e-05,
+       "loss": 1.9381,
+       "step": 5
+     },
+     {
+       "epoch": 0.0390625,
+       "grad_norm": 0.9491540193557739,
+       "learning_rate": 0.00011538461538461538,
+       "loss": 1.8744,
+       "step": 10
+     },
+     {
+       "epoch": 0.05859375,
+       "grad_norm": 1.3879050016403198,
+       "learning_rate": 0.00017307692307692304,
+       "loss": 1.6648,
+       "step": 15
+     },
+     {
+       "epoch": 0.078125,
+       "grad_norm": 1.1502411365509033,
+       "learning_rate": 0.00023076923076923076,
+       "loss": 1.2614,
+       "step": 20
+     },
+     {
+       "epoch": 0.09765625,
+       "grad_norm": 0.38527828454971313,
+       "learning_rate": 0.00028846153846153843,
+       "loss": 1.1411,
+       "step": 25
+     },
+     {
+       "epoch": 0.1171875,
+       "grad_norm": 0.4007408022880554,
+       "learning_rate": 0.00029977617052242417,
+       "loss": 1.0619,
+       "step": 30
+     },
+     {
+       "epoch": 0.13671875,
+       "grad_norm": 0.3242458403110504,
+       "learning_rate": 0.0002988680080036802,
+       "loss": 1.0099,
+       "step": 35
+     },
+     {
+       "epoch": 0.15625,
+       "grad_norm": 0.3080570697784424,
+       "learning_rate": 0.00029726575411133377,
+       "loss": 0.9607,
+       "step": 40
+     },
+     {
+       "epoch": 0.17578125,
+       "grad_norm": 0.2255689948797226,
+       "learning_rate": 0.0002949768792926617,
+       "loss": 0.9286,
+       "step": 45
+     },
+     {
+       "epoch": 0.1953125,
+       "grad_norm": 0.18936823308467865,
+       "learning_rate": 0.00029201205533865653,
+       "loss": 0.9191,
+       "step": 50
+     },
+     {
+       "epoch": 0.21484375,
+       "grad_norm": 0.182436004281044,
+       "learning_rate": 0.00028838510562721075,
+       "loss": 0.9273,
+       "step": 55
+     },
+     {
+       "epoch": 0.234375,
+       "grad_norm": 0.2115677148103714,
+       "learning_rate": 0.00028411294067214764,
+       "loss": 0.9015,
+       "step": 60
+     },
+     {
+       "epoch": 0.25390625,
+       "grad_norm": 0.15843912959098816,
+       "learning_rate": 0.00027921547927859996,
+       "loss": 0.8852,
+       "step": 65
+     },
+     {
+       "epoch": 0.2734375,
+       "grad_norm": 0.1501840054988861,
+       "learning_rate": 0.0002737155556723452,
+       "loss": 0.8745,
+       "step": 70
+     },
+     {
+       "epoch": 0.29296875,
+       "grad_norm": 0.1954830437898636,
+       "learning_rate": 0.0002676388130361047,
+       "loss": 0.8919,
+       "step": 75
+     },
+     {
+       "epoch": 0.3125,
+       "grad_norm": 0.15817204117774963,
+       "learning_rate": 0.00026101358394918777,
+       "loss": 0.8734,
+       "step": 80
+     },
+     {
+       "epoch": 0.33203125,
+       "grad_norm": 0.14839860796928406,
+       "learning_rate": 0.0002538707582879288,
+       "loss": 0.8636,
+       "step": 85
+     },
+     {
+       "epoch": 0.3515625,
+       "grad_norm": 0.1690932661294937,
+       "learning_rate": 0.00024624363920282413,
+       "loss": 0.8827,
+       "step": 90
+     },
+     {
+       "epoch": 0.37109375,
+       "grad_norm": 0.1796758621931076,
+       "learning_rate": 0.00023816778784387094,
+       "loss": 0.8527,
+       "step": 95
+     },
+     {
+       "epoch": 0.390625,
+       "grad_norm": 0.1769869029521942,
+       "learning_rate": 0.0002296808575580705,
+       "loss": 0.8605,
+       "step": 100
+     },
+     {
+       "epoch": 0.41015625,
+       "grad_norm": 0.1564176380634308,
+       "learning_rate": 0.0002208224183321428,
+       "loss": 0.8553,
+       "step": 105
+     },
+     {
+       "epoch": 0.4296875,
+       "grad_norm": 0.17047332227230072,
+       "learning_rate": 0.00021163377229898225,
+       "loss": 0.845,
+       "step": 110
+     },
+     {
+       "epoch": 0.44921875,
+       "grad_norm": 0.17671431601047516,
+       "learning_rate": 0.00020215776116804833,
+       "loss": 0.8482,
+       "step": 115
+     },
+     {
+       "epoch": 0.46875,
+       "grad_norm": 0.16975820064544678,
+       "learning_rate": 0.00019243856647753948,
+       "loss": 0.852,
+       "step": 120
+     },
+     {
+       "epoch": 0.48828125,
+       "grad_norm": 0.16823889315128326,
+       "learning_rate": 0.00018252150359966712,
+       "loss": 0.85,
+       "step": 125
+     },
+     {
+       "epoch": 0.5078125,
+       "grad_norm": 0.171301007270813,
+       "learning_rate": 0.00017245281045947164,
+       "loss": 0.8467,
+       "step": 130
+     },
+     {
+       "epoch": 0.52734375,
+       "grad_norm": 0.17747431993484497,
+       "learning_rate": 0.00016227943195227197,
+       "loss": 0.8487,
+       "step": 135
+     },
+     {
+       "epoch": 0.546875,
+       "grad_norm": 0.17524904012680054,
+       "learning_rate": 0.00015204880106489262,
+       "loss": 0.8462,
+       "step": 140
+     },
+     {
+       "epoch": 0.56640625,
+       "grad_norm": 0.19776172935962677,
+       "learning_rate": 0.0001418086177211835,
+       "loss": 0.8403,
+       "step": 145
+     },
+     {
+       "epoch": 0.5859375,
+       "grad_norm": 0.21995167434215546,
+       "learning_rate": 0.00013160662638295526,
+       "loss": 0.8287,
+       "step": 150
+     },
+     {
+       "epoch": 0.60546875,
+       "grad_norm": 0.18319962918758392,
+       "learning_rate": 0.00012149039344325893,
+       "loss": 0.8337,
+       "step": 155
+     },
+     {
+       "epoch": 0.625,
+       "grad_norm": 0.22341905534267426,
+       "learning_rate": 0.00011150708544990398,
+       "loss": 0.8403,
+       "step": 160
+     },
+     {
+       "epoch": 0.64453125,
+       "grad_norm": 0.1881171315908432,
+       "learning_rate": 0.00010170324919323928,
+       "loss": 0.8369,
+       "step": 165
+     },
+     {
+       "epoch": 0.6640625,
+       "grad_norm": 0.1862991750240326,
+       "learning_rate": 9.212459468352966e-05,
+       "loss": 0.8246,
+       "step": 170
+     },
+     {
+       "epoch": 0.68359375,
+       "grad_norm": 0.18749593198299408,
+       "learning_rate": 8.281578202978773e-05,
+       "loss": 0.8247,
+       "step": 175
+     },
+     {
+       "epoch": 0.703125,
+       "grad_norm": 0.17886489629745483,
+       "learning_rate": 7.382021321372908e-05,
+       "loss": 0.8304,
+       "step": 180
+     },
+     {
+       "epoch": 0.72265625,
+       "grad_norm": 0.18453623354434967,
+       "learning_rate": 6.517982972969911e-05,
+       "loss": 0.8342,
+       "step": 185
+     },
+     {
+       "epoch": 0.7421875,
+       "grad_norm": 0.24199417233467102,
+       "learning_rate": 5.693491703406478e-05,
+       "loss": 0.8229,
+       "step": 190
+     },
+     {
+       "epoch": 0.76171875,
+       "grad_norm": 0.18054385483264923,
+       "learning_rate": 4.912391671582092e-05,
+       "loss": 0.8397,
+       "step": 195
+     },
+     {
+       "epoch": 0.78125,
+       "grad_norm": 0.1749253273010254,
+       "learning_rate": 4.178324726415664e-05,
+       "loss": 0.8263,
+       "step": 200
+     },
+     {
+       "epoch": 0.80078125,
+       "grad_norm": 0.1956775188446045,
+       "learning_rate": 3.494713426864761e-05,
+       "loss": 0.8267,
+       "step": 205
+     },
+     {
+       "epoch": 0.8203125,
+       "grad_norm": 0.18122443556785583,
+       "learning_rate": 2.8647450843757897e-05,
+       "loss": 0.8382,
+       "step": 210
+     },
+     {
+       "epoch": 0.83984375,
+       "grad_norm": 0.20789818465709686,
+       "learning_rate": 2.291356902166746e-05,
+       "loss": 0.8195,
+       "step": 215
+     },
+     {
+       "epoch": 0.859375,
+       "grad_norm": 0.1800765097141266,
+       "learning_rate": 1.7772222806299264e-05,
+       "loss": 0.8234,
+       "step": 220
+     },
+     {
+       "epoch": 0.87890625,
+       "grad_norm": 0.18312864005565643,
+       "learning_rate": 1.3247383527051985e-05,
+       "loss": 0.8279,
+       "step": 225
+     },
+     {
+       "epoch": 0.8984375,
+       "grad_norm": 0.17430664598941803,
+       "learning_rate": 9.360148073396962e-06,
+       "loss": 0.8229,
+       "step": 230
+     },
+     {
+       "epoch": 0.91796875,
+       "grad_norm": 0.19079521298408508,
+       "learning_rate": 6.128640531440515e-06,
+       "loss": 0.839,
+       "step": 235
+     },
+     {
+       "epoch": 0.9375,
+       "grad_norm": 0.18069963157176971,
+       "learning_rate": 3.5679276810683167e-06,
+       "loss": 0.8181,
+       "step": 240
+     },
+     {
+       "epoch": 0.95703125,
+       "grad_norm": 0.18298819661140442,
+       "learning_rate": 1.6899487476622898e-06,
+       "loss": 0.8298,
+       "step": 245
+     },
+     {
+       "epoch": 0.9765625,
+       "grad_norm": 0.16638913750648499,
+       "learning_rate": 5.034597359205639e-07,
+       "loss": 0.8299,
+       "step": 250
+     },
+     {
+       "epoch": 0.99609375,
+       "grad_norm": 0.1831548511981964,
+       "learning_rate": 1.3992605321688776e-08,
+       "loss": 0.8262,
+       "step": 255
+     },
+     {
+       "epoch": 1.0,
+       "eval_loss": 1.9340308904647827,
+       "eval_runtime": 1.1048,
+       "eval_samples_per_second": 5.431,
+       "eval_steps_per_second": 0.905,
+       "step": 256
+     },
+     {
+       "epoch": 1.0,
+       "step": 256,
+       "total_flos": 7.558142991332803e+17,
+       "train_loss": 0.9300689545925707,
+       "train_runtime": 2745.9778,
+       "train_samples_per_second": 5.956,
+       "train_steps_per_second": 0.093
+     }
+   ],
+   "logging_steps": 5,
+   "max_steps": 256,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 100,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 7.558142991332803e+17,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
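
The `log_history` list mixes three record shapes: per-step training logs (carrying `loss`), a single evaluation record (carrying `eval_loss`), and a final training summary. A minimal sketch, assuming a local copy of the file, for pulling the loss curves out of it:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step training logs have "loss"; the evaluation record has "eval_loss";
# the final summary entry has neither key, so it is filtered out by both.
train_log = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_log = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(train_log[:3])  # [(1, 1.9628), (5, 1.9381), (10, 1.8744)]
print(eval_log)       # [(256, 1.9340308904647827)]
```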