chansung committed on
Commit c13374e · verified · 1 Parent(s): 27199fa

Model save

Files changed (4)
  1. README.md +5 -5
  2. all_results.json +3 -3
  3. train_results.json +3 -3
  4. trainer_state.json +83 -83
README.md CHANGED
@@ -1,13 +1,13 @@
  ---
  base_model: meta-llama/Meta-Llama-3-8B
  datasets:
- - llama-duo/synth_summarize_dataset_dedup
+ - generator
  library_name: peft
  license: llama3
  tags:
- - alignment-handbook
  - trl
  - sft
+ - alignment-handbook
  - generated_from_trainer
  model-index:
  - name: llama3.1-8b-gpt4o_100k_summarize-k
@@ -19,9 +19,9 @@ should probably proofread and complete it, then remove this comment. -->
  
  # llama3.1-8b-gpt4o_100k_summarize-k
  
- This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on the llama-duo/synth_summarize_dataset_dedup dataset.
+ This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on the generator dataset.
  It achieves the following results on the evaluation set:
- - Loss: 2.0198
+ - Loss: 2.0191
  
  ## Model description
  
@@ -58,7 +58,7 @@ The following hyperparameters were used during training:
  
  | Training Loss | Epoch | Step | Validation Loss |
  |:-------------:|:-----:|:----:|:---------------:|
- | 0.8483 | 1.0 | 195 | 2.0198 |
+ | 0.8477 | 1.0 | 195 | 2.0191 |
  
  
  ### Framework versions
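
The card above describes a PEFT (LoRA) adapter trained on top of meta-llama/Meta-Llama-3-8B. Below is a minimal usage sketch, assuming the adapter is published under a Hub repo id such as `chansung/llama3.1-8b-gpt4o_100k_summarize-k` (the exact repo path is an assumption; substitute the real one):

```python
# Minimal sketch: attach the saved PEFT adapter to the base model and generate.
# ADAPTER is an assumed repo id; point it at the actual adapter repository.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE = "meta-llama/Meta-Llama-3-8B"
ADAPTER = "chansung/llama3.1-8b-gpt4o_100k_summarize-k"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(BASE)
base = AutoModelForCausalLM.from_pretrained(BASE, torch_dtype=torch.bfloat16, device_map="auto")
model = PeftModel.from_pretrained(base, ADAPTER)

prompt = "Summarize the following text:\n"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```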
all_results.json CHANGED
@@ -6,9 +6,9 @@
  "eval_samples_per_second": 8.579,
  "eval_steps_per_second": 0.858,
  "total_flos": 5.7571825768464384e+17,
- "train_loss": 0.9919692577459873,
- "train_runtime": 2099.4843,
+ "train_loss": 0.9919446505033053,
+ "train_runtime": 2098.6313,
  "train_samples": 115376,
- "train_samples_per_second": 5.943,
+ "train_samples_per_second": 5.945,
  "train_steps_per_second": 0.093
  }
train_results.json CHANGED
@@ -1,9 +1,9 @@
  {
  "epoch": 1.0,
  "total_flos": 5.7571825768464384e+17,
- "train_loss": 0.9919692577459873,
- "train_runtime": 2099.4843,
+ "train_loss": 0.9919446505033053,
+ "train_runtime": 2098.6313,
  "train_samples": 115376,
- "train_samples_per_second": 5.943,
+ "train_samples_per_second": 5.945,
  "train_steps_per_second": 0.093
  }
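
As a quick plausibility check (not part of the commit), the updated runtime, step rate, and sample rate are mutually consistent: they imply roughly 195 optimizer steps, matching the step count in the README loss table. The implied sample count differs from `train_samples` (115376 raw rows), presumably because the SFT pipeline packs rows into fixed-length sequences; that interpretation is an assumption. A short sketch using only the values shown above:

```python
# Sanity-check the updated train_results.json figures (values copied from the diff above).
train_runtime = 2098.6313          # seconds
train_steps_per_second = 0.093
train_samples_per_second = 5.945

print(round(train_runtime * train_steps_per_second))    # ~195 optimizer steps
print(round(train_runtime * train_samples_per_second))  # ~12476 processed (packed) sequences
```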
trainer_state.json CHANGED
@@ -10,299 +10,299 @@
  "log_history": [
  {
  "epoch": 0.005128205128205128,
- "grad_norm": 0.7784392237663269,
+ "grad_norm": 0.7784475088119507,
  "learning_rate": 1.4999999999999999e-05,
  "loss": 2.2555,
  "step": 1
  },
  {
  "epoch": 0.02564102564102564,
- "grad_norm": 0.9956199526786804,
+ "grad_norm": 0.8670551180839539,
  "learning_rate": 7.5e-05,
- "loss": 2.2474,
+ "loss": 2.2468,
  "step": 5
  },
  {
  "epoch": 0.05128205128205128,
- "grad_norm": 1.2538105249404907,
+ "grad_norm": 1.2530401945114136,
  "learning_rate": 0.00015,
- "loss": 2.1364,
+ "loss": 2.1352,
  "step": 10
  },
  {
  "epoch": 0.07692307692307693,
- "grad_norm": 1.435035228729248,
+ "grad_norm": 1.460581660270691,
  "learning_rate": 0.000225,
- "loss": 1.6881,
+ "loss": 1.6865,
  "step": 15
  },
  {
  "epoch": 0.10256410256410256,
- "grad_norm": 1.9407316446304321,
+ "grad_norm": 2.0667366981506348,
  "learning_rate": 0.0003,
- "loss": 1.2552,
+ "loss": 1.2589,
  "step": 20
  },
  {
  "epoch": 0.1282051282051282,
- "grad_norm": 0.6618439555168152,
+ "grad_norm": 0.6875319480895996,
  "learning_rate": 0.00029939614409928584,
- "loss": 1.1476,
+ "loss": 1.1478,
  "step": 25
  },
  {
  "epoch": 0.15384615384615385,
- "grad_norm": 0.4018121063709259,
+ "grad_norm": 0.6013126373291016,
  "learning_rate": 0.00029758943828979444,
- "loss": 1.0741,
+ "loss": 1.076,
  "step": 30
  },
  {
  "epoch": 0.1794871794871795,
- "grad_norm": 0.3278277814388275,
+ "grad_norm": 0.31481531262397766,
  "learning_rate": 0.00029459442910437797,
- "loss": 1.0159,
+ "loss": 1.0179,
  "step": 35
  },
  {
  "epoch": 0.20512820512820512,
- "grad_norm": 0.2918931245803833,
+ "grad_norm": 0.3045659065246582,
  "learning_rate": 0.00029043523059596053,
- "loss": 0.9818,
+ "loss": 0.9824,
  "step": 40
  },
  {
  "epoch": 0.23076923076923078,
- "grad_norm": 0.21536950767040253,
+ "grad_norm": 0.2231951653957367,
  "learning_rate": 0.0002851453301853628,
- "loss": 0.96,
+ "loss": 0.9611,
  "step": 45
  },
  {
  "epoch": 0.2564102564102564,
- "grad_norm": 0.187772735953331,
+ "grad_norm": 0.18813718855381012,
  "learning_rate": 0.0002787673190402799,
- "loss": 0.9458,
+ "loss": 0.9461,
  "step": 50
  },
  {
  "epoch": 0.28205128205128205,
- "grad_norm": 0.2083010971546173,
+ "grad_norm": 0.21799354255199432,
  "learning_rate": 0.0002713525491562421,
- "loss": 0.9253,
+ "loss": 0.9254,
  "step": 55
  },
  {
  "epoch": 0.3076923076923077,
- "grad_norm": 0.2108624428510666,
+ "grad_norm": 0.21888460218906403,
  "learning_rate": 0.00026296071990054165,
- "loss": 0.9218,
+ "loss": 0.9223,
  "step": 60
  },
  {
  "epoch": 0.3333333333333333,
- "grad_norm": 0.18083609640598297,
+ "grad_norm": 0.18513959646224976,
  "learning_rate": 0.0002536593973480297,
  "loss": 0.9083,
  "step": 65
  },
  {
  "epoch": 0.358974358974359,
- "grad_norm": 0.178299680352211,
+ "grad_norm": 0.5329850316047668,
  "learning_rate": 0.00024352347027881003,
- "loss": 0.9036,
+ "loss": 0.9041,
  "step": 70
  },
  {
  "epoch": 0.38461538461538464,
- "grad_norm": 0.19863168895244598,
+ "grad_norm": 0.19489431381225586,
  "learning_rate": 0.00023263454721781537,
  "loss": 0.9033,
  "step": 75
  },
  {
  "epoch": 0.41025641025641024,
- "grad_norm": 0.22889919579029083,
+ "grad_norm": 0.2275352030992508,
  "learning_rate": 0.0002210802993709498,
- "loss": 0.8912,
+ "loss": 0.892,
  "step": 80
  },
  {
  "epoch": 0.4358974358974359,
- "grad_norm": 0.20287582278251648,
+ "grad_norm": 0.1980154812335968,
  "learning_rate": 0.00020895375474808852,
- "loss": 0.8899,
+ "loss": 0.8903,
  "step": 85
  },
  {
  "epoch": 0.46153846153846156,
- "grad_norm": 0.21162137389183044,
+ "grad_norm": 0.20675401389598846,
  "learning_rate": 0.0001963525491562421,
- "loss": 0.8813,
+ "loss": 0.8815,
  "step": 90
  },
  {
  "epoch": 0.48717948717948717,
- "grad_norm": 0.21930070221424103,
+ "grad_norm": 0.21656563878059387,
  "learning_rate": 0.00018337814009344714,
- "loss": 0.8795,
+ "loss": 0.879,
  "step": 95
  },
  {
  "epoch": 0.5128205128205128,
- "grad_norm": 0.23387882113456726,
+ "grad_norm": 0.22672973573207855,
  "learning_rate": 0.00017013498987264832,
- "loss": 0.8794,
+ "loss": 0.879,
  "step": 100
  },
  {
  "epoch": 0.5384615384615384,
- "grad_norm": 0.19868768751621246,
+ "grad_norm": 0.2003437578678131,
  "learning_rate": 0.00015672972455257723,
- "loss": 0.8752,
+ "loss": 0.8746,
  "step": 105
  },
  {
  "epoch": 0.5641025641025641,
- "grad_norm": 0.21123158931732178,
+ "grad_norm": 0.21689733862876892,
  "learning_rate": 0.0001432702754474228,
- "loss": 0.8524,
+ "loss": 0.8522,
  "step": 110
  },
  {
  "epoch": 0.5897435897435898,
- "grad_norm": 0.25728657841682434,
+ "grad_norm": 0.23330777883529663,
  "learning_rate": 0.0001298650101273517,
- "loss": 0.8734,
+ "loss": 0.8732,
  "step": 115
  },
  {
  "epoch": 0.6153846153846154,
- "grad_norm": 0.21154211461544037,
+ "grad_norm": 0.211246058344841,
  "learning_rate": 0.00011662185990655284,
- "loss": 0.8597,
+ "loss": 0.8594,
  "step": 120
  },
  {
  "epoch": 0.6410256410256411,
- "grad_norm": 0.21001510322093964,
+ "grad_norm": 0.2110731303691864,
  "learning_rate": 0.0001036474508437579,
- "loss": 0.8606,
+ "loss": 0.8601,
  "step": 125
  },
  {
  "epoch": 0.6666666666666666,
- "grad_norm": 0.2222093790769577,
+ "grad_norm": 0.22622446715831757,
  "learning_rate": 9.104624525191145e-05,
- "loss": 0.8563,
+ "loss": 0.8555,
  "step": 130
  },
  {
  "epoch": 0.6923076923076923,
- "grad_norm": 0.19596728682518005,
+ "grad_norm": 0.19781175255775452,
  "learning_rate": 7.89197006290502e-05,
- "loss": 0.855,
+ "loss": 0.8542,
  "step": 135
  },
  {
  "epoch": 0.717948717948718,
- "grad_norm": 0.20535971224308014,
+ "grad_norm": 0.2107241153717041,
  "learning_rate": 6.736545278218463e-05,
- "loss": 0.8592,
+ "loss": 0.8587,
  "step": 140
  },
  {
  "epoch": 0.7435897435897436,
- "grad_norm": 0.20298361778259277,
+ "grad_norm": 0.19920115172863007,
  "learning_rate": 5.6476529721189974e-05,
- "loss": 0.8549,
+ "loss": 0.8544,
  "step": 145
  },
  {
  "epoch": 0.7692307692307693,
- "grad_norm": 0.19454172253608704,
+ "grad_norm": 0.19098255038261414,
  "learning_rate": 4.63406026519703e-05,
- "loss": 0.8578,
+ "loss": 0.8572,
  "step": 150
  },
  {
  "epoch": 0.7948717948717948,
- "grad_norm": 0.19015519320964813,
+ "grad_norm": 0.18926502764225006,
  "learning_rate": 3.7039280099458366e-05,
- "loss": 0.8579,
+ "loss": 0.8577,
  "step": 155
  },
  {
  "epoch": 0.8205128205128205,
- "grad_norm": 0.20558574795722961,
+ "grad_norm": 0.20493926107883453,
  "learning_rate": 2.8647450843757897e-05,
- "loss": 0.8508,
+ "loss": 0.8507,
  "step": 160
  },
  {
  "epoch": 0.8461538461538461,
- "grad_norm": 0.19159014523029327,
+ "grad_norm": 0.18808531761169434,
  "learning_rate": 2.1232680959720082e-05,
  "loss": 0.8415,
  "step": 165
  },
  {
  "epoch": 0.8717948717948718,
- "grad_norm": 0.17908993363380432,
+ "grad_norm": 0.18048973381519318,
  "learning_rate": 1.4854669814637143e-05,
- "loss": 0.8514,
+ "loss": 0.8506,
  "step": 170
  },
  {
  "epoch": 0.8974358974358975,
- "grad_norm": 0.18227402865886688,
+ "grad_norm": 0.18150238692760468,
  "learning_rate": 9.564769404039419e-06,
- "loss": 0.8503,
+ "loss": 0.8499,
  "step": 175
  },
  {
  "epoch": 0.9230769230769231,
- "grad_norm": 0.2028588354587555,
+ "grad_norm": 0.20749987661838531,
  "learning_rate": 5.405570895622013e-06,
- "loss": 0.8449,
+ "loss": 0.8447,
  "step": 180
  },
  {
  "epoch": 0.9487179487179487,
- "grad_norm": 0.19046302139759064,
+ "grad_norm": 0.19975171983242035,
  "learning_rate": 2.4105617102055496e-06,
- "loss": 0.8456,
+ "loss": 0.8446,
  "step": 185
  },
  {
  "epoch": 0.9743589743589743,
- "grad_norm": 0.18208374083042145,
+ "grad_norm": 0.19153565168380737,
  "learning_rate": 6.038559007141397e-07,
- "loss": 0.854,
+ "loss": 0.8535,
  "step": 190
  },
  {
  "epoch": 1.0,
- "grad_norm": 0.21627038717269897,
+ "grad_norm": 0.21618981659412384,
  "learning_rate": 0.0,
- "loss": 0.8483,
+ "loss": 0.8477,
  "step": 195
  },
  {
  "epoch": 1.0,
- "eval_loss": 2.0198426246643066,
- "eval_runtime": 1.151,
- "eval_samples_per_second": 8.688,
- "eval_steps_per_second": 0.869,
+ "eval_loss": 2.0191240310668945,
+ "eval_runtime": 1.1347,
+ "eval_samples_per_second": 8.813,
+ "eval_steps_per_second": 0.881,
  "step": 195
  },
  {
  "epoch": 1.0,
  "step": 195,
  "total_flos": 5.7571825768464384e+17,
- "train_loss": 0.9919692577459873,
- "train_runtime": 2099.4843,
- "train_samples_per_second": 5.943,
+ "train_loss": 0.9919446505033053,
+ "train_runtime": 2098.6313,
+ "train_samples_per_second": 5.945,
  "train_steps_per_second": 0.093
  }
  ],
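
For reference, the `log_history` array shown in this diff can be reduced to a training-loss curve and the final eval metrics with a few lines of Python. A sketch, assuming a local copy of the updated trainer_state.json:

```python
import json

# Path is an assumption; point it at a local copy of the updated trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step training losses (entries carrying "loss") and the evaluation entry.
train_curve = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_entry = next(e for e in state["log_history"] if "eval_loss" in e)

print(train_curve[:3])          # [(1, 2.2555), (5, 2.2468), (10, 2.1352)]
print(eval_entry["eval_loss"])  # 2.0191240310668945
```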