chansung committed on
Commit
3844a34
·
verified ·
1 Parent(s): 167dd5f

Model save

Browse files
Files changed (4) hide show
  1. README.md +7 -7
  2. all_results.json +4 -4
  3. train_results.json +4 -4
  4. trainer_state.json +88 -88
README.md CHANGED
@@ -1,27 +1,27 @@
1
  ---
2
  base_model: meta-llama/Meta-Llama-3-8B
3
  datasets:
4
- - llama-duo/synth_summarize_dataset_dedup
5
  library_name: peft
6
  license: llama3
7
  tags:
8
- - alignment-handbook
9
  - trl
10
  - sft
 
11
  - generated_from_trainer
12
  model-index:
13
- - name: llama3.1-8b-gpt4o_100k_summarize-kasa
14
  results: []
15
  ---
16
 
17
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
18
  should probably proofread and complete it, then remove this comment. -->
19
 
20
- # llama3.1-8b-gpt4o_100k_summarize-kasa
21
 
22
- This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on the llama-duo/synth_summarize_dataset_dedup dataset.
23
  It achieves the following results on the evaluation set:
24
- - Loss: 2.1303
25
 
26
  ## Model description
27
 
@@ -58,7 +58,7 @@ The following hyperparameters were used during training:
58
 
59
  | Training Loss | Epoch | Step | Validation Loss |
60
  |:-------------:|:-----:|:----:|:---------------:|
61
- | 0.9593 | 1.0 | 195 | 2.1303 |
62
 
63
 
64
  ### Framework versions
 
1
  ---
2
  base_model: meta-llama/Meta-Llama-3-8B
3
  datasets:
4
+ - generator
5
  library_name: peft
6
  license: llama3
7
  tags:
 
8
  - trl
9
  - sft
10
+ - alignment-handbook
11
  - generated_from_trainer
12
  model-index:
13
+ - name: llama3.1-8b-gpt4o_100k_summarize-k
14
  results: []
15
  ---
16
 
17
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
18
  should probably proofread and complete it, then remove this comment. -->
19
 
20
+ # llama3.1-8b-gpt4o_100k_summarize-k
21
 
22
+ This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on the generator dataset.
23
  It achieves the following results on the evaluation set:
24
+ - Loss: 2.0198
25
 
26
  ## Model description
27
 
 
58
 
59
  | Training Loss | Epoch | Step | Validation Loss |
60
  |:-------------:|:-----:|:----:|:---------------:|
61
+ | 0.8483 | 1.0 | 195 | 2.0198 |
62
 
63
 
64
  ### Framework versions
all_results.json CHANGED
@@ -6,9 +6,9 @@
6
  "eval_samples_per_second": 8.549,
7
  "eval_steps_per_second": 0.855,
8
  "total_flos": 5.7571825768464384e+17,
9
- "train_loss": 1.1034836133321126,
10
- "train_runtime": 2146.0273,
11
  "train_samples": 115376,
12
- "train_samples_per_second": 5.814,
13
- "train_steps_per_second": 0.091
14
  }
 
6
  "eval_samples_per_second": 8.549,
7
  "eval_steps_per_second": 0.855,
8
  "total_flos": 5.7571825768464384e+17,
9
+ "train_loss": 0.9919692577459873,
10
+ "train_runtime": 2099.4843,
11
  "train_samples": 115376,
12
+ "train_samples_per_second": 5.943,
13
+ "train_steps_per_second": 0.093
14
  }
train_results.json CHANGED
@@ -1,9 +1,9 @@
1
  {
2
  "epoch": 1.0,
3
  "total_flos": 5.7571825768464384e+17,
4
- "train_loss": 1.1034836133321126,
5
- "train_runtime": 2146.0273,
6
  "train_samples": 115376,
7
- "train_samples_per_second": 5.814,
8
- "train_steps_per_second": 0.091
9
  }
 
1
  {
2
  "epoch": 1.0,
3
  "total_flos": 5.7571825768464384e+17,
4
+ "train_loss": 0.9919692577459873,
5
+ "train_runtime": 2099.4843,
6
  "train_samples": 115376,
7
+ "train_samples_per_second": 5.943,
8
+ "train_steps_per_second": 0.093
9
  }
trainer_state.json CHANGED
@@ -10,300 +10,300 @@
10
  "log_history": [
11
  {
12
  "epoch": 0.005128205128205128,
13
- "grad_norm": 0.7783320546150208,
14
  "learning_rate": 1.4999999999999999e-05,
15
- "loss": 2.3664,
16
  "step": 1
17
  },
18
  {
19
  "epoch": 0.02564102564102564,
20
- "grad_norm": 0.969870388507843,
21
  "learning_rate": 7.5e-05,
22
- "loss": 2.3576,
23
  "step": 5
24
  },
25
  {
26
  "epoch": 0.05128205128205128,
27
- "grad_norm": 1.2408868074417114,
28
  "learning_rate": 0.00015,
29
- "loss": 2.2471,
30
  "step": 10
31
  },
32
  {
33
  "epoch": 0.07692307692307693,
34
- "grad_norm": 1.4603853225708008,
35
  "learning_rate": 0.000225,
36
- "loss": 1.8023,
37
  "step": 15
38
  },
39
  {
40
  "epoch": 0.10256410256410256,
41
- "grad_norm": 2.0677075386047363,
42
  "learning_rate": 0.0003,
43
- "loss": 1.3707,
44
  "step": 20
45
  },
46
  {
47
  "epoch": 0.1282051282051282,
48
- "grad_norm": 0.6989911794662476,
49
  "learning_rate": 0.00029939614409928584,
50
- "loss": 1.2607,
51
  "step": 25
52
  },
53
  {
54
  "epoch": 0.15384615384615385,
55
- "grad_norm": 0.4506840705871582,
56
  "learning_rate": 0.00029758943828979444,
57
- "loss": 1.1864,
58
  "step": 30
59
  },
60
  {
61
  "epoch": 0.1794871794871795,
62
- "grad_norm": 0.322531133890152,
63
  "learning_rate": 0.00029459442910437797,
64
- "loss": 1.1286,
65
  "step": 35
66
  },
67
  {
68
  "epoch": 0.20512820512820512,
69
- "grad_norm": 0.2993707060813904,
70
  "learning_rate": 0.00029043523059596053,
71
- "loss": 1.0941,
72
  "step": 40
73
  },
74
  {
75
  "epoch": 0.23076923076923078,
76
- "grad_norm": 0.220694437623024,
77
  "learning_rate": 0.0002851453301853628,
78
- "loss": 1.0717,
79
  "step": 45
80
  },
81
  {
82
  "epoch": 0.2564102564102564,
83
- "grad_norm": 0.2211257368326187,
84
  "learning_rate": 0.0002787673190402799,
85
- "loss": 1.0575,
86
  "step": 50
87
  },
88
  {
89
  "epoch": 0.28205128205128205,
90
- "grad_norm": 0.2125224620103836,
91
  "learning_rate": 0.0002713525491562421,
92
- "loss": 1.0366,
93
  "step": 55
94
  },
95
  {
96
  "epoch": 0.3076923076923077,
97
- "grad_norm": 0.22398926317691803,
98
  "learning_rate": 0.00026296071990054165,
99
- "loss": 1.0334,
100
  "step": 60
101
  },
102
  {
103
  "epoch": 0.3333333333333333,
104
- "grad_norm": 0.1814454346895218,
105
  "learning_rate": 0.0002536593973480297,
106
- "loss": 1.0198,
107
  "step": 65
108
  },
109
  {
110
  "epoch": 0.358974358974359,
111
- "grad_norm": 0.18179959058761597,
112
  "learning_rate": 0.00024352347027881003,
113
- "loss": 1.0152,
114
  "step": 70
115
  },
116
  {
117
  "epoch": 0.38461538461538464,
118
- "grad_norm": 0.18999409675598145,
119
  "learning_rate": 0.00023263454721781537,
120
- "loss": 1.0149,
121
  "step": 75
122
  },
123
  {
124
  "epoch": 0.41025641025641024,
125
- "grad_norm": 0.21783891320228577,
126
  "learning_rate": 0.0002210802993709498,
127
- "loss": 1.0023,
128
  "step": 80
129
  },
130
  {
131
  "epoch": 0.4358974358974359,
132
- "grad_norm": 0.19977697730064392,
133
  "learning_rate": 0.00020895375474808852,
134
- "loss": 1.0016,
135
  "step": 85
136
  },
137
  {
138
  "epoch": 0.46153846153846156,
139
- "grad_norm": 0.21022814512252808,
140
  "learning_rate": 0.0001963525491562421,
141
- "loss": 0.9924,
142
  "step": 90
143
  },
144
  {
145
  "epoch": 0.48717948717948717,
146
- "grad_norm": 0.21291780471801758,
147
  "learning_rate": 0.00018337814009344714,
148
- "loss": 0.9906,
149
  "step": 95
150
  },
151
  {
152
  "epoch": 0.5128205128205128,
153
- "grad_norm": 0.2277144193649292,
154
  "learning_rate": 0.00017013498987264832,
155
- "loss": 0.9901,
156
  "step": 100
157
  },
158
  {
159
  "epoch": 0.5384615384615384,
160
- "grad_norm": 0.20027609169483185,
161
  "learning_rate": 0.00015672972455257723,
162
- "loss": 0.9862,
163
  "step": 105
164
  },
165
  {
166
  "epoch": 0.5641025641025641,
167
- "grad_norm": 0.22872835397720337,
168
  "learning_rate": 0.0001432702754474228,
169
- "loss": 0.9639,
170
  "step": 110
171
  },
172
  {
173
  "epoch": 0.5897435897435898,
174
- "grad_norm": 0.2403298318386078,
175
  "learning_rate": 0.0001298650101273517,
176
- "loss": 0.9843,
177
  "step": 115
178
  },
179
  {
180
  "epoch": 0.6153846153846154,
181
- "grad_norm": 0.21371488273143768,
182
  "learning_rate": 0.00011662185990655284,
183
- "loss": 0.9712,
184
  "step": 120
185
  },
186
  {
187
  "epoch": 0.6410256410256411,
188
- "grad_norm": 0.21134255826473236,
189
  "learning_rate": 0.0001036474508437579,
190
- "loss": 0.9712,
191
  "step": 125
192
  },
193
  {
194
  "epoch": 0.6666666666666666,
195
- "grad_norm": 0.21926414966583252,
196
  "learning_rate": 9.104624525191145e-05,
197
- "loss": 0.9669,
198
  "step": 130
199
  },
200
  {
201
  "epoch": 0.6923076923076923,
202
- "grad_norm": 0.19241148233413696,
203
  "learning_rate": 7.89197006290502e-05,
204
- "loss": 0.966,
205
  "step": 135
206
  },
207
  {
208
  "epoch": 0.717948717948718,
209
- "grad_norm": 0.20280851423740387,
210
  "learning_rate": 6.736545278218463e-05,
211
- "loss": 0.9702,
212
  "step": 140
213
  },
214
  {
215
  "epoch": 0.7435897435897436,
216
- "grad_norm": 0.1985516995191574,
217
  "learning_rate": 5.6476529721189974e-05,
218
- "loss": 0.9658,
219
  "step": 145
220
  },
221
  {
222
  "epoch": 0.7692307692307693,
223
- "grad_norm": 0.19389475882053375,
224
  "learning_rate": 4.63406026519703e-05,
225
- "loss": 0.9687,
226
  "step": 150
227
  },
228
  {
229
  "epoch": 0.7948717948717948,
230
- "grad_norm": 0.1933661550283432,
231
  "learning_rate": 3.7039280099458366e-05,
232
- "loss": 0.9695,
233
  "step": 155
234
  },
235
  {
236
  "epoch": 0.8205128205128205,
237
- "grad_norm": 0.20416520535945892,
238
  "learning_rate": 2.8647450843757897e-05,
239
- "loss": 0.9623,
240
  "step": 160
241
  },
242
  {
243
  "epoch": 0.8461538461538461,
244
- "grad_norm": 0.18719200789928436,
245
  "learning_rate": 2.1232680959720082e-05,
246
- "loss": 0.9527,
247
  "step": 165
248
  },
249
  {
250
  "epoch": 0.8717948717948718,
251
- "grad_norm": 0.18083591759204865,
252
  "learning_rate": 1.4854669814637143e-05,
253
- "loss": 0.9624,
254
  "step": 170
255
  },
256
  {
257
  "epoch": 0.8974358974358975,
258
- "grad_norm": 0.1832273304462433,
259
  "learning_rate": 9.564769404039419e-06,
260
- "loss": 0.9616,
261
  "step": 175
262
  },
263
  {
264
  "epoch": 0.9230769230769231,
265
- "grad_norm": 0.2028147280216217,
266
  "learning_rate": 5.405570895622013e-06,
267
- "loss": 0.9562,
268
  "step": 180
269
  },
270
  {
271
  "epoch": 0.9487179487179487,
272
- "grad_norm": 0.19322623312473297,
273
  "learning_rate": 2.4105617102055496e-06,
274
- "loss": 0.957,
275
  "step": 185
276
  },
277
  {
278
  "epoch": 0.9743589743589743,
279
- "grad_norm": 0.1807137131690979,
280
  "learning_rate": 6.038559007141397e-07,
281
- "loss": 0.9652,
282
  "step": 190
283
  },
284
  {
285
  "epoch": 1.0,
286
- "grad_norm": 0.2156570702791214,
287
  "learning_rate": 0.0,
288
- "loss": 0.9593,
289
  "step": 195
290
  },
291
  {
292
  "epoch": 1.0,
293
- "eval_loss": 2.1302647590637207,
294
- "eval_runtime": 1.1576,
295
- "eval_samples_per_second": 8.639,
296
- "eval_steps_per_second": 0.864,
297
  "step": 195
298
  },
299
  {
300
  "epoch": 1.0,
301
  "step": 195,
302
  "total_flos": 5.7571825768464384e+17,
303
- "train_loss": 1.1034836133321126,
304
- "train_runtime": 2146.0273,
305
- "train_samples_per_second": 5.814,
306
- "train_steps_per_second": 0.091
307
  }
308
  ],
309
  "logging_steps": 5,
 
10
  "log_history": [
11
  {
12
  "epoch": 0.005128205128205128,
13
+ "grad_norm": 0.7784392237663269,
14
  "learning_rate": 1.4999999999999999e-05,
15
+ "loss": 2.2555,
16
  "step": 1
17
  },
18
  {
19
  "epoch": 0.02564102564102564,
20
+ "grad_norm": 0.9956199526786804,
21
  "learning_rate": 7.5e-05,
22
+ "loss": 2.2474,
23
  "step": 5
24
  },
25
  {
26
  "epoch": 0.05128205128205128,
27
+ "grad_norm": 1.2538105249404907,
28
  "learning_rate": 0.00015,
29
+ "loss": 2.1364,
30
  "step": 10
31
  },
32
  {
33
  "epoch": 0.07692307692307693,
34
+ "grad_norm": 1.435035228729248,
35
  "learning_rate": 0.000225,
36
+ "loss": 1.6881,
37
  "step": 15
38
  },
39
  {
40
  "epoch": 0.10256410256410256,
41
+ "grad_norm": 1.9407316446304321,
42
  "learning_rate": 0.0003,
43
+ "loss": 1.2552,
44
  "step": 20
45
  },
46
  {
47
  "epoch": 0.1282051282051282,
48
+ "grad_norm": 0.6618439555168152,
49
  "learning_rate": 0.00029939614409928584,
50
+ "loss": 1.1476,
51
  "step": 25
52
  },
53
  {
54
  "epoch": 0.15384615384615385,
55
+ "grad_norm": 0.4018121063709259,
56
  "learning_rate": 0.00029758943828979444,
57
+ "loss": 1.0741,
58
  "step": 30
59
  },
60
  {
61
  "epoch": 0.1794871794871795,
62
+ "grad_norm": 0.3278277814388275,
63
  "learning_rate": 0.00029459442910437797,
64
+ "loss": 1.0159,
65
  "step": 35
66
  },
67
  {
68
  "epoch": 0.20512820512820512,
69
+ "grad_norm": 0.2918931245803833,
70
  "learning_rate": 0.00029043523059596053,
71
+ "loss": 0.9818,
72
  "step": 40
73
  },
74
  {
75
  "epoch": 0.23076923076923078,
76
+ "grad_norm": 0.21536950767040253,
77
  "learning_rate": 0.0002851453301853628,
78
+ "loss": 0.96,
79
  "step": 45
80
  },
81
  {
82
  "epoch": 0.2564102564102564,
83
+ "grad_norm": 0.187772735953331,
84
  "learning_rate": 0.0002787673190402799,
85
+ "loss": 0.9458,
86
  "step": 50
87
  },
88
  {
89
  "epoch": 0.28205128205128205,
90
+ "grad_norm": 0.2083010971546173,
91
  "learning_rate": 0.0002713525491562421,
92
+ "loss": 0.9253,
93
  "step": 55
94
  },
95
  {
96
  "epoch": 0.3076923076923077,
97
+ "grad_norm": 0.2108624428510666,
98
  "learning_rate": 0.00026296071990054165,
99
+ "loss": 0.9218,
100
  "step": 60
101
  },
102
  {
103
  "epoch": 0.3333333333333333,
104
+ "grad_norm": 0.18083609640598297,
105
  "learning_rate": 0.0002536593973480297,
106
+ "loss": 0.9083,
107
  "step": 65
108
  },
109
  {
110
  "epoch": 0.358974358974359,
111
+ "grad_norm": 0.178299680352211,
112
  "learning_rate": 0.00024352347027881003,
113
+ "loss": 0.9036,
114
  "step": 70
115
  },
116
  {
117
  "epoch": 0.38461538461538464,
118
+ "grad_norm": 0.19863168895244598,
119
  "learning_rate": 0.00023263454721781537,
120
+ "loss": 0.9033,
121
  "step": 75
122
  },
123
  {
124
  "epoch": 0.41025641025641024,
125
+ "grad_norm": 0.22889919579029083,
126
  "learning_rate": 0.0002210802993709498,
127
+ "loss": 0.8912,
128
  "step": 80
129
  },
130
  {
131
  "epoch": 0.4358974358974359,
132
+ "grad_norm": 0.20287582278251648,
133
  "learning_rate": 0.00020895375474808852,
134
+ "loss": 0.8899,
135
  "step": 85
136
  },
137
  {
138
  "epoch": 0.46153846153846156,
139
+ "grad_norm": 0.21162137389183044,
140
  "learning_rate": 0.0001963525491562421,
141
+ "loss": 0.8813,
142
  "step": 90
143
  },
144
  {
145
  "epoch": 0.48717948717948717,
146
+ "grad_norm": 0.21930070221424103,
147
  "learning_rate": 0.00018337814009344714,
148
+ "loss": 0.8795,
149
  "step": 95
150
  },
151
  {
152
  "epoch": 0.5128205128205128,
153
+ "grad_norm": 0.23387882113456726,
154
  "learning_rate": 0.00017013498987264832,
155
+ "loss": 0.8794,
156
  "step": 100
157
  },
158
  {
159
  "epoch": 0.5384615384615384,
160
+ "grad_norm": 0.19868768751621246,
161
  "learning_rate": 0.00015672972455257723,
162
+ "loss": 0.8752,
163
  "step": 105
164
  },
165
  {
166
  "epoch": 0.5641025641025641,
167
+ "grad_norm": 0.21123158931732178,
168
  "learning_rate": 0.0001432702754474228,
169
+ "loss": 0.8524,
170
  "step": 110
171
  },
172
  {
173
  "epoch": 0.5897435897435898,
174
+ "grad_norm": 0.25728657841682434,
175
  "learning_rate": 0.0001298650101273517,
176
+ "loss": 0.8734,
177
  "step": 115
178
  },
179
  {
180
  "epoch": 0.6153846153846154,
181
+ "grad_norm": 0.21154211461544037,
182
  "learning_rate": 0.00011662185990655284,
183
+ "loss": 0.8597,
184
  "step": 120
185
  },
186
  {
187
  "epoch": 0.6410256410256411,
188
+ "grad_norm": 0.21001510322093964,
189
  "learning_rate": 0.0001036474508437579,
190
+ "loss": 0.8606,
191
  "step": 125
192
  },
193
  {
194
  "epoch": 0.6666666666666666,
195
+ "grad_norm": 0.2222093790769577,
196
  "learning_rate": 9.104624525191145e-05,
197
+ "loss": 0.8563,
198
  "step": 130
199
  },
200
  {
201
  "epoch": 0.6923076923076923,
202
+ "grad_norm": 0.19596728682518005,
203
  "learning_rate": 7.89197006290502e-05,
204
+ "loss": 0.855,
205
  "step": 135
206
  },
207
  {
208
  "epoch": 0.717948717948718,
209
+ "grad_norm": 0.20535971224308014,
210
  "learning_rate": 6.736545278218463e-05,
211
+ "loss": 0.8592,
212
  "step": 140
213
  },
214
  {
215
  "epoch": 0.7435897435897436,
216
+ "grad_norm": 0.20298361778259277,
217
  "learning_rate": 5.6476529721189974e-05,
218
+ "loss": 0.8549,
219
  "step": 145
220
  },
221
  {
222
  "epoch": 0.7692307692307693,
223
+ "grad_norm": 0.19454172253608704,
224
  "learning_rate": 4.63406026519703e-05,
225
+ "loss": 0.8578,
226
  "step": 150
227
  },
228
  {
229
  "epoch": 0.7948717948717948,
230
+ "grad_norm": 0.19015519320964813,
231
  "learning_rate": 3.7039280099458366e-05,
232
+ "loss": 0.8579,
233
  "step": 155
234
  },
235
  {
236
  "epoch": 0.8205128205128205,
237
+ "grad_norm": 0.20558574795722961,
238
  "learning_rate": 2.8647450843757897e-05,
239
+ "loss": 0.8508,
240
  "step": 160
241
  },
242
  {
243
  "epoch": 0.8461538461538461,
244
+ "grad_norm": 0.19159014523029327,
245
  "learning_rate": 2.1232680959720082e-05,
246
+ "loss": 0.8415,
247
  "step": 165
248
  },
249
  {
250
  "epoch": 0.8717948717948718,
251
+ "grad_norm": 0.17908993363380432,
252
  "learning_rate": 1.4854669814637143e-05,
253
+ "loss": 0.8514,
254
  "step": 170
255
  },
256
  {
257
  "epoch": 0.8974358974358975,
258
+ "grad_norm": 0.18227402865886688,
259
  "learning_rate": 9.564769404039419e-06,
260
+ "loss": 0.8503,
261
  "step": 175
262
  },
263
  {
264
  "epoch": 0.9230769230769231,
265
+ "grad_norm": 0.2028588354587555,
266
  "learning_rate": 5.405570895622013e-06,
267
+ "loss": 0.8449,
268
  "step": 180
269
  },
270
  {
271
  "epoch": 0.9487179487179487,
272
+ "grad_norm": 0.19046302139759064,
273
  "learning_rate": 2.4105617102055496e-06,
274
+ "loss": 0.8456,
275
  "step": 185
276
  },
277
  {
278
  "epoch": 0.9743589743589743,
279
+ "grad_norm": 0.18208374083042145,
280
  "learning_rate": 6.038559007141397e-07,
281
+ "loss": 0.854,
282
  "step": 190
283
  },
284
  {
285
  "epoch": 1.0,
286
+ "grad_norm": 0.21627038717269897,
287
  "learning_rate": 0.0,
288
+ "loss": 0.8483,
289
  "step": 195
290
  },
291
  {
292
  "epoch": 1.0,
293
+ "eval_loss": 2.0198426246643066,
294
+ "eval_runtime": 1.151,
295
+ "eval_samples_per_second": 8.688,
296
+ "eval_steps_per_second": 0.869,
297
  "step": 195
298
  },
299
  {
300
  "epoch": 1.0,
301
  "step": 195,
302
  "total_flos": 5.7571825768464384e+17,
303
+ "train_loss": 0.9919692577459873,
304
+ "train_runtime": 2099.4843,
305
+ "train_samples_per_second": 5.943,
306
+ "train_steps_per_second": 0.093
307
  }
308
  ],
309
  "logging_steps": 5,