{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 100,
  "global_step": 288,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.010416666666666666,
      "grad_norm": 214.15386464224227,
      "learning_rate": 2.8157056131518488e-08,
      "logits/chosen": -2.590585231781006,
      "logits/rejected": -2.5664222240448,
      "logps/chosen": -80.29847717285156,
      "logps/rejected": -53.10200881958008,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.10416666666666667,
      "grad_norm": 194.60148666589583,
      "learning_rate": 2.815705613151849e-07,
      "logits/chosen": -2.556077241897583,
      "logits/rejected": -2.538231611251831,
      "logps/chosen": -87.93022918701172,
      "logps/rejected": -81.02066040039062,
      "loss": 0.6969,
      "rewards/accuracies": 0.1805555522441864,
      "rewards/chosen": -0.004902619402855635,
      "rewards/margins": -0.010706111788749695,
      "rewards/rejected": 0.00580349238589406,
      "step": 10
    },
    {
      "epoch": 0.20833333333333334,
      "grad_norm": 140.77132706513083,
      "learning_rate": 5.631411226303698e-07,
      "logits/chosen": -2.611011505126953,
      "logits/rejected": -2.561887264251709,
      "logps/chosen": -102.95599365234375,
      "logps/rejected": -89.57844543457031,
      "loss": 0.67,
      "rewards/accuracies": 0.3499999940395355,
      "rewards/chosen": 0.12845759093761444,
      "rewards/margins": 0.028963133692741394,
      "rewards/rejected": 0.09949447214603424,
      "step": 20
    },
    {
      "epoch": 0.3125,
      "grad_norm": 216.34715329753536,
      "learning_rate": 8.134019072433256e-07,
      "logits/chosen": -2.5004661083221436,
      "logits/rejected": -2.5154635906219482,
      "logps/chosen": -66.19593048095703,
      "logps/rejected": -74.33724212646484,
      "loss": 0.6736,
      "rewards/accuracies": 0.33125001192092896,
      "rewards/chosen": 0.40372389554977417,
      "rewards/margins": 0.21228119730949402,
      "rewards/rejected": 0.19144263863563538,
      "step": 30
    },
    {
      "epoch": 0.4166666666666667,
      "grad_norm": 168.00522909028686,
      "learning_rate": 7.818747015362199e-07,
      "logits/chosen": -2.5674819946289062,
      "logits/rejected": -2.5553228855133057,
      "logps/chosen": -70.16609191894531,
      "logps/rejected": -69.43184661865234,
      "loss": 0.6795,
      "rewards/accuracies": 0.26875001192092896,
      "rewards/chosen": 0.6441055536270142,
      "rewards/margins": 0.22733807563781738,
      "rewards/rejected": 0.41676750779151917,
      "step": 40
    },
    {
      "epoch": 0.5208333333333334,
      "grad_norm": 180.7175984450008,
      "learning_rate": 7.503474958291143e-07,
      "logits/chosen": -2.4915037155151367,
      "logits/rejected": -2.5040123462677,
      "logps/chosen": -47.24058532714844,
      "logps/rejected": -55.82140350341797,
      "loss": 0.6926,
      "rewards/accuracies": 0.23125000298023224,
      "rewards/chosen": 1.1905267238616943,
      "rewards/margins": 0.18629805743694305,
      "rewards/rejected": 1.0042288303375244,
      "step": 50
    },
    {
      "epoch": 0.625,
      "grad_norm": 145.01566527061468,
      "learning_rate": 7.188202901220087e-07,
      "logits/chosen": -2.6002345085144043,
      "logits/rejected": -2.5831518173217773,
      "logps/chosen": -73.40350341796875,
      "logps/rejected": -74.82201385498047,
      "loss": 0.6936,
      "rewards/accuracies": 0.30000001192092896,
      "rewards/chosen": 1.8426650762557983,
      "rewards/margins": 0.37407809495925903,
      "rewards/rejected": 1.4685871601104736,
      "step": 60
    },
    {
      "epoch": 0.7291666666666666,
      "grad_norm": 179.82436828387043,
      "learning_rate": 6.87293084414903e-07,
      "logits/chosen": -2.5965964794158936,
      "logits/rejected": -2.5863566398620605,
      "logps/chosen": -90.09097290039062,
      "logps/rejected": -80.5506591796875,
      "loss": 0.6864,
      "rewards/accuracies": 0.33125001192092896,
      "rewards/chosen": 2.033968448638916,
      "rewards/margins": 0.44049301743507385,
      "rewards/rejected": 1.5934756994247437,
      "step": 70
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 271.72190744530945,
      "learning_rate": 6.557658787077973e-07,
      "logits/chosen": -2.5700366497039795,
      "logits/rejected": -2.522472620010376,
      "logps/chosen": -79.72940826416016,
      "logps/rejected": -75.4540023803711,
      "loss": 0.6721,
      "rewards/accuracies": 0.4312500059604645,
      "rewards/chosen": 1.9240013360977173,
      "rewards/margins": 0.6463297605514526,
      "rewards/rejected": 1.2776715755462646,
      "step": 80
    },
    {
      "epoch": 0.9375,
      "grad_norm": 209.9071476755963,
      "learning_rate": 6.242386730006917e-07,
      "logits/chosen": -2.461777687072754,
      "logits/rejected": -2.4585537910461426,
      "logps/chosen": -51.07688522338867,
      "logps/rejected": -61.461647033691406,
      "loss": 0.7091,
      "rewards/accuracies": 0.23749999701976776,
      "rewards/chosen": 0.9441378712654114,
      "rewards/margins": 0.2915518581867218,
      "rewards/rejected": 0.6525859832763672,
      "step": 90
    },
    {
      "epoch": 1.0416666666666667,
      "grad_norm": 12.21362061398562,
      "learning_rate": 5.927114672935861e-07,
      "logits/chosen": -2.4158740043640137,
      "logits/rejected": -2.395545244216919,
      "logps/chosen": -66.05335235595703,
      "logps/rejected": -75.04288482666016,
      "loss": 0.5554,
      "rewards/accuracies": 0.39375001192092896,
      "rewards/chosen": 2.4290213584899902,
      "rewards/margins": 3.4105141162872314,
      "rewards/rejected": -0.9814925193786621,
      "step": 100
    },
    {
      "epoch": 1.0416666666666667,
      "eval_logits/chosen": -2.452505588531494,
      "eval_logits/rejected": -2.4356331825256348,
      "eval_logps/chosen": -73.46339416503906,
      "eval_logps/rejected": -81.79998779296875,
      "eval_loss": 0.7309352159500122,
      "eval_rewards/accuracies": 0.3392857015132904,
      "eval_rewards/chosen": 0.33428940176963806,
      "eval_rewards/margins": 0.5527295470237732,
      "eval_rewards/rejected": -0.21844016015529633,
      "eval_runtime": 113.7572,
      "eval_samples_per_second": 17.581,
      "eval_steps_per_second": 0.554,
      "step": 100
    },
    {
      "epoch": 1.1458333333333333,
      "grad_norm": 9.404688750875733,
      "learning_rate": 5.611842615864804e-07,
      "logits/chosen": -2.4337010383605957,
      "logits/rejected": -2.464585781097412,
      "logps/chosen": -55.87408447265625,
      "logps/rejected": -93.3639907836914,
      "loss": 0.3638,
      "rewards/accuracies": 0.48124998807907104,
      "rewards/chosen": 3.0879528522491455,
      "rewards/margins": 6.951683044433594,
      "rewards/rejected": -3.8637306690216064,
      "step": 110
    },
    {
      "epoch": 1.25,
      "grad_norm": 11.93242109022535,
      "learning_rate": 5.296570558793748e-07,
      "logits/chosen": -2.466701030731201,
      "logits/rejected": -2.429455280303955,
      "logps/chosen": -96.85403442382812,
      "logps/rejected": -117.70040130615234,
      "loss": 0.3857,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": 3.6269073486328125,
      "rewards/margins": 11.550947189331055,
      "rewards/rejected": -7.924038887023926,
      "step": 120
    },
    {
      "epoch": 1.3541666666666667,
      "grad_norm": 24.581368760955932,
      "learning_rate": 4.981298501722692e-07,
      "logits/chosen": -2.3588976860046387,
      "logits/rejected": -2.3589892387390137,
      "logps/chosen": -80.81659698486328,
      "logps/rejected": -112.35628509521484,
      "loss": 0.3718,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": 1.8699009418487549,
      "rewards/margins": 9.356704711914062,
      "rewards/rejected": -7.486802577972412,
      "step": 130
    },
    {
      "epoch": 1.4583333333333333,
      "grad_norm": 22.835446345258582,
      "learning_rate": 4.6660264446516346e-07,
      "logits/chosen": -2.3836002349853516,
      "logits/rejected": -2.4029014110565186,
      "logps/chosen": -48.08272171020508,
      "logps/rejected": -81.94709777832031,
      "loss": 0.3721,
      "rewards/accuracies": 0.4437499940395355,
      "rewards/chosen": 0.3919129967689514,
      "rewards/margins": 5.86413049697876,
      "rewards/rejected": -5.472217559814453,
      "step": 140
    },
    {
      "epoch": 1.5625,
      "grad_norm": 1.3144367942764572,
      "learning_rate": 4.350754387580579e-07,
      "logits/chosen": -2.4465391635894775,
      "logits/rejected": -2.4150843620300293,
      "logps/chosen": -68.7873306274414,
      "logps/rejected": -94.09663391113281,
      "loss": 0.3885,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 1.0929625034332275,
      "rewards/margins": 7.766973972320557,
      "rewards/rejected": -6.67401123046875,
      "step": 150
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 5.382498184271839,
      "learning_rate": 4.0354823305095224e-07,
      "logits/chosen": -2.4038569927215576,
      "logits/rejected": -2.408721923828125,
      "logps/chosen": -78.36112976074219,
      "logps/rejected": -118.96110534667969,
      "loss": 0.369,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.8339115977287292,
      "rewards/margins": 10.923874855041504,
      "rewards/rejected": -10.089962005615234,
      "step": 160
    },
    {
      "epoch": 1.7708333333333335,
      "grad_norm": 29.49369822653688,
      "learning_rate": 3.7202102734384655e-07,
      "logits/chosen": -2.345175266265869,
      "logits/rejected": -2.3313803672790527,
      "logps/chosen": -74.82723999023438,
      "logps/rejected": -113.85337829589844,
      "loss": 0.3923,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": 0.12776944041252136,
      "rewards/margins": 10.1668119430542,
      "rewards/rejected": -10.039042472839355,
      "step": 170
    },
    {
      "epoch": 1.875,
      "grad_norm": 19.55786574804487,
      "learning_rate": 3.404938216367409e-07,
      "logits/chosen": -2.3961374759674072,
      "logits/rejected": -2.376530647277832,
      "logps/chosen": -72.12110137939453,
      "logps/rejected": -100.91358947753906,
      "loss": 0.3623,
      "rewards/accuracies": 0.4937500059604645,
      "rewards/chosen": -0.0863330215215683,
      "rewards/margins": 8.792970657348633,
      "rewards/rejected": -8.879304885864258,
      "step": 180
    },
    {
      "epoch": 1.9791666666666665,
      "grad_norm": 21.215069783043003,
      "learning_rate": 3.0896661592963533e-07,
      "logits/chosen": -2.4835853576660156,
      "logits/rejected": -2.4758810997009277,
      "logps/chosen": -83.96082305908203,
      "logps/rejected": -122.45509338378906,
      "loss": 0.3768,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 1.4638442993164062,
      "rewards/margins": 11.81721305847168,
      "rewards/rejected": -10.353367805480957,
      "step": 190
    },
    {
      "epoch": 2.0833333333333335,
      "grad_norm": 0.14888221388054043,
      "learning_rate": 2.7743941022252964e-07,
      "logits/chosen": -2.4281277656555176,
      "logits/rejected": -2.450599431991577,
      "logps/chosen": -85.7192153930664,
      "logps/rejected": -136.96456909179688,
      "loss": 0.3557,
      "rewards/accuracies": 0.543749988079071,
      "rewards/chosen": 1.0684223175048828,
      "rewards/margins": 12.590888023376465,
      "rewards/rejected": -11.522466659545898,
      "step": 200
    },
    {
      "epoch": 2.0833333333333335,
      "eval_logits/chosen": -2.465620279312134,
      "eval_logits/rejected": -2.4456026554107666,
      "eval_logps/chosen": -84.56539154052734,
      "eval_logps/rejected": -94.31573486328125,
      "eval_loss": 0.8315721750259399,
      "eval_rewards/accuracies": 0.341269850730896,
      "eval_rewards/chosen": -3.2764618396759033,
      "eval_rewards/margins": 1.0125247240066528,
      "eval_rewards/rejected": -4.288985729217529,
      "eval_runtime": 113.6105,
      "eval_samples_per_second": 17.604,
      "eval_steps_per_second": 0.555,
      "step": 200
    },
    {
      "epoch": 2.1875,
      "grad_norm": 0.11711860425549066,
      "learning_rate": 2.45912204515424e-07,
      "logits/chosen": -2.4621033668518066,
      "logits/rejected": -2.448930501937866,
      "logps/chosen": -62.363304138183594,
      "logps/rejected": -93.92267608642578,
      "loss": 0.38,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": 0.40027302503585815,
      "rewards/margins": 8.606541633605957,
      "rewards/rejected": -8.206268310546875,
      "step": 210
    },
    {
      "epoch": 2.2916666666666665,
      "grad_norm": 2.554878231095572,
      "learning_rate": 2.1438499880831834e-07,
      "logits/chosen": -2.4758799076080322,
      "logits/rejected": -2.428165912628174,
      "logps/chosen": -96.42266845703125,
      "logps/rejected": -130.16854858398438,
      "loss": 0.3276,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 1.8300745487213135,
      "rewards/margins": 14.0442533493042,
      "rewards/rejected": -12.214178085327148,
      "step": 220
    },
    {
      "epoch": 2.3958333333333335,
      "grad_norm": 0.19011116947531465,
      "learning_rate": 1.8285779310121273e-07,
      "logits/chosen": -2.446868419647217,
      "logits/rejected": -2.4058339595794678,
      "logps/chosen": -58.83857345581055,
      "logps/rejected": -79.16459655761719,
      "loss": 0.3785,
      "rewards/accuracies": 0.4312500059604645,
      "rewards/chosen": 0.32310181856155396,
      "rewards/margins": 7.874607086181641,
      "rewards/rejected": -7.551505088806152,
      "step": 230
    },
    {
      "epoch": 2.5,
      "grad_norm": 71.38953195661851,
      "learning_rate": 1.5133058739410707e-07,
      "logits/chosen": -2.4237632751464844,
      "logits/rejected": -2.3682377338409424,
      "logps/chosen": -86.28116607666016,
      "logps/rejected": -109.75992584228516,
      "loss": 0.3763,
      "rewards/accuracies": 0.4937500059604645,
      "rewards/chosen": 0.5250676870346069,
      "rewards/margins": 10.391054153442383,
      "rewards/rejected": -9.865986824035645,
      "step": 240
    },
    {
      "epoch": 2.6041666666666665,
      "grad_norm": 0.21856365716272738,
      "learning_rate": 1.1980338168700146e-07,
      "logits/chosen": -2.3821611404418945,
      "logits/rejected": -2.366961717605591,
      "logps/chosen": -71.19404602050781,
      "logps/rejected": -100.2869644165039,
      "loss": 0.3438,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": 0.47970885038375854,
      "rewards/margins": 10.1876220703125,
      "rewards/rejected": -9.70791244506836,
      "step": 250
    },
    {
      "epoch": 2.7083333333333335,
      "grad_norm": 0.2713321343286941,
      "learning_rate": 8.82761759798958e-08,
      "logits/chosen": -2.395521402359009,
      "logits/rejected": -2.403886318206787,
      "logps/chosen": -56.680870056152344,
      "logps/rejected": -104.11289978027344,
      "loss": 0.3752,
      "rewards/accuracies": 0.45625001192092896,
      "rewards/chosen": 0.4715859293937683,
      "rewards/margins": 10.60243034362793,
      "rewards/rejected": -10.130845069885254,
      "step": 260
    },
    {
      "epoch": 2.8125,
      "grad_norm": 2.290935978349522,
      "learning_rate": 5.6748970272790155e-08,
      "logits/chosen": -2.407195568084717,
      "logits/rejected": -2.4316117763519287,
      "logps/chosen": -77.73648071289062,
      "logps/rejected": -109.90128326416016,
      "loss": 0.3519,
      "rewards/accuracies": 0.4937500059604645,
      "rewards/chosen": -0.3454963266849518,
      "rewards/margins": 9.936100006103516,
      "rewards/rejected": -10.281596183776855,
      "step": 270
    },
    {
      "epoch": 2.9166666666666665,
      "grad_norm": 0.34209721575617014,
      "learning_rate": 2.5221764565684515e-08,
      "logits/chosen": -2.470424175262451,
      "logits/rejected": -2.4299635887145996,
      "logps/chosen": -82.2805404663086,
      "logps/rejected": -117.57357025146484,
      "loss": 0.366,
      "rewards/accuracies": 0.48124998807907104,
      "rewards/chosen": 1.000727891921997,
      "rewards/margins": 12.448894500732422,
      "rewards/rejected": -11.44816780090332,
      "step": 280
    },
    {
      "epoch": 3.0,
      "step": 288,
      "total_flos": 0.0,
      "train_loss": 0.4743386713994874,
      "train_runtime": 3246.9146,
      "train_samples_per_second": 5.648,
      "train_steps_per_second": 0.089
    }
  ],
  "logging_steps": 10,
  "max_steps": 288,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}