{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.9843342036553526,
  "eval_steps": 500,
  "global_step": 190,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "grad_norm": 6.530893553837012,
      "learning_rate": 2.6315789473684208e-08,
      "logits/chosen": -2.851747512817383,
      "logits/rejected": -2.833996534347534,
      "logps/chosen": -165.70089721679688,
      "logps/rejected": -198.857666015625,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.05,
      "grad_norm": 5.940017766790383,
      "learning_rate": 1.3157894736842104e-07,
      "logits/chosen": -2.7694077491760254,
      "logits/rejected": -2.772179365158081,
      "logps/chosen": -171.38229370117188,
      "logps/rejected": -172.59909057617188,
      "loss": 0.6932,
      "rewards/accuracies": 0.3125,
      "rewards/chosen": -0.00019297577091492712,
      "rewards/margins": -0.0003426831099204719,
      "rewards/rejected": 0.00014970726624596864,
      "step": 5
    },
    {
      "epoch": 0.1,
      "grad_norm": 7.22860361762137,
      "learning_rate": 2.631578947368421e-07,
      "logits/chosen": -2.784412384033203,
      "logits/rejected": -2.7932095527648926,
      "logps/chosen": -190.19732666015625,
      "logps/rejected": -194.6861572265625,
      "loss": 0.6929,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.0017464166739955544,
      "rewards/margins": 0.0005834165494889021,
      "rewards/rejected": 0.0011630002409219742,
      "step": 10
    },
    {
      "epoch": 0.16,
      "grad_norm": 7.203100204300102,
      "learning_rate": 3.9473684210526315e-07,
      "logits/chosen": -2.8401434421539307,
      "logits/rejected": -2.8505945205688477,
      "logps/chosen": -198.759521484375,
      "logps/rejected": -188.010986328125,
      "loss": 0.6919,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.012036855332553387,
      "rewards/margins": 0.0030940533615648746,
      "rewards/rejected": 0.0089428024366498,
      "step": 15
    },
    {
      "epoch": 0.21,
      "grad_norm": 6.864101673229198,
      "learning_rate": 4.999578104083306e-07,
      "logits/chosen": -2.8547134399414062,
      "logits/rejected": -2.864978075027466,
      "logps/chosen": -165.3108673095703,
      "logps/rejected": -177.08412170410156,
      "loss": 0.6882,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": 0.034319791942834854,
      "rewards/margins": 0.008450874127447605,
      "rewards/rejected": 0.025868916884064674,
      "step": 20
    },
    {
      "epoch": 0.26,
      "grad_norm": 6.3583895035816855,
      "learning_rate": 4.984826693294873e-07,
      "logits/chosen": -2.8253941535949707,
      "logits/rejected": -2.8230338096618652,
      "logps/chosen": -125.37552642822266,
      "logps/rejected": -145.67837524414062,
      "loss": 0.6822,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": 0.06096485257148743,
      "rewards/margins": 0.02324753999710083,
      "rewards/rejected": 0.0377173088490963,
      "step": 25
    },
    {
      "epoch": 0.31,
      "grad_norm": 6.95430506418983,
      "learning_rate": 4.949122667718934e-07,
      "logits/chosen": -2.797011613845825,
      "logits/rejected": -2.7968087196350098,
      "logps/chosen": -156.46078491210938,
      "logps/rejected": -166.90145874023438,
      "loss": 0.6741,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": 0.030187183991074562,
      "rewards/margins": 0.04637282341718674,
      "rewards/rejected": -0.016185639426112175,
      "step": 30
    },
    {
      "epoch": 0.37,
      "grad_norm": 7.0189636592719085,
      "learning_rate": 4.892767091689785e-07,
      "logits/chosen": -2.849172592163086,
      "logits/rejected": -2.8440544605255127,
      "logps/chosen": -193.01596069335938,
      "logps/rejected": -207.4866180419922,
      "loss": 0.664,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.01744670793414116,
      "rewards/margins": 0.05499963089823723,
      "rewards/rejected": -0.07244633138179779,
      "step": 35
    },
    {
      "epoch": 0.42,
      "grad_norm": 8.51056311217898,
      "learning_rate": 4.816235168037004e-07,
      "logits/chosen": -2.7640957832336426,
      "logits/rejected": -2.776198387145996,
      "logps/chosen": -137.4984130859375,
      "logps/rejected": -163.426513671875,
      "loss": 0.6517,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -0.046394020318984985,
      "rewards/margins": 0.10987555980682373,
      "rewards/rejected": -0.15626958012580872,
      "step": 40
    },
    {
      "epoch": 0.47,
      "grad_norm": 11.159926429466159,
      "learning_rate": 4.720172231068844e-07,
      "logits/chosen": -2.723865032196045,
      "logits/rejected": -2.7389719486236572,
      "logps/chosen": -207.92044067382812,
      "logps/rejected": -213.3718719482422,
      "loss": 0.6536,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.19272716343402863,
      "rewards/margins": 0.12425712496042252,
      "rewards/rejected": -0.31698426604270935,
      "step": 45
    },
    {
      "epoch": 0.52,
      "grad_norm": 12.393423854122126,
      "learning_rate": 4.605388304968914e-07,
      "logits/chosen": -2.718982696533203,
      "logits/rejected": -2.7207980155944824,
      "logps/chosen": -178.42510986328125,
      "logps/rejected": -211.27377319335938,
      "loss": 0.6282,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.040986787527799606,
      "rewards/margins": 0.17269372940063477,
      "rewards/rejected": -0.21368053555488586,
      "step": 50
    },
    {
      "epoch": 0.57,
      "grad_norm": 21.049387798378756,
      "learning_rate": 4.472851273490984e-07,
      "logits/chosen": -2.5929465293884277,
      "logits/rejected": -2.6008853912353516,
      "logps/chosen": -160.97874450683594,
      "logps/rejected": -186.36863708496094,
      "loss": 0.6258,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.12865906953811646,
      "rewards/margins": 0.20724160969257355,
      "rewards/rejected": -0.3359006643295288,
      "step": 55
    },
    {
      "epoch": 0.63,
      "grad_norm": 12.520960096291708,
      "learning_rate": 4.323678718546552e-07,
      "logits/chosen": -2.648369312286377,
      "logits/rejected": -2.645002603530884,
      "logps/chosen": -190.52731323242188,
      "logps/rejected": -241.7861328125,
      "loss": 0.6096,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.18569248914718628,
      "rewards/margins": 0.27379462122917175,
      "rewards/rejected": -0.45948711037635803,
      "step": 60
    },
    {
      "epoch": 0.68,
      "grad_norm": 18.22398171524467,
      "learning_rate": 4.159128496504053e-07,
      "logits/chosen": -2.635317325592041,
      "logits/rejected": -2.6489968299865723,
      "logps/chosen": -185.21597290039062,
      "logps/rejected": -226.77963256835938,
      "loss": 0.5957,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.23138824105262756,
      "rewards/margins": 0.33824095129966736,
      "rewards/rejected": -0.5696292519569397,
      "step": 65
    },
    {
      "epoch": 0.73,
      "grad_norm": 12.486094279730978,
      "learning_rate": 3.9805881316624503e-07,
      "logits/chosen": -2.5691702365875244,
      "logits/rejected": -2.5781631469726562,
      "logps/chosen": -209.5417938232422,
      "logps/rejected": -230.74874877929688,
      "loss": 0.597,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.3240502178668976,
      "rewards/margins": 0.24576255679130554,
      "rewards/rejected": -0.5698127150535583,
      "step": 70
    },
    {
      "epoch": 0.78,
      "grad_norm": 16.18752353365872,
      "learning_rate": 3.78956311633581e-07,
      "logits/chosen": -2.5225658416748047,
      "logits/rejected": -2.519923448562622,
      "logps/chosen": -189.95606994628906,
      "logps/rejected": -244.0750732421875,
      "loss": 0.583,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.18096503615379333,
      "rewards/margins": 0.4236725866794586,
      "rewards/rejected": -0.604637622833252,
      "step": 75
    },
    {
      "epoch": 0.84,
      "grad_norm": 16.73585640066424,
      "learning_rate": 3.587664216205183e-07,
      "logits/chosen": -2.4873414039611816,
      "logits/rejected": -2.488546133041382,
      "logps/chosen": -186.70196533203125,
      "logps/rejected": -229.10800170898438,
      "loss": 0.5896,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.1871272623538971,
      "rewards/margins": 0.35064736008644104,
      "rewards/rejected": -0.5377745628356934,
      "step": 80
    },
    {
      "epoch": 0.89,
      "grad_norm": 14.48535258453445,
      "learning_rate": 3.376593887981886e-07,
      "logits/chosen": -2.5321877002716064,
      "logits/rejected": -2.5364866256713867,
      "logps/chosen": -201.63038635253906,
      "logps/rejected": -250.297607421875,
      "loss": 0.5943,
      "rewards/accuracies": 0.643750011920929,
      "rewards/chosen": -0.4144672751426697,
      "rewards/margins": 0.3995351493358612,
      "rewards/rejected": -0.8140023946762085,
      "step": 85
    },
    {
      "epoch": 0.94,
      "grad_norm": 24.006493530801364,
      "learning_rate": 3.1581319239114976e-07,
      "logits/chosen": -2.46376633644104,
      "logits/rejected": -2.4762072563171387,
      "logps/chosen": -235.89602661132812,
      "logps/rejected": -286.06207275390625,
      "loss": 0.5905,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.6695653200149536,
      "rewards/margins": 0.43523311614990234,
      "rewards/rejected": -1.1047985553741455,
      "step": 90
    },
    {
      "epoch": 0.99,
      "grad_norm": 13.899105116058017,
      "learning_rate": 2.934120444167326e-07,
      "logits/chosen": -2.5002081394195557,
      "logits/rejected": -2.515636920928955,
      "logps/chosen": -240.7231903076172,
      "logps/rejected": -282.92974853515625,
      "loss": 0.579,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.5511455535888672,
      "rewards/margins": 0.450370728969574,
      "rewards/rejected": -1.001516342163086,
      "step": 95
    },
    {
      "epoch": 1.04,
      "grad_norm": 12.195720253264156,
      "learning_rate": 2.706448363680831e-07,
      "logits/chosen": -2.492196559906006,
      "logits/rejected": -2.4908487796783447,
      "logps/chosen": -217.4730224609375,
      "logps/rejected": -285.6746826171875,
      "loss": 0.4963,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.3912954330444336,
      "rewards/margins": 0.6321467161178589,
      "rewards/rejected": -1.023442268371582,
      "step": 100
    },
    {
      "epoch": 1.1,
      "grad_norm": 13.625308588645998,
      "learning_rate": 2.477035464388184e-07,
      "logits/chosen": -2.3943684101104736,
      "logits/rejected": -2.3905301094055176,
      "logps/chosen": -160.0459442138672,
      "logps/rejected": -262.6797790527344,
      "loss": 0.4563,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.28104764223098755,
      "rewards/margins": 0.8251093029975891,
      "rewards/rejected": -1.1061569452285767,
      "step": 105
    },
    {
      "epoch": 1.15,
      "grad_norm": 16.514733311854357,
      "learning_rate": 2.2478162071993296e-07,
      "logits/chosen": -2.367004156112671,
      "logits/rejected": -2.3704023361206055,
      "logps/chosen": -212.1750946044922,
      "logps/rejected": -292.59295654296875,
      "loss": 0.462,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.39816007018089294,
      "rewards/margins": 0.7662478089332581,
      "rewards/rejected": -1.1644079685211182,
      "step": 110
    },
    {
      "epoch": 1.2,
      "grad_norm": 15.624289502053674,
      "learning_rate": 2.0207234201906545e-07,
      "logits/chosen": -2.3321692943573,
      "logits/rejected": -2.3237671852111816,
      "logps/chosen": -199.48365783691406,
      "logps/rejected": -294.98834228515625,
      "loss": 0.4647,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -0.5382081866264343,
      "rewards/margins": 0.7745588421821594,
      "rewards/rejected": -1.3127670288085938,
      "step": 115
    },
    {
      "epoch": 1.25,
      "grad_norm": 16.693818527228895,
      "learning_rate": 1.7976720005660767e-07,
      "logits/chosen": -2.3294899463653564,
      "logits/rejected": -2.3380885124206543,
      "logps/chosen": -236.26138305664062,
      "logps/rejected": -317.8197326660156,
      "loss": 0.4734,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -0.5919082164764404,
      "rewards/margins": 0.8398138284683228,
      "rewards/rejected": -1.4317219257354736,
      "step": 120
    },
    {
      "epoch": 1.31,
      "grad_norm": 18.248981197195587,
      "learning_rate": 1.5805427678152674e-07,
      "logits/chosen": -2.3658409118652344,
      "logits/rejected": -2.3801846504211426,
      "logps/chosen": -252.34451293945312,
      "logps/rejected": -347.273193359375,
      "loss": 0.434,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": -0.6862586736679077,
      "rewards/margins": 0.8617037534713745,
      "rewards/rejected": -1.5479624271392822,
      "step": 125
    },
    {
      "epoch": 1.36,
      "grad_norm": 18.599023816787692,
      "learning_rate": 1.371166604222777e-07,
      "logits/chosen": -2.3468356132507324,
      "logits/rejected": -2.3559298515319824,
      "logps/chosen": -222.77267456054688,
      "logps/rejected": -324.4059143066406,
      "loss": 0.4462,
      "rewards/accuracies": 0.831250011920929,
      "rewards/chosen": -0.609306275844574,
      "rewards/margins": 0.9141504168510437,
      "rewards/rejected": -1.5234566926956177,
      "step": 130
    },
    {
      "epoch": 1.41,
      "grad_norm": 18.279705611333885,
      "learning_rate": 1.1713090164588606e-07,
      "logits/chosen": -2.3474090099334717,
      "logits/rejected": -2.3513846397399902,
      "logps/chosen": -237.4060516357422,
      "logps/rejected": -349.4905700683594,
      "loss": 0.4436,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.6407004594802856,
      "rewards/margins": 0.92046058177948,
      "rewards/rejected": -1.5611611604690552,
      "step": 135
    },
    {
      "epoch": 1.46,
      "grad_norm": 21.064741442339784,
      "learning_rate": 9.826552484321085e-08,
      "logits/chosen": -2.2716422080993652,
      "logits/rejected": -2.270905017852783,
      "logps/chosen": -254.72885131835938,
      "logps/rejected": -353.58477783203125,
      "loss": 0.4319,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.7879461050033569,
      "rewards/margins": 1.0126745700836182,
      "rewards/rejected": -1.800620675086975,
      "step": 140
    },
    {
      "epoch": 1.51,
      "grad_norm": 18.966914430346105,
      "learning_rate": 8.067960709356478e-08,
      "logits/chosen": -2.1971030235290527,
      "logits/rejected": -2.2067959308624268,
      "logps/chosen": -210.01760864257812,
      "logps/rejected": -323.2452697753906,
      "loss": 0.422,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.6746965646743774,
      "rewards/margins": 1.0784823894500732,
      "rewards/rejected": -1.7531789541244507,
      "step": 145
    },
    {
      "epoch": 1.57,
      "grad_norm": 19.251567468071876,
      "learning_rate": 6.452143679117964e-08,
      "logits/chosen": -2.269256114959717,
      "logits/rejected": -2.2733891010284424,
      "logps/chosen": -269.5815124511719,
      "logps/rejected": -360.28729248046875,
      "loss": 0.4286,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -0.8714927434921265,
      "rewards/margins": 0.9906436204910278,
      "rewards/rejected": -1.8621364831924438,
      "step": 150
    },
    {
      "epoch": 1.62,
      "grad_norm": 20.881384471810865,
      "learning_rate": 4.992726324427901e-08,
      "logits/chosen": -2.2466838359832764,
      "logits/rejected": -2.2517459392547607,
      "logps/chosen": -229.53665161132812,
      "logps/rejected": -345.5362243652344,
      "loss": 0.4455,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -0.835593581199646,
      "rewards/margins": 0.9400846362113953,
      "rewards/rejected": -1.775678277015686,
      "step": 155
    },
    {
      "epoch": 1.67,
      "grad_norm": 21.71541477038859,
      "learning_rate": 3.702014779041826e-08,
      "logits/chosen": -2.186835765838623,
      "logits/rejected": -2.1860439777374268,
      "logps/chosen": -272.96112060546875,
      "logps/rejected": -380.77685546875,
      "loss": 0.4318,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -1.01149320602417,
      "rewards/margins": 1.0301023721694946,
      "rewards/rejected": -2.041595697402954,
      "step": 160
    },
    {
      "epoch": 1.72,
      "grad_norm": 23.87035641942232,
      "learning_rate": 2.5908926115744994e-08,
      "logits/chosen": -2.3079283237457275,
      "logits/rejected": -2.320064067840576,
      "logps/chosen": -277.4764099121094,
      "logps/rejected": -361.50311279296875,
      "loss": 0.4383,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -1.0418604612350464,
      "rewards/margins": 0.8757144808769226,
      "rewards/rejected": -1.9175748825073242,
      "step": 165
    },
    {
      "epoch": 1.78,
      "grad_norm": 21.532935828831377,
      "learning_rate": 1.6687290528135722e-08,
      "logits/chosen": -2.2234318256378174,
      "logits/rejected": -2.235042095184326,
      "logps/chosen": -243.2294158935547,
      "logps/rejected": -327.7035217285156,
      "loss": 0.433,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -0.8599830865859985,
      "rewards/margins": 0.809087872505188,
      "rewards/rejected": -1.669070839881897,
      "step": 170
    },
    {
      "epoch": 1.83,
      "grad_norm": 22.322246598167897,
      "learning_rate": 9.432999922687396e-09,
      "logits/chosen": -2.2345292568206787,
      "logits/rejected": -2.2446045875549316,
      "logps/chosen": -282.7356872558594,
      "logps/rejected": -373.1545104980469,
      "loss": 0.4213,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": -0.9868669509887695,
      "rewards/margins": 1.0404006242752075,
      "rewards/rejected": -2.0272676944732666,
      "step": 175
    },
    {
      "epoch": 1.88,
      "grad_norm": 19.226686156526466,
      "learning_rate": 4.207224101311246e-09,
      "logits/chosen": -2.2762608528137207,
      "logits/rejected": -2.290148973464966,
      "logps/chosen": -289.68035888671875,
      "logps/rejected": -380.23883056640625,
      "loss": 0.4364,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -1.0276674032211304,
      "rewards/margins": 0.9330458641052246,
      "rewards/rejected": -1.9607131481170654,
      "step": 180
    },
    {
      "epoch": 1.93,
      "grad_norm": 21.532509699275593,
      "learning_rate": 1.0540279752731252e-09,
      "logits/chosen": -2.232375383377075,
      "logits/rejected": -2.242682456970215,
      "logps/chosen": -278.67156982421875,
      "logps/rejected": -388.5971374511719,
      "loss": 0.4303,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -1.0453999042510986,
      "rewards/margins": 0.9826061129570007,
      "rewards/rejected": -2.028006076812744,
      "step": 185
    },
    {
      "epoch": 1.98,
      "grad_norm": 20.268430701124647,
      "learning_rate": 0.0,
      "logits/chosen": -2.247197151184082,
      "logits/rejected": -2.25414776802063,
      "logps/chosen": -272.12359619140625,
      "logps/rejected": -399.1447448730469,
      "loss": 0.4268,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -1.060980200767517,
      "rewards/margins": 1.1344108581542969,
      "rewards/rejected": -2.1953911781311035,
      "step": 190
    },
    {
      "epoch": 1.98,
      "step": 190,
      "total_flos": 0.0,
      "train_loss": 0.5396593997353002,
      "train_runtime": 4845.5764,
      "train_samples_per_second": 5.047,
      "train_steps_per_second": 0.039
    }
  ],
  "logging_steps": 5,
  "max_steps": 190,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}