DPOAll-zephyr-7b-sft-full / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 1274,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0007849293563579278,
"grad_norm": 12.183874888308356,
"learning_rate": 3.90625e-09,
"logits/chosen": 5914.52099609375,
"logits/rejected": 2785.021484375,
"logps/chosen": -212.45889282226562,
"logps/rejected": -98.59669494628906,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.007849293563579277,
"grad_norm": 12.022940194772952,
"learning_rate": 3.9062499999999997e-08,
"logits/chosen": 4973.8388671875,
"logits/rejected": 4328.1435546875,
"logps/chosen": -204.2343292236328,
"logps/rejected": -179.7129669189453,
"loss": 0.693,
"rewards/accuracies": 0.4907407760620117,
"rewards/chosen": 0.0495406910777092,
"rewards/margins": 0.05689071863889694,
"rewards/rejected": -0.0073500219732522964,
"step": 10
},
{
"epoch": 0.015698587127158554,
"grad_norm": 12.39145475940788,
"learning_rate": 7.812499999999999e-08,
"logits/chosen": 6084.19921875,
"logits/rejected": 4834.1728515625,
"logps/chosen": -217.16592407226562,
"logps/rejected": -196.7585906982422,
"loss": 0.6932,
"rewards/accuracies": 0.5583332777023315,
"rewards/chosen": 0.04528522491455078,
"rewards/margins": 0.09032775461673737,
"rewards/rejected": -0.045042525976896286,
"step": 20
},
{
"epoch": 0.023547880690737835,
"grad_norm": 10.95606444265039,
"learning_rate": 1.1718749999999999e-07,
"logits/chosen": 6084.501953125,
"logits/rejected": 5104.7861328125,
"logps/chosen": -250.5545654296875,
"logps/rejected": -209.32327270507812,
"loss": 0.6928,
"rewards/accuracies": 0.5416666269302368,
"rewards/chosen": 0.017780104652047157,
"rewards/margins": 0.043447066098451614,
"rewards/rejected": -0.025666963309049606,
"step": 30
},
{
"epoch": 0.03139717425431711,
"grad_norm": 11.394494245228758,
"learning_rate": 1.5624999999999999e-07,
"logits/chosen": 5311.82470703125,
"logits/rejected": 4346.9248046875,
"logps/chosen": -211.95864868164062,
"logps/rejected": -181.7149200439453,
"loss": 0.6926,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": 0.1206112951040268,
"rewards/margins": 0.14849784970283508,
"rewards/rejected": -0.02788655459880829,
"step": 40
},
{
"epoch": 0.03924646781789639,
"grad_norm": 11.564571430671343,
"learning_rate": 1.9531249999999998e-07,
"logits/chosen": 6424.92578125,
"logits/rejected": 5042.4130859375,
"logps/chosen": -265.3628234863281,
"logps/rejected": -206.7937774658203,
"loss": 0.6917,
"rewards/accuracies": 0.6416667103767395,
"rewards/chosen": 0.26378005743026733,
"rewards/margins": 0.35793131589889526,
"rewards/rejected": -0.09415128082036972,
"step": 50
},
{
"epoch": 0.04709576138147567,
"grad_norm": 11.102599189445138,
"learning_rate": 2.3437499999999998e-07,
"logits/chosen": 5484.16796875,
"logits/rejected": 4559.7890625,
"logps/chosen": -213.72671508789062,
"logps/rejected": -209.1766357421875,
"loss": 0.6904,
"rewards/accuracies": 0.6916667222976685,
"rewards/chosen": 0.2772645354270935,
"rewards/margins": 0.6537687182426453,
"rewards/rejected": -0.37650415301322937,
"step": 60
},
{
"epoch": 0.054945054945054944,
"grad_norm": 10.661717942311114,
"learning_rate": 2.734375e-07,
"logits/chosen": 5194.5205078125,
"logits/rejected": 4918.86962890625,
"logps/chosen": -178.36367797851562,
"logps/rejected": -177.41653442382812,
"loss": 0.6891,
"rewards/accuracies": 0.7000000476837158,
"rewards/chosen": 0.11722215265035629,
"rewards/margins": 0.8020639419555664,
"rewards/rejected": -0.6848418712615967,
"step": 70
},
{
"epoch": 0.06279434850863422,
"grad_norm": 11.38693784182316,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": 5775.18603515625,
"logits/rejected": 5269.86474609375,
"logps/chosen": -196.8929443359375,
"logps/rejected": -183.09988403320312,
"loss": 0.6844,
"rewards/accuracies": 0.6416666507720947,
"rewards/chosen": -0.3243609666824341,
"rewards/margins": 1.1850136518478394,
"rewards/rejected": -1.5093746185302734,
"step": 80
},
{
"epoch": 0.0706436420722135,
"grad_norm": 13.081522033704324,
"learning_rate": 3.5156249999999997e-07,
"logits/chosen": 6037.05810546875,
"logits/rejected": 5177.779296875,
"logps/chosen": -220.82168579101562,
"logps/rejected": -191.5562286376953,
"loss": 0.6753,
"rewards/accuracies": 0.75,
"rewards/chosen": -1.939489722251892,
"rewards/margins": 5.1729536056518555,
"rewards/rejected": -7.112442970275879,
"step": 90
},
{
"epoch": 0.07849293563579278,
"grad_norm": 14.213599415550812,
"learning_rate": 3.9062499999999997e-07,
"logits/chosen": 5946.76953125,
"logits/rejected": 5723.97802734375,
"logps/chosen": -217.86697387695312,
"logps/rejected": -213.96273803710938,
"loss": 0.6721,
"rewards/accuracies": 0.6583333611488342,
"rewards/chosen": -7.636292457580566,
"rewards/margins": 6.884747505187988,
"rewards/rejected": -14.521039962768555,
"step": 100
},
{
"epoch": 0.08634222919937205,
"grad_norm": 13.71113666230255,
"learning_rate": 4.2968749999999996e-07,
"logits/chosen": 6407.90087890625,
"logits/rejected": 5226.25634765625,
"logps/chosen": -202.5398712158203,
"logps/rejected": -207.47341918945312,
"loss": 0.6571,
"rewards/accuracies": 0.6416667103767395,
"rewards/chosen": -19.424999237060547,
"rewards/margins": 9.056200981140137,
"rewards/rejected": -28.481201171875,
"step": 110
},
{
"epoch": 0.09419152276295134,
"grad_norm": 20.966364796722694,
"learning_rate": 4.6874999999999996e-07,
"logits/chosen": 6327.4384765625,
"logits/rejected": 5252.529296875,
"logps/chosen": -235.3113250732422,
"logps/rejected": -244.7873077392578,
"loss": 0.641,
"rewards/accuracies": 0.625,
"rewards/chosen": -32.47461700439453,
"rewards/margins": 14.147608757019043,
"rewards/rejected": -46.622215270996094,
"step": 120
},
{
"epoch": 0.10204081632653061,
"grad_norm": 17.429213091281362,
"learning_rate": 4.999962424962166e-07,
"logits/chosen": 6377.04150390625,
"logits/rejected": 5866.3994140625,
"logps/chosen": -255.5737762451172,
"logps/rejected": -267.1661682128906,
"loss": 0.6219,
"rewards/accuracies": 0.6833333969116211,
"rewards/chosen": -45.765281677246094,
"rewards/margins": 23.75636100769043,
"rewards/rejected": -69.52164459228516,
"step": 130
},
{
"epoch": 0.10989010989010989,
"grad_norm": 22.07418599729826,
"learning_rate": 4.998647417232375e-07,
"logits/chosen": 6324.4208984375,
"logits/rejected": 5559.1845703125,
"logps/chosen": -260.98162841796875,
"logps/rejected": -275.3335266113281,
"loss": 0.6176,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": -75.31417846679688,
"rewards/margins": 22.427417755126953,
"rewards/rejected": -97.74159240722656,
"step": 140
},
{
"epoch": 0.11773940345368916,
"grad_norm": 24.92403786363537,
"learning_rate": 4.995454786965036e-07,
"logits/chosen": 6534.57666015625,
"logits/rejected": 5390.4990234375,
"logps/chosen": -264.1880798339844,
"logps/rejected": -261.91217041015625,
"loss": 0.6065,
"rewards/accuracies": 0.6916667222976685,
"rewards/chosen": -62.50370407104492,
"rewards/margins": 27.11073875427246,
"rewards/rejected": -89.61444854736328,
"step": 150
},
{
"epoch": 0.12558869701726844,
"grad_norm": 19.213920590891107,
"learning_rate": 4.990386933279972e-07,
"logits/chosen": 6421.54638671875,
"logits/rejected": 5690.19921875,
"logps/chosen": -256.95452880859375,
"logps/rejected": -288.18963623046875,
"loss": 0.5998,
"rewards/accuracies": 0.6750000715255737,
"rewards/chosen": -59.42634963989258,
"rewards/margins": 31.705699920654297,
"rewards/rejected": -91.1320571899414,
"step": 160
},
{
"epoch": 0.13343799058084774,
"grad_norm": 22.56435429980554,
"learning_rate": 4.983447664444096e-07,
"logits/chosen": 6637.67333984375,
"logits/rejected": 5861.8740234375,
"logps/chosen": -295.04107666015625,
"logps/rejected": -311.28521728515625,
"loss": 0.5938,
"rewards/accuracies": 0.6666666865348816,
"rewards/chosen": -85.42039489746094,
"rewards/margins": 29.543254852294922,
"rewards/rejected": -114.96366119384766,
"step": 170
},
{
"epoch": 0.141287284144427,
"grad_norm": 20.82287391215133,
"learning_rate": 4.97464219500968e-07,
"logits/chosen": 5749.5283203125,
"logits/rejected": 4951.72900390625,
"logps/chosen": -271.42596435546875,
"logps/rejected": -299.2519836425781,
"loss": 0.581,
"rewards/accuracies": 0.6666666865348816,
"rewards/chosen": -82.89817810058594,
"rewards/margins": 38.19253158569336,
"rewards/rejected": -121.09071350097656,
"step": 180
},
{
"epoch": 0.14913657770800628,
"grad_norm": 23.06666423712517,
"learning_rate": 4.963977141895843e-07,
"logits/chosen": 5846.3857421875,
"logits/rejected": 4935.0126953125,
"logps/chosen": -281.0151062011719,
"logps/rejected": -333.6980895996094,
"loss": 0.5513,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -77.25616455078125,
"rewards/margins": 65.20240783691406,
"rewards/rejected": -142.4585723876953,
"step": 190
},
{
"epoch": 0.15698587127158556,
"grad_norm": 31.029129327011113,
"learning_rate": 4.951460519416227e-07,
"logits/chosen": 5727.984375,
"logits/rejected": 5259.2265625,
"logps/chosen": -275.1678771972656,
"logps/rejected": -342.01458740234375,
"loss": 0.5495,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -92.62359619140625,
"rewards/margins": 52.71996307373047,
"rewards/rejected": -145.3435516357422,
"step": 200
},
{
"epoch": 0.16483516483516483,
"grad_norm": 37.88663322771198,
"learning_rate": 4.937101733256606e-07,
"logits/chosen": 5231.01171875,
"logits/rejected": 4560.6396484375,
"logps/chosen": -254.2691650390625,
"logps/rejected": -308.4845886230469,
"loss": 0.5518,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -100.35588073730469,
"rewards/margins": 50.94775390625,
"rewards/rejected": -151.30361938476562,
"step": 210
},
{
"epoch": 0.1726844583987441,
"grad_norm": 42.01369994977443,
"learning_rate": 4.920911573406924e-07,
"logits/chosen": 6420.0537109375,
"logits/rejected": 5376.4765625,
"logps/chosen": -278.9444274902344,
"logps/rejected": -297.1525573730469,
"loss": 0.5599,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -82.60784149169922,
"rewards/margins": 50.547996520996094,
"rewards/rejected": -133.1558380126953,
"step": 220
},
{
"epoch": 0.18053375196232338,
"grad_norm": 25.844454826758824,
"learning_rate": 4.902902206053098e-07,
"logits/chosen": 5699.75341796875,
"logits/rejected": 5102.96484375,
"logps/chosen": -319.3672790527344,
"logps/rejected": -360.23077392578125,
"loss": 0.5723,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": -132.02853393554688,
"rewards/margins": 48.434696197509766,
"rewards/rejected": -180.46324157714844,
"step": 230
},
{
"epoch": 0.18838304552590268,
"grad_norm": 29.282218573607665,
"learning_rate": 4.883087164434672e-07,
"logits/chosen": 5106.5556640625,
"logits/rejected": 3994.092529296875,
"logps/chosen": -278.4791259765625,
"logps/rejected": -328.39215087890625,
"loss": 0.533,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -110.85009765625,
"rewards/margins": 62.85260009765625,
"rewards/rejected": -173.7026824951172,
"step": 240
},
{
"epoch": 0.19623233908948196,
"grad_norm": 37.575717724945754,
"learning_rate": 4.861481338675183e-07,
"logits/chosen": 6079.2392578125,
"logits/rejected": 5340.40478515625,
"logps/chosen": -309.59576416015625,
"logps/rejected": -396.6541748046875,
"loss": 0.5644,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -142.45187377929688,
"rewards/margins": 69.69562530517578,
"rewards/rejected": -212.1475067138672,
"step": 250
},
{
"epoch": 0.20408163265306123,
"grad_norm": 44.33942565965852,
"learning_rate": 4.838100964592904e-07,
"logits/chosen": 6390.5166015625,
"logits/rejected": 5076.86767578125,
"logps/chosen": -308.7320861816406,
"logps/rejected": -331.1741027832031,
"loss": 0.5705,
"rewards/accuracies": 0.73333340883255,
"rewards/chosen": -109.01362609863281,
"rewards/margins": 55.936485290527344,
"rewards/rejected": -164.95010375976562,
"step": 260
},
{
"epoch": 0.2119309262166405,
"grad_norm": 29.97430714289517,
"learning_rate": 4.812963611500339e-07,
"logits/chosen": 6214.26171875,
"logits/rejected": 5946.75341796875,
"logps/chosen": -339.47601318359375,
"logps/rejected": -396.38861083984375,
"loss": 0.5107,
"rewards/accuracies": 0.7166666388511658,
"rewards/chosen": -142.72024536132812,
"rewards/margins": 65.039306640625,
"rewards/rejected": -207.759521484375,
"step": 270
},
{
"epoch": 0.21978021978021978,
"grad_norm": 47.08219382703758,
"learning_rate": 4.786088169001671e-07,
"logits/chosen": 5262.19091796875,
"logits/rejected": 4504.5078125,
"logps/chosen": -293.85137939453125,
"logps/rejected": -405.4297180175781,
"loss": 0.5107,
"rewards/accuracies": 0.7750000357627869,
"rewards/chosen": -130.70046997070312,
"rewards/margins": 105.42779541015625,
"rewards/rejected": -236.12826538085938,
"step": 280
},
{
"epoch": 0.22762951334379905,
"grad_norm": 39.25768711746996,
"learning_rate": 4.7574948327980567e-07,
"logits/chosen": 7399.24462890625,
"logits/rejected": 5310.9453125,
"logps/chosen": -376.98614501953125,
"logps/rejected": -440.69683837890625,
"loss": 0.5184,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -139.95767211914062,
"rewards/margins": 118.2234115600586,
"rewards/rejected": -258.18109130859375,
"step": 290
},
{
"epoch": 0.23547880690737832,
"grad_norm": 32.01551957274139,
"learning_rate": 4.727205089511466e-07,
"logits/chosen": 5341.7978515625,
"logits/rejected": 5239.494140625,
"logps/chosen": -307.5780334472656,
"logps/rejected": -380.02752685546875,
"loss": 0.5285,
"rewards/accuracies": 0.7166667580604553,
"rewards/chosen": -140.1055908203125,
"rewards/margins": 69.73551940917969,
"rewards/rejected": -209.84109497070312,
"step": 300
},
{
"epoch": 0.24332810047095763,
"grad_norm": 36.5311548558338,
"learning_rate": 4.6952417005384247e-07,
"logits/chosen": 5977.44677734375,
"logits/rejected": 5285.1455078125,
"logps/chosen": -347.93133544921875,
"logps/rejected": -407.2039794921875,
"loss": 0.5447,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -170.35064697265625,
"rewards/margins": 61.38502883911133,
"rewards/rejected": -231.7356719970703,
"step": 310
},
{
"epoch": 0.25117739403453687,
"grad_norm": 24.836157563558828,
"learning_rate": 4.661628684945851e-07,
"logits/chosen": 5973.8076171875,
"logits/rejected": 5104.85986328125,
"logps/chosen": -428.04632568359375,
"logps/rejected": -524.1506958007812,
"loss": 0.538,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -226.9893035888672,
"rewards/margins": 95.14351654052734,
"rewards/rejected": -322.1328125,
"step": 320
},
{
"epoch": 0.25902668759811615,
"grad_norm": 34.391189449326916,
"learning_rate": 4.626391301421782e-07,
"logits/chosen": 5795.30419921875,
"logits/rejected": 5254.41357421875,
"logps/chosen": -358.1292724609375,
"logps/rejected": -386.48712158203125,
"loss": 0.5679,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -163.67605590820312,
"rewards/margins": 45.48824691772461,
"rewards/rejected": -209.164306640625,
"step": 330
},
{
"epoch": 0.2668759811616955,
"grad_norm": 34.684911754380394,
"learning_rate": 4.5895560292945996e-07,
"logits/chosen": 6155.42236328125,
"logits/rejected": 6263.1552734375,
"logps/chosen": -323.47308349609375,
"logps/rejected": -405.7562561035156,
"loss": 0.5567,
"rewards/accuracies": 0.6833332777023315,
"rewards/chosen": -129.61480712890625,
"rewards/margins": 55.77390670776367,
"rewards/rejected": -185.38870239257812,
"step": 340
},
{
"epoch": 0.27472527472527475,
"grad_norm": 38.28433057811818,
"learning_rate": 4.5511505486349865e-07,
"logits/chosen": 6514.08740234375,
"logits/rejected": 5829.73828125,
"logps/chosen": -400.02203369140625,
"logps/rejected": -506.1766662597656,
"loss": 0.5125,
"rewards/accuracies": 0.7333332896232605,
"rewards/chosen": -205.19638061523438,
"rewards/margins": 94.15825653076172,
"rewards/rejected": -299.3546142578125,
"step": 350
},
{
"epoch": 0.282574568288854,
"grad_norm": 39.15174513036286,
"learning_rate": 4.5112037194555876e-07,
"logits/chosen": 5991.7646484375,
"logits/rejected": 5773.1728515625,
"logps/chosen": -417.256103515625,
"logps/rejected": -550.4620361328125,
"loss": 0.5161,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -239.33267211914062,
"rewards/margins": 111.8475112915039,
"rewards/rejected": -351.1802062988281,
"step": 360
},
{
"epoch": 0.2904238618524333,
"grad_norm": 36.339556524034336,
"learning_rate": 4.4697455600239863e-07,
"logits/chosen": 5439.48193359375,
"logits/rejected": 5042.9970703125,
"logps/chosen": -339.0165100097656,
"logps/rejected": -390.9417724609375,
"loss": 0.5421,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -154.5944366455078,
"rewards/margins": 68.27628326416016,
"rewards/rejected": -222.8707275390625,
"step": 370
},
{
"epoch": 0.29827315541601257,
"grad_norm": 28.743766561280864,
"learning_rate": 4.426807224305315e-07,
"logits/chosen": 6567.15087890625,
"logits/rejected": 5330.1748046875,
"logps/chosen": -347.8895568847656,
"logps/rejected": -393.98114013671875,
"loss": 0.5169,
"rewards/accuracies": 0.7916666865348816,
"rewards/chosen": -119.54854583740234,
"rewards/margins": 91.42303466796875,
"rewards/rejected": -210.9715576171875,
"step": 380
},
{
"epoch": 0.30612244897959184,
"grad_norm": 44.08545311809245,
"learning_rate": 4.3824209785514326e-07,
"logits/chosen": 6613.3876953125,
"logits/rejected": 4959.18310546875,
"logps/chosen": -347.82330322265625,
"logps/rejected": -415.91400146484375,
"loss": 0.5209,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -132.32980346679688,
"rewards/margins": 103.7311019897461,
"rewards/rejected": -236.06088256835938,
"step": 390
},
{
"epoch": 0.3139717425431711,
"grad_norm": 30.048184996812846,
"learning_rate": 4.3366201770542687e-07,
"logits/chosen": 5602.64990234375,
"logits/rejected": 5410.1162109375,
"logps/chosen": -338.90802001953125,
"logps/rejected": -418.0364685058594,
"loss": 0.5462,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -148.11941528320312,
"rewards/margins": 81.17756652832031,
"rewards/rejected": -229.29696655273438,
"step": 400
},
{
"epoch": 0.3218210361067504,
"grad_norm": 77.7552117350084,
"learning_rate": 4.2894392370815567e-07,
"logits/chosen": 5912.51806640625,
"logits/rejected": 5169.0068359375,
"logps/chosen": -460.01947021484375,
"logps/rejected": -579.4528198242188,
"loss": 0.4586,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -247.5325927734375,
"rewards/margins": 123.46797180175781,
"rewards/rejected": -371.0005798339844,
"step": 410
},
{
"epoch": 0.32967032967032966,
"grad_norm": 37.281500617398734,
"learning_rate": 4.2409136130137845e-07,
"logits/chosen": 5630.40283203125,
"logits/rejected": 4971.2314453125,
"logps/chosen": -461.1824645996094,
"logps/rejected": -544.41455078125,
"loss": 0.5151,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -257.15252685546875,
"rewards/margins": 106.0205307006836,
"rewards/rejected": -363.17303466796875,
"step": 420
},
{
"epoch": 0.33751962323390894,
"grad_norm": 34.679660544781555,
"learning_rate": 4.1910797697018017e-07,
"logits/chosen": 5597.75390625,
"logits/rejected": 4594.5517578125,
"logps/chosen": -325.1379699707031,
"logps/rejected": -416.49066162109375,
"loss": 0.5012,
"rewards/accuracies": 0.8083332777023315,
"rewards/chosen": -145.3929901123047,
"rewards/margins": 109.86451721191406,
"rewards/rejected": -255.2574920654297,
"step": 430
},
{
"epoch": 0.3453689167974882,
"grad_norm": 40.341765180642284,
"learning_rate": 4.1399751550651084e-07,
"logits/chosen": 5947.8115234375,
"logits/rejected": 5850.830078125,
"logps/chosen": -339.18011474609375,
"logps/rejected": -431.0130920410156,
"loss": 0.4983,
"rewards/accuracies": 0.73333340883255,
"rewards/chosen": -156.7867889404297,
"rewards/margins": 82.5427017211914,
"rewards/rejected": -239.32949829101562,
"step": 440
},
{
"epoch": 0.3532182103610675,
"grad_norm": 30.929993300128935,
"learning_rate": 4.087638171951401e-07,
"logits/chosen": 6869.20166015625,
"logits/rejected": 4847.29052734375,
"logps/chosen": -387.536865234375,
"logps/rejected": -458.99505615234375,
"loss": 0.4939,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -178.76812744140625,
"rewards/margins": 117.23429870605469,
"rewards/rejected": -296.00244140625,
"step": 450
},
{
"epoch": 0.36106750392464676,
"grad_norm": 29.883628337635034,
"learning_rate": 4.034108149278543e-07,
"logits/chosen": 7064.83984375,
"logits/rejected": 5341.6416015625,
"logps/chosen": -437.234130859375,
"logps/rejected": -484.71478271484375,
"loss": 0.4845,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -186.80186462402344,
"rewards/margins": 107.70491027832031,
"rewards/rejected": -294.50677490234375,
"step": 460
},
{
"epoch": 0.36891679748822603,
"grad_norm": 58.934422959966476,
"learning_rate": 3.979425312480629e-07,
"logits/chosen": 6009.6279296875,
"logits/rejected": 5187.64013671875,
"logps/chosen": -414.41827392578125,
"logps/rejected": -517.0603637695312,
"loss": 0.5218,
"rewards/accuracies": 0.7666667699813843,
"rewards/chosen": -202.15353393554688,
"rewards/margins": 112.3405532836914,
"rewards/rejected": -314.49407958984375,
"step": 470
},
{
"epoch": 0.37676609105180536,
"grad_norm": 31.016219727020395,
"learning_rate": 3.923630753280357e-07,
"logits/chosen": 6505.29052734375,
"logits/rejected": 5481.3720703125,
"logps/chosen": -350.8402099609375,
"logps/rejected": -435.69970703125,
"loss": 0.4858,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -143.44503784179688,
"rewards/margins": 117.8589096069336,
"rewards/rejected": -261.303955078125,
"step": 480
},
{
"epoch": 0.38461538461538464,
"grad_norm": 36.442966867491265,
"learning_rate": 3.866766398810424e-07,
"logits/chosen": 6063.09228515625,
"logits/rejected": 5686.6787109375,
"logps/chosen": -305.1843566894531,
"logps/rejected": -449.0945739746094,
"loss": 0.4552,
"rewards/accuracies": 0.8166667222976685,
"rewards/chosen": -128.57583618164062,
"rewards/margins": 118.49446105957031,
"rewards/rejected": -247.0703125,
"step": 490
},
{
"epoch": 0.3924646781789639,
"grad_norm": 44.1212055718621,
"learning_rate": 3.8088749801071496e-07,
"logits/chosen": 6434.88427734375,
"logits/rejected": 4857.001953125,
"logps/chosen": -451.16497802734375,
"logps/rejected": -550.4376220703125,
"loss": 0.5046,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -229.2113494873047,
"rewards/margins": 118.71003723144531,
"rewards/rejected": -347.9213562011719,
"step": 500
},
{
"epoch": 0.4003139717425432,
"grad_norm": 36.14821373319329,
"learning_rate": 3.75e-07,
"logits/chosen": 5300.0185546875,
"logits/rejected": 4575.1767578125,
"logps/chosen": -336.89031982421875,
"logps/rejected": -440.952880859375,
"loss": 0.4831,
"rewards/accuracies": 0.7916667461395264,
"rewards/chosen": -147.4073486328125,
"rewards/margins": 127.1801986694336,
"rewards/rejected": -274.5875549316406,
"step": 510
},
{
"epoch": 0.40816326530612246,
"grad_norm": 34.73068121704461,
"learning_rate": 3.6901857004211443e-07,
"logits/chosen": 5561.93115234375,
"logits/rejected": 5117.68359375,
"logps/chosen": -353.75140380859375,
"logps/rejected": -444.4190368652344,
"loss": 0.5383,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -155.285400390625,
"rewards/margins": 93.34559631347656,
"rewards/rejected": -248.63101196289062,
"step": 520
},
{
"epoch": 0.41601255886970173,
"grad_norm": 44.19977871917114,
"learning_rate": 3.6294770291596076e-07,
"logits/chosen": 6318.544921875,
"logits/rejected": 5086.34619140625,
"logps/chosen": -359.4892578125,
"logps/rejected": -433.3750915527344,
"loss": 0.4611,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -146.56655883789062,
"rewards/margins": 88.04823303222656,
"rewards/rejected": -234.6147918701172,
"step": 530
},
{
"epoch": 0.423861852433281,
"grad_norm": 70.50902921574644,
"learning_rate": 3.5679196060850034e-07,
"logits/chosen": 5973.2431640625,
"logits/rejected": 5245.2939453125,
"logps/chosen": -409.8734436035156,
"logps/rejected": -506.63018798828125,
"loss": 0.5026,
"rewards/accuracies": 0.7666667103767395,
"rewards/chosen": -199.05563354492188,
"rewards/margins": 119.3356704711914,
"rewards/rejected": -318.39129638671875,
"step": 540
},
{
"epoch": 0.4317111459968603,
"grad_norm": 39.47957029483566,
"learning_rate": 3.505559688866229e-07,
"logits/chosen": 5781.47998046875,
"logits/rejected": 5275.9345703125,
"logps/chosen": -416.6446228027344,
"logps/rejected": -550.93994140625,
"loss": 0.5,
"rewards/accuracies": 0.7166666388511658,
"rewards/chosen": -201.9041290283203,
"rewards/margins": 123.97164154052734,
"rewards/rejected": -325.87579345703125,
"step": 550
},
{
"epoch": 0.43956043956043955,
"grad_norm": 37.37283391011429,
"learning_rate": 3.4424441382108826e-07,
"logits/chosen": 5830.35400390625,
"logits/rejected": 5423.10205078125,
"logps/chosen": -378.4039001464844,
"logps/rejected": -449.105224609375,
"loss": 0.5029,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -175.1286163330078,
"rewards/margins": 80.7674331665039,
"rewards/rejected": -255.8960418701172,
"step": 560
},
{
"epoch": 0.4474097331240188,
"grad_norm": 49.04946952588071,
"learning_rate": 3.378620382651523e-07,
"logits/chosen": 6153.337890625,
"logits/rejected": 5623.0244140625,
"logps/chosen": -418.72265625,
"logps/rejected": -508.38360595703125,
"loss": 0.4578,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -175.76278686523438,
"rewards/margins": 108.85029602050781,
"rewards/rejected": -284.61309814453125,
"step": 570
},
{
"epoch": 0.4552590266875981,
"grad_norm": 94.33653526750568,
"learning_rate": 3.314136382905234e-07,
"logits/chosen": 6112.7822265625,
"logits/rejected": 5336.7802734375,
"logps/chosen": -423.475830078125,
"logps/rejected": -605.2233276367188,
"loss": 0.4958,
"rewards/accuracies": 0.8166667222976685,
"rewards/chosen": -211.06033325195312,
"rewards/margins": 180.85171508789062,
"rewards/rejected": -391.9119873046875,
"step": 580
},
{
"epoch": 0.4631083202511774,
"grad_norm": 46.07179128100825,
"learning_rate": 3.249040595833274e-07,
"logits/chosen": 6558.09912109375,
"logits/rejected": 5384.94921875,
"logps/chosen": -469.80859375,
"logps/rejected": -568.242431640625,
"loss": 0.4856,
"rewards/accuracies": 0.7750000953674316,
"rewards/chosen": -238.0826873779297,
"rewards/margins": 151.30084228515625,
"rewards/rejected": -389.383544921875,
"step": 590
},
{
"epoch": 0.47095761381475665,
"grad_norm": 42.06146787509943,
"learning_rate": 3.1833819380279023e-07,
"logits/chosen": 6262.25048828125,
"logits/rejected": 5278.5048828125,
"logps/chosen": -404.83843994140625,
"logps/rejected": -520.1326904296875,
"loss": 0.4632,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -227.29013061523438,
"rewards/margins": 103.1563491821289,
"rewards/rejected": -330.44647216796875,
"step": 600
},
{
"epoch": 0.478806907378336,
"grad_norm": 31.484334771440416,
"learning_rate": 3.11720974905373e-07,
"logits/chosen": 6018.0908203125,
"logits/rejected": 5167.93212890625,
"logps/chosen": -411.90301513671875,
"logps/rejected": -502.0513610839844,
"loss": 0.4703,
"rewards/accuracies": 0.7916666865348816,
"rewards/chosen": -205.784912109375,
"rewards/margins": 110.84388732910156,
"rewards/rejected": -316.6288146972656,
"step": 610
},
{
"epoch": 0.48665620094191525,
"grad_norm": 40.57964943811314,
"learning_rate": 3.0505737543712275e-07,
"logits/chosen": 5053.2783203125,
"logits/rejected": 4076.49755859375,
"logps/chosen": -406.8768615722656,
"logps/rejected": -493.7144470214844,
"loss": 0.4557,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -223.1182098388672,
"rewards/margins": 109.49735260009766,
"rewards/rejected": -332.6156005859375,
"step": 620
},
{
"epoch": 0.4945054945054945,
"grad_norm": 56.606856997079554,
"learning_rate": 2.9835240279702513e-07,
"logits/chosen": 6577.0849609375,
"logits/rejected": 5469.5419921875,
"logps/chosen": -429.1509704589844,
"logps/rejected": -543.3411865234375,
"loss": 0.4772,
"rewards/accuracies": 0.7916666865348816,
"rewards/chosen": -186.3350067138672,
"rewards/margins": 162.75811767578125,
"rewards/rejected": -349.0931091308594,
"step": 630
},
{
"epoch": 0.5023547880690737,
"grad_norm": 35.9805606899344,
"learning_rate": 2.9161109547416667e-07,
"logits/chosen": 6271.7060546875,
"logits/rejected": 5318.08642578125,
"logps/chosen": -397.3316955566406,
"logps/rejected": -479.34515380859375,
"loss": 0.4884,
"rewards/accuracies": 0.6833333373069763,
"rewards/chosen": -186.57015991210938,
"rewards/margins": 78.26607513427734,
"rewards/rejected": -264.83624267578125,
"step": 640
},
{
"epoch": 0.5102040816326531,
"grad_norm": 34.871832333344074,
"learning_rate": 2.848385192615339e-07,
"logits/chosen": 5367.185546875,
"logits/rejected": 4309.45849609375,
"logps/chosen": -358.45416259765625,
"logps/rejected": -419.9345703125,
"loss": 0.4945,
"rewards/accuracies": 0.7666667699813843,
"rewards/chosen": -162.11483764648438,
"rewards/margins": 89.09200286865234,
"rewards/rejected": -251.20681762695312,
"step": 650
},
{
"epoch": 0.5180533751962323,
"grad_norm": 39.39801396782938,
"learning_rate": 2.780397634492949e-07,
"logits/chosen": 6041.53271484375,
"logits/rejected": 4703.71630859375,
"logps/chosen": -375.4960632324219,
"logps/rejected": -476.72589111328125,
"loss": 0.4947,
"rewards/accuracies": 0.8333333730697632,
"rewards/chosen": -158.10952758789062,
"rewards/margins": 130.66891479492188,
"rewards/rejected": -288.7784118652344,
"step": 660
},
{
"epoch": 0.5259026687598116,
"grad_norm": 48.84168885678772,
"learning_rate": 2.71219937000424e-07,
"logits/chosen": 6018.71875,
"logits/rejected": 4880.6767578125,
"logps/chosen": -379.6583557128906,
"logps/rejected": -462.1537170410156,
"loss": 0.49,
"rewards/accuracies": 0.7666667103767395,
"rewards/chosen": -172.89889526367188,
"rewards/margins": 100.40042114257812,
"rewards/rejected": -273.29931640625,
"step": 670
},
{
"epoch": 0.533751962323391,
"grad_norm": 39.27542708497692,
"learning_rate": 2.6438416471154273e-07,
"logits/chosen": 5836.3828125,
"logits/rejected": 4801.5009765625,
"logps/chosen": -418.82611083984375,
"logps/rejected": -494.9725646972656,
"loss": 0.4909,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -205.16482543945312,
"rewards/margins": 110.58109283447266,
"rewards/rejected": -315.74591064453125,
"step": 680
},
{
"epoch": 0.5416012558869702,
"grad_norm": 43.52413484751011,
"learning_rate": 2.5753758336186326e-07,
"logits/chosen": 5802.99755859375,
"logits/rejected": 5239.9482421875,
"logps/chosen": -400.54742431640625,
"logps/rejected": -530.6510620117188,
"loss": 0.4365,
"rewards/accuracies": 0.7750000357627869,
"rewards/chosen": -191.44097900390625,
"rewards/margins": 123.01344299316406,
"rewards/rejected": -314.45440673828125,
"step": 690
},
{
"epoch": 0.5494505494505495,
"grad_norm": 33.98315744252712,
"learning_rate": 2.5068533785312666e-07,
"logits/chosen": 5513.54345703125,
"logits/rejected": 5237.0107421875,
"logps/chosen": -386.1233825683594,
"logps/rejected": -490.634765625,
"loss": 0.5316,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -196.86312866210938,
"rewards/margins": 106.7976303100586,
"rewards/rejected": -303.6607666015625,
"step": 700
},
{
"epoch": 0.5572998430141287,
"grad_norm": 30.243640938966188,
"learning_rate": 2.4383257734343794e-07,
"logits/chosen": 5436.87890625,
"logits/rejected": 5391.3916015625,
"logps/chosen": -381.4007263183594,
"logps/rejected": -502.94976806640625,
"loss": 0.4477,
"rewards/accuracies": 0.7416667342185974,
"rewards/chosen": -187.48939514160156,
"rewards/margins": 112.8396987915039,
"rewards/rejected": -300.3290710449219,
"step": 710
},
{
"epoch": 0.565149136577708,
"grad_norm": 46.01339742325359,
"learning_rate": 2.3698445137790258e-07,
"logits/chosen": 5853.4052734375,
"logits/rejected": 4926.12451171875,
"logps/chosen": -404.26129150390625,
"logps/rejected": -511.4066467285156,
"loss": 0.4704,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -185.25808715820312,
"rewards/margins": 126.38338470458984,
"rewards/rejected": -311.6414489746094,
"step": 720
},
{
"epoch": 0.5729984301412873,
"grad_norm": 40.29946581755717,
"learning_rate": 2.3014610601897157e-07,
"logits/chosen": 6333.9560546875,
"logits/rejected": 4724.59375,
"logps/chosen": -436.6024475097656,
"logps/rejected": -527.5242309570312,
"loss": 0.4807,
"rewards/accuracies": 0.8166667819023132,
"rewards/chosen": -211.45657348632812,
"rewards/margins": 140.99398803710938,
"rewards/rejected": -352.4505310058594,
"step": 730
},
{
"epoch": 0.5808477237048666,
"grad_norm": 35.09618871465884,
"learning_rate": 2.2332267997940513e-07,
"logits/chosen": 5268.6943359375,
"logits/rejected": 4408.31884765625,
"logps/chosen": -383.2336120605469,
"logps/rejected": -467.2831115722656,
"loss": 0.4572,
"rewards/accuracies": 0.75,
"rewards/chosen": -190.80917358398438,
"rewards/margins": 113.82789611816406,
"rewards/rejected": -304.6370544433594,
"step": 740
},
{
"epoch": 0.5886970172684458,
"grad_norm": 36.837604192303694,
"learning_rate": 2.1651930076075723e-07,
"logits/chosen": 5756.14599609375,
"logits/rejected": 5088.453125,
"logps/chosen": -394.97528076171875,
"logps/rejected": -478.40118408203125,
"loss": 0.5016,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -214.54934692382812,
"rewards/margins": 96.75755310058594,
"rewards/rejected": -311.3069152832031,
"step": 750
},
{
"epoch": 0.5965463108320251,
"grad_norm": 39.413179562791,
"learning_rate": 2.0974108080028692e-07,
"logits/chosen": 6044.30517578125,
"logits/rejected": 4663.25537109375,
"logps/chosen": -399.56536865234375,
"logps/rejected": -481.95721435546875,
"loss": 0.4806,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -196.31442260742188,
"rewards/margins": 111.11234283447266,
"rewards/rejected": -307.4267578125,
"step": 760
},
{
"epoch": 0.6043956043956044,
"grad_norm": 40.397104252103794,
"learning_rate": 2.0299311362918773e-07,
"logits/chosen": 6256.640625,
"logits/rejected": 5291.82177734375,
"logps/chosen": -435.7190856933594,
"logps/rejected": -538.2561645507812,
"loss": 0.5111,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -205.9853973388672,
"rewards/margins": 108.4106216430664,
"rewards/rejected": -314.3960266113281,
"step": 770
},
{
"epoch": 0.6122448979591837,
"grad_norm": 37.45559651112722,
"learning_rate": 1.962804700450265e-07,
"logits/chosen": 6063.8310546875,
"logits/rejected": 5724.30517578125,
"logps/chosen": -429.6410217285156,
"logps/rejected": -548.0647583007812,
"loss": 0.4863,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -213.5356903076172,
"rewards/margins": 97.14134216308594,
"rewards/rejected": -310.6770324707031,
"step": 780
},
{
"epoch": 0.6200941915227629,
"grad_norm": 52.85538657955723,
"learning_rate": 1.8960819430126334e-07,
"logits/chosen": 5658.54541015625,
"logits/rejected": 4938.47412109375,
"logps/chosen": -424.3460998535156,
"logps/rejected": -543.2542114257812,
"loss": 0.4614,
"rewards/accuracies": 0.8000000715255737,
"rewards/chosen": -226.21511840820312,
"rewards/margins": 128.44137573242188,
"rewards/rejected": -354.6564636230469,
"step": 790
},
{
"epoch": 0.6279434850863422,
"grad_norm": 50.74254631794456,
"learning_rate": 1.8298130031671972e-07,
"logits/chosen": 5633.2216796875,
"logits/rejected": 4882.5830078125,
"logps/chosen": -427.47760009765625,
"logps/rejected": -522.7330322265625,
"loss": 0.495,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -210.75479125976562,
"rewards/margins": 99.28005981445312,
"rewards/rejected": -310.03485107421875,
"step": 800
},
{
"epoch": 0.6357927786499215,
"grad_norm": 45.059226269516955,
"learning_rate": 1.7640476790784075e-07,
"logits/chosen": 5210.11083984375,
"logits/rejected": 4571.79443359375,
"logps/chosen": -389.7716979980469,
"logps/rejected": -539.739013671875,
"loss": 0.4823,
"rewards/accuracies": 0.7750000357627869,
"rewards/chosen": -187.3163299560547,
"rewards/margins": 131.12542724609375,
"rewards/rejected": -318.4417724609375,
"step": 810
},
{
"epoch": 0.6436420722135008,
"grad_norm": 48.50628289552426,
"learning_rate": 1.6988353904658492e-07,
"logits/chosen": 5731.3525390625,
"logits/rejected": 4333.7529296875,
"logps/chosen": -404.9086608886719,
"logps/rejected": -458.85113525390625,
"loss": 0.4789,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -183.69412231445312,
"rewards/margins": 106.46870422363281,
"rewards/rejected": -290.16278076171875,
"step": 820
},
{
"epoch": 0.6514913657770801,
"grad_norm": 59.957195459974976,
"learning_rate": 1.634225141467513e-07,
"logits/chosen": 5635.3466796875,
"logits/rejected": 4940.11083984375,
"logps/chosen": -384.6611633300781,
"logps/rejected": -507.66693115234375,
"loss": 0.467,
"rewards/accuracies": 0.7666667699813843,
"rewards/chosen": -180.47512817382812,
"rewards/margins": 134.7369842529297,
"rewards/rejected": -315.21209716796875,
"step": 830
},
{
"epoch": 0.6593406593406593,
"grad_norm": 34.412374941580325,
"learning_rate": 1.570265483815364e-07,
"logits/chosen": 6203.7373046875,
"logits/rejected": 4987.412109375,
"logps/chosen": -412.3932189941406,
"logps/rejected": -513.5062255859375,
"loss": 0.4826,
"rewards/accuracies": 0.75,
"rewards/chosen": -185.06338500976562,
"rewards/margins": 115.64961242675781,
"rewards/rejected": -300.7129821777344,
"step": 840
},
{
"epoch": 0.6671899529042387,
"grad_norm": 30.134253695911486,
"learning_rate": 1.5070044803508691e-07,
"logits/chosen": 5716.025390625,
"logits/rejected": 5034.046875,
"logps/chosen": -400.2481689453125,
"logps/rejected": -516.7728271484375,
"loss": 0.4309,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -184.7707061767578,
"rewards/margins": 131.54672241210938,
"rewards/rejected": -316.31744384765625,
"step": 850
},
{
"epoch": 0.6750392464678179,
"grad_norm": 71.61040491654043,
"learning_rate": 1.444489668907914e-07,
"logits/chosen": 6135.0205078125,
"logits/rejected": 5152.2236328125,
"logps/chosen": -430.4107971191406,
"logps/rejected": -490.52587890625,
"loss": 0.5146,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -182.64016723632812,
"rewards/margins": 105.2038803100586,
"rewards/rejected": -287.8440246582031,
"step": 860
},
{
"epoch": 0.6828885400313972,
"grad_norm": 38.330063763060664,
"learning_rate": 1.3827680265902232e-07,
"logits/chosen": 6143.13037109375,
"logits/rejected": 5013.3056640625,
"logps/chosen": -382.94140625,
"logps/rejected": -460.19561767578125,
"loss": 0.4819,
"rewards/accuracies": 0.75,
"rewards/chosen": -152.84469604492188,
"rewards/margins": 106.6620101928711,
"rewards/rejected": -259.5067138671875,
"step": 870
},
{
"epoch": 0.6907378335949764,
"grad_norm": 41.567998903722,
"learning_rate": 1.3218859344701632e-07,
"logits/chosen": 5396.58203125,
"logits/rejected": 5105.16162109375,
"logps/chosen": -355.2163391113281,
"logps/rejected": -475.17144775390625,
"loss": 0.4895,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -144.70535278320312,
"rewards/margins": 98.8769302368164,
"rewards/rejected": -243.58230590820312,
"step": 880
},
{
"epoch": 0.6985871271585558,
"grad_norm": 39.34000807890869,
"learning_rate": 1.2618891427354172e-07,
"logits/chosen": 6391.68212890625,
"logits/rejected": 5117.5263671875,
"logps/chosen": -394.2801818847656,
"logps/rejected": -469.7091369628906,
"loss": 0.5119,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -136.68411254882812,
"rewards/margins": 121.24452209472656,
"rewards/rejected": -257.92864990234375,
"step": 890
},
{
"epoch": 0.706436420722135,
"grad_norm": 36.48251928336004,
"learning_rate": 1.202822736309758e-07,
"logits/chosen": 5414.578125,
"logits/rejected": 4911.3544921875,
"logps/chosen": -353.97259521484375,
"logps/rejected": -468.9820861816406,
"loss": 0.4823,
"rewards/accuracies": 0.7166666984558105,
"rewards/chosen": -149.62339782714844,
"rewards/margins": 108.9285659790039,
"rewards/rejected": -258.55194091796875,
"step": 900
},
{
"epoch": 0.7142857142857143,
"grad_norm": 46.978740435502594,
"learning_rate": 1.1447311009737299e-07,
"logits/chosen": 5314.3134765625,
"logits/rejected": 4948.3134765625,
"logps/chosen": -380.0970153808594,
"logps/rejected": -501.011474609375,
"loss": 0.4985,
"rewards/accuracies": 0.8083333969116211,
"rewards/chosen": -171.8706817626953,
"rewards/margins": 121.21226501464844,
"rewards/rejected": -293.08294677734375,
"step": 910
},
{
"epoch": 0.7221350078492935,
"grad_norm": 35.760451689940645,
"learning_rate": 1.0876578900107053e-07,
"logits/chosen": 5861.857421875,
"logits/rejected": 4804.310546875,
"logps/chosen": -409.1855773925781,
"logps/rejected": -478.6873474121094,
"loss": 0.4748,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -176.7639617919922,
"rewards/margins": 103.78104400634766,
"rewards/rejected": -280.5450134277344,
"step": 920
},
{
"epoch": 0.7299843014128728,
"grad_norm": 29.863070324852274,
"learning_rate": 1.0316459914033793e-07,
"logits/chosen": 5825.48583984375,
"logits/rejected": 4247.2919921875,
"logps/chosen": -410.54364013671875,
"logps/rejected": -485.8194885253906,
"loss": 0.4602,
"rewards/accuracies": 0.8083333969116211,
"rewards/chosen": -174.55471801757812,
"rewards/margins": 125.3405990600586,
"rewards/rejected": -299.8953552246094,
"step": 930
},
{
"epoch": 0.7378335949764521,
"grad_norm": 36.61595465244159,
"learning_rate": 9.767374956053584e-08,
"logits/chosen": 5624.4267578125,
"logits/rejected": 4830.0751953125,
"logps/chosen": -399.4177551269531,
"logps/rejected": -526.2559814453125,
"loss": 0.4769,
"rewards/accuracies": 0.783333420753479,
"rewards/chosen": -181.1240997314453,
"rewards/margins": 140.15200805664062,
"rewards/rejected": -321.2760925292969,
"step": 940
},
{
"epoch": 0.7456828885400314,
"grad_norm": 35.91269585264287,
"learning_rate": 9.229736639120561e-08,
"logits/chosen": 5792.27490234375,
"logits/rejected": 5271.419921875,
"logps/chosen": -408.3707580566406,
"logps/rejected": -491.8653869628906,
"loss": 0.5214,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -190.569580078125,
"rewards/margins": 87.141357421875,
"rewards/rejected": -277.7109069824219,
"step": 950
},
{
"epoch": 0.7535321821036107,
"grad_norm": 46.49700668088038,
"learning_rate": 8.70394897454659e-08,
"logits/chosen": 5691.83203125,
"logits/rejected": 4979.69677734375,
"logps/chosen": -381.69927978515625,
"logps/rejected": -483.9689025878906,
"loss": 0.4743,
"rewards/accuracies": 0.8166667222976685,
"rewards/chosen": -163.27085876464844,
"rewards/margins": 118.2216796875,
"rewards/rejected": -281.4925537109375,
"step": 960
},
{
"epoch": 0.7613814756671899,
"grad_norm": 44.80668878466769,
"learning_rate": 8.19040706840472e-08,
"logits/chosen": 5775.78515625,
"logits/rejected": 4756.72509765625,
"logps/chosen": -418.36865234375,
"logps/rejected": -506.94171142578125,
"loss": 0.478,
"rewards/accuracies": 0.7166666388511658,
"rewards/chosen": -178.1486358642578,
"rewards/margins": 122.10249328613281,
"rewards/rejected": -300.2511291503906,
"step": 970
},
{
"epoch": 0.7692307692307693,
"grad_norm": 126.24912978264581,
"learning_rate": 7.689496824624525e-08,
"logits/chosen": 5510.98681640625,
"logits/rejected": 4350.1923828125,
"logps/chosen": -398.0511169433594,
"logps/rejected": -495.3601989746094,
"loss": 0.4457,
"rewards/accuracies": 0.7750000357627869,
"rewards/chosen": -169.2345428466797,
"rewards/margins": 135.63201904296875,
"rewards/rejected": -304.8665466308594,
"step": 980
},
{
"epoch": 0.7770800627943485,
"grad_norm": 44.017025893728295,
"learning_rate": 7.201594655002458e-08,
"logits/chosen": 5813.82861328125,
"logits/rejected": 4772.67236328125,
"logps/chosen": -419.42626953125,
"logps/rejected": -509.66265869140625,
"loss": 0.4434,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -193.5313262939453,
"rewards/margins": 122.06156921386719,
"rewards/rejected": -315.5929260253906,
"step": 990
},
{
"epoch": 0.7849293563579278,
"grad_norm": 48.20659685918205,
"learning_rate": 6.727067196345099e-08,
"logits/chosen": 5487.9765625,
"logits/rejected": 4555.32568359375,
"logps/chosen": -404.93780517578125,
"logps/rejected": -469.1463317871094,
"loss": 0.4897,
"rewards/accuracies": 0.7416667342185974,
"rewards/chosen": -191.1819610595703,
"rewards/margins": 97.61907196044922,
"rewards/rejected": -288.8010559082031,
"step": 1000
},
{
"epoch": 0.792778649921507,
"grad_norm": 46.023098624202326,
"learning_rate": 6.26627103495786e-08,
"logits/chosen": 5664.5029296875,
"logits/rejected": 4618.43896484375,
"logps/chosen": -403.0906677246094,
"logps/rejected": -496.787841796875,
"loss": 0.4982,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -188.38143920898438,
"rewards/margins": 116.97279357910156,
"rewards/rejected": -305.354248046875,
"step": 1010
},
{
"epoch": 0.8006279434850864,
"grad_norm": 33.147219575476456,
"learning_rate": 5.8195524386862374e-08,
"logits/chosen": 5767.7158203125,
"logits/rejected": 5049.7314453125,
"logps/chosen": -417.450927734375,
"logps/rejected": -534.2034912109375,
"loss": 0.4768,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -169.27230834960938,
"rewards/margins": 138.8505401611328,
"rewards/rejected": -308.12286376953125,
"step": 1020
},
{
"epoch": 0.8084772370486656,
"grad_norm": 44.16363871216782,
"learning_rate": 5.38724709671092e-08,
"logits/chosen": 6172.28564453125,
"logits/rejected": 5719.01806640625,
"logps/chosen": -422.5091857910156,
"logps/rejected": -543.5262451171875,
"loss": 0.452,
"rewards/accuracies": 0.7916666269302368,
"rewards/chosen": -191.04083251953125,
"rewards/margins": 119.36612701416016,
"rewards/rejected": -310.40692138671875,
"step": 1030
},
{
"epoch": 0.8163265306122449,
"grad_norm": 39.656074244171414,
"learning_rate": 4.969679867292276e-08,
"logits/chosen": 5418.7431640625,
"logits/rejected": 4830.5673828125,
"logps/chosen": -418.001220703125,
"logps/rejected": -538.5531005859375,
"loss": 0.4711,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -197.8995361328125,
"rewards/margins": 131.4696044921875,
"rewards/rejected": -329.369140625,
"step": 1040
},
{
"epoch": 0.8241758241758241,
"grad_norm": 42.53717868680899,
"learning_rate": 4.5671645336537416e-08,
"logits/chosen": 5494.4033203125,
"logits/rejected": 4912.06689453125,
"logps/chosen": -436.4764709472656,
"logps/rejected": -538.32861328125,
"loss": 0.474,
"rewards/accuracies": 0.7916666269302368,
"rewards/chosen": -198.99583435058594,
"rewards/margins": 123.53836822509766,
"rewards/rejected": -322.5341796875,
"step": 1050
},
{
"epoch": 0.8320251177394035,
"grad_norm": 80.06540603349922,
"learning_rate": 4.180003568187776e-08,
"logits/chosen": 6801.2900390625,
"logits/rejected": 5265.6240234375,
"logps/chosen": -467.2841796875,
"logps/rejected": -520.5267944335938,
"loss": 0.5077,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -206.2968292236328,
"rewards/margins": 94.19075012207031,
"rewards/rejected": -300.4875183105469,
"step": 1060
},
{
"epoch": 0.8398744113029827,
"grad_norm": 41.88276100093374,
"learning_rate": 3.8084879051612144e-08,
"logits/chosen": 5643.7236328125,
"logits/rejected": 5065.4716796875,
"logps/chosen": -418.366943359375,
"logps/rejected": -503.81024169921875,
"loss": 0.4863,
"rewards/accuracies": 0.7750000357627869,
"rewards/chosen": -193.12388610839844,
"rewards/margins": 117.52203369140625,
"rewards/rejected": -310.64593505859375,
"step": 1070
},
{
"epoch": 0.847723704866562,
"grad_norm": 36.25286342761492,
"learning_rate": 3.452896722091128e-08,
"logits/chosen": 6217.41943359375,
"logits/rejected": 4696.7431640625,
"logps/chosen": -455.5332946777344,
"logps/rejected": -515.8660888671875,
"loss": 0.4523,
"rewards/accuracies": 0.8083333969116211,
"rewards/chosen": -188.9639892578125,
"rewards/margins": 125.92216491699219,
"rewards/rejected": -314.8861389160156,
"step": 1080
},
{
"epoch": 0.8555729984301413,
"grad_norm": 38.21104402933791,
"learning_rate": 3.11349722995527e-08,
"logits/chosen": 6234.50830078125,
"logits/rejected": 4605.3544921875,
"logps/chosen": -427.64154052734375,
"logps/rejected": -516.46533203125,
"loss": 0.4496,
"rewards/accuracies": 0.75,
"rewards/chosen": -200.16696166992188,
"rewards/margins": 103.41390228271484,
"rewards/rejected": -303.58087158203125,
"step": 1090
},
{
"epoch": 0.8634222919937206,
"grad_norm": 41.38019827891624,
"learning_rate": 2.7905444723949762e-08,
"logits/chosen": 6074.0888671875,
"logits/rejected": 4931.25732421875,
"logps/chosen": -438.71826171875,
"logps/rejected": -524.115478515625,
"loss": 0.4767,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -200.18191528320312,
"rewards/margins": 134.4362030029297,
"rewards/rejected": -334.61810302734375,
"step": 1100
},
{
"epoch": 0.8712715855572999,
"grad_norm": 50.96012299878205,
"learning_rate": 2.484281134061142e-08,
"logits/chosen": 6402.4609375,
"logits/rejected": 5064.45458984375,
"logps/chosen": -470.1363830566406,
"logps/rejected": -555.388671875,
"loss": 0.4579,
"rewards/accuracies": 0.7916666865348816,
"rewards/chosen": -205.52273559570312,
"rewards/margins": 127.5979232788086,
"rewards/rejected": -333.12066650390625,
"step": 1110
},
{
"epoch": 0.8791208791208791,
"grad_norm": 79.05286543148162,
"learning_rate": 2.194937358247506e-08,
"logits/chosen": 6290.2275390625,
"logits/rejected": 5008.91064453125,
"logps/chosen": -447.3267517089844,
"logps/rejected": -544.9305419921875,
"loss": 0.4567,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -202.18634033203125,
"rewards/margins": 125.97334289550781,
"rewards/rejected": -328.1596374511719,
"step": 1120
},
{
"epoch": 0.8869701726844584,
"grad_norm": 43.333430022284745,
"learning_rate": 1.9227305739481612e-08,
"logits/chosen": 5735.0810546875,
"logits/rejected": 4401.76904296875,
"logps/chosen": -405.6451416015625,
"logps/rejected": -487.1587829589844,
"loss": 0.4634,
"rewards/accuracies": 0.7333332896232605,
"rewards/chosen": -170.8631134033203,
"rewards/margins": 136.07012939453125,
"rewards/rejected": -306.93328857421875,
"step": 1130
},
{
"epoch": 0.8948194662480377,
"grad_norm": 43.64685469104338,
"learning_rate": 1.6678653324693787e-08,
"logits/chosen": 6344.9375,
"logits/rejected": 5017.90771484375,
"logps/chosen": -448.3443298339844,
"logps/rejected": -535.1290283203125,
"loss": 0.4315,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -190.72360229492188,
"rewards/margins": 123.5924072265625,
"rewards/rejected": -314.31597900390625,
"step": 1140
},
{
"epoch": 0.902668759811617,
"grad_norm": 30.74102105572857,
"learning_rate": 1.4305331537183384e-08,
"logits/chosen": 5594.583984375,
"logits/rejected": 5034.6513671875,
"logps/chosen": -418.12744140625,
"logps/rejected": -520.3704223632812,
"loss": 0.4479,
"rewards/accuracies": 0.6833333373069763,
"rewards/chosen": -191.1439971923828,
"rewards/margins": 109.73844146728516,
"rewards/rejected": -300.8824768066406,
"step": 1150
},
{
"epoch": 0.9105180533751962,
"grad_norm": 44.80220273513666,
"learning_rate": 1.2109123822844653e-08,
"logits/chosen": 5744.36767578125,
"logits/rejected": 4455.0693359375,
"logps/chosen": -426.673095703125,
"logps/rejected": -502.768798828125,
"loss": 0.4666,
"rewards/accuracies": 0.783333420753479,
"rewards/chosen": -198.11630249023438,
"rewards/margins": 108.71439361572266,
"rewards/rejected": -306.8307189941406,
"step": 1160
},
{
"epoch": 0.9183673469387755,
"grad_norm": 45.0040128824626,
"learning_rate": 1.0091680534213387e-08,
"logits/chosen": 6311.94287109375,
"logits/rejected": 5963.67431640625,
"logps/chosen": -428.43194580078125,
"logps/rejected": -561.5809326171875,
"loss": 0.4716,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -185.48355102539062,
"rewards/margins": 129.35040283203125,
"rewards/rejected": -314.8339538574219,
"step": 1170
},
{
"epoch": 0.9262166405023547,
"grad_norm": 42.78903323769823,
"learning_rate": 8.254517690300944e-09,
"logits/chosen": 5514.55908203125,
"logits/rejected": 4880.208984375,
"logps/chosen": -419.6272888183594,
"logps/rejected": -533.3134765625,
"loss": 0.4456,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -179.68130493164062,
"rewards/margins": 136.81039428710938,
"rewards/rejected": -316.49169921875,
"step": 1180
},
{
"epoch": 0.9340659340659341,
"grad_norm": 28.531293799623864,
"learning_rate": 6.599015837372907e-09,
"logits/chosen": 6016.01953125,
"logits/rejected": 5128.45068359375,
"logps/chosen": -462.04693603515625,
"logps/rejected": -547.386962890625,
"loss": 0.4737,
"rewards/accuracies": 0.75,
"rewards/chosen": -213.16140747070312,
"rewards/margins": 117.0730972290039,
"rewards/rejected": -330.2344970703125,
"step": 1190
},
{
"epoch": 0.9419152276295133,
"grad_norm": 55.0858063276208,
"learning_rate": 5.126419011529992e-09,
"logits/chosen": 6267.1201171875,
"logits/rejected": 5174.8701171875,
"logps/chosen": -440.6876525878906,
"logps/rejected": -556.6735229492188,
"loss": 0.4608,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -185.0259246826172,
"rewards/margins": 152.98207092285156,
"rewards/rejected": -338.00799560546875,
"step": 1200
},
{
"epoch": 0.9497645211930926,
"grad_norm": 47.79283442167676,
"learning_rate": 3.837833803870177e-09,
"logits/chosen": 5824.5322265625,
"logits/rejected": 5009.84765625,
"logps/chosen": -441.38726806640625,
"logps/rejected": -552.630126953125,
"loss": 0.4784,
"rewards/accuracies": 0.7666667103767395,
"rewards/chosen": -201.20175170898438,
"rewards/margins": 133.22714233398438,
"rewards/rejected": -334.42889404296875,
"step": 1210
},
{
"epoch": 0.957613814756672,
"grad_norm": 54.80164391638467,
"learning_rate": 2.734228528934679e-09,
"logits/chosen": 7285.5625,
"logits/rejected": 5203.20263671875,
"logps/chosen": -506.8431701660156,
"logps/rejected": -587.1539916992188,
"loss": 0.473,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -210.07470703125,
"rewards/margins": 131.88209533691406,
"rewards/rejected": -341.956787109375,
"step": 1220
},
{
"epoch": 0.9654631083202512,
"grad_norm": 57.218716588041886,
"learning_rate": 1.8164324970625645e-09,
"logits/chosen": 6470.89599609375,
"logits/rejected": 4952.3310546875,
"logps/chosen": -447.22430419921875,
"logps/rejected": -529.0125732421875,
"loss": 0.5037,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -186.6426544189453,
"rewards/margins": 129.3731689453125,
"rewards/rejected": -316.01580810546875,
"step": 1230
},
{
"epoch": 0.9733124018838305,
"grad_norm": 49.08939659365686,
"learning_rate": 1.0851353912008642e-09,
"logits/chosen": 5508.70849609375,
"logits/rejected": 5003.6572265625,
"logps/chosen": -440.58612060546875,
"logps/rejected": -564.5394287109375,
"loss": 0.4705,
"rewards/accuracies": 0.7166666388511658,
"rewards/chosen": -208.97372436523438,
"rewards/margins": 120.40782165527344,
"rewards/rejected": -329.3815612792969,
"step": 1240
},
{
"epoch": 0.9811616954474097,
"grad_norm": 34.5765498205139,
"learning_rate": 5.408867486384471e-10,
"logits/chosen": 5668.72265625,
"logits/rejected": 4670.9951171875,
"logps/chosen": -410.5791931152344,
"logps/rejected": -494.171630859375,
"loss": 0.4648,
"rewards/accuracies": 0.7999999523162842,
"rewards/chosen": -180.2710723876953,
"rewards/margins": 124.47029876708984,
"rewards/rejected": -304.74139404296875,
"step": 1250
},
{
"epoch": 0.989010989010989,
"grad_norm": 41.41687041502778,
"learning_rate": 1.840955480532924e-10,
"logits/chosen": 5382.2900390625,
"logits/rejected": 4956.3583984375,
"logps/chosen": -417.5318298339844,
"logps/rejected": -528.2335815429688,
"loss": 0.4468,
"rewards/accuracies": 0.8083333969116211,
"rewards/chosen": -185.3712615966797,
"rewards/margins": 126.5372085571289,
"rewards/rejected": -311.9084777832031,
"step": 1260
},
{
"epoch": 0.9968602825745683,
"grad_norm": 49.64487229564236,
"learning_rate": 1.502990218302247e-11,
"logits/chosen": 5616.408203125,
"logits/rejected": 4474.3134765625,
"logps/chosen": -429.5157165527344,
"logps/rejected": -510.53448486328125,
"loss": 0.4854,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -205.04177856445312,
"rewards/margins": 119.1884765625,
"rewards/rejected": -324.230224609375,
"step": 1270
},
{
"epoch": 1.0,
"step": 1274,
"total_flos": 0.0,
"train_loss": 0.5142561288044628,
"train_runtime": 14779.9494,
"train_samples_per_second": 4.136,
"train_steps_per_second": 0.086
}
],
"logging_steps": 10,
"max_steps": 1274,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}
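
The file above follows the standard Hugging Face Trainer state layout, so its log_history can be inspected programmatically. Below is a minimal sketch, assuming the JSON is saved locally as trainer_state.json (the filename is an assumption for illustration), that prints the DPO training loss and reward margin at each logged step.

import json

# Load the saved trainer state (local path assumed for this sketch).
with open("trainer_state.json") as f:
    state = json.load(f)

# Intermediate entries in log_history carry the training loss and the DPO
# reward margin; the final summary entry lacks these keys and is skipped.
for entry in state["log_history"]:
    if "loss" in entry and "rewards/margins" in entry:
        print(f"step {entry['step']:>4}  loss {entry['loss']:.4f}  "
              f"margin {entry['rewards/margins']:.2f}")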