IL_DPOAll-zephyr-7b-sft-full / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 1274,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0007849293563579278,
"grad_norm": 6.093040937403707,
"learning_rate": 3.90625e-09,
"logits/chosen": 5914.52099609375,
"logits/rejected": 2785.021484375,
"logps/chosen": -212.45889282226562,
"logps/rejected": -98.59669494628906,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.007849293563579277,
"grad_norm": 6.05072585015556,
"learning_rate": 3.9062499999999997e-08,
"logits/chosen": 4973.81005859375,
"logits/rejected": 4328.2421875,
"logps/chosen": -204.26458740234375,
"logps/rejected": -179.73109436035156,
"loss": 0.6931,
"rewards/accuracies": 0.5092592835426331,
"rewards/chosen": 0.019286984577775,
"rewards/margins": 0.04475558549165726,
"rewards/rejected": -0.025468600913882256,
"step": 10
},
{
"epoch": 0.015698587127158554,
"grad_norm": 6.19573847618762,
"learning_rate": 7.812499999999999e-08,
"logits/chosen": 6084.08447265625,
"logits/rejected": 4834.19189453125,
"logps/chosen": -217.2179412841797,
"logps/rejected": -196.71627807617188,
"loss": 0.6932,
"rewards/accuracies": 0.491666704416275,
"rewards/chosen": -0.006735034286975861,
"rewards/margins": -0.00402786722406745,
"rewards/rejected": -0.002707168459892273,
"step": 20
},
{
"epoch": 0.023547880690737835,
"grad_norm": 5.478939628494671,
"learning_rate": 1.1718749999999999e-07,
"logits/chosen": 6084.3291015625,
"logits/rejected": 5104.9287109375,
"logps/chosen": -250.56930541992188,
"logps/rejected": -209.365966796875,
"loss": 0.693,
"rewards/accuracies": 0.5916666388511658,
"rewards/chosen": 0.0030434236396104097,
"rewards/margins": 0.07141536474227905,
"rewards/rejected": -0.06837192922830582,
"step": 30
},
{
"epoch": 0.03139717425431711,
"grad_norm": 5.683384309053487,
"learning_rate": 1.5624999999999999e-07,
"logits/chosen": 5311.80126953125,
"logits/rejected": 4346.90771484375,
"logps/chosen": -212.0178985595703,
"logps/rejected": -181.733642578125,
"loss": 0.6928,
"rewards/accuracies": 0.5916666984558105,
"rewards/chosen": 0.06136460229754448,
"rewards/margins": 0.10798362642526627,
"rewards/rejected": -0.04661902040243149,
"step": 40
},
{
"epoch": 0.03924646781789639,
"grad_norm": 5.773772011816319,
"learning_rate": 1.9531249999999998e-07,
"logits/chosen": 6425.0439453125,
"logits/rejected": 5042.47119140625,
"logps/chosen": -265.3039855957031,
"logps/rejected": -206.7879180908203,
"loss": 0.6924,
"rewards/accuracies": 0.6666666865348816,
"rewards/chosen": 0.32259494066238403,
"rewards/margins": 0.4108748435974121,
"rewards/rejected": -0.08827991783618927,
"step": 50
},
{
"epoch": 0.04709576138147567,
"grad_norm": 5.549546838003912,
"learning_rate": 2.3437499999999998e-07,
"logits/chosen": 5484.3701171875,
"logits/rejected": 4559.94970703125,
"logps/chosen": -213.7654266357422,
"logps/rejected": -209.1769256591797,
"loss": 0.6918,
"rewards/accuracies": 0.6666666865348816,
"rewards/chosen": 0.23852241039276123,
"rewards/margins": 0.6153593063354492,
"rewards/rejected": -0.3768369257450104,
"step": 60
},
{
"epoch": 0.054945054945054944,
"grad_norm": 5.3363089121065395,
"learning_rate": 2.734375e-07,
"logits/chosen": 5194.4814453125,
"logits/rejected": 4918.6982421875,
"logps/chosen": -178.33897399902344,
"logps/rejected": -177.4080047607422,
"loss": 0.6911,
"rewards/accuracies": 0.6750000715255737,
"rewards/chosen": 0.14194367825984955,
"rewards/margins": 0.8182166218757629,
"rewards/rejected": -0.6762728691101074,
"step": 70
},
{
"epoch": 0.06279434850863422,
"grad_norm": 5.589990285987142,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": 5774.57080078125,
"logits/rejected": 5269.759765625,
"logps/chosen": -196.86941528320312,
"logps/rejected": -183.05673217773438,
"loss": 0.6889,
"rewards/accuracies": 0.6333333253860474,
"rewards/chosen": -0.3008244037628174,
"rewards/margins": 1.1653871536254883,
"rewards/rejected": -1.4662115573883057,
"step": 80
},
{
"epoch": 0.0706436420722135,
"grad_norm": 6.3326385736616,
"learning_rate": 3.5156249999999997e-07,
"logits/chosen": 6037.1435546875,
"logits/rejected": 5178.23046875,
"logps/chosen": -220.5373077392578,
"logps/rejected": -191.18222045898438,
"loss": 0.6847,
"rewards/accuracies": 0.7583332657814026,
"rewards/chosen": -1.6551120281219482,
"rewards/margins": 5.083296298980713,
"rewards/rejected": -6.738408088684082,
"step": 90
},
{
"epoch": 0.07849293563579278,
"grad_norm": 7.037637671287219,
"learning_rate": 3.9062499999999997e-07,
"logits/chosen": 5949.7509765625,
"logits/rejected": 5727.8193359375,
"logps/chosen": -216.74551391601562,
"logps/rejected": -212.49813842773438,
"loss": 0.684,
"rewards/accuracies": 0.6583333611488342,
"rewards/chosen": -6.514804840087891,
"rewards/margins": 6.541618347167969,
"rewards/rejected": -13.056424140930176,
"step": 100
},
{
"epoch": 0.08634222919937205,
"grad_norm": 6.748245033085064,
"learning_rate": 4.2968749999999996e-07,
"logits/chosen": 6423.7314453125,
"logits/rejected": 5241.818359375,
"logps/chosen": -198.58094787597656,
"logps/rejected": -202.92428588867188,
"loss": 0.6778,
"rewards/accuracies": 0.6750000715255737,
"rewards/chosen": -15.466090202331543,
"rewards/margins": 8.465994834899902,
"rewards/rejected": -23.932086944580078,
"step": 110
},
{
"epoch": 0.09419152276295134,
"grad_norm": 10.445551519317151,
"learning_rate": 4.6874999999999996e-07,
"logits/chosen": 6351.18212890625,
"logits/rejected": 5273.6953125,
"logps/chosen": -226.8378143310547,
"logps/rejected": -234.71200561523438,
"loss": 0.672,
"rewards/accuracies": 0.625,
"rewards/chosen": -24.001096725463867,
"rewards/margins": 12.545796394348145,
"rewards/rejected": -36.54689025878906,
"step": 120
},
{
"epoch": 0.10204081632653061,
"grad_norm": 7.973080880427375,
"learning_rate": 4.999962424962166e-07,
"logits/chosen": 6361.275390625,
"logits/rejected": 5860.3828125,
"logps/chosen": -239.5140838623047,
"logps/rejected": -246.1350555419922,
"loss": 0.6659,
"rewards/accuracies": 0.6833333373069763,
"rewards/chosen": -29.705577850341797,
"rewards/margins": 18.78491973876953,
"rewards/rejected": -48.490501403808594,
"step": 130
},
{
"epoch": 0.10989010989010989,
"grad_norm": 9.656148828714871,
"learning_rate": 4.998647417232375e-07,
"logits/chosen": 6324.2783203125,
"logits/rejected": 5568.7294921875,
"logps/chosen": -233.6736602783203,
"logps/rejected": -243.1141815185547,
"loss": 0.6643,
"rewards/accuracies": 0.6500000357627869,
"rewards/chosen": -48.00621795654297,
"rewards/margins": 17.51606559753418,
"rewards/rejected": -65.52229309082031,
"step": 140
},
{
"epoch": 0.11773940345368916,
"grad_norm": 12.68709283782261,
"learning_rate": 4.995454786965036e-07,
"logits/chosen": 6543.3310546875,
"logits/rejected": 5417.107421875,
"logps/chosen": -245.687744140625,
"logps/rejected": -238.84017944335938,
"loss": 0.6608,
"rewards/accuracies": 0.6833333373069763,
"rewards/chosen": -44.00338363647461,
"rewards/margins": 22.5390682220459,
"rewards/rejected": -66.5424575805664,
"step": 150
},
{
"epoch": 0.12558869701726844,
"grad_norm": 8.336675270819248,
"learning_rate": 4.990386933279972e-07,
"logits/chosen": 6377.17236328125,
"logits/rejected": 5662.22607421875,
"logps/chosen": -243.30667114257812,
"logps/rejected": -268.36431884765625,
"loss": 0.6573,
"rewards/accuracies": 0.6750000715255737,
"rewards/chosen": -45.77846145629883,
"rewards/margins": 25.528247833251953,
"rewards/rejected": -71.30671691894531,
"step": 160
},
{
"epoch": 0.13343799058084774,
"grad_norm": 9.518393732214355,
"learning_rate": 4.983447664444096e-07,
"logits/chosen": 6593.54541015625,
"logits/rejected": 5842.90625,
"logps/chosen": -261.85162353515625,
"logps/rejected": -269.01934814453125,
"loss": 0.6572,
"rewards/accuracies": 0.6416667103767395,
"rewards/chosen": -52.23096466064453,
"rewards/margins": 20.466854095458984,
"rewards/rejected": -72.69782257080078,
"step": 170
},
{
"epoch": 0.141287284144427,
"grad_norm": 9.551095723742446,
"learning_rate": 4.97464219500968e-07,
"logits/chosen": 5738.0458984375,
"logits/rejected": 4965.66162109375,
"logps/chosen": -243.9608917236328,
"logps/rejected": -260.8826599121094,
"loss": 0.6523,
"rewards/accuracies": 0.6916667222976685,
"rewards/chosen": -55.4331169128418,
"rewards/margins": 27.288272857666016,
"rewards/rejected": -82.72139739990234,
"step": 180
},
{
"epoch": 0.14913657770800628,
"grad_norm": 9.729373391308025,
"learning_rate": 4.963977141895843e-07,
"logits/chosen": 5827.6171875,
"logits/rejected": 4940.27880859375,
"logps/chosen": -265.14093017578125,
"logps/rejected": -304.3471374511719,
"loss": 0.6414,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -61.3819694519043,
"rewards/margins": 51.72563934326172,
"rewards/rejected": -113.10758972167969,
"step": 190
},
{
"epoch": 0.15698587127158556,
"grad_norm": 12.708150524514142,
"learning_rate": 4.951460519416227e-07,
"logits/chosen": 5715.02197265625,
"logits/rejected": 5263.1376953125,
"logps/chosen": -255.45626831054688,
"logps/rejected": -308.5564880371094,
"loss": 0.6432,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -72.91200256347656,
"rewards/margins": 38.973487854003906,
"rewards/rejected": -111.8854751586914,
"step": 200
},
{
"epoch": 0.16483516483516483,
"grad_norm": 13.38335440738734,
"learning_rate": 4.937101733256606e-07,
"logits/chosen": 5213.759765625,
"logits/rejected": 4587.78564453125,
"logps/chosen": -221.2035675048828,
"logps/rejected": -255.7327423095703,
"loss": 0.645,
"rewards/accuracies": 0.6666666269302368,
"rewards/chosen": -67.2903060913086,
"rewards/margins": 31.2614688873291,
"rewards/rejected": -98.5517807006836,
"step": 210
},
{
"epoch": 0.1726844583987441,
"grad_norm": 13.23088850436655,
"learning_rate": 4.920911573406924e-07,
"logits/chosen": 6382.59326171875,
"logits/rejected": 5369.0458984375,
"logps/chosen": -254.50845336914062,
"logps/rejected": -261.02984619140625,
"loss": 0.6396,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -58.17182159423828,
"rewards/margins": 38.86132049560547,
"rewards/rejected": -97.03313446044922,
"step": 220
},
{
"epoch": 0.18053375196232338,
"grad_norm": 13.7002975170986,
"learning_rate": 4.902902206053098e-07,
"logits/chosen": 5725.72998046875,
"logits/rejected": 5134.1142578125,
"logps/chosen": -295.9504089355469,
"logps/rejected": -334.36383056640625,
"loss": 0.641,
"rewards/accuracies": 0.6750000715255737,
"rewards/chosen": -108.6116714477539,
"rewards/margins": 45.984703063964844,
"rewards/rejected": -154.59637451171875,
"step": 230
},
{
"epoch": 0.18838304552590268,
"grad_norm": 11.470848035692125,
"learning_rate": 4.883087164434672e-07,
"logits/chosen": 5131.9599609375,
"logits/rejected": 4044.186279296875,
"logps/chosen": -262.76409912109375,
"logps/rejected": -300.7772521972656,
"loss": 0.63,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -95.13509368896484,
"rewards/margins": 50.95269012451172,
"rewards/rejected": -146.08779907226562,
"step": 240
},
{
"epoch": 0.19623233908948196,
"grad_norm": 16.695171401268386,
"learning_rate": 4.861481338675183e-07,
"logits/chosen": 6127.5693359375,
"logits/rejected": 5402.2724609375,
"logps/chosen": -271.9035949707031,
"logps/rejected": -345.59136962890625,
"loss": 0.6424,
"rewards/accuracies": 0.7083333134651184,
"rewards/chosen": -104.75971984863281,
"rewards/margins": 56.324981689453125,
"rewards/rejected": -161.08468627929688,
"step": 250
},
{
"epoch": 0.20408163265306123,
"grad_norm": 15.604975772107185,
"learning_rate": 4.838100964592904e-07,
"logits/chosen": 6368.3525390625,
"logits/rejected": 5054.92529296875,
"logps/chosen": -287.34759521484375,
"logps/rejected": -301.80914306640625,
"loss": 0.6438,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -87.629150390625,
"rewards/margins": 47.95604705810547,
"rewards/rejected": -135.585205078125,
"step": 260
},
{
"epoch": 0.2119309262166405,
"grad_norm": 12.014162614621322,
"learning_rate": 4.812963611500339e-07,
"logits/chosen": 6167.998046875,
"logits/rejected": 5935.1181640625,
"logps/chosen": -288.6238708496094,
"logps/rejected": -318.166015625,
"loss": 0.6315,
"rewards/accuracies": 0.7166667580604553,
"rewards/chosen": -91.86804962158203,
"rewards/margins": 37.66888427734375,
"rewards/rejected": -129.5369415283203,
"step": 270
},
{
"epoch": 0.21978021978021978,
"grad_norm": 25.285219401191153,
"learning_rate": 4.786088169001671e-07,
"logits/chosen": 5262.55029296875,
"logits/rejected": 4536.3759765625,
"logps/chosen": -249.36483764648438,
"logps/rejected": -323.6359558105469,
"loss": 0.6274,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -86.21394348144531,
"rewards/margins": 68.12052917480469,
"rewards/rejected": -154.33448791503906,
"step": 280
},
{
"epoch": 0.22762951334379905,
"grad_norm": 21.48499040413718,
"learning_rate": 4.7574948327980567e-07,
"logits/chosen": 7437.4541015625,
"logits/rejected": 5381.4013671875,
"logps/chosen": -332.65570068359375,
"logps/rejected": -372.79583740234375,
"loss": 0.6225,
"rewards/accuracies": 0.7916666865348816,
"rewards/chosen": -95.62727355957031,
"rewards/margins": 94.65281677246094,
"rewards/rejected": -190.28009033203125,
"step": 290
},
{
"epoch": 0.23547880690737832,
"grad_norm": 14.64094770923456,
"learning_rate": 4.727205089511466e-07,
"logits/chosen": 5407.33056640625,
"logits/rejected": 5314.2958984375,
"logps/chosen": -247.26754760742188,
"logps/rejected": -310.8182678222656,
"loss": 0.6323,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -79.79512023925781,
"rewards/margins": 60.83668899536133,
"rewards/rejected": -140.63180541992188,
"step": 300
},
{
"epoch": 0.24332810047095763,
"grad_norm": 15.88030348707004,
"learning_rate": 4.6952417005384247e-07,
"logits/chosen": 6043.89404296875,
"logits/rejected": 5373.5927734375,
"logps/chosen": -264.2408752441406,
"logps/rejected": -301.0606994628906,
"loss": 0.6422,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -86.66017150878906,
"rewards/margins": 38.932220458984375,
"rewards/rejected": -125.5923843383789,
"step": 310
},
{
"epoch": 0.25117739403453687,
"grad_norm": 12.942810394852572,
"learning_rate": 4.661628684945851e-07,
"logits/chosen": 5981.30615234375,
"logits/rejected": 5134.4638671875,
"logps/chosen": -321.04461669921875,
"logps/rejected": -394.8025817871094,
"loss": 0.6322,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -119.98759460449219,
"rewards/margins": 72.79707336425781,
"rewards/rejected": -192.78469848632812,
"step": 320
},
{
"epoch": 0.25902668759811615,
"grad_norm": 17.65657239960532,
"learning_rate": 4.626391301421782e-07,
"logits/chosen": 5767.1728515625,
"logits/rejected": 5235.9375,
"logps/chosen": -319.49798583984375,
"logps/rejected": -344.1056213378906,
"loss": 0.6455,
"rewards/accuracies": 0.7416667342185974,
"rewards/chosen": -125.0447769165039,
"rewards/margins": 41.73804473876953,
"rewards/rejected": -166.78280639648438,
"step": 330
},
{
"epoch": 0.2668759811616955,
"grad_norm": 10.256221501911574,
"learning_rate": 4.5895560292945996e-07,
"logits/chosen": 6147.9560546875,
"logits/rejected": 6276.4072265625,
"logps/chosen": -274.11529541015625,
"logps/rejected": -340.8870849609375,
"loss": 0.651,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -80.25703430175781,
"rewards/margins": 40.26253128051758,
"rewards/rejected": -120.51956939697266,
"step": 340
},
{
"epoch": 0.27472527472527475,
"grad_norm": 23.69669876872275,
"learning_rate": 4.5511505486349865e-07,
"logits/chosen": 6537.46337890625,
"logits/rejected": 5889.2177734375,
"logps/chosen": -257.62890625,
"logps/rejected": -332.9743957519531,
"loss": 0.6256,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -62.803199768066406,
"rewards/margins": 63.349143981933594,
"rewards/rejected": -126.15234375,
"step": 350
},
{
"epoch": 0.282574568288854,
"grad_norm": 16.915648438332628,
"learning_rate": 4.5112037194555876e-07,
"logits/chosen": 6069.42578125,
"logits/rejected": 5893.24951171875,
"logps/chosen": -293.303955078125,
"logps/rejected": -398.67724609375,
"loss": 0.6222,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -115.38047790527344,
"rewards/margins": 84.01496887207031,
"rewards/rejected": -199.39544677734375,
"step": 360
},
{
"epoch": 0.2904238618524333,
"grad_norm": 12.555265198666554,
"learning_rate": 4.4697455600239863e-07,
"logits/chosen": 5456.2978515625,
"logits/rejected": 5073.7060546875,
"logps/chosen": -296.23944091796875,
"logps/rejected": -330.85284423828125,
"loss": 0.6434,
"rewards/accuracies": 0.7083333134651184,
"rewards/chosen": -111.81736755371094,
"rewards/margins": 50.96443176269531,
"rewards/rejected": -162.78179931640625,
"step": 370
},
{
"epoch": 0.29827315541601257,
"grad_norm": 14.362943259943494,
"learning_rate": 4.426807224305315e-07,
"logits/chosen": 6574.45458984375,
"logits/rejected": 5350.2958984375,
"logps/chosen": -315.9097595214844,
"logps/rejected": -338.9637451171875,
"loss": 0.6267,
"rewards/accuracies": 0.7666667103767395,
"rewards/chosen": -87.56874084472656,
"rewards/margins": 68.38545227050781,
"rewards/rejected": -155.95419311523438,
"step": 380
},
{
"epoch": 0.30612244897959184,
"grad_norm": 17.276156747689946,
"learning_rate": 4.3824209785514326e-07,
"logits/chosen": 6646.9892578125,
"logits/rejected": 5010.45947265625,
"logps/chosen": -303.3409118652344,
"logps/rejected": -347.64373779296875,
"loss": 0.6345,
"rewards/accuracies": 0.7750000357627869,
"rewards/chosen": -87.84745788574219,
"rewards/margins": 79.94316101074219,
"rewards/rejected": -167.79061889648438,
"step": 390
},
{
"epoch": 0.3139717425431711,
"grad_norm": 16.15627046135233,
"learning_rate": 4.3366201770542687e-07,
"logits/chosen": 5679.2353515625,
"logits/rejected": 5516.23583984375,
"logps/chosen": -289.3190612792969,
"logps/rejected": -350.9610900878906,
"loss": 0.637,
"rewards/accuracies": 0.75,
"rewards/chosen": -98.53050994873047,
"rewards/margins": 63.691078186035156,
"rewards/rejected": -162.22158813476562,
"step": 400
},
{
"epoch": 0.3218210361067504,
"grad_norm": 25.935620681417305,
"learning_rate": 4.2894392370815567e-07,
"logits/chosen": 6088.6064453125,
"logits/rejected": 5395.84912109375,
"logps/chosen": -339.7603759765625,
"logps/rejected": -411.3926696777344,
"loss": 0.6019,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -127.27352142333984,
"rewards/margins": 75.66678619384766,
"rewards/rejected": -202.9403076171875,
"step": 410
},
{
"epoch": 0.32967032967032966,
"grad_norm": 21.279196743760856,
"learning_rate": 4.2409136130137845e-07,
"logits/chosen": 5789.5654296875,
"logits/rejected": 5153.6044921875,
"logps/chosen": -319.99505615234375,
"logps/rejected": -372.98162841796875,
"loss": 0.6269,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -115.96516418457031,
"rewards/margins": 75.77505493164062,
"rewards/rejected": -191.74020385742188,
"step": 420
},
{
"epoch": 0.33751962323390894,
"grad_norm": 14.662428194905122,
"learning_rate": 4.1910797697018017e-07,
"logits/chosen": 5631.46337890625,
"logits/rejected": 4653.8154296875,
"logps/chosen": -277.2632141113281,
"logps/rejected": -342.4476623535156,
"loss": 0.624,
"rewards/accuracies": 0.8583332896232605,
"rewards/chosen": -97.5182113647461,
"rewards/margins": 83.69635772705078,
"rewards/rejected": -181.21456909179688,
"step": 430
},
{
"epoch": 0.3453689167974882,
"grad_norm": 20.509381423565515,
"learning_rate": 4.1399751550651084e-07,
"logits/chosen": 5983.00341796875,
"logits/rejected": 5909.9189453125,
"logps/chosen": -294.4239807128906,
"logps/rejected": -366.37548828125,
"loss": 0.6294,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -112.0306396484375,
"rewards/margins": 62.66124725341797,
"rewards/rejected": -174.69189453125,
"step": 440
},
{
"epoch": 0.3532182103610675,
"grad_norm": 11.57774703743698,
"learning_rate": 4.087638171951401e-07,
"logits/chosen": 6951.85302734375,
"logits/rejected": 4954.27978515625,
"logps/chosen": -307.88006591796875,
"logps/rejected": -344.5763244628906,
"loss": 0.6245,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -99.11131286621094,
"rewards/margins": 82.47236633300781,
"rewards/rejected": -181.5836639404297,
"step": 450
},
{
"epoch": 0.36106750392464676,
"grad_norm": 10.747587473405163,
"learning_rate": 4.034108149278543e-07,
"logits/chosen": 7119.5966796875,
"logits/rejected": 5446.58544921875,
"logps/chosen": -365.51141357421875,
"logps/rejected": -376.9080810546875,
"loss": 0.6136,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -115.07914733886719,
"rewards/margins": 71.62091064453125,
"rewards/rejected": -186.7000732421875,
"step": 460
},
{
"epoch": 0.36891679748822603,
"grad_norm": 18.39563106747747,
"learning_rate": 3.979425312480629e-07,
"logits/chosen": 6053.51171875,
"logits/rejected": 5261.99365234375,
"logps/chosen": -349.2318420410156,
"logps/rejected": -409.2492370605469,
"loss": 0.634,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -136.96707153320312,
"rewards/margins": 69.71588134765625,
"rewards/rejected": -206.68295288085938,
"step": 470
},
{
"epoch": 0.37676609105180536,
"grad_norm": 9.88717064834881,
"learning_rate": 3.923630753280357e-07,
"logits/chosen": 6531.0361328125,
"logits/rejected": 5563.5673828125,
"logps/chosen": -304.6759338378906,
"logps/rejected": -353.3858642578125,
"loss": 0.6225,
"rewards/accuracies": 0.7750000357627869,
"rewards/chosen": -97.28072357177734,
"rewards/margins": 81.7093734741211,
"rewards/rejected": -178.9901123046875,
"step": 480
},
{
"epoch": 0.38461538461538464,
"grad_norm": 18.872015793003488,
"learning_rate": 3.866766398810424e-07,
"logits/chosen": 6090.294921875,
"logits/rejected": 5757.2998046875,
"logps/chosen": -270.914794921875,
"logps/rejected": -388.12066650390625,
"loss": 0.6047,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -94.30628967285156,
"rewards/margins": 91.7900619506836,
"rewards/rejected": -186.0963592529297,
"step": 490
},
{
"epoch": 0.3924646781789639,
"grad_norm": 19.921576157729877,
"learning_rate": 3.8088749801071496e-07,
"logits/chosen": 6461.3857421875,
"logits/rejected": 4896.03759765625,
"logps/chosen": -404.9927673339844,
"logps/rejected": -488.85992431640625,
"loss": 0.6363,
"rewards/accuracies": 0.7583333849906921,
"rewards/chosen": -183.0391082763672,
"rewards/margins": 103.30455017089844,
"rewards/rejected": -286.3436584472656,
"step": 500
},
{
"epoch": 0.4003139717425432,
"grad_norm": 14.976740215989906,
"learning_rate": 3.75e-07,
"logits/chosen": 5273.30029296875,
"logits/rejected": 4571.634765625,
"logps/chosen": -316.82159423828125,
"logps/rejected": -388.461669921875,
"loss": 0.6164,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -127.3386001586914,
"rewards/margins": 94.75777435302734,
"rewards/rejected": -222.0963897705078,
"step": 510
},
{
"epoch": 0.40816326530612246,
"grad_norm": 15.310486602213532,
"learning_rate": 3.6901857004211443e-07,
"logits/chosen": 5530.29443359375,
"logits/rejected": 5099.7265625,
"logps/chosen": -315.60919189453125,
"logps/rejected": -381.65789794921875,
"loss": 0.6418,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -117.1432113647461,
"rewards/margins": 68.7266845703125,
"rewards/rejected": -185.86990356445312,
"step": 520
},
{
"epoch": 0.41601255886970173,
"grad_norm": 14.08983541941681,
"learning_rate": 3.6294770291596076e-07,
"logits/chosen": 6285.1962890625,
"logits/rejected": 5097.83935546875,
"logps/chosen": -322.7880554199219,
"logps/rejected": -361.3253479003906,
"loss": 0.6168,
"rewards/accuracies": 0.6916666030883789,
"rewards/chosen": -109.86531829833984,
"rewards/margins": 52.69968795776367,
"rewards/rejected": -162.5650177001953,
"step": 530
},
{
"epoch": 0.423861852433281,
"grad_norm": 20.022842230444766,
"learning_rate": 3.5679196060850034e-07,
"logits/chosen": 5982.83935546875,
"logits/rejected": 5300.0537109375,
"logps/chosen": -333.60455322265625,
"logps/rejected": -389.5378723144531,
"loss": 0.6173,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -122.7867431640625,
"rewards/margins": 78.51233673095703,
"rewards/rejected": -201.29908752441406,
"step": 540
},
{
"epoch": 0.4317111459968603,
"grad_norm": 24.653995851898625,
"learning_rate": 3.505559688866229e-07,
"logits/chosen": 5769.158203125,
"logits/rejected": 5321.18359375,
"logps/chosen": -375.0007019042969,
"logps/rejected": -469.31951904296875,
"loss": 0.6294,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -160.26025390625,
"rewards/margins": 83.9951171875,
"rewards/rejected": -244.25537109375,
"step": 550
},
{
"epoch": 0.43956043956043955,
"grad_norm": 17.972565211019212,
"learning_rate": 3.4424441382108826e-07,
"logits/chosen": 5792.72998046875,
"logits/rejected": 5407.20068359375,
"logps/chosen": -351.23291015625,
"logps/rejected": -403.1977233886719,
"loss": 0.6269,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -147.9576416015625,
"rewards/margins": 62.0308952331543,
"rewards/rejected": -209.98855590820312,
"step": 560
},
{
"epoch": 0.4474097331240188,
"grad_norm": 18.423331599938237,
"learning_rate": 3.378620382651523e-07,
"logits/chosen": 6100.2392578125,
"logits/rejected": 5616.0419921875,
"logps/chosen": -368.9530334472656,
"logps/rejected": -418.500732421875,
"loss": 0.6102,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -125.99314880371094,
"rewards/margins": 68.73707580566406,
"rewards/rejected": -194.730224609375,
"step": 570
},
{
"epoch": 0.4552590266875981,
"grad_norm": 40.58588385026,
"learning_rate": 3.314136382905234e-07,
"logits/chosen": 6104.7314453125,
"logits/rejected": 5396.8330078125,
"logps/chosen": -345.4382629394531,
"logps/rejected": -464.142578125,
"loss": 0.6198,
"rewards/accuracies": 0.783333420753479,
"rewards/chosen": -133.0227813720703,
"rewards/margins": 117.80845642089844,
"rewards/rejected": -250.8312530517578,
"step": 580
},
{
"epoch": 0.4631083202511774,
"grad_norm": 21.906951775528675,
"learning_rate": 3.249040595833274e-07,
"logits/chosen": 6578.75537109375,
"logits/rejected": 5453.22900390625,
"logps/chosen": -414.60174560546875,
"logps/rejected": -468.26995849609375,
"loss": 0.6167,
"rewards/accuracies": 0.7666667103767395,
"rewards/chosen": -182.8759307861328,
"rewards/margins": 106.53523254394531,
"rewards/rejected": -289.41119384765625,
"step": 590
},
{
"epoch": 0.47095761381475665,
"grad_norm": 22.906733288517135,
"learning_rate": 3.1833819380279023e-07,
"logits/chosen": 6241.1650390625,
"logits/rejected": 5260.3125,
"logps/chosen": -361.357666015625,
"logps/rejected": -456.5220642089844,
"loss": 0.6132,
"rewards/accuracies": 0.783333420753479,
"rewards/chosen": -183.8093719482422,
"rewards/margins": 83.0264663696289,
"rewards/rejected": -266.8358154296875,
"step": 600
},
{
"epoch": 0.478806907378336,
"grad_norm": 12.469826515590201,
"learning_rate": 3.11720974905373e-07,
"logits/chosen": 6012.91552734375,
"logits/rejected": 5177.1376953125,
"logps/chosen": -345.945068359375,
"logps/rejected": -407.5060729980469,
"loss": 0.6138,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -139.82691955566406,
"rewards/margins": 82.2566146850586,
"rewards/rejected": -222.0835418701172,
"step": 610
},
{
"epoch": 0.48665620094191525,
"grad_norm": 13.764094508128613,
"learning_rate": 3.0505737543712275e-07,
"logits/chosen": 5078.953125,
"logits/rejected": 4125.95361328125,
"logps/chosen": -322.87664794921875,
"logps/rejected": -369.62127685546875,
"loss": 0.6162,
"rewards/accuracies": 0.75,
"rewards/chosen": -139.11798095703125,
"rewards/margins": 69.40438079833984,
"rewards/rejected": -208.52236938476562,
"step": 620
},
{
"epoch": 0.4945054945054945,
"grad_norm": 27.054436253186108,
"learning_rate": 2.9835240279702513e-07,
"logits/chosen": 6608.65478515625,
"logits/rejected": 5554.52587890625,
"logps/chosen": -362.4518127441406,
"logps/rejected": -418.16998291015625,
"loss": 0.6106,
"rewards/accuracies": 0.7916666865348816,
"rewards/chosen": -119.63581848144531,
"rewards/margins": 104.2861328125,
"rewards/rejected": -223.9219512939453,
"step": 630
},
{
"epoch": 0.5023547880690737,
"grad_norm": 17.671754927072275,
"learning_rate": 2.9161109547416667e-07,
"logits/chosen": 6268.4951171875,
"logits/rejected": 5317.8037109375,
"logps/chosen": -364.1328125,
"logps/rejected": -429.2177734375,
"loss": 0.6135,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -153.37124633789062,
"rewards/margins": 61.33759689331055,
"rewards/rejected": -214.70883178710938,
"step": 640
},
{
"epoch": 0.5102040816326531,
"grad_norm": 14.137157591920442,
"learning_rate": 2.848385192615339e-07,
"logits/chosen": 5331.8037109375,
"logits/rejected": 4288.7294921875,
"logps/chosen": -347.09747314453125,
"logps/rejected": -396.7239074707031,
"loss": 0.6211,
"rewards/accuracies": 0.8000000715255737,
"rewards/chosen": -150.75814819335938,
"rewards/margins": 77.23800659179688,
"rewards/rejected": -227.9961395263672,
"step": 650
},
{
"epoch": 0.5180533751962323,
"grad_norm": 16.88587503102184,
"learning_rate": 2.780397634492949e-07,
"logits/chosen": 5999.3125,
"logits/rejected": 4691.3330078125,
"logps/chosen": -358.98828125,
"logps/rejected": -429.1502380371094,
"loss": 0.6216,
"rewards/accuracies": 0.8166666030883789,
"rewards/chosen": -141.60177612304688,
"rewards/margins": 99.60104370117188,
"rewards/rejected": -241.2028045654297,
"step": 660
},
{
"epoch": 0.5259026687598116,
"grad_norm": 22.96076824322748,
"learning_rate": 2.71219937000424e-07,
"logits/chosen": 6003.61962890625,
"logits/rejected": 4884.7255859375,
"logps/chosen": -345.4893493652344,
"logps/rejected": -394.2846374511719,
"loss": 0.6262,
"rewards/accuracies": 0.7666667103767395,
"rewards/chosen": -138.72991943359375,
"rewards/margins": 66.7003402709961,
"rewards/rejected": -205.4302520751953,
"step": 670
},
{
"epoch": 0.533751962323391,
"grad_norm": 22.813069309884035,
"learning_rate": 2.6438416471154273e-07,
"logits/chosen": 5843.70458984375,
"logits/rejected": 4834.39599609375,
"logps/chosen": -359.9562683105469,
"logps/rejected": -401.135498046875,
"loss": 0.6166,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -146.29505920410156,
"rewards/margins": 75.61378479003906,
"rewards/rejected": -221.9088592529297,
"step": 680
},
{
"epoch": 0.5416012558869702,
"grad_norm": 18.765764198829793,
"learning_rate": 2.5753758336186326e-07,
"logits/chosen": 5820.8349609375,
"logits/rejected": 5283.55078125,
"logps/chosen": -351.8568420410156,
"logps/rejected": -444.1844787597656,
"loss": 0.6021,
"rewards/accuracies": 0.7916666269302368,
"rewards/chosen": -142.75038146972656,
"rewards/margins": 85.23744201660156,
"rewards/rejected": -227.9878387451172,
"step": 690
},
{
"epoch": 0.5494505494505495,
"grad_norm": 14.748661687164306,
"learning_rate": 2.5068533785312666e-07,
"logits/chosen": 5523.3955078125,
"logits/rejected": 5268.107421875,
"logps/chosen": -343.63934326171875,
"logps/rejected": -420.27886962890625,
"loss": 0.6369,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -154.3790283203125,
"rewards/margins": 78.92584991455078,
"rewards/rejected": -233.3048553466797,
"step": 700
},
{
"epoch": 0.5572998430141287,
"grad_norm": 15.464054804569159,
"learning_rate": 2.4383257734343794e-07,
"logits/chosen": 5437.234375,
"logits/rejected": 5426.69677734375,
"logps/chosen": -344.1163330078125,
"logps/rejected": -430.07904052734375,
"loss": 0.608,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -150.20494079589844,
"rewards/margins": 77.2534408569336,
"rewards/rejected": -227.4583740234375,
"step": 710
},
{
"epoch": 0.565149136577708,
"grad_norm": 18.732066069845217,
"learning_rate": 2.3698445137790258e-07,
"logits/chosen": 5845.06640625,
"logits/rejected": 4940.61865234375,
"logps/chosen": -361.6242370605469,
"logps/rejected": -430.0552673339844,
"loss": 0.6127,
"rewards/accuracies": 0.75,
"rewards/chosen": -142.62103271484375,
"rewards/margins": 87.6690902709961,
"rewards/rejected": -230.2901153564453,
"step": 720
},
{
"epoch": 0.5729984301412873,
"grad_norm": 17.14795101097142,
"learning_rate": 2.3014610601897157e-07,
"logits/chosen": 6328.41552734375,
"logits/rejected": 4775.5185546875,
"logps/chosen": -369.47235107421875,
"logps/rejected": -404.1590881347656,
"loss": 0.6148,
"rewards/accuracies": 0.7750000357627869,
"rewards/chosen": -144.32643127441406,
"rewards/margins": 84.75894165039062,
"rewards/rejected": -229.08535766601562,
"step": 730
},
{
"epoch": 0.5808477237048666,
"grad_norm": 13.832807015732437,
"learning_rate": 2.2332267997940513e-07,
"logits/chosen": 5258.61572265625,
"logits/rejected": 4415.40576171875,
"logps/chosen": -324.30242919921875,
"logps/rejected": -375.59405517578125,
"loss": 0.6053,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -131.87802124023438,
"rewards/margins": 81.06999206542969,
"rewards/rejected": -212.947998046875,
"step": 740
},
{
"epoch": 0.5886970172684458,
"grad_norm": 15.482096718237434,
"learning_rate": 2.1651930076075723e-07,
"logits/chosen": 5748.50830078125,
"logits/rejected": 5138.494140625,
"logps/chosen": -329.36572265625,
"logps/rejected": -381.31109619140625,
"loss": 0.6253,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -148.93975830078125,
"rewards/margins": 65.27706909179688,
"rewards/rejected": -214.21682739257812,
"step": 750
},
{
"epoch": 0.5965463108320251,
"grad_norm": 16.50457423188824,
"learning_rate": 2.0974108080028692e-07,
"logits/chosen": 6050.1806640625,
"logits/rejected": 4695.55126953125,
"logps/chosen": -332.60772705078125,
"logps/rejected": -383.9396057128906,
"loss": 0.615,
"rewards/accuracies": 0.7666667103767395,
"rewards/chosen": -129.35684204101562,
"rewards/margins": 80.0522232055664,
"rewards/rejected": -209.4090576171875,
"step": 760
},
{
"epoch": 0.6043956043956044,
"grad_norm": 16.19292968682205,
"learning_rate": 2.0299311362918773e-07,
"logits/chosen": 6270.7763671875,
"logits/rejected": 5331.49755859375,
"logps/chosen": -374.3070373535156,
"logps/rejected": -449.4652404785156,
"loss": 0.6274,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -144.57337951660156,
"rewards/margins": 81.0317153930664,
"rewards/rejected": -225.60513305664062,
"step": 770
},
{
"epoch": 0.6122448979591837,
"grad_norm": 13.854569491977498,
"learning_rate": 1.962804700450265e-07,
"logits/chosen": 6101.94970703125,
"logits/rejected": 5776.4658203125,
"logps/chosen": -351.2977294921875,
"logps/rejected": -441.8768005371094,
"loss": 0.6162,
"rewards/accuracies": 0.73333340883255,
"rewards/chosen": -135.1923828125,
"rewards/margins": 69.29673767089844,
"rewards/rejected": -204.4891357421875,
"step": 780
},
{
"epoch": 0.6200941915227629,
"grad_norm": 26.357276507505432,
"learning_rate": 1.8960819430126334e-07,
"logits/chosen": 5692.7373046875,
"logits/rejected": 4988.34423828125,
"logps/chosen": -352.75152587890625,
"logps/rejected": -436.21380615234375,
"loss": 0.606,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -154.62054443359375,
"rewards/margins": 92.99554443359375,
"rewards/rejected": -247.61605834960938,
"step": 790
},
{
"epoch": 0.6279434850863422,
"grad_norm": 26.128991277323944,
"learning_rate": 1.8298130031671972e-07,
"logits/chosen": 5666.7470703125,
"logits/rejected": 4928.1630859375,
"logps/chosen": -374.0252380371094,
"logps/rejected": -446.89398193359375,
"loss": 0.6227,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -157.30239868164062,
"rewards/margins": 76.89336395263672,
"rewards/rejected": -234.1957550048828,
"step": 800
},
{
"epoch": 0.6357927786499215,
"grad_norm": 21.20668683192694,
"learning_rate": 1.7640476790784075e-07,
"logits/chosen": 5246.98876953125,
"logits/rejected": 4623.890625,
"logps/chosen": -351.83685302734375,
"logps/rejected": -468.95904541015625,
"loss": 0.6118,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -149.3814239501953,
"rewards/margins": 98.28030395507812,
"rewards/rejected": -247.6617431640625,
"step": 810
},
{
"epoch": 0.6436420722135008,
"grad_norm": 20.646225897051636,
"learning_rate": 1.6988353904658492e-07,
"logits/chosen": 5737.23974609375,
"logits/rejected": 4359.0126953125,
"logps/chosen": -371.08319091796875,
"logps/rejected": -399.9681396484375,
"loss": 0.6117,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -149.86865234375,
"rewards/margins": 81.41114807128906,
"rewards/rejected": -231.27981567382812,
"step": 820
},
{
"epoch": 0.6514913657770801,
"grad_norm": 39.77030169382563,
"learning_rate": 1.634225141467513e-07,
"logits/chosen": 5644.4755859375,
"logits/rejected": 4988.80859375,
"logps/chosen": -366.618408203125,
"logps/rejected": -447.57489013671875,
"loss": 0.6127,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -162.43235778808594,
"rewards/margins": 92.68769836425781,
"rewards/rejected": -255.1200408935547,
"step": 830
},
{
"epoch": 0.6593406593406593,
"grad_norm": 13.895052837155353,
"learning_rate": 1.570265483815364e-07,
"logits/chosen": 6220.3994140625,
"logits/rejected": 5031.82080078125,
"logps/chosen": -372.7491760253906,
"logps/rejected": -434.67779541015625,
"loss": 0.6189,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -145.41937255859375,
"rewards/margins": 76.46521759033203,
"rewards/rejected": -221.8845977783203,
"step": 840
},
{
"epoch": 0.6671899529042387,
"grad_norm": 12.774810316086143,
"learning_rate": 1.5070044803508691e-07,
"logits/chosen": 5741.9931640625,
"logits/rejected": 5091.0048828125,
"logps/chosen": -347.2725524902344,
"logps/rejected": -424.1080017089844,
"loss": 0.5974,
"rewards/accuracies": 0.7750000357627869,
"rewards/chosen": -131.7950897216797,
"rewards/margins": 91.85758209228516,
"rewards/rejected": -223.6526641845703,
"step": 850
},
{
"epoch": 0.6750392464678179,
"grad_norm": 17.871490937493935,
"learning_rate": 1.444489668907914e-07,
"logits/chosen": 6167.5400390625,
"logits/rejected": 5200.32568359375,
"logps/chosen": -374.5239562988281,
"logps/rejected": -405.5271301269531,
"loss": 0.6325,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -126.7532958984375,
"rewards/margins": 76.09200286865234,
"rewards/rejected": -202.84530639648438,
"step": 860
},
{
"epoch": 0.6828885400313972,
"grad_norm": 17.06846349127134,
"learning_rate": 1.3827680265902232e-07,
"logits/chosen": 6157.00634765625,
"logits/rejected": 5048.666015625,
"logps/chosen": -344.4704895019531,
"logps/rejected": -391.15289306640625,
"loss": 0.6177,
"rewards/accuracies": 0.7583333849906921,
"rewards/chosen": -114.37376403808594,
"rewards/margins": 76.09016418457031,
"rewards/rejected": -190.4639434814453,
"step": 870
},
{
"epoch": 0.6907378335949764,
"grad_norm": 15.528467832759812,
"learning_rate": 1.3218859344701632e-07,
"logits/chosen": 5415.63671875,
"logits/rejected": 5133.5498046875,
"logps/chosen": -312.1150817871094,
"logps/rejected": -407.8897705078125,
"loss": 0.6164,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -101.6041259765625,
"rewards/margins": 74.69647216796875,
"rewards/rejected": -176.3006134033203,
"step": 880
},
{
"epoch": 0.6985871271585558,
"grad_norm": 17.183055961017374,
"learning_rate": 1.2618891427354172e-07,
"logits/chosen": 6400.65478515625,
"logits/rejected": 5158.58837890625,
"logps/chosen": -360.47662353515625,
"logps/rejected": -399.84051513671875,
"loss": 0.6248,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -102.88056945800781,
"rewards/margins": 85.1794204711914,
"rewards/rejected": -188.0599822998047,
"step": 890
},
{
"epoch": 0.706436420722135,
"grad_norm": 14.954999382868223,
"learning_rate": 1.202822736309758e-07,
"logits/chosen": 5437.9140625,
"logits/rejected": 4952.78515625,
"logps/chosen": -315.7103576660156,
"logps/rejected": -405.45086669921875,
"loss": 0.6149,
"rewards/accuracies": 0.75,
"rewards/chosen": -111.3611831665039,
"rewards/margins": 83.65962982177734,
"rewards/rejected": -195.02081298828125,
"step": 900
},
{
"epoch": 0.7142857142857143,
"grad_norm": 17.482836351512447,
"learning_rate": 1.1447311009737299e-07,
"logits/chosen": 5352.5107421875,
"logits/rejected": 5015.8212890625,
"logps/chosen": -335.0443420410156,
"logps/rejected": -422.0838317871094,
"loss": 0.6238,
"rewards/accuracies": 0.7916666269302368,
"rewards/chosen": -126.8180160522461,
"rewards/margins": 87.3373031616211,
"rewards/rejected": -214.1553497314453,
"step": 910
},
{
"epoch": 0.7221350078492935,
"grad_norm": 14.726061760391564,
"learning_rate": 1.0876578900107053e-07,
"logits/chosen": 5908.712890625,
"logits/rejected": 4864.37109375,
"logps/chosen": -364.682861328125,
"logps/rejected": -407.4881591796875,
"loss": 0.6111,
"rewards/accuracies": 0.8333333134651184,
"rewards/chosen": -132.26129150390625,
"rewards/margins": 77.08457946777344,
"rewards/rejected": -209.34585571289062,
"step": 920
},
{
"epoch": 0.7299843014128728,
"grad_norm": 12.17491356029005,
"learning_rate": 1.0316459914033793e-07,
"logits/chosen": 5847.6396484375,
"logits/rejected": 4304.47998046875,
"logps/chosen": -374.629638671875,
"logps/rejected": -411.969482421875,
"loss": 0.6084,
"rewards/accuracies": 0.783333420753479,
"rewards/chosen": -138.6407012939453,
"rewards/margins": 87.40455627441406,
"rewards/rejected": -226.04525756835938,
"step": 930
},
{
"epoch": 0.7378335949764521,
"grad_norm": 14.942393863065405,
"learning_rate": 9.767374956053584e-08,
"logits/chosen": 5655.4033203125,
"logits/rejected": 4892.77978515625,
"logps/chosen": -362.05084228515625,
"logps/rejected": -447.89691162109375,
"loss": 0.616,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -143.75718688964844,
"rewards/margins": 99.15988159179688,
"rewards/rejected": -242.91708374023438,
"step": 940
},
{
"epoch": 0.7456828885400314,
"grad_norm": 15.68275244303951,
"learning_rate": 9.229736639120561e-08,
"logits/chosen": 5820.70068359375,
"logits/rejected": 5320.892578125,
"logps/chosen": -365.70635986328125,
"logps/rejected": -428.5652770996094,
"loss": 0.631,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -147.90518188476562,
"rewards/margins": 66.50559997558594,
"rewards/rejected": -214.4107666015625,
"step": 950
},
{
"epoch": 0.7535321821036107,
"grad_norm": 19.23996720411505,
"learning_rate": 8.70394897454659e-08,
"logits/chosen": 5713.681640625,
"logits/rejected": 5020.9267578125,
"logps/chosen": -342.4471130371094,
"logps/rejected": -417.79901123046875,
"loss": 0.6123,
"rewards/accuracies": 0.8333333730697632,
"rewards/chosen": -124.01869201660156,
"rewards/margins": 91.303955078125,
"rewards/rejected": -215.32266235351562,
"step": 960
},
{
"epoch": 0.7613814756671899,
"grad_norm": 22.33397911127439,
"learning_rate": 8.19040706840472e-08,
"logits/chosen": 5806.43359375,
"logits/rejected": 4807.3515625,
"logps/chosen": -376.2249755859375,
"logps/rejected": -434.6560974121094,
"loss": 0.6119,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -136.00497436523438,
"rewards/margins": 91.96058654785156,
"rewards/rejected": -227.965576171875,
"step": 970
},
{
"epoch": 0.7692307692307693,
"grad_norm": 13.761008157112903,
"learning_rate": 7.689496824624525e-08,
"logits/chosen": 5534.2548828125,
"logits/rejected": 4398.90087890625,
"logps/chosen": -362.1374816894531,
"logps/rejected": -422.9102478027344,
"loss": 0.6029,
"rewards/accuracies": 0.8083332777023315,
"rewards/chosen": -133.32089233398438,
"rewards/margins": 99.09576416015625,
"rewards/rejected": -232.4166717529297,
"step": 980
},
{
"epoch": 0.7770800627943485,
"grad_norm": 18.096681200370565,
"learning_rate": 7.201594655002458e-08,
"logits/chosen": 5845.0439453125,
"logits/rejected": 4824.6376953125,
"logps/chosen": -375.0849914550781,
"logps/rejected": -435.990478515625,
"loss": 0.599,
"rewards/accuracies": 0.783333420753479,
"rewards/chosen": -149.19004821777344,
"rewards/margins": 92.73072814941406,
"rewards/rejected": -241.92074584960938,
"step": 990
},
{
"epoch": 0.7849293563579278,
"grad_norm": 19.80400883141873,
"learning_rate": 6.727067196345099e-08,
"logits/chosen": 5522.568359375,
"logits/rejected": 4610.0341796875,
"logps/chosen": -366.5689392089844,
"logps/rejected": -410.90081787109375,
"loss": 0.6154,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -152.8131103515625,
"rewards/margins": 77.74247741699219,
"rewards/rejected": -230.55557250976562,
"step": 1000
},
{
"epoch": 0.792778649921507,
"grad_norm": 19.909366846280882,
"learning_rate": 6.26627103495786e-08,
"logits/chosen": 5702.2314453125,
"logits/rejected": 4692.59130859375,
"logps/chosen": -357.6479187011719,
"logps/rejected": -417.7315368652344,
"loss": 0.6253,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -142.938720703125,
"rewards/margins": 83.35922241210938,
"rewards/rejected": -226.2979736328125,
"step": 1010
},
{
"epoch": 0.8006279434850864,
"grad_norm": 13.222506396766855,
"learning_rate": 5.8195524386862374e-08,
"logits/chosen": 5807.0205078125,
"logits/rejected": 5114.4541015625,
"logps/chosen": -379.9997863769531,
"logps/rejected": -456.28125,
"loss": 0.6175,
"rewards/accuracies": 0.8166667222976685,
"rewards/chosen": -131.8211669921875,
"rewards/margins": 98.37938690185547,
"rewards/rejected": -230.20053100585938,
"step": 1020
},
{
"epoch": 0.8084772370486656,
"grad_norm": 14.840716751556135,
"learning_rate": 5.38724709671092e-08,
"logits/chosen": 6213.39697265625,
"logits/rejected": 5793.4111328125,
"logps/chosen": -374.4881286621094,
"logps/rejected": -466.87982177734375,
"loss": 0.6064,
"rewards/accuracies": 0.7916666865348816,
"rewards/chosen": -143.019775390625,
"rewards/margins": 90.74076843261719,
"rewards/rejected": -233.7605438232422,
"step": 1030
},
{
"epoch": 0.8163265306122449,
"grad_norm": 15.902716058683295,
"learning_rate": 4.969679867292276e-08,
"logits/chosen": 5481.22265625,
"logits/rejected": 4915.169921875,
"logps/chosen": -365.6921691894531,
"logps/rejected": -450.8426818847656,
"loss": 0.6114,
"rewards/accuracies": 0.7583333849906921,
"rewards/chosen": -145.59046936035156,
"rewards/margins": 96.06826782226562,
"rewards/rejected": -241.6587371826172,
"step": 1040
},
{
"epoch": 0.8241758241758241,
"grad_norm": 15.329902609184328,
"learning_rate": 4.5671645336537416e-08,
"logits/chosen": 5554.18017578125,
"logits/rejected": 4997.1005859375,
"logps/chosen": -381.65789794921875,
"logps/rejected": -449.450927734375,
"loss": 0.6103,
"rewards/accuracies": 0.7916666865348816,
"rewards/chosen": -144.17726135253906,
"rewards/margins": 89.47932434082031,
"rewards/rejected": -233.6565399169922,
"step": 1050
},
{
"epoch": 0.8320251177394035,
"grad_norm": 55.197735270402006,
"learning_rate": 4.180003568187776e-08,
"logits/chosen": 6873.0361328125,
"logits/rejected": 5345.8271484375,
"logps/chosen": -410.3626403808594,
"logps/rejected": -439.2491149902344,
"loss": 0.6283,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -149.37527465820312,
"rewards/margins": 69.83467102050781,
"rewards/rejected": -219.20993041992188,
"step": 1060
},
{
"epoch": 0.8398744113029827,
"grad_norm": 19.762608641409,
"learning_rate": 3.8084879051612144e-08,
"logits/chosen": 5707.0205078125,
"logits/rejected": 5145.00927734375,
"logps/chosen": -366.8685302734375,
"logps/rejected": -423.092529296875,
"loss": 0.6147,
"rewards/accuracies": 0.7583333849906921,
"rewards/chosen": -141.62551879882812,
"rewards/margins": 88.30266571044922,
"rewards/rejected": -229.9281768798828,
"step": 1070
},
{
"epoch": 0.847723704866562,
"grad_norm": 16.196550037488134,
"learning_rate": 3.452896722091128e-08,
"logits/chosen": 6264.1298828125,
"logits/rejected": 4766.61083984375,
"logps/chosen": -405.56341552734375,
"logps/rejected": -432.5325622558594,
"loss": 0.6033,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -138.9940948486328,
"rewards/margins": 92.55856323242188,
"rewards/rejected": -231.55264282226562,
"step": 1080
},
{
"epoch": 0.8555729984301413,
"grad_norm": 20.291014842445787,
"learning_rate": 3.11349722995527e-08,
"logits/chosen": 6290.5830078125,
"logits/rejected": 4671.265625,
"logps/chosen": -373.1634826660156,
"logps/rejected": -431.679931640625,
"loss": 0.6037,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -145.68887329101562,
"rewards/margins": 73.10652160644531,
"rewards/rejected": -218.79537963867188,
"step": 1090
},
{
"epoch": 0.8634222919937206,
"grad_norm": 17.99288812895582,
"learning_rate": 2.7905444723949762e-08,
"logits/chosen": 6118.28271484375,
"logits/rejected": 5002.5498046875,
"logps/chosen": -382.5477294921875,
"logps/rejected": -433.43798828125,
"loss": 0.6151,
"rewards/accuracies": 0.7750000953674316,
"rewards/chosen": -144.01138305664062,
"rewards/margins": 99.92918395996094,
"rewards/rejected": -243.9405517578125,
"step": 1100
},
{
"epoch": 0.8712715855572999,
"grad_norm": 18.552548907991827,
"learning_rate": 2.484281134061142e-08,
"logits/chosen": 6458.51123046875,
"logits/rejected": 5143.70263671875,
"logps/chosen": -413.1473693847656,
"logps/rejected": -460.0634765625,
"loss": 0.6059,
"rewards/accuracies": 0.7916666865348816,
"rewards/chosen": -148.53378295898438,
"rewards/margins": 89.26164245605469,
"rewards/rejected": -237.79541015625,
"step": 1110
},
{
"epoch": 0.8791208791208791,
"grad_norm": 39.56227188436796,
"learning_rate": 2.194937358247506e-08,
"logits/chosen": 6335.6669921875,
"logits/rejected": 5074.86474609375,
"logps/chosen": -396.36737060546875,
"logps/rejected": -461.27423095703125,
"loss": 0.608,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -151.22695922851562,
"rewards/margins": 93.27647399902344,
"rewards/rejected": -244.50341796875,
"step": 1120
},
{
"epoch": 0.8869701726844584,
"grad_norm": 18.148758631996266,
"learning_rate": 1.9227305739481612e-08,
"logits/chosen": 5765.23486328125,
"logits/rejected": 4463.29541015625,
"logps/chosen": -364.4913330078125,
"logps/rejected": -408.61187744140625,
"loss": 0.6017,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -129.7092742919922,
"rewards/margins": 98.6770248413086,
"rewards/rejected": -228.3863067626953,
"step": 1130
},
{
"epoch": 0.8948194662480377,
"grad_norm": 16.994877433404973,
"learning_rate": 1.6678653324693787e-08,
"logits/chosen": 6387.3359375,
"logits/rejected": 5097.41015625,
"logps/chosen": -400.7387390136719,
"logps/rejected": -451.8038024902344,
"loss": 0.5946,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -143.11801147460938,
"rewards/margins": 87.87274932861328,
"rewards/rejected": -230.99075317382812,
"step": 1140
},
{
"epoch": 0.902668759811617,
"grad_norm": 11.193775448088072,
"learning_rate": 1.4305331537183384e-08,
"logits/chosen": 5634.30419921875,
"logits/rejected": 5102.05615234375,
"logps/chosen": -367.156005859375,
"logps/rejected": -442.4237365722656,
"loss": 0.6015,
"rewards/accuracies": 0.7166666984558105,
"rewards/chosen": -140.172607421875,
"rewards/margins": 82.76316833496094,
"rewards/rejected": -222.935791015625,
"step": 1150
},
{
"epoch": 0.9105180533751962,
"grad_norm": 18.50142880229726,
"learning_rate": 1.2109123822844653e-08,
"logits/chosen": 5791.82861328125,
"logits/rejected": 4512.34228515625,
"logps/chosen": -374.96600341796875,
"logps/rejected": -425.4412536621094,
"loss": 0.6118,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -146.40921020507812,
"rewards/margins": 83.09394836425781,
"rewards/rejected": -229.50314331054688,
"step": 1160
},
{
"epoch": 0.9183673469387755,
"grad_norm": 16.45322143829113,
"learning_rate": 1.0091680534213387e-08,
"logits/chosen": 6355.1123046875,
"logits/rejected": 6031.12744140625,
"logps/chosen": -378.3938903808594,
"logps/rejected": -480.53863525390625,
"loss": 0.6105,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -135.44552612304688,
"rewards/margins": 98.34616088867188,
"rewards/rejected": -233.7917022705078,
"step": 1170
},
{
"epoch": 0.9262166405023547,
"grad_norm": 23.32993982299152,
"learning_rate": 8.254517690300944e-09,
"logits/chosen": 5549.52392578125,
"logits/rejected": 4957.7138671875,
"logps/chosen": -378.49566650390625,
"logps/rejected": -448.3548889160156,
"loss": 0.6038,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -138.54971313476562,
"rewards/margins": 92.9834213256836,
"rewards/rejected": -231.5331268310547,
"step": 1180
},
{
"epoch": 0.9340659340659341,
"grad_norm": 11.697466694155857,
"learning_rate": 6.599015837372907e-09,
"logits/chosen": 6053.71044921875,
"logits/rejected": 5201.91845703125,
"logps/chosen": -410.1524963378906,
"logps/rejected": -463.343994140625,
"loss": 0.6132,
"rewards/accuracies": 0.7166666388511658,
"rewards/chosen": -161.26699829101562,
"rewards/margins": 84.92450714111328,
"rewards/rejected": -246.1914825439453,
"step": 1190
},
{
"epoch": 0.9419152276295133,
"grad_norm": 29.00561816867174,
"learning_rate": 5.126419011529992e-09,
"logits/chosen": 6306.44140625,
"logits/rejected": 5252.5625,
"logps/chosen": -392.0524597167969,
"logps/rejected": -466.8185119628906,
"loss": 0.6043,
"rewards/accuracies": 0.8333333134651184,
"rewards/chosen": -136.3907470703125,
"rewards/margins": 111.76222229003906,
"rewards/rejected": -248.15292358398438,
"step": 1200
},
{
"epoch": 0.9497645211930926,
"grad_norm": 24.03652143626847,
"learning_rate": 3.837833803870177e-09,
"logits/chosen": 5867.791015625,
"logits/rejected": 5074.435546875,
"logps/chosen": -392.6157531738281,
"logps/rejected": -466.23956298828125,
"loss": 0.6164,
"rewards/accuracies": 0.783333420753479,
"rewards/chosen": -152.4302520751953,
"rewards/margins": 95.60807037353516,
"rewards/rejected": -248.03829956054688,
"step": 1210
},
{
"epoch": 0.957613814756672,
"grad_norm": 23.781485972909927,
"learning_rate": 2.734228528934679e-09,
"logits/chosen": 7340.81494140625,
"logits/rejected": 5272.85400390625,
"logps/chosen": -449.55072021484375,
"logps/rejected": -496.0115661621094,
"loss": 0.6098,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -152.78236389160156,
"rewards/margins": 98.0320053100586,
"rewards/rejected": -250.8143768310547,
"step": 1220
},
{
"epoch": 0.9654631083202512,
"grad_norm": 25.628197989466248,
"learning_rate": 1.8164324970625645e-09,
"logits/chosen": 6506.375,
"logits/rejected": 5016.78759765625,
"logps/chosen": -400.7377624511719,
"logps/rejected": -448.590576171875,
"loss": 0.624,
"rewards/accuracies": 0.75,
"rewards/chosen": -140.1560821533203,
"rewards/margins": 95.43777465820312,
"rewards/rejected": -235.59384155273438,
"step": 1230
},
{
"epoch": 0.9733124018838305,
"grad_norm": 19.957302011366814,
"learning_rate": 1.0851353912008642e-09,
"logits/chosen": 5557.27685546875,
"logits/rejected": 5068.2919921875,
"logps/chosen": -379.62493896484375,
"logps/rejected": -472.33477783203125,
"loss": 0.6141,
"rewards/accuracies": 0.7333332896232605,
"rewards/chosen": -148.0125732421875,
"rewards/margins": 89.1643295288086,
"rewards/rejected": -237.17691040039062,
"step": 1240
},
{
"epoch": 0.9811616954474097,
"grad_norm": 15.727811449834665,
"learning_rate": 5.408867486384471e-10,
"logits/chosen": 5699.9697265625,
"logits/rejected": 4726.44580078125,
"logps/chosen": -367.1297912597656,
"logps/rejected": -415.16668701171875,
"loss": 0.6068,
"rewards/accuracies": 0.7833332419395447,
"rewards/chosen": -136.82162475585938,
"rewards/margins": 88.91475677490234,
"rewards/rejected": -225.7363739013672,
"step": 1250
},
{
"epoch": 0.989010989010989,
"grad_norm": 18.48673738798219,
"learning_rate": 1.840955480532924e-10,
"logits/chosen": 5415.2001953125,
"logits/rejected": 5022.5712890625,
"logps/chosen": -370.6057434082031,
"logps/rejected": -442.42633056640625,
"loss": 0.6035,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -138.44517517089844,
"rewards/margins": 87.65608215332031,
"rewards/rejected": -226.10122680664062,
"step": 1260
},
{
"epoch": 0.9968602825745683,
"grad_norm": 20.059405514745603,
"learning_rate": 1.502990218302247e-11,
"logits/chosen": 5661.7509765625,
"logits/rejected": 4540.9052734375,
"logps/chosen": -372.86297607421875,
"logps/rejected": -427.48162841796875,
"loss": 0.6166,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -148.38900756835938,
"rewards/margins": 92.78841400146484,
"rewards/rejected": -241.1774139404297,
"step": 1270
},
{
"epoch": 1.0,
"step": 1274,
"total_flos": 0.0,
"train_loss": 0.6278583559922558,
"train_runtime": 14837.7437,
"train_samples_per_second": 4.12,
"train_steps_per_second": 0.086
}
],
"logging_steps": 10,
"max_steps": 1274,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}
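
Note (not part of the original file): the "log_history" array above can be inspected with a short script. Below is a minimal sketch, assuming the JSON is saved locally as trainer_state.json and that matplotlib is installed; the input and output filenames are illustrative, not taken from the repository.

import json

import matplotlib.pyplot as plt

# Load the trainer state dumped above (path is illustrative).
with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step entries carry a "loss" key; the final summary entry does not.
logs = [entry for entry in state["log_history"] if "loss" in entry]

steps = [entry["step"] for entry in logs]
loss = [entry["loss"] for entry in logs]
margins = [entry["rewards/margins"] for entry in logs]
accuracies = [entry["rewards/accuracies"] for entry in logs]

# Plot the DPO loss, reward margins, and preference accuracy over training.
fig, axes = plt.subplots(1, 3, figsize=(12, 3))
axes[0].plot(steps, loss)
axes[0].set_title("loss")
axes[1].plot(steps, margins)
axes[1].set_title("rewards/margins")
axes[2].plot(steps, accuracies)
axes[2].set_title("rewards/accuracies")
for ax in axes:
    ax.set_xlabel("step")
fig.tight_layout()
fig.savefig("dpo_training_curves.png")  # output filename is illustrative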