{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 1274,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0007849293563579278,
"grad_norm": 76.09110305026772,
"learning_rate": 3.90625e-09,
"logits/chosen": 5881.4375,
"logits/rejected": 2834.66162109375,
"logps/chosen": -257.5969543457031,
"logps/rejected": -120.09489440917969,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.007849293563579277,
"grad_norm": 84.53465777823584,
"learning_rate": 3.9062499999999997e-08,
"logits/chosen": 4946.009765625,
"logits/rejected": 4332.37451171875,
"logps/chosen": -248.8301544189453,
"logps/rejected": -214.27517700195312,
"loss": 0.6939,
"rewards/accuracies": 0.43518519401550293,
"rewards/chosen": -0.013952851295471191,
"rewards/margins": -0.030882200226187706,
"rewards/rejected": 0.016929350793361664,
"step": 10
},
{
"epoch": 0.015698587127158554,
"grad_norm": 70.05213184547209,
"learning_rate": 7.812499999999999e-08,
"logits/chosen": 6023.31396484375,
"logits/rejected": 4842.8310546875,
"logps/chosen": -266.2083435058594,
"logps/rejected": -232.37954711914062,
"loss": 0.6938,
"rewards/accuracies": 0.5000000596046448,
"rewards/chosen": 0.10495556890964508,
"rewards/margins": 0.032393090426921844,
"rewards/rejected": 0.07256247103214264,
"step": 20
},
{
"epoch": 0.023547880690737835,
"grad_norm": 69.00995049951652,
"learning_rate": 1.1718749999999999e-07,
"logits/chosen": 6032.76123046875,
"logits/rejected": 5038.65087890625,
"logps/chosen": -310.43328857421875,
"logps/rejected": -262.861572265625,
"loss": 0.6926,
"rewards/accuracies": 0.5416666865348816,
"rewards/chosen": 0.17605653405189514,
"rewards/margins": 0.0010276169050484896,
"rewards/rejected": 0.17502892017364502,
"step": 30
},
{
"epoch": 0.03139717425431711,
"grad_norm": 69.7664006852952,
"learning_rate": 1.5624999999999999e-07,
"logits/chosen": 5242.107421875,
"logits/rejected": 4344.2041015625,
"logps/chosen": -263.4407043457031,
"logps/rejected": -224.73001098632812,
"loss": 0.6854,
"rewards/accuracies": 0.6000000238418579,
"rewards/chosen": 0.5795542001724243,
"rewards/margins": 0.2455345094203949,
"rewards/rejected": 0.33401963114738464,
"step": 40
},
{
"epoch": 0.03924646781789639,
"grad_norm": 75.39254669427922,
"learning_rate": 1.9531249999999998e-07,
"logits/chosen": 6364.07373046875,
"logits/rejected": 4975.279296875,
"logps/chosen": -324.3768615722656,
"logps/rejected": -257.8438415527344,
"loss": 0.6764,
"rewards/accuracies": 0.7083333134651184,
"rewards/chosen": 1.7327311038970947,
"rewards/margins": 1.0267372131347656,
"rewards/rejected": 0.7059938907623291,
"step": 50
},
{
"epoch": 0.04709576138147567,
"grad_norm": 71.83951557562865,
"learning_rate": 2.3437499999999998e-07,
"logits/chosen": 5382.095703125,
"logits/rejected": 4503.6728515625,
"logps/chosen": -268.0951843261719,
"logps/rejected": -261.60919189453125,
"loss": 0.6609,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": 1.8319120407104492,
"rewards/margins": 1.2502626180648804,
"rewards/rejected": 0.5816493630409241,
"step": 60
},
{
"epoch": 0.054945054945054944,
"grad_norm": 58.133090222213596,
"learning_rate": 2.734375e-07,
"logits/chosen": 5104.4208984375,
"logits/rejected": 4838.51025390625,
"logps/chosen": -231.4933624267578,
"logps/rejected": -232.9977264404297,
"loss": 0.6474,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": 1.7646211385726929,
"rewards/margins": 2.1093406677246094,
"rewards/rejected": -0.3447194993495941,
"step": 70
},
{
"epoch": 0.06279434850863422,
"grad_norm": 57.9049780596852,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": 5690.080078125,
"logits/rejected": 5160.9189453125,
"logps/chosen": -255.88723754882812,
"logps/rejected": -245.4872283935547,
"loss": 0.6237,
"rewards/accuracies": 0.6666666269302368,
"rewards/chosen": 0.0911921039223671,
"rewards/margins": 3.2660727500915527,
"rewards/rejected": -3.1748805046081543,
"step": 80
},
{
"epoch": 0.0706436420722135,
"grad_norm": 60.22967129442178,
"learning_rate": 3.5156249999999997e-07,
"logits/chosen": 5870.26171875,
"logits/rejected": 5052.40869140625,
"logps/chosen": -284.24462890625,
"logps/rejected": -248.9542236328125,
"loss": 0.5984,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": 0.9464850425720215,
"rewards/margins": 6.434256076812744,
"rewards/rejected": -5.487771034240723,
"step": 90
},
{
"epoch": 0.07849293563579278,
"grad_norm": 69.2909935844847,
"learning_rate": 3.9062499999999997e-07,
"logits/chosen": 5756.40869140625,
"logits/rejected": 5542.99267578125,
"logps/chosen": -278.2003173828125,
"logps/rejected": -275.8293151855469,
"loss": 0.61,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": 0.07480049133300781,
"rewards/margins": 7.715832710266113,
"rewards/rejected": -7.641031742095947,
"step": 100
},
{
"epoch": 0.08634222919937205,
"grad_norm": 63.59587178292999,
"learning_rate": 4.2968749999999996e-07,
"logits/chosen": 6190.86376953125,
"logits/rejected": 5089.04150390625,
"logps/chosen": -257.26153564453125,
"logps/rejected": -252.33932495117188,
"loss": 0.5655,
"rewards/accuracies": 0.7166666984558105,
"rewards/chosen": -0.12369728088378906,
"rewards/margins": 8.602740287780762,
"rewards/rejected": -8.726436614990234,
"step": 110
},
{
"epoch": 0.09419152276295134,
"grad_norm": 75.09104325187393,
"learning_rate": 4.6874999999999996e-07,
"logits/chosen": 6041.5400390625,
"logits/rejected": 5059.2177734375,
"logps/chosen": -280.3619079589844,
"logps/rejected": -283.30877685546875,
"loss": 0.5634,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -0.7750555276870728,
"rewards/margins": 12.931465148925781,
"rewards/rejected": -13.706521987915039,
"step": 120
},
{
"epoch": 0.10204081632653061,
"grad_norm": 56.6049419522278,
"learning_rate": 4.999962424962166e-07,
"logits/chosen": 6000.89306640625,
"logits/rejected": 5544.8837890625,
"logps/chosen": -286.98211669921875,
"logps/rejected": -278.4353332519531,
"loss": 0.5788,
"rewards/accuracies": 0.6916667222976685,
"rewards/chosen": 3.1251087188720703,
"rewards/margins": 8.129714012145996,
"rewards/rejected": -5.004604816436768,
"step": 130
},
{
"epoch": 0.10989010989010989,
"grad_norm": 66.17159473008851,
"learning_rate": 4.998647417232375e-07,
"logits/chosen": 5857.5234375,
"logits/rejected": 5142.9794921875,
"logps/chosen": -259.30584716796875,
"logps/rejected": -257.5000915527344,
"loss": 0.5949,
"rewards/accuracies": 0.6666666269302368,
"rewards/chosen": 0.936366081237793,
"rewards/margins": 8.080657005310059,
"rewards/rejected": -7.144291877746582,
"step": 140
},
{
"epoch": 0.11773940345368916,
"grad_norm": 149.49556516257127,
"learning_rate": 4.995454786965036e-07,
"logits/chosen": 5945.76806640625,
"logits/rejected": 4966.1328125,
"logps/chosen": -276.73760986328125,
"logps/rejected": -253.0986785888672,
"loss": 0.5912,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": 1.1343941688537598,
"rewards/margins": 12.343668937683105,
"rewards/rejected": -11.20927619934082,
"step": 150
},
{
"epoch": 0.12558869701726844,
"grad_norm": 59.806353223597256,
"learning_rate": 4.990386933279972e-07,
"logits/chosen": 5893.48095703125,
"logits/rejected": 5249.22216796875,
"logps/chosen": -283.9483642578125,
"logps/rejected": -286.0248107910156,
"loss": 0.5564,
"rewards/accuracies": 0.7166666984558105,
"rewards/chosen": -10.384180068969727,
"rewards/margins": 9.5074462890625,
"rewards/rejected": -19.891626358032227,
"step": 160
},
{
"epoch": 0.13343799058084774,
"grad_norm": 62.325385969713366,
"learning_rate": 4.983447664444096e-07,
"logits/chosen": 6131.96435546875,
"logits/rejected": 5474.5771484375,
"logps/chosen": -294.34442138671875,
"logps/rejected": -284.6779479980469,
"loss": 0.5598,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -4.54689884185791,
"rewards/margins": 11.124954223632812,
"rewards/rejected": -15.671854972839355,
"step": 170
},
{
"epoch": 0.141287284144427,
"grad_norm": 70.02419866191639,
"learning_rate": 4.97464219500968e-07,
"logits/chosen": 5411.71435546875,
"logits/rejected": 4724.818359375,
"logps/chosen": -262.61627197265625,
"logps/rejected": -257.47509765625,
"loss": 0.5817,
"rewards/accuracies": 0.6666666269302368,
"rewards/chosen": 0.8280799984931946,
"rewards/margins": 11.036195755004883,
"rewards/rejected": -10.20811653137207,
"step": 180
},
{
"epoch": 0.14913657770800628,
"grad_norm": 61.31639433830006,
"learning_rate": 4.963977141895843e-07,
"logits/chosen": 5467.083984375,
"logits/rejected": 4708.22216796875,
"logps/chosen": -287.8260803222656,
"logps/rejected": -282.094482421875,
"loss": 0.5171,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -0.541407585144043,
"rewards/margins": 20.091650009155273,
"rewards/rejected": -20.633060455322266,
"step": 190
},
{
"epoch": 0.15698587127158556,
"grad_norm": 89.05267401282032,
"learning_rate": 4.951460519416227e-07,
"logits/chosen": 5396.1455078125,
"logits/rejected": 4986.21142578125,
"logps/chosen": -257.17950439453125,
"logps/rejected": -285.96527099609375,
"loss": 0.545,
"rewards/accuracies": 0.7166666984558105,
"rewards/chosen": -5.9294867515563965,
"rewards/margins": 16.56216812133789,
"rewards/rejected": -22.491649627685547,
"step": 200
},
{
"epoch": 0.16483516483516483,
"grad_norm": 71.11721989569415,
"learning_rate": 4.937101733256606e-07,
"logits/chosen": 4882.0869140625,
"logits/rejected": 4334.1650390625,
"logps/chosen": -225.06906127929688,
"logps/rejected": -236.1047821044922,
"loss": 0.5541,
"rewards/accuracies": 0.6916667222976685,
"rewards/chosen": -0.35613110661506653,
"rewards/margins": 13.287859916687012,
"rewards/rejected": -13.643989562988281,
"step": 210
},
{
"epoch": 0.1726844583987441,
"grad_norm": 95.02411341600269,
"learning_rate": 4.920911573406924e-07,
"logits/chosen": 5941.76708984375,
"logits/rejected": 5054.4560546875,
"logps/chosen": -286.0159912109375,
"logps/rejected": -250.0142059326172,
"loss": 0.5552,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -3.383108615875244,
"rewards/margins": 13.090596199035645,
"rewards/rejected": -16.473705291748047,
"step": 220
},
{
"epoch": 0.18053375196232338,
"grad_norm": 61.44548957102422,
"learning_rate": 4.902902206053098e-07,
"logits/chosen": 5438.53564453125,
"logits/rejected": 4923.58203125,
"logps/chosen": -280.6929626464844,
"logps/rejected": -282.5475769042969,
"loss": 0.5384,
"rewards/accuracies": 0.6916666626930237,
"rewards/chosen": -10.415714263916016,
"rewards/margins": 14.109655380249023,
"rewards/rejected": -24.525371551513672,
"step": 230
},
{
"epoch": 0.18838304552590268,
"grad_norm": 57.61246221015275,
"learning_rate": 4.883087164434672e-07,
"logits/chosen": 4929.82470703125,
"logits/rejected": 3957.67041015625,
"logps/chosen": -247.64553833007812,
"logps/rejected": -235.91378784179688,
"loss": 0.5196,
"rewards/accuracies": 0.73333340883255,
"rewards/chosen": -7.9520063400268555,
"rewards/margins": 15.035487174987793,
"rewards/rejected": -22.98749351501465,
"step": 240
},
{
"epoch": 0.19623233908948196,
"grad_norm": 81.52336264216343,
"learning_rate": 4.861481338675183e-07,
"logits/chosen": 5786.1494140625,
"logits/rejected": 5151.611328125,
"logps/chosen": -243.9603729248047,
"logps/rejected": -279.42626953125,
"loss": 0.5503,
"rewards/accuracies": 0.6666666865348816,
"rewards/chosen": -8.31295394897461,
"rewards/margins": 16.721200942993164,
"rewards/rejected": -25.03415298461914,
"step": 250
},
{
"epoch": 0.20408163265306123,
"grad_norm": 161.8757450979854,
"learning_rate": 4.838100964592904e-07,
"logits/chosen": 5958.8291015625,
"logits/rejected": 4793.7353515625,
"logps/chosen": -300.9515686035156,
"logps/rejected": -272.099609375,
"loss": 0.5609,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -16.198909759521484,
"rewards/margins": 17.37653160095215,
"rewards/rejected": -33.575443267822266,
"step": 260
},
{
"epoch": 0.2119309262166405,
"grad_norm": 55.67188711424705,
"learning_rate": 4.812963611500339e-07,
"logits/chosen": 5856.1484375,
"logits/rejected": 5652.234375,
"logps/chosen": -285.68646240234375,
"logps/rejected": -292.1693115234375,
"loss": 0.4969,
"rewards/accuracies": 0.6916667222976685,
"rewards/chosen": -11.177817344665527,
"rewards/margins": 14.163126945495605,
"rewards/rejected": -25.3409423828125,
"step": 270
},
{
"epoch": 0.21978021978021978,
"grad_norm": 85.12978558309311,
"learning_rate": 4.786088169001671e-07,
"logits/chosen": 5007.3935546875,
"logits/rejected": 4349.2841796875,
"logps/chosen": -232.29440307617188,
"logps/rejected": -257.35736083984375,
"loss": 0.502,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -3.5790748596191406,
"rewards/margins": 19.368267059326172,
"rewards/rejected": -22.947338104248047,
"step": 280
},
{
"epoch": 0.22762951334379905,
"grad_norm": 66.34271443416326,
"learning_rate": 4.7574948327980567e-07,
"logits/chosen": 6927.55078125,
"logits/rejected": 5120.35498046875,
"logps/chosen": -338.6285705566406,
"logps/rejected": -294.0506896972656,
"loss": 0.508,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -13.838041305541992,
"rewards/margins": 22.446279525756836,
"rewards/rejected": -36.28432083129883,
"step": 290
},
{
"epoch": 0.23547880690737832,
"grad_norm": 70.84013248396022,
"learning_rate": 4.727205089511466e-07,
"logits/chosen": 5050.2119140625,
"logits/rejected": 4979.74609375,
"logps/chosen": -250.53823852539062,
"logps/rejected": -271.61199951171875,
"loss": 0.5375,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -13.79767894744873,
"rewards/margins": 13.887298583984375,
"rewards/rejected": -27.684978485107422,
"step": 300
},
{
"epoch": 0.24332810047095763,
"grad_norm": 63.39626059992431,
"learning_rate": 4.6952417005384247e-07,
"logits/chosen": 5686.9599609375,
"logits/rejected": 5094.78076171875,
"logps/chosen": -264.87811279296875,
"logps/rejected": -264.01177978515625,
"loss": 0.5307,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -9.209539413452148,
"rewards/margins": 13.136590957641602,
"rewards/rejected": -22.34613037109375,
"step": 310
},
{
"epoch": 0.25117739403453687,
"grad_norm": 51.01706515176811,
"learning_rate": 4.661628684945851e-07,
"logits/chosen": 5712.5009765625,
"logits/rejected": 4960.869140625,
"logps/chosen": -284.8694763183594,
"logps/rejected": -297.58978271484375,
"loss": 0.5275,
"rewards/accuracies": 0.7000000476837158,
"rewards/chosen": -7.582206726074219,
"rewards/margins": 18.082399368286133,
"rewards/rejected": -25.664602279663086,
"step": 320
},
{
"epoch": 0.25902668759811615,
"grad_norm": 67.84592644137479,
"learning_rate": 4.626391301421782e-07,
"logits/chosen": 5515.0732421875,
"logits/rejected": 5026.42431640625,
"logps/chosen": -276.51904296875,
"logps/rejected": -269.2738342285156,
"loss": 0.5399,
"rewards/accuracies": 0.7000000476837158,
"rewards/chosen": -6.420998573303223,
"rewards/margins": 13.4607572555542,
"rewards/rejected": -19.881755828857422,
"step": 330
},
{
"epoch": 0.2668759811616955,
"grad_norm": 60.95792670490128,
"learning_rate": 4.5895560292945996e-07,
"logits/chosen": 5796.21826171875,
"logits/rejected": 5875.0517578125,
"logps/chosen": -270.3171081542969,
"logps/rejected": -319.8937072753906,
"loss": 0.5257,
"rewards/accuracies": 0.7000000476837158,
"rewards/chosen": -2.2340104579925537,
"rewards/margins": 15.778246879577637,
"rewards/rejected": -18.012258529663086,
"step": 340
},
{
"epoch": 0.27472527472527475,
"grad_norm": 84.59524350470046,
"learning_rate": 4.5511505486349865e-07,
"logits/chosen": 6091.1845703125,
"logits/rejected": 5517.7958984375,
"logps/chosen": -275.9291076660156,
"logps/rejected": -308.3497314453125,
"loss": 0.5434,
"rewards/accuracies": 0.7666667103767395,
"rewards/chosen": -3.021336078643799,
"rewards/margins": 21.829858779907227,
"rewards/rejected": -24.851192474365234,
"step": 350
},
{
"epoch": 0.282574568288854,
"grad_norm": 77.73130050367033,
"learning_rate": 4.5112037194555876e-07,
"logits/chosen": 5645.1474609375,
"logits/rejected": 5508.9814453125,
"logps/chosen": -256.37933349609375,
"logps/rejected": -305.8360900878906,
"loss": 0.5344,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -12.694351196289062,
"rewards/margins": 19.168682098388672,
"rewards/rejected": -31.863033294677734,
"step": 360
},
{
"epoch": 0.2904238618524333,
"grad_norm": 112.03101565670647,
"learning_rate": 4.4697455600239863e-07,
"logits/chosen": 5141.33544921875,
"logits/rejected": 4822.3193359375,
"logps/chosen": -271.9143371582031,
"logps/rejected": -263.94171142578125,
"loss": 0.559,
"rewards/accuracies": 0.7083333134651184,
"rewards/chosen": -13.011955261230469,
"rewards/margins": 12.512086868286133,
"rewards/rejected": -25.524044036865234,
"step": 370
},
{
"epoch": 0.29827315541601257,
"grad_norm": 63.51891587893552,
"learning_rate": 4.426807224305315e-07,
"logits/chosen": 6163.4716796875,
"logits/rejected": 5088.28125,
"logps/chosen": -318.40228271484375,
"logps/rejected": -287.8067321777344,
"loss": 0.5253,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -10.425338745117188,
"rewards/margins": 21.47298812866211,
"rewards/rejected": -31.898324966430664,
"step": 380
},
{
"epoch": 0.30612244897959184,
"grad_norm": 75.92756479977929,
"learning_rate": 4.3824209785514326e-07,
"logits/chosen": 6272.45703125,
"logits/rejected": 4800.2626953125,
"logps/chosen": -301.3458251953125,
"logps/rejected": -278.79302978515625,
"loss": 0.5135,
"rewards/accuracies": 0.7666667103767395,
"rewards/chosen": -9.819374084472656,
"rewards/margins": 23.481924057006836,
"rewards/rejected": -33.301300048828125,
"step": 390
},
{
"epoch": 0.3139717425431711,
"grad_norm": 66.47374556483102,
"learning_rate": 4.3366201770542687e-07,
"logits/chosen": 5328.0849609375,
"logits/rejected": 5181.458984375,
"logps/chosen": -295.9270324707031,
"logps/rejected": -328.08013916015625,
"loss": 0.519,
"rewards/accuracies": 0.7583333849906921,
"rewards/chosen": -21.309566497802734,
"rewards/margins": 20.160045623779297,
"rewards/rejected": -41.46961212158203,
"step": 400
},
{
"epoch": 0.3218210361067504,
"grad_norm": 123.73907215954236,
"learning_rate": 4.2894392370815567e-07,
"logits/chosen": 5759.8017578125,
"logits/rejected": 5182.2548828125,
"logps/chosen": -308.05609130859375,
"logps/rejected": -322.4450988769531,
"loss": 0.4596,
"rewards/accuracies": 0.7916666269302368,
"rewards/chosen": -16.187286376953125,
"rewards/margins": 23.63322639465332,
"rewards/rejected": -39.820518493652344,
"step": 410
},
{
"epoch": 0.32967032967032966,
"grad_norm": 79.2606182693223,
"learning_rate": 4.2409136130137845e-07,
"logits/chosen": 5431.6875,
"logits/rejected": 4942.8408203125,
"logps/chosen": -294.1439514160156,
"logps/rejected": -284.1300964355469,
"loss": 0.5317,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -8.250704765319824,
"rewards/margins": 22.796977996826172,
"rewards/rejected": -31.047687530517578,
"step": 420
},
{
"epoch": 0.33751962323390894,
"grad_norm": 68.54405429145365,
"learning_rate": 4.1910797697018017e-07,
"logits/chosen": 5246.7197265625,
"logits/rejected": 4415.2548828125,
"logps/chosen": -265.8824157714844,
"logps/rejected": -262.2781677246094,
"loss": 0.4959,
"rewards/accuracies": 0.8333333134651184,
"rewards/chosen": -9.087538719177246,
"rewards/margins": 28.186351776123047,
"rewards/rejected": -37.27389144897461,
"step": 430
},
{
"epoch": 0.3453689167974882,
"grad_norm": 83.24939415391015,
"learning_rate": 4.1399751550651084e-07,
"logits/chosen": 5628.4384765625,
"logits/rejected": 5584.71240234375,
"logps/chosen": -259.50360107421875,
"logps/rejected": -286.0181884765625,
"loss": 0.5124,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -8.73564624786377,
"rewards/margins": 17.06700325012207,
"rewards/rejected": -25.802648544311523,
"step": 440
},
{
"epoch": 0.3532182103610675,
"grad_norm": 57.277868825940814,
"learning_rate": 4.087638171951401e-07,
"logits/chosen": 6479.9736328125,
"logits/rejected": 4679.1142578125,
"logps/chosen": -297.46343994140625,
"logps/rejected": -259.014404296875,
"loss": 0.5167,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -10.406542778015137,
"rewards/margins": 22.608623504638672,
"rewards/rejected": -33.015159606933594,
"step": 450
},
{
"epoch": 0.36106750392464676,
"grad_norm": 65.31589437332453,
"learning_rate": 4.034108149278543e-07,
"logits/chosen": 6699.00390625,
"logits/rejected": 5186.462890625,
"logps/chosen": -343.70062255859375,
"logps/rejected": -298.53692626953125,
"loss": 0.4645,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -10.98952865600586,
"rewards/margins": 22.895919799804688,
"rewards/rejected": -33.88544845581055,
"step": 460
},
{
"epoch": 0.36891679748822603,
"grad_norm": 96.27449799872079,
"learning_rate": 3.979425312480629e-07,
"logits/chosen": 5711.05224609375,
"logits/rejected": 5017.3291015625,
"logps/chosen": -306.458251953125,
"logps/rejected": -319.9109802246094,
"loss": 0.5455,
"rewards/accuracies": 0.7666667103767395,
"rewards/chosen": -18.672470092773438,
"rewards/margins": 23.139759063720703,
"rewards/rejected": -41.81222915649414,
"step": 470
},
{
"epoch": 0.37676609105180536,
"grad_norm": 55.55795766142816,
"learning_rate": 3.923630753280357e-07,
"logits/chosen": 6194.65673828125,
"logits/rejected": 5330.5810546875,
"logps/chosen": -294.33331298828125,
"logps/rejected": -285.3929138183594,
"loss": 0.5026,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -14.526718139648438,
"rewards/margins": 24.07291030883789,
"rewards/rejected": -38.59962844848633,
"step": 480
},
{
"epoch": 0.38461538461538464,
"grad_norm": 78.10688192233438,
"learning_rate": 3.866766398810424e-07,
"logits/chosen": 5823.94140625,
"logits/rejected": 5543.9892578125,
"logps/chosen": -248.40328979492188,
"logps/rejected": -305.0126037597656,
"loss": 0.4751,
"rewards/accuracies": 0.7916666269302368,
"rewards/chosen": -7.462246894836426,
"rewards/margins": 24.950969696044922,
"rewards/rejected": -32.41321563720703,
"step": 490
},
{
"epoch": 0.3924646781789639,
"grad_norm": 70.24124257704634,
"learning_rate": 3.8088749801071496e-07,
"logits/chosen": 6291.517578125,
"logits/rejected": 4903.115234375,
"logps/chosen": -314.2850646972656,
"logps/rejected": -301.9085388183594,
"loss": 0.5189,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -15.795580863952637,
"rewards/margins": 21.68023109436035,
"rewards/rejected": -37.47581100463867,
"step": 500
},
{
"epoch": 0.4003139717425432,
"grad_norm": 53.69297743558368,
"learning_rate": 3.75e-07,
"logits/chosen": 5080.75390625,
"logits/rejected": 4507.572265625,
"logps/chosen": -273.0891418457031,
"logps/rejected": -269.4468688964844,
"loss": 0.4849,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -16.22928237915039,
"rewards/margins": 24.09990692138672,
"rewards/rejected": -40.329185485839844,
"step": 510
},
{
"epoch": 0.40816326530612246,
"grad_norm": 80.31768469560811,
"learning_rate": 3.6901857004211443e-07,
"logits/chosen": 5401.982421875,
"logits/rejected": 4994.92626953125,
"logps/chosen": -286.5435791015625,
"logps/rejected": -307.6182556152344,
"loss": 0.5504,
"rewards/accuracies": 0.6916666626930237,
"rewards/chosen": -20.370731353759766,
"rewards/margins": 18.12123680114746,
"rewards/rejected": -38.491966247558594,
"step": 520
},
{
"epoch": 0.41601255886970173,
"grad_norm": 58.78911634916461,
"learning_rate": 3.6294770291596076e-07,
"logits/chosen": 6102.56005859375,
"logits/rejected": 5029.2578125,
"logps/chosen": -299.22210693359375,
"logps/rejected": -295.10443115234375,
"loss": 0.482,
"rewards/accuracies": 0.7166666984558105,
"rewards/chosen": -14.929966926574707,
"rewards/margins": 16.103656768798828,
"rewards/rejected": -31.03362464904785,
"step": 530
},
{
"epoch": 0.423861852433281,
"grad_norm": 80.65951431247957,
"learning_rate": 3.5679196060850034e-07,
"logits/chosen": 5810.88525390625,
"logits/rejected": 5194.83935546875,
"logps/chosen": -288.0130920410156,
"logps/rejected": -287.81170654296875,
"loss": 0.5021,
"rewards/accuracies": 0.7333332896232605,
"rewards/chosen": -10.629480361938477,
"rewards/margins": 21.78488540649414,
"rewards/rejected": -32.414371490478516,
"step": 540
},
{
"epoch": 0.4317111459968603,
"grad_norm": 75.60934568083842,
"learning_rate": 3.505559688866229e-07,
"logits/chosen": 5616.5888671875,
"logits/rejected": 5239.4462890625,
"logps/chosen": -293.79791259765625,
"logps/rejected": -327.1747131347656,
"loss": 0.5091,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -12.461262702941895,
"rewards/margins": 23.530132293701172,
"rewards/rejected": -35.99140167236328,
"step": 550
},
{
"epoch": 0.43956043956043955,
"grad_norm": 84.86627370577622,
"learning_rate": 3.4424441382108826e-07,
"logits/chosen": 5615.79833984375,
"logits/rejected": 5273.56591796875,
"logps/chosen": -290.41864013671875,
"logps/rejected": -298.9422607421875,
"loss": 0.5446,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -14.291006088256836,
"rewards/margins": 17.873065948486328,
"rewards/rejected": -32.1640739440918,
"step": 560
},
{
"epoch": 0.4474097331240188,
"grad_norm": 76.55175648368798,
"learning_rate": 3.378620382651523e-07,
"logits/chosen": 5950.259765625,
"logits/rejected": 5518.1162109375,
"logps/chosen": -326.0985412597656,
"logps/rejected": -325.46246337890625,
"loss": 0.4799,
"rewards/accuracies": 0.7833333015441895,
"rewards/chosen": -11.533608436584473,
"rewards/margins": 21.987844467163086,
"rewards/rejected": -33.521453857421875,
"step": 570
},
{
"epoch": 0.4552590266875981,
"grad_norm": 81.1438191032448,
"learning_rate": 3.314136382905234e-07,
"logits/chosen": 5932.8876953125,
"logits/rejected": 5372.55078125,
"logps/chosen": -293.031494140625,
"logps/rejected": -319.82513427734375,
"loss": 0.495,
"rewards/accuracies": 0.8333333134651184,
"rewards/chosen": -5.784700393676758,
"rewards/margins": 29.190006256103516,
"rewards/rejected": -34.974708557128906,
"step": 580
},
{
"epoch": 0.4631083202511774,
"grad_norm": 62.68029788470684,
"learning_rate": 3.249040595833274e-07,
"logits/chosen": 6448.5634765625,
"logits/rejected": 5459.8603515625,
"logps/chosen": -316.6751708984375,
"logps/rejected": -283.12286376953125,
"loss": 0.4824,
"rewards/accuracies": 0.8083333969116211,
"rewards/chosen": -10.52171516418457,
"rewards/margins": 26.76095199584961,
"rewards/rejected": -37.28267288208008,
"step": 590
},
{
"epoch": 0.47095761381475665,
"grad_norm": 85.34315127293867,
"learning_rate": 3.1833819380279023e-07,
"logits/chosen": 6080.92626953125,
"logits/rejected": 5186.0732421875,
"logps/chosen": -270.80426025390625,
"logps/rejected": -296.1933898925781,
"loss": 0.4672,
"rewards/accuracies": 0.8333333730697632,
"rewards/chosen": -18.313228607177734,
"rewards/margins": 22.91641616821289,
"rewards/rejected": -41.22964096069336,
"step": 600
},
{
"epoch": 0.478806907378336,
"grad_norm": 58.08462591355949,
"learning_rate": 3.11720974905373e-07,
"logits/chosen": 5836.931640625,
"logits/rejected": 5111.87548828125,
"logps/chosen": -290.5270080566406,
"logps/rejected": -293.91949462890625,
"loss": 0.461,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -17.68552017211914,
"rewards/margins": 25.742305755615234,
"rewards/rejected": -43.427825927734375,
"step": 610
},
{
"epoch": 0.48665620094191525,
"grad_norm": 59.24314457292362,
"learning_rate": 3.0505737543712275e-07,
"logits/chosen": 4960.1611328125,
"logits/rejected": 4097.82275390625,
"logps/chosen": -264.2720031738281,
"logps/rejected": -249.32626342773438,
"loss": 0.4882,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -16.4460506439209,
"rewards/margins": 19.30292510986328,
"rewards/rejected": -35.74897766113281,
"step": 620
},
{
"epoch": 0.4945054945054945,
"grad_norm": 78.04378850224133,
"learning_rate": 2.9835240279702513e-07,
"logits/chosen": 6457.73974609375,
"logits/rejected": 5500.73681640625,
"logps/chosen": -325.96942138671875,
"logps/rejected": -301.5501403808594,
"loss": 0.4744,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -9.257902145385742,
"rewards/margins": 26.257373809814453,
"rewards/rejected": -35.51527404785156,
"step": 630
},
{
"epoch": 0.5023547880690737,
"grad_norm": 72.15746448236564,
"learning_rate": 2.9161109547416667e-07,
"logits/chosen": 6160.0927734375,
"logits/rejected": 5292.87158203125,
"logps/chosen": -296.0101318359375,
"logps/rejected": -312.3030700683594,
"loss": 0.5101,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -17.3197078704834,
"rewards/margins": 17.142587661743164,
"rewards/rejected": -34.46229553222656,
"step": 640
},
{
"epoch": 0.5102040816326531,
"grad_norm": 67.34813898160822,
"learning_rate": 2.848385192615339e-07,
"logits/chosen": 5268.1103515625,
"logits/rejected": 4316.62451171875,
"logps/chosen": -275.38983154296875,
"logps/rejected": -265.4394836425781,
"loss": 0.4756,
"rewards/accuracies": 0.7750000357627869,
"rewards/chosen": -17.851306915283203,
"rewards/margins": 20.486316680908203,
"rewards/rejected": -38.337623596191406,
"step": 650
},
{
"epoch": 0.5180533751962323,
"grad_norm": 73.13924713953126,
"learning_rate": 2.780397634492949e-07,
"logits/chosen": 5867.3603515625,
"logits/rejected": 4723.8388671875,
"logps/chosen": -306.2716979980469,
"logps/rejected": -297.45367431640625,
"loss": 0.5284,
"rewards/accuracies": 0.7916666269302368,
"rewards/chosen": -15.953409194946289,
"rewards/margins": 29.22943115234375,
"rewards/rejected": -45.18284606933594,
"step": 660
},
{
"epoch": 0.5259026687598116,
"grad_norm": 92.53126905748795,
"learning_rate": 2.71219937000424e-07,
"logits/chosen": 5880.07958984375,
"logits/rejected": 4863.40576171875,
"logps/chosen": -291.5413513183594,
"logps/rejected": -287.3309326171875,
"loss": 0.5249,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -15.330841064453125,
"rewards/margins": 20.9111385345459,
"rewards/rejected": -36.24197769165039,
"step": 670
},
{
"epoch": 0.533751962323391,
"grad_norm": 67.62676868019088,
"learning_rate": 2.6438416471154273e-07,
"logits/chosen": 5770.3818359375,
"logits/rejected": 4856.85791015625,
"logps/chosen": -291.76312255859375,
"logps/rejected": -270.06256103515625,
"loss": 0.4897,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -13.935272216796875,
"rewards/margins": 21.12171173095703,
"rewards/rejected": -35.056983947753906,
"step": 680
},
{
"epoch": 0.5416012558869702,
"grad_norm": 73.05663859356332,
"learning_rate": 2.5753758336186326e-07,
"logits/chosen": 5670.96142578125,
"logits/rejected": 5242.6416015625,
"logps/chosen": -284.61090087890625,
"logps/rejected": -310.2216796875,
"loss": 0.4463,
"rewards/accuracies": 0.7666667103767395,
"rewards/chosen": -9.5217924118042,
"rewards/margins": 23.241046905517578,
"rewards/rejected": -32.76283645629883,
"step": 690
},
{
"epoch": 0.5494505494505495,
"grad_norm": 54.85355662529876,
"learning_rate": 2.5068533785312666e-07,
"logits/chosen": 5440.59716796875,
"logits/rejected": 5240.3203125,
"logps/chosen": -265.55499267578125,
"logps/rejected": -282.92803955078125,
"loss": 0.5399,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -15.438737869262695,
"rewards/margins": 22.037721633911133,
"rewards/rejected": -37.47645950317383,
"step": 700
},
{
"epoch": 0.5572998430141287,
"grad_norm": 60.42827942241951,
"learning_rate": 2.4383257734343794e-07,
"logits/chosen": 5401.58203125,
"logits/rejected": 5404.7314453125,
"logps/chosen": -260.6871643066406,
"logps/rejected": -296.1017761230469,
"loss": 0.4756,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -11.44128704071045,
"rewards/margins": 20.571271896362305,
"rewards/rejected": -32.0125617980957,
"step": 710
},
{
"epoch": 0.565149136577708,
"grad_norm": 67.98797911861922,
"learning_rate": 2.3698445137790258e-07,
"logits/chosen": 5796.3447265625,
"logits/rejected": 5025.68603515625,
"logps/chosen": -291.19708251953125,
"logps/rejected": -288.0618591308594,
"loss": 0.4765,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -6.432301998138428,
"rewards/margins": 23.511104583740234,
"rewards/rejected": -29.943408966064453,
"step": 720
},
{
"epoch": 0.5729984301412873,
"grad_norm": 61.33927353442037,
"learning_rate": 2.3014610601897157e-07,
"logits/chosen": 6256.431640625,
"logits/rejected": 4830.943359375,
"logps/chosen": -312.23529052734375,
"logps/rejected": -280.59759521484375,
"loss": 0.4773,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -17.176259994506836,
"rewards/margins": 23.136552810668945,
"rewards/rejected": -40.31281280517578,
"step": 730
},
{
"epoch": 0.5808477237048666,
"grad_norm": 58.17800831625638,
"learning_rate": 2.2332267997940513e-07,
"logits/chosen": 5208.26708984375,
"logits/rejected": 4448.9052734375,
"logps/chosen": -262.63885498046875,
"logps/rejected": -255.3593292236328,
"loss": 0.466,
"rewards/accuracies": 0.7916666865348816,
"rewards/chosen": -14.630803108215332,
"rewards/margins": 23.1756591796875,
"rewards/rejected": -37.806461334228516,
"step": 740
},
{
"epoch": 0.5886970172684458,
"grad_norm": 110.94118329677751,
"learning_rate": 2.1651930076075723e-07,
"logits/chosen": 5666.19775390625,
"logits/rejected": 5149.2490234375,
"logps/chosen": -257.9913635253906,
"logps/rejected": -259.7269592285156,
"loss": 0.497,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -17.729137420654297,
"rewards/margins": 19.4478759765625,
"rewards/rejected": -37.17700958251953,
"step": 750
},
{
"epoch": 0.5965463108320251,
"grad_norm": 76.60199494528777,
"learning_rate": 2.0974108080028692e-07,
"logits/chosen": 5950.09814453125,
"logits/rejected": 4712.52197265625,
"logps/chosen": -279.6956787109375,
"logps/rejected": -278.8898010253906,
"loss": 0.4727,
"rewards/accuracies": 0.7666667103767395,
"rewards/chosen": -16.335081100463867,
"rewards/margins": 24.634830474853516,
"rewards/rejected": -40.969913482666016,
"step": 760
},
{
"epoch": 0.6043956043956044,
"grad_norm": 80.46613010942175,
"learning_rate": 2.0299311362918773e-07,
"logits/chosen": 6163.73193359375,
"logits/rejected": 5307.6240234375,
"logps/chosen": -311.0682373046875,
"logps/rejected": -323.7105407714844,
"loss": 0.5202,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -16.42329216003418,
"rewards/margins": 22.489013671875,
"rewards/rejected": -38.91231155395508,
"step": 770
},
{
"epoch": 0.6122448979591837,
"grad_norm": 60.074991801115765,
"learning_rate": 1.962804700450265e-07,
"logits/chosen": 5973.45068359375,
"logits/rejected": 5675.47412109375,
"logps/chosen": -304.04522705078125,
"logps/rejected": -360.00689697265625,
"loss": 0.495,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -20.91264533996582,
"rewards/margins": 24.53109359741211,
"rewards/rejected": -45.4437370300293,
"step": 780
},
{
"epoch": 0.6200941915227629,
"grad_norm": 121.27262746222853,
"learning_rate": 1.8960819430126334e-07,
"logits/chosen": 5599.5244140625,
"logits/rejected": 4970.3056640625,
"logps/chosen": -278.2356262207031,
"logps/rejected": -305.4681396484375,
"loss": 0.4691,
"rewards/accuracies": 0.8166667222976685,
"rewards/chosen": -23.784244537353516,
"rewards/margins": 26.33816909790039,
"rewards/rejected": -50.122413635253906,
"step": 790
},
{
"epoch": 0.6279434850863422,
"grad_norm": 82.44548535694025,
"learning_rate": 1.8298130031671972e-07,
"logits/chosen": 5606.4052734375,
"logits/rejected": 4922.90283203125,
"logps/chosen": -291.6396179199219,
"logps/rejected": -304.04962158203125,
"loss": 0.5104,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -17.903173446655273,
"rewards/margins": 19.6292667388916,
"rewards/rejected": -37.532440185546875,
"step": 800
},
{
"epoch": 0.6357927786499215,
"grad_norm": 70.1420150131459,
"learning_rate": 1.7640476790784075e-07,
"logits/chosen": 5104.7275390625,
"logits/rejected": 4670.8662109375,
"logps/chosen": -292.78948974609375,
"logps/rejected": -312.7052917480469,
"loss": 0.5138,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -18.959609985351562,
"rewards/margins": 19.970327377319336,
"rewards/rejected": -38.92993927001953,
"step": 810
},
{
"epoch": 0.6436420722135008,
"grad_norm": 89.22388286182063,
"learning_rate": 1.6988353904658492e-07,
"logits/chosen": 5619.841796875,
"logits/rejected": 4398.35595703125,
"logps/chosen": -298.38201904296875,
"logps/rejected": -252.72518920898438,
"loss": 0.4789,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -15.309239387512207,
"rewards/margins": 21.590890884399414,
"rewards/rejected": -36.90012741088867,
"step": 820
},
{
"epoch": 0.6514913657770801,
"grad_norm": 78.38923638779514,
"learning_rate": 1.634225141467513e-07,
"logits/chosen": 5558.37255859375,
"logits/rejected": 4999.9423828125,
"logps/chosen": -281.6241455078125,
"logps/rejected": -291.132568359375,
"loss": 0.4816,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -18.16968536376953,
"rewards/margins": 25.177610397338867,
"rewards/rejected": -43.34729766845703,
"step": 830
},
{
"epoch": 0.6593406593406593,
"grad_norm": 60.020636851213084,
"learning_rate": 1.570265483815364e-07,
"logits/chosen": 6103.8017578125,
"logits/rejected": 5022.09130859375,
"logps/chosen": -300.73687744140625,
"logps/rejected": -309.49188232421875,
"loss": 0.5073,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -15.203628540039062,
"rewards/margins": 20.09307289123535,
"rewards/rejected": -35.29670333862305,
"step": 840
},
{
"epoch": 0.6671899529042387,
"grad_norm": 58.32492360881753,
"learning_rate": 1.5070044803508691e-07,
"logits/chosen": 5640.41357421875,
"logits/rejected": 5135.97119140625,
"logps/chosen": -290.51226806640625,
"logps/rejected": -285.50213623046875,
"loss": 0.4609,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -17.97010040283203,
"rewards/margins": 21.574066162109375,
"rewards/rejected": -39.544166564941406,
"step": 850
},
{
"epoch": 0.6750392464678179,
"grad_norm": 79.07046685006041,
"learning_rate": 1.444489668907914e-07,
"logits/chosen": 6106.01513671875,
"logits/rejected": 5220.31689453125,
"logps/chosen": -316.23028564453125,
"logps/rejected": -288.64398193359375,
"loss": 0.5233,
"rewards/accuracies": 0.75,
"rewards/chosen": -13.361892700195312,
"rewards/margins": 20.976970672607422,
"rewards/rejected": -34.33885955810547,
"step": 860
},
{
"epoch": 0.6828885400313972,
"grad_norm": 47.127373222510464,
"learning_rate": 1.3827680265902232e-07,
"logits/chosen": 6052.7939453125,
"logits/rejected": 5035.3828125,
"logps/chosen": -300.17950439453125,
"logps/rejected": -286.375244140625,
"loss": 0.4843,
"rewards/accuracies": 0.7833333015441895,
"rewards/chosen": -10.767036437988281,
"rewards/margins": 21.399105072021484,
"rewards/rejected": -32.16614532470703,
"step": 870
},
{
"epoch": 0.6907378335949764,
"grad_norm": 74.09900524632,
"learning_rate": 1.3218859344701632e-07,
"logits/chosen": 5362.17041015625,
"logits/rejected": 5107.20361328125,
"logps/chosen": -265.02392578125,
"logps/rejected": -314.38690185546875,
"loss": 0.4774,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -6.2712297439575195,
"rewards/margins": 19.80803680419922,
"rewards/rejected": -26.079265594482422,
"step": 880
},
{
"epoch": 0.6985871271585558,
"grad_norm": 66.6052674675123,
"learning_rate": 1.2618891427354172e-07,
"logits/chosen": 6284.64111328125,
"logits/rejected": 5166.4462890625,
"logps/chosen": -317.2557067871094,
"logps/rejected": -291.3585205078125,
"loss": 0.4871,
"rewards/accuracies": 0.8166667222976685,
"rewards/chosen": -4.727200984954834,
"rewards/margins": 24.60157012939453,
"rewards/rejected": -29.328771591186523,
"step": 890
},
{
"epoch": 0.706436420722135,
"grad_norm": 68.28337798829429,
"learning_rate": 1.202822736309758e-07,
"logits/chosen": 5346.580078125,
"logits/rejected": 4938.1474609375,
"logps/chosen": -267.27410888671875,
"logps/rejected": -296.4985046386719,
"loss": 0.4859,
"rewards/accuracies": 0.7666667103767395,
"rewards/chosen": -9.232672691345215,
"rewards/margins": 22.844303131103516,
"rewards/rejected": -32.07697296142578,
"step": 900
},
{
"epoch": 0.7142857142857143,
"grad_norm": 63.48835238056808,
"learning_rate": 1.1447311009737299e-07,
"logits/chosen": 5272.83837890625,
"logits/rejected": 5023.6845703125,
"logps/chosen": -267.2757263183594,
"logps/rejected": -292.3034362792969,
"loss": 0.5038,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -9.708333015441895,
"rewards/margins": 24.619693756103516,
"rewards/rejected": -34.328025817871094,
"step": 910
},
{
"epoch": 0.7221350078492935,
"grad_norm": 60.97912439150742,
"learning_rate": 1.0876578900107053e-07,
"logits/chosen": 5796.75146484375,
"logits/rejected": 4881.2841796875,
"logps/chosen": -297.802001953125,
"logps/rejected": -278.0216369628906,
"loss": 0.5153,
"rewards/accuracies": 0.8166667222976685,
"rewards/chosen": -11.080281257629395,
"rewards/margins": 21.987628936767578,
"rewards/rejected": -33.067909240722656,
"step": 920
},
{
"epoch": 0.7299843014128728,
"grad_norm": 60.28907574898199,
"learning_rate": 1.0316459914033793e-07,
"logits/chosen": 5755.77197265625,
"logits/rejected": 4348.8896484375,
"logps/chosen": -303.0055236816406,
"logps/rejected": -268.995849609375,
"loss": 0.474,
"rewards/accuracies": 0.7666667699813843,
"rewards/chosen": -15.051956176757812,
"rewards/margins": 20.99700355529785,
"rewards/rejected": -36.04895782470703,
"step": 930
},
{
"epoch": 0.7378335949764521,
"grad_norm": 63.03798083693648,
"learning_rate": 9.767374956053584e-08,
"logits/chosen": 5592.4560546875,
"logits/rejected": 4937.5419921875,
"logps/chosen": -278.56890869140625,
"logps/rejected": -285.54351806640625,
"loss": 0.4986,
"rewards/accuracies": 0.783333420753479,
"rewards/chosen": -13.702900886535645,
"rewards/margins": 24.679929733276367,
"rewards/rejected": -38.38282775878906,
"step": 940
},
{
"epoch": 0.7456828885400314,
"grad_norm": 61.55603916303844,
"learning_rate": 9.229736639120561e-08,
"logits/chosen": 5777.404296875,
"logits/rejected": 5323.2900390625,
"logps/chosen": -278.0633850097656,
"logps/rejected": -295.2113037109375,
"loss": 0.5241,
"rewards/accuracies": 0.7166666388511658,
"rewards/chosen": -14.50324821472168,
"rewards/margins": 17.914897918701172,
"rewards/rejected": -32.41815185546875,
"step": 950
},
{
"epoch": 0.7535321821036107,
"grad_norm": 84.55760580604866,
"learning_rate": 8.70394897454659e-08,
"logits/chosen": 5628.0771484375,
"logits/rejected": 5055.1318359375,
"logps/chosen": -275.655517578125,
"logps/rejected": -281.5873718261719,
"loss": 0.4733,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -11.641939163208008,
"rewards/margins": 23.622745513916016,
"rewards/rejected": -35.26468276977539,
"step": 960
},
{
"epoch": 0.7613814756671899,
"grad_norm": 77.38466285245907,
"learning_rate": 8.19040706840472e-08,
"logits/chosen": 5724.43994140625,
"logits/rejected": 4841.40234375,
"logps/chosen": -301.0986328125,
"logps/rejected": -283.05474853515625,
"loss": 0.4868,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -11.967236518859863,
"rewards/margins": 24.7530574798584,
"rewards/rejected": -36.720298767089844,
"step": 970
},
{
"epoch": 0.7692307692307693,
"grad_norm": 76.26753722537488,
"learning_rate": 7.689496824624525e-08,
"logits/chosen": 5441.85009765625,
"logits/rejected": 4387.2412109375,
"logps/chosen": -287.865966796875,
"logps/rejected": -277.3480224609375,
"loss": 0.4696,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -11.223980903625488,
"rewards/margins": 27.436904907226562,
"rewards/rejected": -38.660888671875,
"step": 980
},
{
"epoch": 0.7770800627943485,
"grad_norm": 70.69138991463052,
"learning_rate": 7.201594655002458e-08,
"logits/chosen": 5780.7158203125,
"logits/rejected": 4848.8974609375,
"logps/chosen": -282.12384033203125,
"logps/rejected": -271.3438720703125,
"loss": 0.4823,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -14.01646900177002,
"rewards/margins": 21.042787551879883,
"rewards/rejected": -35.05925369262695,
"step": 990
},
{
"epoch": 0.7849293563579278,
"grad_norm": 67.08952299091601,
"learning_rate": 6.727067196345099e-08,
"logits/chosen": 5443.533203125,
"logits/rejected": 4615.63134765625,
"logps/chosen": -269.97344970703125,
"logps/rejected": -256.36163330078125,
"loss": 0.4742,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -9.651910781860352,
"rewards/margins": 21.033966064453125,
"rewards/rejected": -30.68587875366211,
"step": 1000
},
{
"epoch": 0.792778649921507,
"grad_norm": 68.03255688040626,
"learning_rate": 6.26627103495786e-08,
"logits/chosen": 5637.0078125,
"logits/rejected": 4722.42919921875,
"logps/chosen": -267.38934326171875,
"logps/rejected": -263.6007995605469,
"loss": 0.4984,
"rewards/accuracies": 0.6916666626930237,
"rewards/chosen": -7.973989009857178,
"rewards/margins": 20.111682891845703,
"rewards/rejected": -28.085674285888672,
"step": 1010
},
{
"epoch": 0.8006279434850864,
"grad_norm": 50.612164014066174,
"learning_rate": 5.8195524386862374e-08,
"logits/chosen": 5714.32763671875,
"logits/rejected": 5093.7119140625,
"logps/chosen": -302.93975830078125,
"logps/rejected": -304.6034851074219,
"loss": 0.4762,
"rewards/accuracies": 0.783333420753479,
"rewards/chosen": -5.043163299560547,
"rewards/margins": 25.959774017333984,
"rewards/rejected": -31.002941131591797,
"step": 1020
},
{
"epoch": 0.8084772370486656,
"grad_norm": 85.87448292761121,
"learning_rate": 5.38724709671092e-08,
"logits/chosen": 6109.154296875,
"logits/rejected": 5799.1044921875,
"logps/chosen": -286.541015625,
"logps/rejected": -309.1804504394531,
"loss": 0.4762,
"rewards/accuracies": 0.7916666269302368,
"rewards/chosen": -8.740842819213867,
"rewards/margins": 25.329326629638672,
"rewards/rejected": -34.070167541503906,
"step": 1030
},
{
"epoch": 0.8163265306122449,
"grad_norm": 66.40755472143236,
"learning_rate": 4.969679867292276e-08,
"logits/chosen": 5434.1982421875,
"logits/rejected": 4960.38525390625,
"logps/chosen": -274.160888671875,
"logps/rejected": -286.50177001953125,
"loss": 0.475,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -8.917675018310547,
"rewards/margins": 24.010303497314453,
"rewards/rejected": -32.927978515625,
"step": 1040
},
{
"epoch": 0.8241758241758241,
"grad_norm": 75.62013708498031,
"learning_rate": 4.5671645336537416e-08,
"logits/chosen": 5481.43359375,
"logits/rejected": 4999.4345703125,
"logps/chosen": -289.73046875,
"logps/rejected": -297.23944091796875,
"loss": 0.4654,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -8.197317123413086,
"rewards/margins": 23.31808090209961,
"rewards/rejected": -31.515399932861328,
"step": 1050
},
{
"epoch": 0.8320251177394035,
"grad_norm": 103.29471977365169,
"learning_rate": 4.180003568187776e-08,
"logits/chosen": 6785.52099609375,
"logits/rejected": 5364.03271484375,
"logps/chosen": -318.3176574707031,
"logps/rejected": -291.8903503417969,
"loss": 0.5068,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -10.395001411437988,
"rewards/margins": 16.960371017456055,
"rewards/rejected": -27.355377197265625,
"step": 1060
},
{
"epoch": 0.8398744113029827,
"grad_norm": 115.05057020857156,
"learning_rate": 3.8084879051612144e-08,
"logits/chosen": 5640.74072265625,
"logits/rejected": 5185.1337890625,
"logps/chosen": -280.8153991699219,
"logps/rejected": -267.6299743652344,
"loss": 0.5102,
"rewards/accuracies": 0.7583333849906921,
"rewards/chosen": -7.923087120056152,
"rewards/margins": 22.471633911132812,
"rewards/rejected": -30.39472007751465,
"step": 1070
},
{
"epoch": 0.847723704866562,
"grad_norm": 51.487360894657684,
"learning_rate": 3.452896722091128e-08,
"logits/chosen": 6193.6533203125,
"logits/rejected": 4817.9892578125,
"logps/chosen": -319.95849609375,
"logps/rejected": -274.63543701171875,
"loss": 0.4487,
"rewards/accuracies": 0.8083333969116211,
"rewards/chosen": -7.470693111419678,
"rewards/margins": 25.923675537109375,
"rewards/rejected": -33.394371032714844,
"step": 1080
},
{
"epoch": 0.8555729984301413,
"grad_norm": 59.46047724312089,
"learning_rate": 3.11349722995527e-08,
"logits/chosen": 6239.1337890625,
"logits/rejected": 4728.5390625,
"logps/chosen": -285.4170227050781,
"logps/rejected": -280.19549560546875,
"loss": 0.4794,
"rewards/accuracies": 0.7166667580604553,
"rewards/chosen": -11.619339942932129,
"rewards/margins": 18.907350540161133,
"rewards/rejected": -30.526691436767578,
"step": 1090
},
{
"epoch": 0.8634222919937206,
"grad_norm": 67.790017961291,
"learning_rate": 2.7905444723949762e-08,
"logits/chosen": 6030.23193359375,
"logits/rejected": 5021.29248046875,
"logps/chosen": -295.6999816894531,
"logps/rejected": -271.0659484863281,
"loss": 0.5037,
"rewards/accuracies": 0.7916666865348816,
"rewards/chosen": -12.477197647094727,
"rewards/margins": 26.47970199584961,
"rewards/rejected": -38.95690155029297,
"step": 1100
},
{
"epoch": 0.8712715855572999,
"grad_norm": 76.11643315595336,
"learning_rate": 2.484281134061142e-08,
"logits/chosen": 6397.3251953125,
"logits/rejected": 5203.5615234375,
"logps/chosen": -322.0311584472656,
"logps/rejected": -299.73577880859375,
"loss": 0.4742,
"rewards/accuracies": 0.7916666269302368,
"rewards/chosen": -12.75678825378418,
"rewards/margins": 24.258211135864258,
"rewards/rejected": -37.01499938964844,
"step": 1110
},
{
"epoch": 0.8791208791208791,
"grad_norm": 66.07828474570364,
"learning_rate": 2.194937358247506e-08,
"logits/chosen": 6264.66796875,
"logits/rejected": 5134.44189453125,
"logps/chosen": -303.7208557128906,
"logps/rejected": -294.8185729980469,
"loss": 0.4697,
"rewards/accuracies": 0.7166666984558105,
"rewards/chosen": -12.382735252380371,
"rewards/margins": 22.952373504638672,
"rewards/rejected": -35.335105895996094,
"step": 1120
},
{
"epoch": 0.8869701726844584,
"grad_norm": 70.44227234088717,
"learning_rate": 1.9227305739481612e-08,
"logits/chosen": 5715.337890625,
"logits/rejected": 4504.00537109375,
"logps/chosen": -285.2997741699219,
"logps/rejected": -257.0673522949219,
"loss": 0.4646,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -6.729764461517334,
"rewards/margins": 25.735387802124023,
"rewards/rejected": -32.46515655517578,
"step": 1130
},
{
"epoch": 0.8948194662480377,
"grad_norm": 61.208533994127144,
"learning_rate": 1.6678653324693787e-08,
"logits/chosen": 6276.97900390625,
"logits/rejected": 5115.7861328125,
"logps/chosen": -315.98541259765625,
"logps/rejected": -297.05108642578125,
"loss": 0.4635,
"rewards/accuracies": 0.75,
"rewards/chosen": -10.545991897583008,
"rewards/margins": 24.89821434020996,
"rewards/rejected": -35.44420623779297,
"step": 1140
},
{
"epoch": 0.902668759811617,
"grad_norm": 65.39132230299009,
"learning_rate": 1.4305331537183384e-08,
"logits/chosen": 5591.3408203125,
"logits/rejected": 5121.63916015625,
"logps/chosen": -279.34210205078125,
"logps/rejected": -297.5921936035156,
"loss": 0.47,
"rewards/accuracies": 0.7000000476837158,
"rewards/chosen": -12.146747589111328,
"rewards/margins": 21.974903106689453,
"rewards/rejected": -34.12165069580078,
"step": 1150
},
{
"epoch": 0.9105180533751962,
"grad_norm": 75.9548272739494,
"learning_rate": 1.2109123822844653e-08,
"logits/chosen": 5726.4619140625,
"logits/rejected": 4543.42919921875,
"logps/chosen": -283.1075134277344,
"logps/rejected": -266.6970520019531,
"loss": 0.4804,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -12.42963981628418,
"rewards/margins": 20.248872756958008,
"rewards/rejected": -32.67850875854492,
"step": 1160
},
{
"epoch": 0.9183673469387755,
"grad_norm": 81.7313814355171,
"learning_rate": 1.0091680534213387e-08,
"logits/chosen": 6237.8369140625,
"logits/rejected": 6018.0205078125,
"logps/chosen": -303.80438232421875,
"logps/rejected": -340.64825439453125,
"loss": 0.5009,
"rewards/accuracies": 0.7166666984558105,
"rewards/chosen": -11.677328109741211,
"rewards/margins": 25.162853240966797,
"rewards/rejected": -36.840179443359375,
"step": 1170
},
{
"epoch": 0.9262166405023547,
"grad_norm": 68.30654325467928,
"learning_rate": 8.254517690300944e-09,
"logits/chosen": 5522.61376953125,
"logits/rejected": 4998.84326171875,
"logps/chosen": -286.980224609375,
"logps/rejected": -291.48883056640625,
"loss": 0.4715,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -8.028700828552246,
"rewards/margins": 24.647029876708984,
"rewards/rejected": -32.67572784423828,
"step": 1180
},
{
"epoch": 0.9340659340659341,
"grad_norm": 51.93666448252585,
"learning_rate": 6.599015837372907e-09,
"logits/chosen": 6022.2080078125,
"logits/rejected": 5236.82177734375,
"logps/chosen": -306.70306396484375,
"logps/rejected": -300.5201110839844,
"loss": 0.4905,
"rewards/accuracies": 0.7166666984558105,
"rewards/chosen": -14.384405136108398,
"rewards/margins": 22.299158096313477,
"rewards/rejected": -36.683563232421875,
"step": 1190
},
{
"epoch": 0.9419152276295133,
"grad_norm": 129.14533113743295,
"learning_rate": 5.126419011529992e-09,
"logits/chosen": 6208.3291015625,
"logits/rejected": 5302.0283203125,
"logps/chosen": -313.00469970703125,
"logps/rejected": -295.9210205078125,
"loss": 0.4549,
"rewards/accuracies": 0.841666579246521,
"rewards/chosen": -8.256613731384277,
"rewards/margins": 29.51548194885254,
"rewards/rejected": -37.7720947265625,
"step": 1200
},
{
"epoch": 0.9497645211930926,
"grad_norm": 75.35776224261268,
"learning_rate": 3.837833803870177e-09,
"logits/chosen": 5844.8037109375,
"logits/rejected": 5141.6005859375,
"logps/chosen": -290.4355773925781,
"logps/rejected": -290.68804931640625,
"loss": 0.4652,
"rewards/accuracies": 0.783333420753479,
"rewards/chosen": -10.632657051086426,
"rewards/margins": 27.73859214782715,
"rewards/rejected": -38.371253967285156,
"step": 1210
},
{
"epoch": 0.957613814756672,
"grad_norm": 120.6788482461967,
"learning_rate": 2.734228528934679e-09,
"logits/chosen": 7238.70458984375,
"logits/rejected": 5344.28125,
"logps/chosen": -358.9611511230469,
"logps/rejected": -323.251953125,
"loss": 0.4772,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -12.545631408691406,
"rewards/margins": 25.407411575317383,
"rewards/rejected": -37.953041076660156,
"step": 1220
},
{
"epoch": 0.9654631083202512,
"grad_norm": 67.03962393569711,
"learning_rate": 1.8164324970625645e-09,
"logits/chosen": 6408.5908203125,
"logits/rejected": 5049.8037109375,
"logps/chosen": -316.5514831542969,
"logps/rejected": -297.5099182128906,
"loss": 0.5263,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -8.196043014526367,
"rewards/margins": 24.2534122467041,
"rewards/rejected": -32.44945526123047,
"step": 1230
},
{
"epoch": 0.9733124018838305,
"grad_norm": 66.59250985044233,
"learning_rate": 1.0851353912008642e-09,
"logits/chosen": 5524.4716796875,
"logits/rejected": 5108.125,
"logps/chosen": -285.8993835449219,
"logps/rejected": -311.83306884765625,
"loss": 0.4822,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -12.945849418640137,
"rewards/margins": 22.67105484008789,
"rewards/rejected": -35.61690139770508,
"step": 1240
},
{
"epoch": 0.9811616954474097,
"grad_norm": 63.8522226531417,
"learning_rate": 5.408867486384471e-10,
"logits/chosen": 5626.33837890625,
"logits/rejected": 4768.8837890625,
"logps/chosen": -280.3611755371094,
"logps/rejected": -256.4848327636719,
"loss": 0.4825,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -9.922576904296875,
"rewards/margins": 23.209577560424805,
"rewards/rejected": -33.13215255737305,
"step": 1250
},
{
"epoch": 0.989010989010989,
"grad_norm": 66.09847251390043,
"learning_rate": 1.840955480532924e-10,
"logits/chosen": 5381.37744140625,
"logits/rejected": 5083.3779296875,
"logps/chosen": -277.90594482421875,
"logps/rejected": -292.15655517578125,
"loss": 0.4461,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -11.253137588500977,
"rewards/margins": 23.19699478149414,
"rewards/rejected": -34.45013427734375,
"step": 1260
},
{
"epoch": 0.9968602825745683,
"grad_norm": 81.13121346879733,
"learning_rate": 1.502990218302247e-11,
"logits/chosen": 5605.18603515625,
"logits/rejected": 4559.984375,
"logps/chosen": -273.18988037109375,
"logps/rejected": -262.9356994628906,
"loss": 0.4819,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -11.716361045837402,
"rewards/margins": 23.061031341552734,
"rewards/rejected": -34.77738952636719,
"step": 1270
},
{
"epoch": 1.0,
"step": 1274,
"total_flos": 0.0,
"train_loss": 0.515247391381855,
"train_runtime": 13095.6882,
"train_samples_per_second": 4.668,
"train_steps_per_second": 0.097
}
],
"logging_steps": 10,
"max_steps": 1274,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 2000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}