{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 1274,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0007849293563579278,
"grad_norm": 6.033449115554074,
"learning_rate": 2.34375e-09,
"logits/chosen": 5914.86328125,
"logits/rejected": 2785.05078125,
"logps/chosen": -212.39956665039062,
"logps/rejected": -98.58319854736328,
"loss": 1.6265,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.007849293563579277,
"grad_norm": 6.334644894564701,
"learning_rate": 2.34375e-08,
"logits/chosen": 4973.83203125,
"logits/rejected": 4328.20947265625,
"logps/chosen": -204.1883544921875,
"logps/rejected": -179.72076416015625,
"loss": 1.6265,
"rewards/accuracies": 0.5277777910232544,
"rewards/chosen": 0.01907867193222046,
"rewards/margins": 0.04872569814324379,
"rewards/rejected": -0.029647022485733032,
"step": 10
},
{
"epoch": 0.015698587127158554,
"grad_norm": 5.9916217113774,
"learning_rate": 4.6875e-08,
"logits/chosen": 6084.1591796875,
"logits/rejected": 4833.9228515625,
"logps/chosen": -217.2135467529297,
"logps/rejected": -196.72286987304688,
"loss": 1.6265,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.02391243539750576,
"rewards/margins": -0.033898498862981796,
"rewards/rejected": 0.009986063465476036,
"step": 20
},
{
"epoch": 0.023547880690737835,
"grad_norm": 5.7641173333433775,
"learning_rate": 7.03125e-08,
"logits/chosen": 6082.0751953125,
"logits/rejected": 5103.20458984375,
"logps/chosen": -250.56283569335938,
"logps/rejected": -209.3272705078125,
"loss": 1.6265,
"rewards/accuracies": 0.5083333253860474,
"rewards/chosen": 0.023494163528084755,
"rewards/margins": 0.016794539988040924,
"rewards/rejected": 0.006699626334011555,
"step": 30
},
{
"epoch": 0.03139717425431711,
"grad_norm": 5.454897887476028,
"learning_rate": 9.375e-08,
"logits/chosen": 5302.84423828125,
"logits/rejected": 4339.72900390625,
"logps/chosen": -212.10537719726562,
"logps/rejected": -181.6745147705078,
"loss": 1.6265,
"rewards/accuracies": 0.4749999940395355,
"rewards/chosen": -0.0172278955578804,
"rewards/margins": -0.01973465457558632,
"rewards/rejected": 0.0025067573878914118,
"step": 40
},
{
"epoch": 0.03924646781789639,
"grad_norm": 6.38829428743751,
"learning_rate": 1.1718749999999999e-07,
"logits/chosen": 6392.1474609375,
"logits/rejected": 5017.2919921875,
"logps/chosen": -265.69232177734375,
"logps/rejected": -206.754150390625,
"loss": 1.6261,
"rewards/accuracies": 0.4583333432674408,
"rewards/chosen": -0.07548192888498306,
"rewards/margins": -0.040568120777606964,
"rewards/rejected": -0.03491383045911789,
"step": 50
},
{
"epoch": 0.04709576138147567,
"grad_norm": 6.31748832974153,
"learning_rate": 1.40625e-07,
"logits/chosen": 5422.49609375,
"logits/rejected": 4506.4208984375,
"logps/chosen": -214.3790740966797,
"logps/rejected": -209.01828002929688,
"loss": 1.6253,
"rewards/accuracies": 0.3166666626930237,
"rewards/chosen": -0.40180811285972595,
"rewards/margins": -0.24679434299468994,
"rewards/rejected": -0.1550137847661972,
"step": 60
},
{
"epoch": 0.054945054945054944,
"grad_norm": 7.1862223915388315,
"learning_rate": 1.640625e-07,
"logits/chosen": 5049.93408203125,
"logits/rejected": 4787.21630859375,
"logps/chosen": -179.75225830078125,
"logps/rejected": -177.83863830566406,
"loss": 1.6216,
"rewards/accuracies": 0.44166669249534607,
"rewards/chosen": -1.2514512538909912,
"rewards/margins": -0.1759890913963318,
"rewards/rejected": -1.0754622220993042,
"step": 70
},
{
"epoch": 0.06279434850863422,
"grad_norm": 8.327706520679039,
"learning_rate": 1.875e-07,
"logits/chosen": 5466.95947265625,
"logits/rejected": 5001.8310546875,
"logps/chosen": -199.88381958007812,
"logps/rejected": -184.80918884277344,
"loss": 1.6118,
"rewards/accuracies": 0.4583333432674408,
"rewards/chosen": -3.306250810623169,
"rewards/margins": -0.05246015638113022,
"rewards/rejected": -3.2537906169891357,
"step": 80
},
{
"epoch": 0.0706436420722135,
"grad_norm": 12.704545270148827,
"learning_rate": 2.109375e-07,
"logits/chosen": 5456.98583984375,
"logits/rejected": 4672.60986328125,
"logps/chosen": -231.42489624023438,
"logps/rejected": -196.01947021484375,
"loss": 1.5778,
"rewards/accuracies": 0.44999998807907104,
"rewards/chosen": -12.56345272064209,
"rewards/margins": -0.9916127324104309,
"rewards/rejected": -11.571840286254883,
"step": 90
},
{
"epoch": 0.07849293563579278,
"grad_norm": 7.59113886837309,
"learning_rate": 2.3437499999999998e-07,
"logits/chosen": 4993.03955078125,
"logits/rejected": 4812.00146484375,
"logps/chosen": -246.10147094726562,
"logps/rejected": -233.54025268554688,
"loss": 1.5199,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": -35.86996078491211,
"rewards/margins": -1.7471039295196533,
"rewards/rejected": -34.122859954833984,
"step": 100
},
{
"epoch": 0.08634222919937205,
"grad_norm": 9.015849618304676,
"learning_rate": 2.5781249999999997e-07,
"logits/chosen": 4971.4384765625,
"logits/rejected": 4100.7958984375,
"logps/chosen": -258.1769104003906,
"logps/rejected": -242.14089965820312,
"loss": 1.5138,
"rewards/accuracies": 0.44999998807907104,
"rewards/chosen": -75.07661437988281,
"rewards/margins": -11.86035442352295,
"rewards/rejected": -63.21626663208008,
"step": 110
},
{
"epoch": 0.09419152276295134,
"grad_norm": 10.274748882232315,
"learning_rate": 2.8125e-07,
"logits/chosen": 4943.21923828125,
"logits/rejected": 4107.5322265625,
"logps/chosen": -274.8851318359375,
"logps/rejected": -264.3096008300781,
"loss": 1.4822,
"rewards/accuracies": 0.5250000357627869,
"rewards/chosen": -72.01099395751953,
"rewards/margins": -5.851285934448242,
"rewards/rejected": -66.15970611572266,
"step": 120
},
{
"epoch": 0.10204081632653061,
"grad_norm": 7.144257110565484,
"learning_rate": 2.999977454977299e-07,
"logits/chosen": 4948.9169921875,
"logits/rejected": 4537.37646484375,
"logps/chosen": -287.4667663574219,
"logps/rejected": -281.4789123535156,
"loss": 1.4274,
"rewards/accuracies": 0.5,
"rewards/chosen": -77.65235137939453,
"rewards/margins": 6.16226863861084,
"rewards/rejected": -83.81462097167969,
"step": 130
},
{
"epoch": 0.10989010989010989,
"grad_norm": 8.226551521487808,
"learning_rate": 2.999188450339425e-07,
"logits/chosen": 4828.34375,
"logits/rejected": 4192.6845703125,
"logps/chosen": -281.3506164550781,
"logps/rejected": -279.585205078125,
"loss": 1.3961,
"rewards/accuracies": 0.5666666626930237,
"rewards/chosen": -95.67347717285156,
"rewards/margins": 6.334682464599609,
"rewards/rejected": -102.00816345214844,
"step": 140
},
{
"epoch": 0.11773940345368916,
"grad_norm": 11.507860546274475,
"learning_rate": 2.997272872179022e-07,
"logits/chosen": 4970.33837890625,
"logits/rejected": 4065.40673828125,
"logps/chosen": -291.5596923828125,
"logps/rejected": -275.5636291503906,
"loss": 1.3885,
"rewards/accuracies": 0.625,
"rewards/chosen": -89.9407730102539,
"rewards/margins": 13.317327499389648,
"rewards/rejected": -103.25809478759766,
"step": 150
},
{
"epoch": 0.12558869701726844,
"grad_norm": 10.994369595848676,
"learning_rate": 2.994232159967983e-07,
"logits/chosen": 4982.4130859375,
"logits/rejected": 4372.6259765625,
"logps/chosen": -284.61468505859375,
"logps/rejected": -296.10565185546875,
"loss": 1.3818,
"rewards/accuracies": 0.6333333849906921,
"rewards/chosen": -87.11192321777344,
"rewards/margins": 11.931414604187012,
"rewards/rejected": -99.0433349609375,
"step": 160
},
{
"epoch": 0.13343799058084774,
"grad_norm": 9.481017294519704,
"learning_rate": 2.990068598666458e-07,
"logits/chosen": 5221.8046875,
"logits/rejected": 4557.0537109375,
"logps/chosen": -301.30633544921875,
"logps/rejected": -305.81646728515625,
"loss": 1.3726,
"rewards/accuracies": 0.6333333253860474,
"rewards/chosen": -91.6949234008789,
"rewards/margins": 17.794422149658203,
"rewards/rejected": -109.48934173583984,
"step": 170
},
{
"epoch": 0.141287284144427,
"grad_norm": 14.020275412135652,
"learning_rate": 2.984785317005808e-07,
"logits/chosen": 4500.76220703125,
"logits/rejected": 3867.770751953125,
"logps/chosen": -281.3055114746094,
"logps/rejected": -287.490478515625,
"loss": 1.3686,
"rewards/accuracies": 0.6333333253860474,
"rewards/chosen": -92.7432861328125,
"rewards/margins": 16.572912216186523,
"rewards/rejected": -109.31620025634766,
"step": 180
},
{
"epoch": 0.14913657770800628,
"grad_norm": 15.646225937667412,
"learning_rate": 2.978386285137506e-07,
"logits/chosen": 4642.57958984375,
"logits/rejected": 3846.667236328125,
"logps/chosen": -290.1266174316406,
"logps/rejected": -312.9205627441406,
"loss": 1.355,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -86.35670471191406,
"rewards/margins": 35.351932525634766,
"rewards/rejected": -121.70863342285156,
"step": 190
},
{
"epoch": 0.15698587127158556,
"grad_norm": 11.466454800803227,
"learning_rate": 2.9708763116497365e-07,
"logits/chosen": 4456.6640625,
"logits/rejected": 4046.647705078125,
"logps/chosen": -272.8510437011719,
"logps/rejected": -312.5729675292969,
"loss": 1.3576,
"rewards/accuracies": 0.7166666388511658,
"rewards/chosen": -90.31404876708984,
"rewards/margins": 25.549606323242188,
"rewards/rejected": -115.86366271972656,
"step": 200
},
{
"epoch": 0.16483516483516483,
"grad_norm": 18.323993013074592,
"learning_rate": 2.962261039953964e-07,
"logits/chosen": 3986.946044921875,
"logits/rejected": 3482.71142578125,
"logps/chosen": -242.8981170654297,
"logps/rejected": -276.468994140625,
"loss": 1.3525,
"rewards/accuracies": 0.7166666984558105,
"rewards/chosen": -88.96837615966797,
"rewards/margins": 30.285064697265625,
"rewards/rejected": -119.2534408569336,
"step": 210
},
{
"epoch": 0.1726844583987441,
"grad_norm": 14.15120837947915,
"learning_rate": 2.952546944044155e-07,
"logits/chosen": 4956.119140625,
"logits/rejected": 4112.83984375,
"logps/chosen": -283.2933349609375,
"logps/rejected": -285.0084533691406,
"loss": 1.3478,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -87.01918029785156,
"rewards/margins": 34.038204193115234,
"rewards/rejected": -121.057373046875,
"step": 220
},
{
"epoch": 0.18053375196232338,
"grad_norm": 13.56003679991926,
"learning_rate": 2.941741323631859e-07,
"logits/chosen": 4587.6279296875,
"logits/rejected": 4040.880126953125,
"logps/chosen": -276.8036804199219,
"logps/rejected": -302.5361328125,
"loss": 1.3477,
"rewards/accuracies": 0.7166666984558105,
"rewards/chosen": -89.46044921875,
"rewards/margins": 33.30833435058594,
"rewards/rejected": -122.76878356933594,
"step": 230
},
{
"epoch": 0.18838304552590268,
"grad_norm": 19.695765717669524,
"learning_rate": 2.929852298660803e-07,
"logits/chosen": 4106.96533203125,
"logits/rejected": 3192.676513671875,
"logps/chosen": -258.5592346191406,
"logps/rejected": -276.740966796875,
"loss": 1.3478,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -90.93244934082031,
"rewards/margins": 31.089950561523438,
"rewards/rejected": -122.02239990234375,
"step": 240
},
{
"epoch": 0.19623233908948196,
"grad_norm": 17.837485568500572,
"learning_rate": 2.9168888032051097e-07,
"logits/chosen": 4755.09423828125,
"logits/rejected": 4138.0048828125,
"logps/chosen": -256.274169921875,
"logps/rejected": -310.71746826171875,
"loss": 1.3551,
"rewards/accuracies": 0.6333333253860474,
"rewards/chosen": -89.11972045898438,
"rewards/margins": 37.10137176513672,
"rewards/rejected": -126.2210922241211,
"step": 250
},
{
"epoch": 0.20408163265306123,
"grad_norm": 15.060829491384922,
"learning_rate": 2.9028605787557423e-07,
"logits/chosen": 4961.2783203125,
"logits/rejected": 3883.23583984375,
"logps/chosen": -295.96575927734375,
"logps/rejected": -291.8458251953125,
"loss": 1.362,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -96.2462158203125,
"rewards/margins": 29.351673126220703,
"rewards/rejected": -125.59788513183594,
"step": 260
},
{
"epoch": 0.2119309262166405,
"grad_norm": 21.725472315572194,
"learning_rate": 2.8877781669002034e-07,
"logits/chosen": 4907.73681640625,
"logits/rejected": 4685.39306640625,
"logps/chosen": -290.1836242675781,
"logps/rejected": -312.2330627441406,
"loss": 1.3387,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -93.42694854736328,
"rewards/margins": 30.104965209960938,
"rewards/rejected": -123.53192138671875,
"step": 270
},
{
"epoch": 0.21978021978021978,
"grad_norm": 22.380743969527877,
"learning_rate": 2.871652901401002e-07,
"logits/chosen": 4144.8955078125,
"logits/rejected": 3516.05615234375,
"logps/chosen": -250.620361328125,
"logps/rejected": -303.0557861328125,
"loss": 1.3452,
"rewards/accuracies": 0.7750000357627869,
"rewards/chosen": -87.56240844726562,
"rewards/margins": 46.16553497314453,
"rewards/rejected": -133.7279510498047,
"step": 280
},
{
"epoch": 0.22762951334379905,
"grad_norm": 19.023537926104407,
"learning_rate": 2.854496899678834e-07,
"logits/chosen": 5847.7109375,
"logits/rejected": 4171.96630859375,
"logps/chosen": -327.1379699707031,
"logps/rejected": -330.09600830078125,
"loss": 1.3403,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -90.06501007080078,
"rewards/margins": 57.50885772705078,
"rewards/rejected": -147.57388305664062,
"step": 290
},
{
"epoch": 0.23547880690737832,
"grad_norm": 17.58097454746967,
"learning_rate": 2.8363230537068793e-07,
"logits/chosen": 4251.00634765625,
"logits/rejected": 4112.849609375,
"logps/chosen": -255.7366485595703,
"logps/rejected": -293.42156982421875,
"loss": 1.3468,
"rewards/accuracies": 0.6750000715255737,
"rewards/chosen": -88.24583435058594,
"rewards/margins": 34.95277404785156,
"rewards/rejected": -123.19859313964844,
"step": 300
},
{
"epoch": 0.24332810047095763,
"grad_norm": 14.70842299502561,
"learning_rate": 2.817145020323055e-07,
"logits/chosen": 4780.62255859375,
"logits/rejected": 4147.02685546875,
"logps/chosen": -269.6231384277344,
"logps/rejected": -292.3729553222656,
"loss": 1.3542,
"rewards/accuracies": 0.6833333373069763,
"rewards/chosen": -92.05260467529297,
"rewards/margins": 24.870826721191406,
"rewards/rejected": -116.9234390258789,
"step": 310
},
{
"epoch": 0.25117739403453687,
"grad_norm": 13.938859657249182,
"learning_rate": 2.7969772109675106e-07,
"logits/chosen": 4895.505859375,
"logits/rejected": 4120.4462890625,
"logps/chosen": -288.72723388671875,
"logps/rejected": -322.5893859863281,
"loss": 1.35,
"rewards/accuracies": 0.6916667222976685,
"rewards/chosen": -87.61097717285156,
"rewards/margins": 32.92052459716797,
"rewards/rejected": -120.53150939941406,
"step": 320
},
{
"epoch": 0.25902668759811615,
"grad_norm": 11.637579462880986,
"learning_rate": 2.775834780853069e-07,
"logits/chosen": 4723.48291015625,
"logits/rejected": 4224.01220703125,
"logps/chosen": -292.0691833496094,
"logps/rejected": -297.52789306640625,
"loss": 1.357,
"rewards/accuracies": 0.6500000357627869,
"rewards/chosen": -97.65233612060547,
"rewards/margins": 22.543180465698242,
"rewards/rejected": -120.19551849365234,
"step": 330
},
{
"epoch": 0.2668759811616955,
"grad_norm": 11.50915278056431,
"learning_rate": 2.75373361757676e-07,
"logits/chosen": 4986.0654296875,
"logits/rejected": 4922.3876953125,
"logps/chosen": -282.6824035644531,
"logps/rejected": -336.1164855957031,
"loss": 1.3449,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -88.81416320800781,
"rewards/margins": 26.942358016967773,
"rewards/rejected": -115.75653076171875,
"step": 340
},
{
"epoch": 0.27472527472527475,
"grad_norm": 19.664421369914834,
"learning_rate": 2.730690329180992e-07,
"logits/chosen": 5255.826171875,
"logits/rejected": 4648.89111328125,
"logps/chosen": -287.81707763671875,
"logps/rejected": -344.7186279296875,
"loss": 1.3353,
"rewards/accuracies": 0.6916666626930237,
"rewards/chosen": -92.95384216308594,
"rewards/margins": 44.985557556152344,
"rewards/rejected": -137.93942260742188,
"step": 350
},
{
"epoch": 0.282574568288854,
"grad_norm": 14.042295784485253,
"learning_rate": 2.7067222316733527e-07,
"logits/chosen": 4749.18994140625,
"logits/rejected": 4491.1953125,
"logps/chosen": -266.18914794921875,
"logps/rejected": -342.7701416015625,
"loss": 1.3367,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -88.32334899902344,
"rewards/margins": 55.10559844970703,
"rewards/rejected": -143.42892456054688,
"step": 360
},
{
"epoch": 0.2904238618524333,
"grad_norm": 15.2758713919473,
"learning_rate": 2.6818473360143916e-07,
"logits/chosen": 4407.02783203125,
"logits/rejected": 4003.607421875,
"logps/chosen": -270.6219787597656,
"logps/rejected": -287.3055419921875,
"loss": 1.3537,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -86.1965560913086,
"rewards/margins": 33.04121780395508,
"rewards/rejected": -119.2377700805664,
"step": 370
},
{
"epoch": 0.29827315541601257,
"grad_norm": 13.766452688703493,
"learning_rate": 2.6560843345831893e-07,
"logits/chosen": 5333.09375,
"logits/rejected": 4200.95947265625,
"logps/chosen": -317.053955078125,
"logps/rejected": -308.80609130859375,
"loss": 1.3395,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -88.68672943115234,
"rewards/margins": 37.15703582763672,
"rewards/rejected": -125.84376525878906,
"step": 380
},
{
"epoch": 0.30612244897959184,
"grad_norm": 36.85374368236901,
"learning_rate": 2.62945258713086e-07,
"logits/chosen": 5441.1943359375,
"logits/rejected": 3993.84423828125,
"logps/chosen": -302.8955993652344,
"logps/rejected": -322.1986999511719,
"loss": 1.3343,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -87.42176818847656,
"rewards/margins": 54.94853591918945,
"rewards/rejected": -142.3703155517578,
"step": 390
},
{
"epoch": 0.3139717425431711,
"grad_norm": 14.099963149615544,
"learning_rate": 2.601972106232561e-07,
"logits/chosen": 4693.27587890625,
"logits/rejected": 4468.32373046875,
"logps/chosen": -273.7616882324219,
"logps/rejected": -314.75164794921875,
"loss": 1.3598,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -82.94029235839844,
"rewards/margins": 43.07162857055664,
"rewards/rejected": -126.01191711425781,
"step": 400
},
{
"epoch": 0.3218210361067504,
"grad_norm": 22.82272878384856,
"learning_rate": 2.5736635422489343e-07,
"logits/chosen": 5083.9404296875,
"logits/rejected": 4400.0224609375,
"logps/chosen": -309.4847717285156,
"logps/rejected": -357.3506774902344,
"loss": 1.3187,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -97.01141357421875,
"rewards/margins": 51.86890411376953,
"rewards/rejected": -148.88031005859375,
"step": 410
},
{
"epoch": 0.32967032967032966,
"grad_norm": 13.413614592026427,
"learning_rate": 2.5445481678082706e-07,
"logits/chosen": 4738.033203125,
"logits/rejected": 4165.1875,
"logps/chosen": -292.02459716796875,
"logps/rejected": -313.284912109375,
"loss": 1.3412,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -87.97795104980469,
"rewards/margins": 44.012813568115234,
"rewards/rejected": -131.9907684326172,
"step": 420
},
{
"epoch": 0.33751962323390894,
"grad_norm": 16.35310936737267,
"learning_rate": 2.514647861821081e-07,
"logits/chosen": 4596.02392578125,
"logits/rejected": 3646.234375,
"logps/chosen": -269.6007080078125,
"logps/rejected": -309.58538818359375,
"loss": 1.3277,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -89.86016845703125,
"rewards/margins": 58.46223831176758,
"rewards/rejected": -148.32241821289062,
"step": 430
},
{
"epoch": 0.3453689167974882,
"grad_norm": 13.924787630126689,
"learning_rate": 2.483985093039065e-07,
"logits/chosen": 4906.8564453125,
"logits/rejected": 4671.38818359375,
"logps/chosen": -275.3404846191406,
"logps/rejected": -325.6501770019531,
"loss": 1.3556,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -92.95171356201172,
"rewards/margins": 41.062828063964844,
"rewards/rejected": -134.01454162597656,
"step": 440
},
{
"epoch": 0.3532182103610675,
"grad_norm": 12.974922593697578,
"learning_rate": 2.4525829031708404e-07,
"logits/chosen": 5700.68115234375,
"logits/rejected": 3948.375732421875,
"logps/chosen": -295.39019775390625,
"logps/rejected": -297.334228515625,
"loss": 1.3449,
"rewards/accuracies": 0.7416667342185974,
"rewards/chosen": -86.63603973388672,
"rewards/margins": 47.681427001953125,
"rewards/rejected": -134.3174591064453,
"step": 450
},
{
"epoch": 0.36106750392464676,
"grad_norm": 13.7663993271018,
"learning_rate": 2.420464889567126e-07,
"logits/chosen": 5984.6328125,
"logits/rejected": 4481.2509765625,
"logps/chosen": -346.4881286621094,
"logps/rejected": -334.8788146972656,
"loss": 1.3296,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -96.00253295898438,
"rewards/margins": 48.739898681640625,
"rewards/rejected": -144.74244689941406,
"step": 460
},
{
"epoch": 0.36891679748822603,
"grad_norm": 16.285406224013258,
"learning_rate": 2.3876551874883775e-07,
"logits/chosen": 5035.26953125,
"logits/rejected": 4289.298828125,
"logps/chosen": -301.6008605957031,
"logps/rejected": -343.3747253417969,
"loss": 1.3369,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -89.3971939086914,
"rewards/margins": 51.47382736206055,
"rewards/rejected": -140.8710174560547,
"step": 470
},
{
"epoch": 0.37676609105180536,
"grad_norm": 12.840284775101187,
"learning_rate": 2.354178451968214e-07,
"logits/chosen": 5432.525390625,
"logits/rejected": 4493.26953125,
"logps/chosen": -295.23321533203125,
"logps/rejected": -313.70111083984375,
"loss": 1.3372,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -87.82811737060547,
"rewards/margins": 51.47626876831055,
"rewards/rejected": -139.30441284179688,
"step": 480
},
{
"epoch": 0.38461538461538464,
"grad_norm": 13.012539888722776,
"learning_rate": 2.3200598392862542e-07,
"logits/chosen": 5000.28125,
"logits/rejected": 4610.8955078125,
"logps/chosen": -264.51531982421875,
"logps/rejected": -339.9250183105469,
"loss": 1.3246,
"rewards/accuracies": 0.75,
"rewards/chosen": -87.90812683105469,
"rewards/margins": 49.94524383544922,
"rewards/rejected": -137.85336303710938,
"step": 490
},
{
"epoch": 0.3924646781789639,
"grad_norm": 18.56442430897208,
"learning_rate": 2.2853249880642897e-07,
"logits/chosen": 5522.9404296875,
"logits/rejected": 4092.22265625,
"logps/chosen": -323.59771728515625,
"logps/rejected": -357.1122131347656,
"loss": 1.3349,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -101.6022720336914,
"rewards/margins": 52.95317459106445,
"rewards/rejected": -154.55545043945312,
"step": 500
},
{
"epoch": 0.4003139717425432,
"grad_norm": 14.082902463369917,
"learning_rate": 2.25e-07,
"logits/chosen": 4505.0869140625,
"logits/rejected": 3814.422607421875,
"logps/chosen": -273.663330078125,
"logps/rejected": -298.57855224609375,
"loss": 1.328,
"rewards/accuracies": 0.7916668057441711,
"rewards/chosen": -84.23956298828125,
"rewards/margins": 48.00691223144531,
"rewards/rejected": -132.2464599609375,
"step": 510
},
{
"epoch": 0.40816326530612246,
"grad_norm": 12.873522668189558,
"learning_rate": 2.2141114202526867e-07,
"logits/chosen": 4780.2392578125,
"logits/rejected": 4262.83203125,
"logps/chosen": -296.81536865234375,
"logps/rejected": -335.5638122558594,
"loss": 1.365,
"rewards/accuracies": 0.6666666269302368,
"rewards/chosen": -98.39509582519531,
"rewards/margins": 41.327919006347656,
"rewards/rejected": -139.7230224609375,
"step": 520
},
{
"epoch": 0.41601255886970173,
"grad_norm": 12.684426666795588,
"learning_rate": 2.1776862174957646e-07,
"logits/chosen": 5397.5029296875,
"logits/rejected": 4275.8896484375,
"logps/chosen": -295.6401672363281,
"logps/rejected": -316.23468017578125,
"loss": 1.3303,
"rewards/accuracies": 0.75,
"rewards/chosen": -82.70533752441406,
"rewards/margins": 34.76225662231445,
"rewards/rejected": -117.46760559082031,
"step": 530
},
{
"epoch": 0.423861852433281,
"grad_norm": 22.323475438849997,
"learning_rate": 2.1407517636510023e-07,
"logits/chosen": 5051.541015625,
"logits/rejected": 4335.1279296875,
"logps/chosen": -312.322265625,
"logps/rejected": -339.65802001953125,
"loss": 1.3352,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -101.51814270019531,
"rewards/margins": 49.8724479675293,
"rewards/rejected": -151.39059448242188,
"step": 540
},
{
"epoch": 0.4317111459968603,
"grad_norm": 21.570737156393047,
"learning_rate": 2.1033358133197376e-07,
"logits/chosen": 4971.50927734375,
"logits/rejected": 4415.0771484375,
"logps/chosen": -298.01519775390625,
"logps/rejected": -359.4136962890625,
"loss": 1.3335,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -83.20237731933594,
"rewards/margins": 51.15970230102539,
"rewards/rejected": -134.36209106445312,
"step": 550
},
{
"epoch": 0.43956043956043955,
"grad_norm": 12.415018699318011,
"learning_rate": 2.0654664829265295e-07,
"logits/chosen": 4912.80859375,
"logits/rejected": 4474.53515625,
"logps/chosen": -297.15765380859375,
"logps/rejected": -323.38116455078125,
"loss": 1.3455,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -93.88143157958984,
"rewards/margins": 36.26217269897461,
"rewards/rejected": -130.1436004638672,
"step": 560
},
{
"epoch": 0.4474097331240188,
"grad_norm": 20.61913444595419,
"learning_rate": 2.027172229590914e-07,
"logits/chosen": 5299.6044921875,
"logits/rejected": 4737.7197265625,
"logps/chosen": -336.34356689453125,
"logps/rejected": -368.6340026855469,
"loss": 1.3334,
"rewards/accuracies": 0.7750000357627869,
"rewards/chosen": -93.41209411621094,
"rewards/margins": 51.496124267578125,
"rewards/rejected": -144.90821838378906,
"step": 570
},
{
"epoch": 0.4552590266875981,
"grad_norm": 31.600089809367816,
"learning_rate": 1.9884818297431403e-07,
"logits/chosen": 5278.46240234375,
"logits/rejected": 4592.17578125,
"logps/chosen": -296.7460632324219,
"logps/rejected": -345.90313720703125,
"loss": 1.3445,
"rewards/accuracies": 0.7583333849906921,
"rewards/chosen": -84.3451156616211,
"rewards/margins": 48.27422332763672,
"rewards/rejected": -132.6193389892578,
"step": 580
},
{
"epoch": 0.4631083202511774,
"grad_norm": 15.260249014970388,
"learning_rate": 1.9494243574999645e-07,
"logits/chosen": 5775.82470703125,
"logits/rejected": 4653.576171875,
"logps/chosen": -324.98199462890625,
"logps/rejected": -319.8547668457031,
"loss": 1.3314,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -93.31060028076172,
"rewards/margins": 47.719669342041016,
"rewards/rejected": -141.0302734375,
"step": 590
},
{
"epoch": 0.47095761381475665,
"grad_norm": 15.88452181021661,
"learning_rate": 1.9100291628167416e-07,
"logits/chosen": 5445.01904296875,
"logits/rejected": 4488.2998046875,
"logps/chosen": -265.55645751953125,
"logps/rejected": -320.50567626953125,
"loss": 1.3278,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -88.00885009765625,
"rewards/margins": 42.79914093017578,
"rewards/rejected": -130.8079833984375,
"step": 600
},
{
"epoch": 0.478806907378336,
"grad_norm": 20.55547909163982,
"learning_rate": 1.870325849432238e-07,
"logits/chosen": 5234.193359375,
"logits/rejected": 4424.484375,
"logps/chosen": -293.6166076660156,
"logps/rejected": -331.92974853515625,
"loss": 1.3129,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -87.48274993896484,
"rewards/margins": 59.040428161621094,
"rewards/rejected": -146.52317810058594,
"step": 610
},
{
"epoch": 0.48665620094191525,
"grad_norm": 17.927137367169394,
"learning_rate": 1.8303442526227364e-07,
"logits/chosen": 4438.71240234375,
"logits/rejected": 3490.155029296875,
"logps/chosen": -275.29241943359375,
"logps/rejected": -313.7520751953125,
"loss": 1.3229,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -91.52749633789062,
"rewards/margins": 61.12894821166992,
"rewards/rejected": -152.6564178466797,
"step": 620
},
{
"epoch": 0.4945054945054945,
"grad_norm": 20.192182222445886,
"learning_rate": 1.7901144167821507e-07,
"logits/chosen": 5873.173828125,
"logits/rejected": 4774.25537109375,
"logps/chosen": -323.35992431640625,
"logps/rejected": -342.5325012207031,
"loss": 1.3214,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -80.52757263183594,
"rewards/margins": 67.7401123046875,
"rewards/rejected": -148.26768493652344,
"step": 630
},
{
"epoch": 0.5023547880690737,
"grad_norm": 13.166592462596656,
"learning_rate": 1.749666572845e-07,
"logits/chosen": 5518.23046875,
"logits/rejected": 4573.6103515625,
"logps/chosen": -308.5555419921875,
"logps/rejected": -345.7183837890625,
"loss": 1.3288,
"rewards/accuracies": 0.7083333730697632,
"rewards/chosen": -97.77859497070312,
"rewards/margins": 33.44771194458008,
"rewards/rejected": -131.226318359375,
"step": 640
},
{
"epoch": 0.5102040816326531,
"grad_norm": 13.697385802349345,
"learning_rate": 1.7090311155692034e-07,
"logits/chosen": 4693.83349609375,
"logits/rejected": 3723.333984375,
"logps/chosen": -293.64495849609375,
"logps/rejected": -310.81787109375,
"loss": 1.3321,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -97.3224105834961,
"rewards/margins": 44.76685333251953,
"rewards/rejected": -142.08926391601562,
"step": 650
},
{
"epoch": 0.5180533751962323,
"grad_norm": 25.46823282035701,
"learning_rate": 1.6682385806957695e-07,
"logits/chosen": 5368.3603515625,
"logits/rejected": 4091.384765625,
"logps/chosen": -303.13446044921875,
"logps/rejected": -332.54107666015625,
"loss": 1.3345,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -85.75367736816406,
"rewards/margins": 58.79213333129883,
"rewards/rejected": -144.5458221435547,
"step": 660
},
{
"epoch": 0.5259026687598116,
"grad_norm": 14.259535892393998,
"learning_rate": 1.627319622002544e-07,
"logits/chosen": 5289.35595703125,
"logits/rejected": 4184.24462890625,
"logps/chosen": -298.56573486328125,
"logps/rejected": -323.38275146484375,
"loss": 1.3366,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -91.83341979980469,
"rewards/margins": 42.687252044677734,
"rewards/rejected": -134.52066040039062,
"step": 670
},
{
"epoch": 0.533751962323391,
"grad_norm": 13.615931001877374,
"learning_rate": 1.5863049882692565e-07,
"logits/chosen": 5200.74267578125,
"logits/rejected": 4202.2919921875,
"logps/chosen": -306.4901123046875,
"logps/rejected": -322.50390625,
"loss": 1.3358,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -92.8583984375,
"rewards/margins": 50.42744445800781,
"rewards/rejected": -143.28582763671875,
"step": 680
},
{
"epoch": 0.5416012558869702,
"grad_norm": 18.445984962657718,
"learning_rate": 1.5452255001711795e-07,
"logits/chosen": 5152.58740234375,
"logits/rejected": 4540.1044921875,
"logps/chosen": -297.11669921875,
"logps/rejected": -357.1095275878906,
"loss": 1.3182,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -87.97896575927734,
"rewards/margins": 52.92973709106445,
"rewards/rejected": -140.90872192382812,
"step": 690
},
{
"epoch": 0.5494505494505495,
"grad_norm": 22.733099236513056,
"learning_rate": 1.50411202711876e-07,
"logits/chosen": 4857.0771484375,
"logits/rejected": 4486.8955078125,
"logps/chosen": -285.587646484375,
"logps/rejected": -335.50311279296875,
"loss": 1.3496,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -96.36004638671875,
"rewards/margins": 52.195396423339844,
"rewards/rejected": -148.55545043945312,
"step": 700
},
{
"epoch": 0.5572998430141287,
"grad_norm": 13.753296852144786,
"learning_rate": 1.4629954640606275e-07,
"logits/chosen": 4837.16650390625,
"logits/rejected": 4699.06884765625,
"logps/chosen": -282.49725341796875,
"logps/rejected": -339.46014404296875,
"loss": 1.3241,
"rewards/accuracies": 0.75,
"rewards/chosen": -88.5594253540039,
"rewards/margins": 48.25908660888672,
"rewards/rejected": -136.81851196289062,
"step": 710
},
{
"epoch": 0.565149136577708,
"grad_norm": 23.74236196605101,
"learning_rate": 1.4219067082674153e-07,
"logits/chosen": 5229.64599609375,
"logits/rejected": 4363.564453125,
"logps/chosen": -307.42694091796875,
"logps/rejected": -333.94476318359375,
"loss": 1.3345,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -88.42107391357422,
"rewards/margins": 45.73460006713867,
"rewards/rejected": -134.1556854248047,
"step": 720
},
{
"epoch": 0.5729984301412873,
"grad_norm": 14.581185643023554,
"learning_rate": 1.3808766361138294e-07,
"logits/chosen": 5660.58642578125,
"logits/rejected": 4180.7021484375,
"logps/chosen": -318.3136901855469,
"logps/rejected": -321.05255126953125,
"loss": 1.3196,
"rewards/accuracies": 0.75,
"rewards/chosen": -93.13568115234375,
"rewards/margins": 52.86260223388672,
"rewards/rejected": -145.99827575683594,
"step": 730
},
{
"epoch": 0.5808477237048666,
"grad_norm": 20.680345594369143,
"learning_rate": 1.3399360798764307e-07,
"logits/chosen": 4665.7041015625,
"logits/rejected": 3810.73828125,
"logps/chosen": -277.14202880859375,
"logps/rejected": -301.589111328125,
"loss": 1.3096,
"rewards/accuracies": 0.7916666269302368,
"rewards/chosen": -84.7000732421875,
"rewards/margins": 54.24741744995117,
"rewards/rejected": -138.94747924804688,
"step": 740
},
{
"epoch": 0.5886970172684458,
"grad_norm": 21.101874083978554,
"learning_rate": 1.2991158045645435e-07,
"logits/chosen": 5032.05322265625,
"logits/rejected": 4430.30322265625,
"logps/chosen": -268.8847961425781,
"logps/rejected": -301.6675109863281,
"loss": 1.3294,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -88.50262451171875,
"rewards/margins": 46.05238723754883,
"rewards/rejected": -134.55502319335938,
"step": 750
},
{
"epoch": 0.5965463108320251,
"grad_norm": 16.80574616569174,
"learning_rate": 1.2584464848017214e-07,
"logits/chosen": 5412.44775390625,
"logits/rejected": 4117.3740234375,
"logps/chosen": -287.7677001953125,
"logps/rejected": -306.7326354980469,
"loss": 1.3314,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -84.49854278564453,
"rewards/margins": 47.73679733276367,
"rewards/rejected": -132.23533630371094,
"step": 760
},
{
"epoch": 0.6043956043956044,
"grad_norm": 26.26369158658654,
"learning_rate": 1.2179586817751263e-07,
"logits/chosen": 5509.0380859375,
"logits/rejected": 4625.6474609375,
"logps/chosen": -320.41656494140625,
"logps/rejected": -366.6982421875,
"loss": 1.3377,
"rewards/accuracies": 0.7166667580604553,
"rewards/chosen": -90.7030258178711,
"rewards/margins": 52.11555099487305,
"rewards/rejected": -142.81857299804688,
"step": 770
},
{
"epoch": 0.6122448979591837,
"grad_norm": 16.19764581259732,
"learning_rate": 1.177682820270159e-07,
"logits/chosen": 5451.91162109375,
"logits/rejected": 5010.38232421875,
"logps/chosen": -305.00799560546875,
"logps/rejected": -377.30596923828125,
"loss": 1.3328,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -88.89636993408203,
"rewards/margins": 51.006568908691406,
"rewards/rejected": -139.9029541015625,
"step": 780
},
{
"epoch": 0.6200941915227629,
"grad_norm": 16.344358737664763,
"learning_rate": 1.13764916580758e-07,
"logits/chosen": 4994.599609375,
"logits/rejected": 4273.96875,
"logps/chosen": -293.0992126464844,
"logps/rejected": -346.97467041015625,
"loss": 1.3234,
"rewards/accuracies": 0.7916667461395264,
"rewards/chosen": -94.93836975097656,
"rewards/margins": 63.40740966796875,
"rewards/rejected": -158.3457794189453,
"step": 790
},
{
"epoch": 0.6279434850863422,
"grad_norm": 18.968431335137478,
"learning_rate": 1.0978878019003183e-07,
"logits/chosen": 5044.96875,
"logits/rejected": 4262.8154296875,
"logps/chosen": -310.9752502441406,
"logps/rejected": -353.9684143066406,
"loss": 1.3377,
"rewards/accuracies": 0.75,
"rewards/chosen": -94.22391510009766,
"rewards/margins": 47.09145736694336,
"rewards/rejected": -141.31536865234375,
"step": 800
},
{
"epoch": 0.6357927786499215,
"grad_norm": 13.606253137803622,
"learning_rate": 1.0584286074470446e-07,
"logits/chosen": 4622.6064453125,
"logits/rejected": 3968.47412109375,
"logps/chosen": -293.10089111328125,
"logps/rejected": -363.0287170410156,
"loss": 1.3322,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -90.6499252319336,
"rewards/margins": 51.11803436279297,
"rewards/rejected": -141.7679443359375,
"step": 810
},
{
"epoch": 0.6436420722135008,
"grad_norm": 38.62289956267384,
"learning_rate": 1.0193012342795095e-07,
"logits/chosen": 5154.56494140625,
"logits/rejected": 3781.949951171875,
"logps/chosen": -306.31817626953125,
"logps/rejected": -307.9537048339844,
"loss": 1.32,
"rewards/accuracies": 0.8333333730697632,
"rewards/chosen": -85.11204528808594,
"rewards/margins": 54.1730842590332,
"rewards/rejected": -139.28512573242188,
"step": 820
},
{
"epoch": 0.6514913657770801,
"grad_norm": 22.79781152184824,
"learning_rate": 9.805350848805078e-08,
"logits/chosen": 5019.9873046875,
"logits/rejected": 4204.40673828125,
"logps/chosen": -296.72509765625,
"logps/rejected": -338.36236572265625,
"loss": 1.3248,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -92.53424072265625,
"rewards/margins": 53.35478973388672,
"rewards/rejected": -145.88902282714844,
"step": 830
},
{
"epoch": 0.6593406593406593,
"grad_norm": 14.988614689944379,
"learning_rate": 9.421592902892184e-08,
"logits/chosen": 5479.388671875,
"logits/rejected": 4344.3359375,
"logps/chosen": -319.1064147949219,
"logps/rejected": -352.5812683105469,
"loss": 1.3196,
"rewards/accuracies": 0.6833332777023315,
"rewards/chosen": -91.74089050292969,
"rewards/margins": 48.036460876464844,
"rewards/rejected": -139.77737426757812,
"step": 840
},
{
"epoch": 0.6671899529042387,
"grad_norm": 16.378454591394703,
"learning_rate": 9.042026882105215e-08,
"logits/chosen": 5068.8427734375,
"logits/rejected": 4380.408203125,
"logps/chosen": -304.6463928222656,
"logps/rejected": -351.93988037109375,
"loss": 1.311,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -89.17027282714844,
"rewards/margins": 62.253204345703125,
"rewards/rejected": -151.42349243164062,
"step": 850
},
{
"epoch": 0.6750392464678179,
"grad_norm": 33.867305610136256,
"learning_rate": 8.666938013447484e-08,
"logits/chosen": 5499.62353515625,
"logits/rejected": 4524.25390625,
"logps/chosen": -333.63714599609375,
"logps/rejected": -339.46429443359375,
"loss": 1.3436,
"rewards/accuracies": 0.7416666150093079,
"rewards/chosen": -85.83646392822266,
"rewards/margins": 50.95392990112305,
"rewards/rejected": -136.79037475585938,
"step": 860
},
{
"epoch": 0.6828885400313972,
"grad_norm": 16.295742425420926,
"learning_rate": 8.29660815954134e-08,
"logits/chosen": 5463.05712890625,
"logits/rejected": 4347.86669921875,
"logps/chosen": -314.76629638671875,
"logps/rejected": -340.78515625,
"loss": 1.3232,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -84.63117980957031,
"rewards/margins": 55.43559646606445,
"rewards/rejected": -140.0667724609375,
"step": 870
},
{
"epoch": 0.6907378335949764,
"grad_norm": 35.343044136950866,
"learning_rate": 7.931315606820979e-08,
"logits/chosen": 4798.94140625,
"logits/rejected": 4442.7099609375,
"logps/chosen": -299.4132080078125,
"logps/rejected": -370.7325744628906,
"loss": 1.3375,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -88.87822723388672,
"rewards/margins": 50.25794219970703,
"rewards/rejected": -139.13616943359375,
"step": 880
},
{
"epoch": 0.6985871271585558,
"grad_norm": 15.65569216217348,
"learning_rate": 7.571334856412503e-08,
"logits/chosen": 5700.92724609375,
"logits/rejected": 4396.40234375,
"logps/chosen": -342.78717041015625,
"logps/rejected": -354.91058349609375,
"loss": 1.337,
"rewards/accuracies": 0.8083333969116211,
"rewards/chosen": -85.16032409667969,
"rewards/margins": 57.98085403442383,
"rewards/rejected": -143.14117431640625,
"step": 890
},
{
"epoch": 0.706436420722135,
"grad_norm": 18.88682608723191,
"learning_rate": 7.216936417858549e-08,
"logits/chosen": 4812.2412109375,
"logits/rejected": 4236.50439453125,
"logps/chosen": -288.49560546875,
"logps/rejected": -342.0299987792969,
"loss": 1.3348,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -84.1703872680664,
"rewards/margins": 47.417232513427734,
"rewards/rejected": -131.58761596679688,
"step": 900
},
{
"epoch": 0.7142857142857143,
"grad_norm": 16.830087200212606,
"learning_rate": 6.868386605842379e-08,
"logits/chosen": 4705.6962890625,
"logits/rejected": 4288.7353515625,
"logps/chosen": -298.02349853515625,
"logps/rejected": -354.6422119140625,
"loss": 1.3302,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -89.75446319580078,
"rewards/margins": 56.94415283203125,
"rewards/rejected": -146.6986083984375,
"step": 910
},
{
"epoch": 0.7221350078492935,
"grad_norm": 15.005561893820257,
"learning_rate": 6.525947340064232e-08,
"logits/chosen": 5283.912109375,
"logits/rejected": 4178.82958984375,
"logps/chosen": -321.24029541015625,
"logps/rejected": -337.23944091796875,
"loss": 1.3283,
"rewards/accuracies": 0.7666667103767395,
"rewards/chosen": -88.7941665649414,
"rewards/margins": 50.259727478027344,
"rewards/rejected": -139.0539093017578,
"step": 920
},
{
"epoch": 0.7299843014128728,
"grad_norm": 15.702994402687256,
"learning_rate": 6.189875948420276e-08,
"logits/chosen": 5172.5234375,
"logits/rejected": 3729.58544921875,
"logps/chosen": -328.5713806152344,
"logps/rejected": -337.1180114746094,
"loss": 1.324,
"rewards/accuracies": 0.783333420753479,
"rewards/chosen": -92.56468200683594,
"rewards/margins": 58.63731002807617,
"rewards/rejected": -151.20199584960938,
"step": 930
},
{
"epoch": 0.7378335949764521,
"grad_norm": 28.79822002326502,
"learning_rate": 5.86042497363215e-08,
"logits/chosen": 5023.8671875,
"logits/rejected": 4205.1982421875,
"logps/chosen": -307.396240234375,
"logps/rejected": -361.35791015625,
"loss": 1.3224,
"rewards/accuracies": 0.8083333969116211,
"rewards/chosen": -89.06114959716797,
"rewards/margins": 67.33191680908203,
"rewards/rejected": -156.39306640625,
"step": 940
},
{
"epoch": 0.7456828885400314,
"grad_norm": 29.49903030029792,
"learning_rate": 5.537841983472337e-08,
"logits/chosen": 5192.91015625,
"logits/rejected": 4614.15087890625,
"logps/chosen": -308.1656188964844,
"logps/rejected": -341.88336181640625,
"loss": 1.3437,
"rewards/accuracies": 0.7166666388511658,
"rewards/chosen": -90.37879943847656,
"rewards/margins": 37.416908264160156,
"rewards/rejected": -127.79571533203125,
"step": 950
},
{
"epoch": 0.7535321821036107,
"grad_norm": 19.260341973231338,
"learning_rate": 5.222369384727954e-08,
"logits/chosen": 5086.7275390625,
"logits/rejected": 4373.75390625,
"logps/chosen": -302.8155212402344,
"logps/rejected": -341.2423095703125,
"loss": 1.3244,
"rewards/accuracies": 0.783333420753479,
"rewards/chosen": -84.31675720214844,
"rewards/margins": 54.4622802734375,
"rewards/rejected": -138.77902221679688,
"step": 960
},
{
"epoch": 0.7613814756671899,
"grad_norm": 21.497254099738637,
"learning_rate": 4.914244241042832e-08,
"logits/chosen": 5195.4404296875,
"logits/rejected": 4174.501953125,
"logps/chosen": -328.4187316894531,
"logps/rejected": -357.6769714355469,
"loss": 1.3155,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -88.17876434326172,
"rewards/margins": 62.826393127441406,
"rewards/rejected": -151.00515747070312,
"step": 970
},
{
"epoch": 0.7692307692307693,
"grad_norm": 14.487995995435664,
"learning_rate": 4.613698094774715e-08,
"logits/chosen": 4949.87158203125,
"logits/rejected": 3807.67724609375,
"logps/chosen": -312.44573974609375,
"logps/rejected": -349.9386291503906,
"loss": 1.3152,
"rewards/accuracies": 0.8333333134651184,
"rewards/chosen": -83.61039733886719,
"rewards/margins": 75.85111236572266,
"rewards/rejected": -159.46151733398438,
"step": 980
},
{
"epoch": 0.7770800627943485,
"grad_norm": 19.108590316137075,
"learning_rate": 4.3209567930014746e-08,
"logits/chosen": 5185.36865234375,
"logits/rejected": 4195.95068359375,
"logps/chosen": -315.67938232421875,
"logps/rejected": -348.85284423828125,
"loss": 1.3174,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -89.78607177734375,
"rewards/margins": 64.9991683959961,
"rewards/rejected": -154.7852325439453,
"step": 990
},
{
"epoch": 0.7849293563579278,
"grad_norm": 22.072531979816144,
"learning_rate": 4.036240317807059e-08,
"logits/chosen": 4905.2353515625,
"logits/rejected": 3987.02978515625,
"logps/chosen": -303.3591003417969,
"logps/rejected": -321.552978515625,
"loss": 1.3349,
"rewards/accuracies": 0.75,
"rewards/chosen": -89.55894470214844,
"rewards/margins": 51.63554000854492,
"rewards/rejected": -141.1945037841797,
"step": 1000
},
{
"epoch": 0.792778649921507,
"grad_norm": 21.75070665962702,
"learning_rate": 3.759762620974716e-08,
"logits/chosen": 5060.6669921875,
"logits/rejected": 4109.7626953125,
"logps/chosen": -304.08740234375,
"logps/rejected": -337.9571838378906,
"loss": 1.334,
"rewards/accuracies": 0.7083333134651184,
"rewards/chosen": -89.3779296875,
"rewards/margins": 57.18464279174805,
"rewards/rejected": -146.56259155273438,
"step": 1010
},
{
"epoch": 0.8006279434850864,
"grad_norm": 17.10653047682008,
"learning_rate": 3.4917314632117423e-08,
"logits/chosen": 5245.35009765625,
"logits/rejected": 4436.5830078125,
"logps/chosen": -326.85821533203125,
"logps/rejected": -367.40740966796875,
"loss": 1.3302,
"rewards/accuracies": 0.8333333730697632,
"rewards/chosen": -78.67982482910156,
"rewards/margins": 62.575775146484375,
"rewards/rejected": -141.25559997558594,
"step": 1020
},
{
"epoch": 0.8084772370486656,
"grad_norm": 16.25681330236911,
"learning_rate": 3.232348258026552e-08,
"logits/chosen": 5550.1083984375,
"logits/rejected": 5013.5693359375,
"logps/chosen": -318.5128479003906,
"logps/rejected": -371.39556884765625,
"loss": 1.329,
"rewards/accuracies": 0.7750000357627869,
"rewards/chosen": -87.0693130493164,
"rewards/margins": 51.2072868347168,
"rewards/rejected": -138.276611328125,
"step": 1030
},
{
"epoch": 0.8163265306122449,
"grad_norm": 15.78931624178539,
"learning_rate": 2.9818079203753654e-08,
"logits/chosen": 4891.5419921875,
"logits/rejected": 4282.7998046875,
"logps/chosen": -314.73773193359375,
"logps/rejected": -368.1146240234375,
"loss": 1.3238,
"rewards/accuracies": 0.7250000834465027,
"rewards/chosen": -94.64768981933594,
"rewards/margins": 64.27806854248047,
"rewards/rejected": -158.92575073242188,
"step": 1040
},
{
"epoch": 0.8241758241758241,
"grad_norm": 16.54458435339151,
"learning_rate": 2.740298720192245e-08,
"logits/chosen": 4927.3154296875,
"logits/rejected": 4254.25341796875,
"logps/chosen": -328.06463623046875,
"logps/rejected": -370.9690856933594,
"loss": 1.3151,
"rewards/accuracies": 0.8083332777023315,
"rewards/chosen": -90.51560974121094,
"rewards/margins": 64.61587524414062,
"rewards/rejected": -155.13148498535156,
"step": 1050
},
{
"epoch": 0.8320251177394035,
"grad_norm": 26.179961673866252,
"learning_rate": 2.5080021409126655e-08,
"logits/chosen": 6100.56396484375,
"logits/rejected": 4631.39599609375,
"logps/chosen": -352.55450439453125,
"logps/rejected": -360.7041015625,
"loss": 1.3358,
"rewards/accuracies": 0.7333332896232605,
"rewards/chosen": -91.52915954589844,
"rewards/margins": 49.11104202270508,
"rewards/rejected": -140.6402130126953,
"step": 1060
},
{
"epoch": 0.8398744113029827,
"grad_norm": 17.503149002663815,
"learning_rate": 2.2850927430967287e-08,
"logits/chosen": 5088.08935546875,
"logits/rejected": 4477.36572265625,
"logps/chosen": -312.17938232421875,
"logps/rejected": -340.80975341796875,
"loss": 1.3326,
"rewards/accuracies": 0.73333340883255,
"rewards/chosen": -86.90293884277344,
"rewards/margins": 60.76922607421875,
"rewards/rejected": -147.67214965820312,
"step": 1070
},
{
"epoch": 0.847723704866562,
"grad_norm": 16.608334704247707,
"learning_rate": 2.0717380332546768e-08,
"logits/chosen": 5636.8876953125,
"logits/rejected": 4137.8525390625,
"logps/chosen": -351.24298095703125,
"logps/rejected": -354.1505126953125,
"loss": 1.3203,
"rewards/accuracies": 0.8333333730697632,
"rewards/chosen": -84.64915466308594,
"rewards/margins": 68.51604461669922,
"rewards/rejected": -153.1652069091797,
"step": 1080
},
{
"epoch": 0.8555729984301413,
"grad_norm": 16.306285487869843,
"learning_rate": 1.8680983379731618e-08,
"logits/chosen": 5661.28173828125,
"logits/rejected": 4081.420654296875,
"logps/chosen": -317.64776611328125,
"logps/rejected": -356.91375732421875,
"loss": 1.3259,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -90.13359069824219,
"rewards/margins": 53.870460510253906,
"rewards/rejected": -144.0040283203125,
"step": 1090
},
{
"epoch": 0.8634222919937206,
"grad_norm": 17.07320149919412,
"learning_rate": 1.6743266834369856e-08,
"logits/chosen": 5442.111328125,
"logits/rejected": 4267.1455078125,
"logps/chosen": -330.0649108886719,
"logps/rejected": -343.8323669433594,
"loss": 1.3308,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -91.5087661743164,
"rewards/margins": 62.82781982421875,
"rewards/rejected": -154.3365936279297,
"step": 1100
},
{
"epoch": 0.8712715855572999,
"grad_norm": 13.747396863762589,
"learning_rate": 1.4905686804366853e-08,
"logits/chosen": 5771.70458984375,
"logits/rejected": 4442.76708984375,
"logps/chosen": -355.2906799316406,
"logps/rejected": -374.15447998046875,
"loss": 1.3248,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -90.69126892089844,
"rewards/margins": 61.19647216796875,
"rewards/rejected": -151.8877410888672,
"step": 1110
},
{
"epoch": 0.8791208791208791,
"grad_norm": 27.204997822783117,
"learning_rate": 1.3169624149485037e-08,
"logits/chosen": 5658.005859375,
"logits/rejected": 4368.58642578125,
"logps/chosen": -337.535888671875,
"logps/rejected": -371.7395324707031,
"loss": 1.3197,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -92.40483093261719,
"rewards/margins": 62.56023025512695,
"rewards/rejected": -154.96505737304688,
"step": 1120
},
{
"epoch": 0.8869701726844584,
"grad_norm": 20.723713223509073,
"learning_rate": 1.1536383443688968e-08,
"logits/chosen": 5141.8701171875,
"logits/rejected": 3853.640625,
"logps/chosen": -318.36944580078125,
"logps/rejected": -326.81610107421875,
"loss": 1.3163,
"rewards/accuracies": 0.7916666865348816,
"rewards/chosen": -83.58235168457031,
"rewards/margins": 63.00525665283203,
"rewards/rejected": -146.58761596679688,
"step": 1130
},
{
"epoch": 0.8948194662480377,
"grad_norm": 15.383586929186926,
"learning_rate": 1.0007191994816272e-08,
"logits/chosen": 5663.7626953125,
"logits/rejected": 4409.96728515625,
"logps/chosen": -343.25408935546875,
"logps/rejected": -359.54864501953125,
"loss": 1.3158,
"rewards/accuracies": 0.7166666388511658,
"rewards/chosen": -85.61146545410156,
"rewards/margins": 53.071754455566406,
"rewards/rejected": -138.68319702148438,
"step": 1140
},
{
"epoch": 0.902668759811617,
"grad_norm": 15.9052219677393,
"learning_rate": 8.583198922310031e-09,
"logits/chosen": 4978.74658203125,
"logits/rejected": 4447.34521484375,
"logps/chosen": -312.7223815917969,
"logps/rejected": -353.9768981933594,
"loss": 1.3155,
"rewards/accuracies": 0.6916667222976685,
"rewards/chosen": -85.73565673828125,
"rewards/margins": 48.706329345703125,
"rewards/rejected": -134.44198608398438,
"step": 1150
},
{
"epoch": 0.9105180533751962,
"grad_norm": 19.701859698836042,
"learning_rate": 7.2654742937067916e-09,
"logits/chosen": 5107.5390625,
"logits/rejected": 3909.550048828125,
"logps/chosen": -320.367431640625,
"logps/rejected": -338.51812744140625,
"loss": 1.3246,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -91.79454803466797,
"rewards/margins": 50.79388427734375,
"rewards/rejected": -142.5884246826172,
"step": 1160
},
{
"epoch": 0.9183673469387755,
"grad_norm": 32.20474236778808,
"learning_rate": 6.0550083205280325e-09,
"logits/chosen": 5575.20849609375,
"logits/rejected": 5152.0107421875,
"logps/chosen": -332.0797424316406,
"logps/rejected": -392.1387023925781,
"loss": 1.3201,
"rewards/accuracies": 0.73333340883255,
"rewards/chosen": -89.12452697753906,
"rewards/margins": 56.22435760498047,
"rewards/rejected": -145.34889221191406,
"step": 1170
},
{
"epoch": 0.9262166405023547,
"grad_norm": 19.875692057488717,
"learning_rate": 4.952710614180567e-09,
"logits/chosen": 4974.9326171875,
"logits/rejected": 4310.21337890625,
"logps/chosen": -328.6920166015625,
"logps/rejected": -362.4015197753906,
"loss": 1.3248,
"rewards/accuracies": 0.7833333611488342,
"rewards/chosen": -88.78321075439453,
"rewards/margins": 56.8275032043457,
"rewards/rejected": -145.61070251464844,
"step": 1180
},
{
"epoch": 0.9340659340659341,
"grad_norm": 18.2157711653637,
"learning_rate": 3.959409502423744e-09,
"logits/chosen": 5420.66015625,
"logits/rejected": 4498.2080078125,
"logps/chosen": -345.4786071777344,
"logps/rejected": -369.42694091796875,
"loss": 1.3206,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -96.6148452758789,
"rewards/margins": 55.66218185424805,
"rewards/rejected": -152.2770233154297,
"step": 1190
},
{
"epoch": 0.9419152276295133,
"grad_norm": 19.275111817920287,
"learning_rate": 3.075851406917995e-09,
"logits/chosen": 5607.0615234375,
"logits/rejected": 4485.181640625,
"logps/chosen": -340.6480407714844,
"logps/rejected": -372.07196044921875,
"loss": 1.3207,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -85.0034408569336,
"rewards/margins": 68.38439178466797,
"rewards/rejected": -153.38784790039062,
"step": 1200
},
{
"epoch": 0.9497645211930926,
"grad_norm": 47.451083320046365,
"learning_rate": 2.3027002823221064e-09,
"logits/chosen": 5242.3955078125,
"logits/rejected": 4309.052734375,
"logps/chosen": -328.3695983886719,
"logps/rejected": -372.03692626953125,
"loss": 1.3288,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -88.16230773925781,
"rewards/margins": 65.71019744873047,
"rewards/rejected": -153.87249755859375,
"step": 1210
},
{
"epoch": 0.957613814756672,
"grad_norm": 17.39982846549713,
"learning_rate": 1.6405371173608073e-09,
"logits/chosen": 6537.92041015625,
"logits/rejected": 4597.3408203125,
"logps/chosen": -389.57415771484375,
"logps/rejected": -396.27960205078125,
"loss": 1.3285,
"rewards/accuracies": 0.7500000596046448,
"rewards/chosen": -92.74442291259766,
"rewards/margins": 58.356590270996094,
"rewards/rejected": -151.10101318359375,
"step": 1220
},
{
"epoch": 0.9654631083202512,
"grad_norm": 17.667026886080837,
"learning_rate": 1.0898594982375387e-09,
"logits/chosen": 5799.66064453125,
"logits/rejected": 4413.10302734375,
"logps/chosen": -349.25537109375,
"logps/rejected": -362.83990478515625,
"loss": 1.3307,
"rewards/accuracies": 0.7416666746139526,
"rewards/chosen": -88.6881332397461,
"rewards/margins": 61.15995407104492,
"rewards/rejected": -149.8480987548828,
"step": 1230
},
{
"epoch": 0.9733124018838305,
"grad_norm": 13.882974741420169,
"learning_rate": 6.510812347205185e-10,
"logits/chosen": 4966.72021484375,
"logits/rejected": 4371.02392578125,
"logps/chosen": -324.99078369140625,
"logps/rejected": -384.681640625,
"loss": 1.324,
"rewards/accuracies": 0.7083333134651184,
"rewards/chosen": -93.39674377441406,
"rewards/margins": 56.08783721923828,
"rewards/rejected": -149.48458862304688,
"step": 1240
},
{
"epoch": 0.9811616954474097,
"grad_norm": 15.154457626536647,
"learning_rate": 3.245320491830683e-10,
"logits/chosen": 5032.3193359375,
"logits/rejected": 4055.299560546875,
"logps/chosen": -315.40277099609375,
"logps/rejected": -327.2320251464844,
"loss": 1.3286,
"rewards/accuracies": 0.7333333492279053,
"rewards/chosen": -85.07127380371094,
"rewards/margins": 52.71143341064453,
"rewards/rejected": -137.78269958496094,
"step": 1250
},
{
"epoch": 0.989010989010989,
"grad_norm": 15.979946674969982,
"learning_rate": 1.1045732883197545e-10,
"logits/chosen": 4809.5751953125,
"logits/rejected": 4305.59765625,
"logps/chosen": -319.3553771972656,
"logps/rejected": -360.33917236328125,
"loss": 1.3166,
"rewards/accuracies": 0.7583333253860474,
"rewards/chosen": -87.20452117919922,
"rewards/margins": 56.80842208862305,
"rewards/rejected": -144.01295471191406,
"step": 1260
},
{
"epoch": 0.9968602825745683,
"grad_norm": 27.24614835648412,
"learning_rate": 9.017941309813482e-12,
"logits/chosen": 4971.7294921875,
"logits/rejected": 3906.59228515625,
"logps/chosen": -316.85064697265625,
"logps/rejected": -327.2884521484375,
"loss": 1.3334,
"rewards/accuracies": 0.7666666507720947,
"rewards/chosen": -92.36187744140625,
"rewards/margins": 48.6181755065918,
"rewards/rejected": -140.9800567626953,
"step": 1270
},
{
"epoch": 1.0,
"step": 1274,
"total_flos": 0.0,
"train_loss": 1.3597402943172485,
"train_runtime": 13015.2109,
"train_samples_per_second": 4.697,
"train_steps_per_second": 0.098
}
],
"logging_steps": 10,
"max_steps": 1274,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 2000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}
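
The JSON above is a standard Trainer state dump: log_history holds one record per logging interval (logging_steps = 10) with the DPO-style metrics (loss, rewards/chosen, rewards/rejected, rewards/margins, rewards/accuracies), and the final record carries the run summary. A minimal sketch of how such a file might be inspected follows; it assumes the file is saved locally under the conventional name trainer_state.json, and the path and the choice of columns printed are assumptions, not part of the original file.

# Minimal sketch (assumed filename "trainer_state.json"); prints a few
# logged metrics per step to eyeball how loss and reward margins evolve.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step records; the final epoch summary has no "loss" key.
history = [h for h in state["log_history"] if "loss" in h]

for record in history[::10]:  # sample every 10th logged entry
    print(
        f"step {record['step']:>4}  "
        f"loss {record['loss']:.4f}  "
        f"margin {record.get('rewards/margins', float('nan')):.2f}  "
        f"acc {record.get('rewards/accuracies', float('nan')):.3f}"
    )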