IL_SquareAll-7b-sft-full / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 1274,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0007849293563579278,
"grad_norm": 7.21583952854038,
"learning_rate": 3.90625e-09,
"logits/chosen": 5914.52099609375,
"logits/rejected": 2785.021484375,
"logps/chosen": -212.45889282226562,
"logps/rejected": -98.59669494628906,
"loss": 1.3611,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.007849293563579277,
"grad_norm": 7.250440323853339,
"learning_rate": 3.9062499999999997e-08,
"logits/chosen": 4974.06640625,
"logits/rejected": 4328.462890625,
"logps/chosen": -204.18780517578125,
"logps/rejected": -179.7210693359375,
"loss": 1.3611,
"rewards/accuracies": 0.5462963581085205,
"rewards/chosen": 0.09606336802244186,
"rewards/margins": 0.11150993406772614,
"rewards/rejected": -0.015446568839251995,
"step": 10
},
{
"epoch": 0.015698587127158554,
"grad_norm": 7.101196143379034,
"learning_rate": 7.812499999999999e-08,
"logits/chosen": 6083.6796875,
"logits/rejected": 4833.9912109375,
"logps/chosen": -217.20980834960938,
"logps/rejected": -196.7445831298828,
"loss": 1.361,
"rewards/accuracies": 0.5583332777023315,
"rewards/chosen": 0.001381927402690053,
"rewards/margins": 0.03239595144987106,
"rewards/rejected": -0.031014028936624527,
"step": 20
},
{
"epoch": 0.023547880690737835,
"grad_norm": 6.6022522261600285,
"learning_rate": 1.1718749999999999e-07,
"logits/chosen": 6079.79736328125,
"logits/rejected": 5101.44482421875,
"logps/chosen": -250.5417938232422,
"logps/rejected": -209.3507080078125,
"loss": 1.3611,
"rewards/accuracies": 0.6083333492279053,
"rewards/chosen": 0.03053979016840458,
"rewards/margins": 0.08365341275930405,
"rewards/rejected": -0.05311363935470581,
"step": 30
},
{
"epoch": 0.03139717425431711,
"grad_norm": 6.651029235705796,
"learning_rate": 1.5624999999999999e-07,
"logits/chosen": 5290.8310546875,
"logits/rejected": 4330.50439453125,
"logps/chosen": -212.19906616210938,
"logps/rejected": -181.70709228515625,
"loss": 1.3605,
"rewards/accuracies": 0.44999998807907104,
"rewards/chosen": -0.11981513351202011,
"rewards/margins": -0.09974738210439682,
"rewards/rejected": -0.020067747682332993,
"step": 40
},
{
"epoch": 0.03924646781789639,
"grad_norm": 8.194672586151817,
"learning_rate": 1.9531249999999998e-07,
"logits/chosen": 6344.0693359375,
"logits/rejected": 4979.76123046875,
"logps/chosen": -266.24810791015625,
"logps/rejected": -207.02828979492188,
"loss": 1.3583,
"rewards/accuracies": 0.3749999701976776,
"rewards/chosen": -0.6214836239814758,
"rewards/margins": -0.29282474517822266,
"rewards/rejected": -0.3286588788032532,
"step": 50
},
{
"epoch": 0.04709576138147567,
"grad_norm": 11.446322704538849,
"learning_rate": 2.3437499999999998e-07,
"logits/chosen": 5300.9951171875,
"logits/rejected": 4405.18017578125,
"logps/chosen": -216.7227325439453,
"logps/rejected": -210.4974822998047,
"loss": 1.3477,
"rewards/accuracies": 0.3499999940395355,
"rewards/chosen": -2.7187533378601074,
"rewards/margins": -1.0213677883148193,
"rewards/rejected": -1.6973854303359985,
"step": 60
},
{
"epoch": 0.054945054945054944,
"grad_norm": 19.538858672981345,
"learning_rate": 2.734375e-07,
"logits/chosen": 4725.4267578125,
"logits/rejected": 4486.5966796875,
"logps/chosen": -191.88389587402344,
"logps/rejected": -188.22422790527344,
"loss": 1.2939,
"rewards/accuracies": 0.38333338499069214,
"rewards/chosen": -13.402986526489258,
"rewards/margins": -1.9104820489883423,
"rewards/rejected": -11.492505073547363,
"step": 70
},
{
"epoch": 0.06279434850863422,
"grad_norm": 15.091021164111524,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": 4773.61181640625,
"logits/rejected": 4373.45947265625,
"logps/chosen": -246.6728973388672,
"logps/rejected": -227.4425048828125,
"loss": 1.1407,
"rewards/accuracies": 0.4000000059604645,
"rewards/chosen": -50.10432815551758,
"rewards/margins": -4.2523627281188965,
"rewards/rejected": -45.851966857910156,
"step": 80
},
{
"epoch": 0.0706436420722135,
"grad_norm": 12.74258111169898,
"learning_rate": 3.5156249999999997e-07,
"logits/chosen": 4061.00244140625,
"logits/rejected": 3487.479248046875,
"logps/chosen": -445.48590087890625,
"logps/rejected": -380.95623779296875,
"loss": 0.9196,
"rewards/accuracies": 0.3583333492279053,
"rewards/chosen": -226.60366821289062,
"rewards/margins": -30.091236114501953,
"rewards/rejected": -196.5124053955078,
"step": 90
},
{
"epoch": 0.07849293563579278,
"grad_norm": 0.6594127545492917,
"learning_rate": 3.9062499999999997e-07,
"logits/chosen": 1796.354248046875,
"logits/rejected": 1736.423583984375,
"logps/chosen": -1689.5208740234375,
"logps/rejected": -1636.62158203125,
"loss": 0.7246,
"rewards/accuracies": 0.4583333432674408,
"rewards/chosen": -1479.2900390625,
"rewards/margins": -42.110374450683594,
"rewards/rejected": -1437.179931640625,
"step": 100
},
{
"epoch": 0.08634222919937205,
"grad_norm": 0.07200474368710162,
"learning_rate": 4.2968749999999996e-07,
"logits/chosen": 992.4479370117188,
"logits/rejected": 837.40087890625,
"logps/chosen": -3747.5078125,
"logps/rejected": -3181.522216796875,
"loss": 0.697,
"rewards/accuracies": 0.4583333432674408,
"rewards/chosen": -3564.392578125,
"rewards/margins": -561.8626098632812,
"rewards/rejected": -3002.5302734375,
"step": 110
},
{
"epoch": 0.09419152276295134,
"grad_norm": 0.017698020360709873,
"learning_rate": 4.6874999999999996e-07,
"logits/chosen": 864.0091552734375,
"logits/rejected": 803.8848876953125,
"logps/chosen": -4694.90478515625,
"logps/rejected": -4021.72216796875,
"loss": 0.695,
"rewards/accuracies": 0.4166666865348816,
"rewards/chosen": -4492.0673828125,
"rewards/margins": -668.5100708007812,
"rewards/rejected": -3823.557373046875,
"step": 120
},
{
"epoch": 0.10204081632653061,
"grad_norm": 0.015776874764385673,
"learning_rate": 4.999962424962166e-07,
"logits/chosen": 675.6439819335938,
"logits/rejected": 709.3082275390625,
"logps/chosen": -5338.3056640625,
"logps/rejected": -4936.4189453125,
"loss": 0.6948,
"rewards/accuracies": 0.46666663885116577,
"rewards/chosen": -5128.4970703125,
"rewards/margins": -389.72308349609375,
"rewards/rejected": -4738.7744140625,
"step": 130
},
{
"epoch": 0.10989010989010989,
"grad_norm": 0.01684332265893427,
"learning_rate": 4.998647417232375e-07,
"logits/chosen": 550.5469970703125,
"logits/rejected": 620.0813598632812,
"logps/chosen": -5882.72998046875,
"logps/rejected": -5161.9072265625,
"loss": 0.6946,
"rewards/accuracies": 0.4333333373069763,
"rewards/chosen": -5697.06298828125,
"rewards/margins": -712.7469482421875,
"rewards/rejected": -4984.3154296875,
"step": 140
},
{
"epoch": 0.11773940345368916,
"grad_norm": 0.038418008430490494,
"learning_rate": 4.995454786965036e-07,
"logits/chosen": 543.4015502929688,
"logits/rejected": 561.3269653320312,
"logps/chosen": -6591.6357421875,
"logps/rejected": -5493.3583984375,
"loss": 0.6946,
"rewards/accuracies": 0.4166666865348816,
"rewards/chosen": -6389.9521484375,
"rewards/margins": -1068.891357421875,
"rewards/rejected": -5321.06103515625,
"step": 150
},
{
"epoch": 0.12558869701726844,
"grad_norm": 0.006107357900924098,
"learning_rate": 4.990386933279972e-07,
"logits/chosen": 570.4808349609375,
"logits/rejected": 570.5257568359375,
"logps/chosen": -6998.73583984375,
"logps/rejected": -6497.4736328125,
"loss": 0.6945,
"rewards/accuracies": 0.44166669249534607,
"rewards/chosen": -6801.20703125,
"rewards/margins": -500.7915954589844,
"rewards/rejected": -6300.41552734375,
"step": 160
},
{
"epoch": 0.13343799058084774,
"grad_norm": 0.017235831589250027,
"learning_rate": 4.983447664444096e-07,
"logits/chosen": 655.1981201171875,
"logits/rejected": 689.1236572265625,
"logps/chosen": -7807.98291015625,
"logps/rejected": -7047.0390625,
"loss": 0.6945,
"rewards/accuracies": 0.40833336114883423,
"rewards/chosen": -7598.36181640625,
"rewards/margins": -747.6442260742188,
"rewards/rejected": -6850.71728515625,
"step": 170
},
{
"epoch": 0.141287284144427,
"grad_norm": 0.006581257523228928,
"learning_rate": 4.97464219500968e-07,
"logits/chosen": 607.44287109375,
"logits/rejected": 628.7760009765625,
"logps/chosen": -7429.20068359375,
"logps/rejected": -6579.3798828125,
"loss": 0.6945,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": -7240.6728515625,
"rewards/margins": -839.4552612304688,
"rewards/rejected": -6401.21826171875,
"step": 180
},
{
"epoch": 0.14913657770800628,
"grad_norm": 0.004199166098585641,
"learning_rate": 4.963977141895843e-07,
"logits/chosen": 503.93402099609375,
"logits/rejected": 599.97412109375,
"logps/chosen": -8052.12255859375,
"logps/rejected": -7054.7041015625,
"loss": 0.6945,
"rewards/accuracies": 0.38333335518836975,
"rewards/chosen": -7848.3642578125,
"rewards/margins": -984.8982543945312,
"rewards/rejected": -6863.4658203125,
"step": 190
},
{
"epoch": 0.15698587127158556,
"grad_norm": 0.004354813694410577,
"learning_rate": 4.951460519416227e-07,
"logits/chosen": 621.1477661132812,
"logits/rejected": 538.1853637695312,
"logps/chosen": -8098.0732421875,
"logps/rejected": -7754.7119140625,
"loss": 0.6945,
"rewards/accuracies": 0.46666669845581055,
"rewards/chosen": -7915.5283203125,
"rewards/margins": -357.48858642578125,
"rewards/rejected": -7558.0400390625,
"step": 200
},
{
"epoch": 0.16483516483516483,
"grad_norm": 0.009380186214322453,
"learning_rate": 4.937101733256606e-07,
"logits/chosen": 325.4959716796875,
"logits/rejected": 409.53607177734375,
"logps/chosen": -7732.62353515625,
"logps/rejected": -7081.75634765625,
"loss": 0.6945,
"rewards/accuracies": 0.4749999940395355,
"rewards/chosen": -7578.7099609375,
"rewards/margins": -654.1348266601562,
"rewards/rejected": -6924.57568359375,
"step": 210
},
{
"epoch": 0.1726844583987441,
"grad_norm": 0.0026320208691028484,
"learning_rate": 4.920911573406924e-07,
"logits/chosen": 467.2705078125,
"logits/rejected": 420.5731506347656,
"logps/chosen": -9338.3125,
"logps/rejected": -8117.6083984375,
"loss": 0.6945,
"rewards/accuracies": 0.40833336114883423,
"rewards/chosen": -9141.974609375,
"rewards/margins": -1188.3638916015625,
"rewards/rejected": -7953.61181640625,
"step": 220
},
{
"epoch": 0.18053375196232338,
"grad_norm": 0.0015911643887604168,
"learning_rate": 4.902902206053098e-07,
"logits/chosen": 487.3170471191406,
"logits/rejected": 419.96014404296875,
"logps/chosen": -8749.359375,
"logps/rejected": -8256.490234375,
"loss": 0.6945,
"rewards/accuracies": 0.5583333373069763,
"rewards/chosen": -8562.0205078125,
"rewards/margins": -485.2974548339844,
"rewards/rejected": -8076.7236328125,
"step": 230
},
{
"epoch": 0.18838304552590268,
"grad_norm": 0.0014238534648916966,
"learning_rate": 4.883087164434672e-07,
"logits/chosen": 599.5636596679688,
"logits/rejected": 433.95379638671875,
"logps/chosen": -7963.13671875,
"logps/rejected": -6745.1474609375,
"loss": 0.6945,
"rewards/accuracies": 0.4583333432674408,
"rewards/chosen": -7795.5078125,
"rewards/margins": -1205.0496826171875,
"rewards/rejected": -6590.45703125,
"step": 240
},
{
"epoch": 0.19623233908948196,
"grad_norm": 0.0011220905101507928,
"learning_rate": 4.861481338675183e-07,
"logits/chosen": 429.65631103515625,
"logits/rejected": 408.43914794921875,
"logps/chosen": -9243.3759765625,
"logps/rejected": -8726.2060546875,
"loss": 0.6945,
"rewards/accuracies": 0.5083333253860474,
"rewards/chosen": -9076.232421875,
"rewards/margins": -534.5325317382812,
"rewards/rejected": -8541.7001953125,
"step": 250
},
{
"epoch": 0.20408163265306123,
"grad_norm": 0.0003814804492906571,
"learning_rate": 4.838100964592904e-07,
"logits/chosen": 448.19342041015625,
"logits/rejected": 390.24542236328125,
"logps/chosen": -9954.365234375,
"logps/rejected": -8276.337890625,
"loss": 0.6945,
"rewards/accuracies": 0.4166666865348816,
"rewards/chosen": -9754.646484375,
"rewards/margins": -1644.5335693359375,
"rewards/rejected": -8110.11328125,
"step": 260
},
{
"epoch": 0.2119309262166405,
"grad_norm": 0.00021334165898926726,
"learning_rate": 4.812963611500339e-07,
"logits/chosen": 729.0921630859375,
"logits/rejected": 629.4249877929688,
"logps/chosen": -9477.98046875,
"logps/rejected": -9332.41015625,
"loss": 0.6945,
"rewards/accuracies": 0.5083333253860474,
"rewards/chosen": -9281.224609375,
"rewards/margins": -137.44529724121094,
"rewards/rejected": -9143.779296875,
"step": 270
},
{
"epoch": 0.21978021978021978,
"grad_norm": 0.001957333999343426,
"learning_rate": 4.786088169001671e-07,
"logits/chosen": 485.05133056640625,
"logits/rejected": 364.7899475097656,
"logps/chosen": -8338.849609375,
"logps/rejected": -7758.9248046875,
"loss": 0.6945,
"rewards/accuracies": 0.46666669845581055,
"rewards/chosen": -8175.6982421875,
"rewards/margins": -586.074951171875,
"rewards/rejected": -7589.62353515625,
"step": 280
},
{
"epoch": 0.22762951334379905,
"grad_norm": 0.0019067056234537022,
"learning_rate": 4.7574948327980567e-07,
"logits/chosen": 633.2568359375,
"logits/rejected": 568.8843994140625,
"logps/chosen": -11425.986328125,
"logps/rejected": -8694.7080078125,
"loss": 0.6945,
"rewards/accuracies": 0.3333333432674408,
"rewards/chosen": -11188.95703125,
"rewards/margins": -2676.765625,
"rewards/rejected": -8512.1923828125,
"step": 290
},
{
"epoch": 0.23547880690737832,
"grad_norm": 0.0007137610008725143,
"learning_rate": 4.727205089511466e-07,
"logits/chosen": 443.945068359375,
"logits/rejected": 464.84844970703125,
"logps/chosen": -8716.529296875,
"logps/rejected": -8945.1943359375,
"loss": 0.6944,
"rewards/accuracies": 0.533333420753479,
"rewards/chosen": -8549.056640625,
"rewards/margins": 225.9518585205078,
"rewards/rejected": -8775.009765625,
"step": 300
},
{
"epoch": 0.24332810047095763,
"grad_norm": 0.00242148784073435,
"learning_rate": 4.6952417005384247e-07,
"logits/chosen": 475.70001220703125,
"logits/rejected": 436.1145935058594,
"logps/chosen": -9641.552734375,
"logps/rejected": -8915.654296875,
"loss": 0.6945,
"rewards/accuracies": 0.44999998807907104,
"rewards/chosen": -9463.9716796875,
"rewards/margins": -723.7852783203125,
"rewards/rejected": -8740.1875,
"step": 310
},
{
"epoch": 0.25117739403453687,
"grad_norm": 0.001561325165725142,
"learning_rate": 4.661628684945851e-07,
"logits/chosen": 559.2214965820312,
"logits/rejected": 500.6468811035156,
"logps/chosen": -9699.0966796875,
"logps/rejected": -8847.6376953125,
"loss": 0.6945,
"rewards/accuracies": 0.44166669249534607,
"rewards/chosen": -9498.0400390625,
"rewards/margins": -852.41845703125,
"rewards/rejected": -8645.6220703125,
"step": 320
},
{
"epoch": 0.25902668759811615,
"grad_norm": 0.016139043709451042,
"learning_rate": 4.626391301421782e-07,
"logits/chosen": 609.8659057617188,
"logits/rejected": 474.7484436035156,
"logps/chosen": -9566.7265625,
"logps/rejected": -8918.560546875,
"loss": 0.6945,
"rewards/accuracies": 0.4416666626930237,
"rewards/chosen": -9372.2734375,
"rewards/margins": -631.0362548828125,
"rewards/rejected": -8741.2373046875,
"step": 330
},
{
"epoch": 0.2668759811616955,
"grad_norm": 0.0010302004756725745,
"learning_rate": 4.5895560292945996e-07,
"logits/chosen": 548.1551513671875,
"logits/rejected": 537.958984375,
"logps/chosen": -9928.501953125,
"logps/rejected": -10640.470703125,
"loss": 0.6945,
"rewards/accuracies": 0.5750000476837158,
"rewards/chosen": -9734.6435546875,
"rewards/margins": 685.4605712890625,
"rewards/rejected": -10420.1044921875,
"step": 340
},
{
"epoch": 0.27472527472527475,
"grad_norm": 0.006445284513914828,
"learning_rate": 4.5511505486349865e-07,
"logits/chosen": 604.7824096679688,
"logits/rejected": 522.2452392578125,
"logps/chosen": -10464.3125,
"logps/rejected": -9905.189453125,
"loss": 0.6944,
"rewards/accuracies": 0.5000000596046448,
"rewards/chosen": -10269.486328125,
"rewards/margins": -571.1192626953125,
"rewards/rejected": -9698.369140625,
"step": 350
},
{
"epoch": 0.282574568288854,
"grad_norm": 0.0012301299433899874,
"learning_rate": 4.5112037194555876e-07,
"logits/chosen": 376.46502685546875,
"logits/rejected": 502.958740234375,
"logps/chosen": -9755.0048828125,
"logps/rejected": -10040.54296875,
"loss": 0.6945,
"rewards/accuracies": 0.5333333611488342,
"rewards/chosen": -9577.0810546875,
"rewards/margins": 264.18084716796875,
"rewards/rejected": -9841.26171875,
"step": 360
},
{
"epoch": 0.2904238618524333,
"grad_norm": 0.00021146370226041996,
"learning_rate": 4.4697455600239863e-07,
"logits/chosen": 545.6077880859375,
"logits/rejected": 587.63916015625,
"logps/chosen": -9203.572265625,
"logps/rejected": -8875.9814453125,
"loss": 0.6944,
"rewards/accuracies": 0.49166664481163025,
"rewards/chosen": -9019.150390625,
"rewards/margins": -311.24102783203125,
"rewards/rejected": -8707.91015625,
"step": 370
},
{
"epoch": 0.29827315541601257,
"grad_norm": 0.0011735615277733223,
"learning_rate": 4.426807224305315e-07,
"logits/chosen": 516.1826171875,
"logits/rejected": 511.04052734375,
"logps/chosen": -10854.4208984375,
"logps/rejected": -9300.3759765625,
"loss": 0.6944,
"rewards/accuracies": 0.38333335518836975,
"rewards/chosen": -10626.080078125,
"rewards/margins": -1508.713623046875,
"rewards/rejected": -9117.3662109375,
"step": 380
},
{
"epoch": 0.30612244897959184,
"grad_norm": 0.0022171606075063687,
"learning_rate": 4.3824209785514326e-07,
"logits/chosen": 506.26513671875,
"logits/rejected": 493.7334899902344,
"logps/chosen": -11133.5224609375,
"logps/rejected": -8884.3017578125,
"loss": 0.6944,
"rewards/accuracies": 0.4000000059604645,
"rewards/chosen": -10918.0302734375,
"rewards/margins": -2213.581787109375,
"rewards/rejected": -8704.4482421875,
"step": 390
},
{
"epoch": 0.3139717425431711,
"grad_norm": 0.00023374097945210665,
"learning_rate": 4.3366201770542687e-07,
"logits/chosen": 508.15655517578125,
"logits/rejected": 496.728271484375,
"logps/chosen": -9687.2529296875,
"logps/rejected": -9643.234375,
"loss": 0.6945,
"rewards/accuracies": 0.46666663885116577,
"rewards/chosen": -9496.4658203125,
"rewards/margins": -41.97016143798828,
"rewards/rejected": -9454.4951171875,
"step": 400
},
{
"epoch": 0.3218210361067504,
"grad_norm": 0.001203764010050171,
"learning_rate": 4.2894392370815567e-07,
"logits/chosen": 618.2146606445312,
"logits/rejected": 611.4668579101562,
"logps/chosen": -10374.201171875,
"logps/rejected": -9531.54296875,
"loss": 0.6944,
"rewards/accuracies": 0.42500004172325134,
"rewards/chosen": -10161.7138671875,
"rewards/margins": -838.6236572265625,
"rewards/rejected": -9323.0908203125,
"step": 410
},
{
"epoch": 0.32967032967032966,
"grad_norm": 0.0008040915447573037,
"learning_rate": 4.2409136130137845e-07,
"logits/chosen": 474.4013671875,
"logits/rejected": 573.8902587890625,
"logps/chosen": -9800.27734375,
"logps/rejected": -8966.046875,
"loss": 0.6944,
"rewards/accuracies": 0.4416666626930237,
"rewards/chosen": -9596.248046875,
"rewards/margins": -811.4415283203125,
"rewards/rejected": -8784.8056640625,
"step": 420
},
{
"epoch": 0.33751962323390894,
"grad_norm": 0.0032741111209748735,
"learning_rate": 4.1910797697018017e-07,
"logits/chosen": 426.7384338378906,
"logits/rejected": 273.4219665527344,
"logps/chosen": -9522.728515625,
"logps/rejected": -8452.818359375,
"loss": 0.6944,
"rewards/accuracies": 0.38333335518836975,
"rewards/chosen": -9342.9833984375,
"rewards/margins": -1051.3973388671875,
"rewards/rejected": -8291.5859375,
"step": 430
},
{
"epoch": 0.3453689167974882,
"grad_norm": 0.0012517374589376031,
"learning_rate": 4.1399751550651084e-07,
"logits/chosen": 582.216796875,
"logits/rejected": 551.5579833984375,
"logps/chosen": -9716.650390625,
"logps/rejected": -9910.478515625,
"loss": 0.6945,
"rewards/accuracies": 0.45833340287208557,
"rewards/chosen": -9534.2568359375,
"rewards/margins": 184.53692626953125,
"rewards/rejected": -9718.794921875,
"step": 440
},
{
"epoch": 0.3532182103610675,
"grad_norm": 0.001267385614262214,
"learning_rate": 4.087638171951401e-07,
"logits/chosen": 542.8116455078125,
"logits/rejected": 322.68206787109375,
"logps/chosen": -11353.0361328125,
"logps/rejected": -8831.9453125,
"loss": 0.6945,
"rewards/accuracies": 0.3750000298023224,
"rewards/chosen": -11144.2685546875,
"rewards/margins": -2475.314697265625,
"rewards/rejected": -8668.953125,
"step": 450
},
{
"epoch": 0.36106750392464676,
"grad_norm": 0.000739065140338407,
"learning_rate": 4.034108149278543e-07,
"logits/chosen": 580.6771850585938,
"logits/rejected": 625.329833984375,
"logps/chosen": -12085.8525390625,
"logps/rejected": -9598.5498046875,
"loss": 0.6944,
"rewards/accuracies": 0.35833337903022766,
"rewards/chosen": -11835.419921875,
"rewards/margins": -2427.078369140625,
"rewards/rejected": -9408.341796875,
"step": 460
},
{
"epoch": 0.36891679748822603,
"grad_norm": 0.0004433808575629704,
"learning_rate": 3.979425312480629e-07,
"logits/chosen": 469.94207763671875,
"logits/rejected": 505.85650634765625,
"logps/chosen": -10658.087890625,
"logps/rejected": -9693.3017578125,
"loss": 0.6944,
"rewards/accuracies": 0.44166669249534607,
"rewards/chosen": -10445.822265625,
"rewards/margins": -955.0867309570312,
"rewards/rejected": -9490.736328125,
"step": 470
},
{
"epoch": 0.37676609105180536,
"grad_norm": 0.0025329886800627066,
"learning_rate": 3.923630753280357e-07,
"logits/chosen": 434.8932189941406,
"logits/rejected": 503.2549743652344,
"logps/chosen": -11348.947265625,
"logps/rejected": -9960.654296875,
"loss": 0.6945,
"rewards/accuracies": 0.4166666865348816,
"rewards/chosen": -11141.552734375,
"rewards/margins": -1355.29443359375,
"rewards/rejected": -9786.2578125,
"step": 480
},
{
"epoch": 0.38461538461538464,
"grad_norm": 0.0010205898557466677,
"learning_rate": 3.866766398810424e-07,
"logits/chosen": 308.83660888671875,
"logits/rejected": 506.587158203125,
"logps/chosen": -10585.296875,
"logps/rejected": -10477.8291015625,
"loss": 0.6944,
"rewards/accuracies": 0.5333333611488342,
"rewards/chosen": -10408.689453125,
"rewards/margins": -132.885009765625,
"rewards/rejected": -10275.802734375,
"step": 490
},
{
"epoch": 0.3924646781789639,
"grad_norm": 0.002583908861114826,
"learning_rate": 3.8088749801071496e-07,
"logits/chosen": 642.0178833007812,
"logits/rejected": 534.6312866210938,
"logps/chosen": -11370.619140625,
"logps/rejected": -9348.8134765625,
"loss": 0.6945,
"rewards/accuracies": 0.42500001192092896,
"rewards/chosen": -11148.666015625,
"rewards/margins": -2002.3675537109375,
"rewards/rejected": -9146.296875,
"step": 500
},
{
"epoch": 0.4003139717425432,
"grad_norm": 0.0005946839058806162,
"learning_rate": 3.75e-07,
"logits/chosen": 381.1919860839844,
"logits/rejected": 432.2389221191406,
"logps/chosen": -9434.353515625,
"logps/rejected": -8624.7890625,
"loss": 0.6945,
"rewards/accuracies": 0.4416666626930237,
"rewards/chosen": -9244.8701171875,
"rewards/margins": -786.4471435546875,
"rewards/rejected": -8458.4228515625,
"step": 510
},
{
"epoch": 0.40816326530612246,
"grad_norm": 0.0009481081985350053,
"learning_rate": 3.6901857004211443e-07,
"logits/chosen": 533.3642578125,
"logits/rejected": 495.35711669921875,
"logps/chosen": -9859.2314453125,
"logps/rejected": -9604.912109375,
"loss": 0.6945,
"rewards/accuracies": 0.5250000357627869,
"rewards/chosen": -9660.765625,
"rewards/margins": -251.6417999267578,
"rewards/rejected": -9409.125,
"step": 520
},
{
"epoch": 0.41601255886970173,
"grad_norm": 0.0021459710993465323,
"learning_rate": 3.6294770291596076e-07,
"logits/chosen": 534.8011474609375,
"logits/rejected": 582.2864379882812,
"logps/chosen": -11175.54296875,
"logps/rejected": -9463.5615234375,
"loss": 0.6944,
"rewards/accuracies": 0.38333332538604736,
"rewards/chosen": -10962.6201171875,
"rewards/margins": -1697.818115234375,
"rewards/rejected": -9264.802734375,
"step": 530
},
{
"epoch": 0.423861852433281,
"grad_norm": 0.002362326062720146,
"learning_rate": 3.5679196060850034e-07,
"logits/chosen": 563.3406372070312,
"logits/rejected": 534.1571044921875,
"logps/chosen": -10592.9794921875,
"logps/rejected": -9686.8466796875,
"loss": 0.6945,
"rewards/accuracies": 0.44166669249534607,
"rewards/chosen": -10382.162109375,
"rewards/margins": -883.5538330078125,
"rewards/rejected": -9498.607421875,
"step": 540
},
{
"epoch": 0.4317111459968603,
"grad_norm": 0.0006598179965538966,
"learning_rate": 3.505559688866229e-07,
"logits/chosen": 525.2164916992188,
"logits/rejected": 597.19384765625,
"logps/chosen": -10372.4970703125,
"logps/rejected": -10030.5712890625,
"loss": 0.6944,
"rewards/accuracies": 0.42500001192092896,
"rewards/chosen": -10157.755859375,
"rewards/margins": -352.24920654296875,
"rewards/rejected": -9805.5068359375,
"step": 550
},
{
"epoch": 0.43956043956043955,
"grad_norm": 0.0019021639748611691,
"learning_rate": 3.4424441382108826e-07,
"logits/chosen": 477.6744689941406,
"logits/rejected": 351.56585693359375,
"logps/chosen": -10582.34765625,
"logps/rejected": -10212.2646484375,
"loss": 0.6944,
"rewards/accuracies": 0.5166666507720947,
"rewards/chosen": -10379.072265625,
"rewards/margins": -360.01824951171875,
"rewards/rejected": -10019.0546875,
"step": 560
},
{
"epoch": 0.4474097331240188,
"grad_norm": 0.0020283240343375282,
"learning_rate": 3.378620382651523e-07,
"logits/chosen": 652.3689575195312,
"logits/rejected": 540.5253295898438,
"logps/chosen": -11030.5693359375,
"logps/rejected": -10400.912109375,
"loss": 0.6945,
"rewards/accuracies": 0.42500001192092896,
"rewards/chosen": -10787.609375,
"rewards/margins": -610.4671630859375,
"rewards/rejected": -10177.142578125,
"step": 570
},
{
"epoch": 0.4552590266875981,
"grad_norm": 0.0011743520654221532,
"learning_rate": 3.314136382905234e-07,
"logits/chosen": 415.26837158203125,
"logits/rejected": 544.0094604492188,
"logps/chosen": -11010.978515625,
"logps/rejected": -10206.412109375,
"loss": 0.6944,
"rewards/accuracies": 0.42500004172325134,
"rewards/chosen": -10798.5625,
"rewards/margins": -805.4620361328125,
"rewards/rejected": -9993.099609375,
"step": 580
},
{
"epoch": 0.4631083202511774,
"grad_norm": 0.0007190533185071401,
"learning_rate": 3.249040595833274e-07,
"logits/chosen": 532.3883056640625,
"logits/rejected": 512.3326416015625,
"logps/chosen": -11925.1640625,
"logps/rejected": -10241.1181640625,
"loss": 0.6944,
"rewards/accuracies": 0.44999998807907104,
"rewards/chosen": -11693.439453125,
"rewards/margins": -1631.1793212890625,
"rewards/rejected": -10062.2587890625,
"step": 590
},
{
"epoch": 0.47095761381475665,
"grad_norm": 0.00010262811846556397,
"learning_rate": 3.1833819380279023e-07,
"logits/chosen": 452.8245544433594,
"logits/rejected": 421.19256591796875,
"logps/chosen": -11195.3623046875,
"logps/rejected": -10164.623046875,
"loss": 0.6945,
"rewards/accuracies": 0.4833333492279053,
"rewards/chosen": -11017.814453125,
"rewards/margins": -1042.8782958984375,
"rewards/rejected": -9974.935546875,
"step": 600
},
{
"epoch": 0.478806907378336,
"grad_norm": 0.0002983478101059786,
"learning_rate": 3.11720974905373e-07,
"logits/chosen": 496.3247985839844,
"logits/rejected": 534.28564453125,
"logps/chosen": -10881.5830078125,
"logps/rejected": -9769.900390625,
"loss": 0.6944,
"rewards/accuracies": 0.42500001192092896,
"rewards/chosen": -10675.4658203125,
"rewards/margins": -1090.9879150390625,
"rewards/rejected": -9584.4775390625,
"step": 610
},
{
"epoch": 0.48665620094191525,
"grad_norm": 0.00101252108820539,
"learning_rate": 3.0505737543712275e-07,
"logits/chosen": 380.66571044921875,
"logits/rejected": 452.74505615234375,
"logps/chosen": -9639.6396484375,
"logps/rejected": -8211.8359375,
"loss": 0.6945,
"rewards/accuracies": 0.4000000059604645,
"rewards/chosen": -9455.880859375,
"rewards/margins": -1405.1441650390625,
"rewards/rejected": -8050.73681640625,
"step": 620
},
{
"epoch": 0.4945054945054945,
"grad_norm": 0.006400668125732286,
"learning_rate": 2.9835240279702513e-07,
"logits/chosen": 720.4910278320312,
"logits/rejected": 706.498046875,
"logps/chosen": -11893.275390625,
"logps/rejected": -10403.083984375,
"loss": 0.6944,
"rewards/accuracies": 0.38333338499069214,
"rewards/chosen": -11650.458984375,
"rewards/margins": -1441.623779296875,
"rewards/rejected": -10208.8349609375,
"step": 630
},
{
"epoch": 0.5023547880690737,
"grad_norm": 0.006141630657512915,
"learning_rate": 2.9161109547416667e-07,
"logits/chosen": 582.9979248046875,
"logits/rejected": 497.1927795410156,
"logps/chosen": -11451.755859375,
"logps/rejected": -10217.904296875,
"loss": 0.6944,
"rewards/accuracies": 0.4083333909511566,
"rewards/chosen": -11240.994140625,
"rewards/margins": -1237.598876953125,
"rewards/rejected": -10003.3955078125,
"step": 640
},
{
"epoch": 0.5102040816326531,
"grad_norm": 0.00025863304722455687,
"learning_rate": 2.848385192615339e-07,
"logits/chosen": 556.7777099609375,
"logits/rejected": 490.5819396972656,
"logps/chosen": -10025.822265625,
"logps/rejected": -8551.1728515625,
"loss": 0.6948,
"rewards/accuracies": 0.4000000059604645,
"rewards/chosen": -9829.482421875,
"rewards/margins": -1447.03662109375,
"rewards/rejected": -8382.4453125,
"step": 650
},
{
"epoch": 0.5180533751962323,
"grad_norm": 0.0005278461920808727,
"learning_rate": 2.780397634492949e-07,
"logits/chosen": 569.379638671875,
"logits/rejected": 464.354248046875,
"logps/chosen": -11049.357421875,
"logps/rejected": -9395.771484375,
"loss": 0.6944,
"rewards/accuracies": 0.38333335518836975,
"rewards/chosen": -10831.970703125,
"rewards/margins": -1624.146240234375,
"rewards/rejected": -9207.8251953125,
"step": 660
},
{
"epoch": 0.5259026687598116,
"grad_norm": 0.0016308010126626566,
"learning_rate": 2.71219937000424e-07,
"logits/chosen": 596.8631591796875,
"logits/rejected": 429.16033935546875,
"logps/chosen": -11016.8818359375,
"logps/rejected": -9547.1630859375,
"loss": 0.6944,
"rewards/accuracies": 0.38333335518836975,
"rewards/chosen": -10810.1220703125,
"rewards/margins": -1451.813232421875,
"rewards/rejected": -9358.3095703125,
"step": 670
},
{
"epoch": 0.533751962323391,
"grad_norm": 0.0002343973569786191,
"learning_rate": 2.6438416471154273e-07,
"logits/chosen": 512.0562744140625,
"logits/rejected": 428.00152587890625,
"logps/chosen": -10903.138671875,
"logps/rejected": -9482.482421875,
"loss": 0.6944,
"rewards/accuracies": 0.4416666626930237,
"rewards/chosen": -10689.478515625,
"rewards/margins": -1386.2215576171875,
"rewards/rejected": -9303.255859375,
"step": 680
},
{
"epoch": 0.5416012558869702,
"grad_norm": 0.0003518441184716535,
"learning_rate": 2.5753758336186326e-07,
"logits/chosen": 450.0887756347656,
"logits/rejected": 649.3826904296875,
"logps/chosen": -10758.783203125,
"logps/rejected": -10088.453125,
"loss": 0.6944,
"rewards/accuracies": 0.4833333492279053,
"rewards/chosen": -10549.6767578125,
"rewards/margins": -677.419921875,
"rewards/rejected": -9872.255859375,
"step": 690
},
{
"epoch": 0.5494505494505495,
"grad_norm": 0.0034656839712573543,
"learning_rate": 2.5068533785312666e-07,
"logits/chosen": 410.7701721191406,
"logits/rejected": 457.66015625,
"logps/chosen": -10351.4404296875,
"logps/rejected": -10264.9072265625,
"loss": 0.6945,
"rewards/accuracies": 0.4833333492279053,
"rewards/chosen": -10162.1796875,
"rewards/margins": -84.24781799316406,
"rewards/rejected": -10077.93359375,
"step": 700
},
{
"epoch": 0.5572998430141287,
"grad_norm": 0.0003336925053790682,
"learning_rate": 2.4383257734343794e-07,
"logits/chosen": 521.080078125,
"logits/rejected": 515.5257568359375,
"logps/chosen": -10283.4560546875,
"logps/rejected": -10653.884765625,
"loss": 0.6944,
"rewards/accuracies": 0.4999999403953552,
"rewards/chosen": -10089.544921875,
"rewards/margins": 361.7181701660156,
"rewards/rejected": -10451.2626953125,
"step": 710
},
{
"epoch": 0.565149136577708,
"grad_norm": 0.0026677539813701255,
"learning_rate": 2.3698445137790258e-07,
"logits/chosen": 502.305908203125,
"logits/rejected": 500.286376953125,
"logps/chosen": -11090.75390625,
"logps/rejected": -9903.01953125,
"loss": 0.6945,
"rewards/accuracies": 0.35833337903022766,
"rewards/chosen": -10871.75,
"rewards/margins": -1168.49609375,
"rewards/rejected": -9703.2529296875,
"step": 720
},
{
"epoch": 0.5729984301412873,
"grad_norm": 0.0004985921946052606,
"learning_rate": 2.3014610601897157e-07,
"logits/chosen": 483.7727966308594,
"logits/rejected": 401.9848327636719,
"logps/chosen": -12027.9130859375,
"logps/rejected": -9711.83203125,
"loss": 0.6944,
"rewards/accuracies": 0.3916666507720947,
"rewards/chosen": -11802.767578125,
"rewards/margins": -2266.010009765625,
"rewards/rejected": -9536.7578125,
"step": 730
},
{
"epoch": 0.5808477237048666,
"grad_norm": 0.0006625971741926073,
"learning_rate": 2.2332267997940513e-07,
"logits/chosen": 509.74871826171875,
"logits/rejected": 394.54791259765625,
"logps/chosen": -10014.029296875,
"logps/rejected": -8989.1533203125,
"loss": 0.6945,
"rewards/accuracies": 0.491666704416275,
"rewards/chosen": -9821.6044921875,
"rewards/margins": -995.0965576171875,
"rewards/rejected": -8826.505859375,
"step": 740
},
{
"epoch": 0.5886970172684458,
"grad_norm": 0.0005006783918768109,
"learning_rate": 2.1651930076075723e-07,
"logits/chosen": 326.0789489746094,
"logits/rejected": 323.6217956542969,
"logps/chosen": -11039.0478515625,
"logps/rejected": -10318.177734375,
"loss": 0.6944,
"rewards/accuracies": 0.4166666567325592,
"rewards/chosen": -10858.6220703125,
"rewards/margins": -707.5388793945312,
"rewards/rejected": -10151.0830078125,
"step": 750
},
{
"epoch": 0.5965463108320251,
"grad_norm": 0.00029775808523041525,
"learning_rate": 2.0974108080028692e-07,
"logits/chosen": 484.8963317871094,
"logits/rejected": 345.1195068359375,
"logps/chosen": -11451.5283203125,
"logps/rejected": -9524.697265625,
"loss": 0.6945,
"rewards/accuracies": 0.3500000536441803,
"rewards/chosen": -11248.279296875,
"rewards/margins": -1898.110595703125,
"rewards/rejected": -9350.166015625,
"step": 760
},
{
"epoch": 0.6043956043956044,
"grad_norm": 0.000363602944445742,
"learning_rate": 2.0299311362918773e-07,
"logits/chosen": 456.2711486816406,
"logits/rejected": 465.47869873046875,
"logps/chosen": -12091.4501953125,
"logps/rejected": -10765.958984375,
"loss": 0.6944,
"rewards/accuracies": 0.4416666626930237,
"rewards/chosen": -11861.7158203125,
"rewards/margins": -1319.616455078125,
"rewards/rejected": -10542.099609375,
"step": 770
},
{
"epoch": 0.6122448979591837,
"grad_norm": 0.0003616112714064354,
"learning_rate": 1.962804700450265e-07,
"logits/chosen": 644.1038818359375,
"logits/rejected": 607.6719970703125,
"logps/chosen": -11279.4140625,
"logps/rejected": -11286.236328125,
"loss": 0.6944,
"rewards/accuracies": 0.491666704416275,
"rewards/chosen": -11063.3095703125,
"rewards/margins": -14.460107803344727,
"rewards/rejected": -11048.849609375,
"step": 780
},
{
"epoch": 0.6200941915227629,
"grad_norm": 0.00044028490625225996,
"learning_rate": 1.8960819430126334e-07,
"logits/chosen": 377.1701965332031,
"logits/rejected": 342.3818359375,
"logps/chosen": -11099.810546875,
"logps/rejected": -10203.8212890625,
"loss": 0.6944,
"rewards/accuracies": 0.46666669845581055,
"rewards/chosen": -10901.6796875,
"rewards/margins": -886.4556884765625,
"rewards/rejected": -10015.224609375,
"step": 790
},
{
"epoch": 0.6279434850863422,
"grad_norm": 0.005357622654444911,
"learning_rate": 1.8298130031671972e-07,
"logits/chosen": 550.1544799804688,
"logits/rejected": 495.3414611816406,
"logps/chosen": -10878.4814453125,
"logps/rejected": -9992.93359375,
"loss": 0.6944,
"rewards/accuracies": 0.4500000476837158,
"rewards/chosen": -10661.7587890625,
"rewards/margins": -881.5245971679688,
"rewards/rejected": -9780.2333984375,
"step": 800
},
{
"epoch": 0.6357927786499215,
"grad_norm": 0.000805097508736943,
"learning_rate": 1.7640476790784075e-07,
"logits/chosen": 371.45465087890625,
"logits/rejected": 396.21429443359375,
"logps/chosen": -10371.6611328125,
"logps/rejected": -9846.5078125,
"loss": 0.6944,
"rewards/accuracies": 0.44166669249534607,
"rewards/chosen": -10169.205078125,
"rewards/margins": -543.99462890625,
"rewards/rejected": -9625.2109375,
"step": 810
},
{
"epoch": 0.6436420722135008,
"grad_norm": 0.004693810777668788,
"learning_rate": 1.6988353904658492e-07,
"logits/chosen": 315.043212890625,
"logits/rejected": 400.67559814453125,
"logps/chosen": -11178.794921875,
"logps/rejected": -8904.158203125,
"loss": 0.6944,
"rewards/accuracies": 0.3750000298023224,
"rewards/chosen": -10957.580078125,
"rewards/margins": -2222.112060546875,
"rewards/rejected": -8735.470703125,
"step": 820
},
{
"epoch": 0.6514913657770801,
"grad_norm": 0.0010885556542559263,
"learning_rate": 1.634225141467513e-07,
"logits/chosen": 429.1761169433594,
"logits/rejected": 422.4315490722656,
"logps/chosen": -11035.6904296875,
"logps/rejected": -10288.4482421875,
"loss": 0.6944,
"rewards/accuracies": 0.44999998807907104,
"rewards/chosen": -10831.50390625,
"rewards/margins": -735.5105590820312,
"rewards/rejected": -10095.994140625,
"step": 830
},
{
"epoch": 0.6593406593406593,
"grad_norm": 0.0012791510584133318,
"learning_rate": 1.570265483815364e-07,
"logits/chosen": 470.92156982421875,
"logits/rejected": 462.15985107421875,
"logps/chosen": -11918.072265625,
"logps/rejected": -10374.5302734375,
"loss": 0.6944,
"rewards/accuracies": 0.3583333492279053,
"rewards/chosen": -11690.7412109375,
"rewards/margins": -1529.003173828125,
"rewards/rejected": -10161.73828125,
"step": 840
},
{
"epoch": 0.6671899529042387,
"grad_norm": 0.0005945542833698765,
"learning_rate": 1.5070044803508691e-07,
"logits/chosen": 571.09765625,
"logits/rejected": 574.0386962890625,
"logps/chosen": -10925.759765625,
"logps/rejected": -10105.3671875,
"loss": 0.6944,
"rewards/accuracies": 0.4000000059604645,
"rewards/chosen": -10710.2822265625,
"rewards/margins": -805.3693237304688,
"rewards/rejected": -9904.912109375,
"step": 850
},
{
"epoch": 0.6750392464678179,
"grad_norm": 0.016505752953715463,
"learning_rate": 1.444489668907914e-07,
"logits/chosen": 713.1834106445312,
"logits/rejected": 583.1593017578125,
"logps/chosen": -11648.771484375,
"logps/rejected": -10224.966796875,
"loss": 0.6945,
"rewards/accuracies": 0.46666669845581055,
"rewards/chosen": -11401.0,
"rewards/margins": -1378.7158203125,
"rewards/rejected": -10022.2841796875,
"step": 860
},
{
"epoch": 0.6828885400313972,
"grad_norm": 0.0008005280036489723,
"learning_rate": 1.3827680265902232e-07,
"logits/chosen": 574.0198974609375,
"logits/rejected": 531.7598876953125,
"logps/chosen": -11879.4306640625,
"logps/rejected": -10229.916015625,
"loss": 0.6944,
"rewards/accuracies": 0.4750000536441803,
"rewards/chosen": -11649.333984375,
"rewards/margins": -1620.1064453125,
"rewards/rejected": -10029.2265625,
"step": 870
},
{
"epoch": 0.6907378335949764,
"grad_norm": 0.0003804149709393164,
"learning_rate": 1.3218859344701632e-07,
"logits/chosen": 561.2614135742188,
"logits/rejected": 561.1036376953125,
"logps/chosen": -10448.341796875,
"logps/rejected": -10479.4130859375,
"loss": 0.6944,
"rewards/accuracies": 0.5666667222976685,
"rewards/chosen": -10237.830078125,
"rewards/margins": 9.993700981140137,
"rewards/rejected": -10247.8251953125,
"step": 880
},
{
"epoch": 0.6985871271585558,
"grad_norm": 0.0001534716417740244,
"learning_rate": 1.2618891427354172e-07,
"logits/chosen": 686.5509033203125,
"logits/rejected": 529.2532958984375,
"logps/chosen": -12045.03515625,
"logps/rejected": -10437.076171875,
"loss": 0.6944,
"rewards/accuracies": 0.4166666865348816,
"rewards/chosen": -11787.4404296875,
"rewards/margins": -1562.1439208984375,
"rewards/rejected": -10225.294921875,
"step": 890
},
{
"epoch": 0.706436420722135,
"grad_norm": 0.0010127199015733139,
"learning_rate": 1.202822736309758e-07,
"logits/chosen": 499.6285095214844,
"logits/rejected": 601.0958862304688,
"logps/chosen": -10409.6845703125,
"logps/rejected": -10087.666015625,
"loss": 0.6944,
"rewards/accuracies": 0.46666669845581055,
"rewards/chosen": -10205.333984375,
"rewards/margins": -328.0994567871094,
"rewards/rejected": -9877.236328125,
"step": 900
},
{
"epoch": 0.7142857142857143,
"grad_norm": 0.0002216596019617402,
"learning_rate": 1.1447311009737299e-07,
"logits/chosen": 517.1658935546875,
"logits/rejected": 596.5044555664062,
"logps/chosen": -10285.78125,
"logps/rejected": -10152.029296875,
"loss": 0.6944,
"rewards/accuracies": 0.533333420753479,
"rewards/chosen": -10077.556640625,
"rewards/margins": -133.45657348632812,
"rewards/rejected": -9944.099609375,
"step": 910
},
{
"epoch": 0.7221350078492935,
"grad_norm": 0.00023808742474268642,
"learning_rate": 1.0876578900107053e-07,
"logits/chosen": 626.1790161132812,
"logits/rejected": 471.45172119140625,
"logps/chosen": -11441.62109375,
"logps/rejected": -9792.45703125,
"loss": 0.6944,
"rewards/accuracies": 0.3749999701976776,
"rewards/chosen": -11209.2001953125,
"rewards/margins": -1614.8851318359375,
"rewards/rejected": -9594.3154296875,
"step": 920
},
{
"epoch": 0.7299843014128728,
"grad_norm": 0.00047469806423056587,
"learning_rate": 1.0316459914033793e-07,
"logits/chosen": 530.0946655273438,
"logits/rejected": 513.5704345703125,
"logps/chosen": -11292.869140625,
"logps/rejected": -8902.419921875,
"loss": 0.6945,
"rewards/accuracies": 0.3750000298023224,
"rewards/chosen": -11056.87890625,
"rewards/margins": -2340.38232421875,
"rewards/rejected": -8716.4970703125,
"step": 930
},
{
"epoch": 0.7378335949764521,
"grad_norm": 0.0006332868699013036,
"learning_rate": 9.767374956053584e-08,
"logits/chosen": 504.7762756347656,
"logits/rejected": 429.9141540527344,
"logps/chosen": -10969.6884765625,
"logps/rejected": -10032.423828125,
"loss": 0.6945,
"rewards/accuracies": 0.4750000536441803,
"rewards/chosen": -10751.392578125,
"rewards/margins": -923.9509887695312,
"rewards/rejected": -9827.4423828125,
"step": 940
},
{
"epoch": 0.7456828885400314,
"grad_norm": 0.0005963470709798749,
"learning_rate": 9.229736639120561e-08,
"logits/chosen": 526.2850952148438,
"logits/rejected": 545.1388549804688,
"logps/chosen": -11055.03125,
"logps/rejected": -10669.08203125,
"loss": 0.6944,
"rewards/accuracies": 0.5166666507720947,
"rewards/chosen": -10837.23046875,
"rewards/margins": -382.30279541015625,
"rewards/rejected": -10454.9267578125,
"step": 950
},
{
"epoch": 0.7535321821036107,
"grad_norm": 0.0006939197374970369,
"learning_rate": 8.70394897454659e-08,
"logits/chosen": 528.6098022460938,
"logits/rejected": 517.1777954101562,
"logps/chosen": -10959.4619140625,
"logps/rejected": -10072.0302734375,
"loss": 0.6944,
"rewards/accuracies": 0.3916666805744171,
"rewards/chosen": -10741.033203125,
"rewards/margins": -871.4791870117188,
"rewards/rejected": -9869.552734375,
"step": 960
},
{
"epoch": 0.7613814756671899,
"grad_norm": 0.0007296030856672226,
"learning_rate": 8.19040706840472e-08,
"logits/chosen": 584.8048706054688,
"logits/rejected": 536.4219360351562,
"logps/chosen": -11178.66015625,
"logps/rejected": -9755.2314453125,
"loss": 0.6944,
"rewards/accuracies": 0.4000000059604645,
"rewards/chosen": -10938.4404296875,
"rewards/margins": -1389.900634765625,
"rewards/rejected": -9548.5400390625,
"step": 970
},
{
"epoch": 0.7692307692307693,
"grad_norm": 0.00023802143900857867,
"learning_rate": 7.689496824624525e-08,
"logits/chosen": 404.8477783203125,
"logits/rejected": 342.9325866699219,
"logps/chosen": -10674.650390625,
"logps/rejected": -9277.875,
"loss": 0.6944,
"rewards/accuracies": 0.4749999940395355,
"rewards/chosen": -10445.833984375,
"rewards/margins": -1358.45263671875,
"rewards/rejected": -9087.380859375,
"step": 980
},
{
"epoch": 0.7770800627943485,
"grad_norm": 0.0013929764018688865,
"learning_rate": 7.201594655002458e-08,
"logits/chosen": 494.68701171875,
"logits/rejected": 510.9247131347656,
"logps/chosen": -11169.6572265625,
"logps/rejected": -9497.7294921875,
"loss": 0.6945,
"rewards/accuracies": 0.4833333492279053,
"rewards/chosen": -10943.76171875,
"rewards/margins": -1640.102783203125,
"rewards/rejected": -9303.6591796875,
"step": 990
},
{
"epoch": 0.7849293563579278,
"grad_norm": 0.00030992582390807383,
"learning_rate": 6.727067196345099e-08,
"logits/chosen": 484.021240234375,
"logits/rejected": 429.843505859375,
"logps/chosen": -10762.0595703125,
"logps/rejected": -9416.263671875,
"loss": 0.6944,
"rewards/accuracies": 0.42500001192092896,
"rewards/chosen": -10548.3046875,
"rewards/margins": -1312.38671875,
"rewards/rejected": -9235.9189453125,
"step": 1000
},
{
"epoch": 0.792778649921507,
"grad_norm": 0.0007414681340839939,
"learning_rate": 6.26627103495786e-08,
"logits/chosen": 505.01483154296875,
"logits/rejected": 517.8411254882812,
"logps/chosen": -11052.3828125,
"logps/rejected": -9386.2392578125,
"loss": 0.6944,
"rewards/accuracies": 0.44166669249534607,
"rewards/chosen": -10837.673828125,
"rewards/margins": -1642.868408203125,
"rewards/rejected": -9194.8046875,
"step": 1010
},
{
"epoch": 0.8006279434850864,
"grad_norm": 0.0003687874816117168,
"learning_rate": 5.8195524386862374e-08,
"logits/chosen": 562.7574462890625,
"logits/rejected": 544.7307739257812,
"logps/chosen": -11268.1689453125,
"logps/rejected": -10319.515625,
"loss": 0.6944,
"rewards/accuracies": 0.4416666626930237,
"rewards/chosen": -11019.990234375,
"rewards/margins": -926.55419921875,
"rewards/rejected": -10093.435546875,
"step": 1020
},
{
"epoch": 0.8084772370486656,
"grad_norm": 0.005760798367996112,
"learning_rate": 5.38724709671092e-08,
"logits/chosen": 474.04486083984375,
"logits/rejected": 475.46295166015625,
"logps/chosen": -11839.8818359375,
"logps/rejected": -11526.0009765625,
"loss": 0.6944,
"rewards/accuracies": 0.44166669249534607,
"rewards/chosen": -11608.4130859375,
"rewards/margins": -315.5327453613281,
"rewards/rejected": -11292.8798828125,
"step": 1030
},
{
"epoch": 0.8163265306122449,
"grad_norm": 0.00037781549278870523,
"learning_rate": 4.969679867292276e-08,
"logits/chosen": 521.4114990234375,
"logits/rejected": 587.4351196289062,
"logps/chosen": -10636.8388671875,
"logps/rejected": -9961.1240234375,
"loss": 0.6945,
"rewards/accuracies": 0.44999998807907104,
"rewards/chosen": -10416.7373046875,
"rewards/margins": -664.7984619140625,
"rewards/rejected": -9751.939453125,
"step": 1040
},
{
"epoch": 0.8241758241758241,
"grad_norm": 0.006793223367431191,
"learning_rate": 4.5671645336537416e-08,
"logits/chosen": 514.43994140625,
"logits/rejected": 551.023681640625,
"logps/chosen": -10938.4794921875,
"logps/rejected": -10230.494140625,
"loss": 0.6944,
"rewards/accuracies": 0.4749999940395355,
"rewards/chosen": -10700.9990234375,
"rewards/margins": -686.2994384765625,
"rewards/rejected": -10014.7001953125,
"step": 1050
},
{
"epoch": 0.8320251177394035,
"grad_norm": 0.00045900227188746113,
"learning_rate": 4.180003568187776e-08,
"logits/chosen": 524.1320190429688,
"logits/rejected": 552.4287719726562,
"logps/chosen": -13095.03125,
"logps/rejected": -10694.0908203125,
"loss": 0.6944,
"rewards/accuracies": 0.4166666567325592,
"rewards/chosen": -12834.0439453125,
"rewards/margins": -2359.99267578125,
"rewards/rejected": -10474.0517578125,
"step": 1060
},
{
"epoch": 0.8398744113029827,
"grad_norm": 0.00034521122588735233,
"learning_rate": 3.8084879051612144e-08,
"logits/chosen": 519.01025390625,
"logits/rejected": 546.77587890625,
"logps/chosen": -11113.267578125,
"logps/rejected": -10381.822265625,
"loss": 0.6944,
"rewards/accuracies": 0.4749999940395355,
"rewards/chosen": -10888.025390625,
"rewards/margins": -699.3690185546875,
"rewards/rejected": -10188.65625,
"step": 1070
},
{
"epoch": 0.847723704866562,
"grad_norm": 0.0007197800185943902,
"learning_rate": 3.452896722091128e-08,
"logits/chosen": 619.9454956054688,
"logits/rejected": 521.5427856445312,
"logps/chosen": -12005.8955078125,
"logps/rejected": -9818.310546875,
"loss": 0.6944,
"rewards/accuracies": 0.34166669845581055,
"rewards/chosen": -11739.326171875,
"rewards/margins": -2121.99365234375,
"rewards/rejected": -9617.3330078125,
"step": 1080
},
{
"epoch": 0.8555729984301413,
"grad_norm": 0.0002482736757925687,
"learning_rate": 3.11349722995527e-08,
"logits/chosen": 710.8172607421875,
"logits/rejected": 605.4769287109375,
"logps/chosen": -11714.08984375,
"logps/rejected": -9476.154296875,
"loss": 0.6944,
"rewards/accuracies": 0.3750000298023224,
"rewards/chosen": -11486.615234375,
"rewards/margins": -2223.34521484375,
"rewards/rejected": -9263.2705078125,
"step": 1090
},
{
"epoch": 0.8634222919937206,
"grad_norm": 0.0010064561005363537,
"learning_rate": 2.7905444723949762e-08,
"logits/chosen": 569.6670532226562,
"logits/rejected": 575.785888671875,
"logps/chosen": -11909.3740234375,
"logps/rejected": -9983.9169921875,
"loss": 0.6944,
"rewards/accuracies": 0.3750000298023224,
"rewards/chosen": -11670.837890625,
"rewards/margins": -1876.4185791015625,
"rewards/rejected": -9794.419921875,
"step": 1100
},
{
"epoch": 0.8712715855572999,
"grad_norm": 0.0003760939578581169,
"learning_rate": 2.484281134061142e-08,
"logits/chosen": 736.3983764648438,
"logits/rejected": 638.23974609375,
"logps/chosen": -12203.880859375,
"logps/rejected": -10321.19921875,
"loss": 0.6944,
"rewards/accuracies": 0.38333335518836975,
"rewards/chosen": -11939.2685546875,
"rewards/margins": -1840.337158203125,
"rewards/rejected": -10098.9306640625,
"step": 1110
},
{
"epoch": 0.8791208791208791,
"grad_norm": 0.00021809248372642968,
"learning_rate": 2.194937358247506e-08,
"logits/chosen": 548.2640380859375,
"logits/rejected": 535.1536865234375,
"logps/chosen": -12217.0576171875,
"logps/rejected": -10378.3642578125,
"loss": 0.6945,
"rewards/accuracies": 0.44999998807907104,
"rewards/chosen": -11971.9169921875,
"rewards/margins": -1810.3236083984375,
"rewards/rejected": -10161.5927734375,
"step": 1120
},
{
"epoch": 0.8869701726844584,
"grad_norm": 0.0002678202477740615,
"learning_rate": 1.9227305739481612e-08,
"logits/chosen": 546.5886840820312,
"logits/rejected": 471.8921813964844,
"logps/chosen": -11283.3466796875,
"logps/rejected": -9136.5048828125,
"loss": 0.6944,
"rewards/accuracies": 0.35833337903022766,
"rewards/chosen": -11048.564453125,
"rewards/margins": -2092.28466796875,
"rewards/rejected": -8956.279296875,
"step": 1130
},
{
"epoch": 0.8948194662480377,
"grad_norm": 0.0005387042417401348,
"learning_rate": 1.6678653324693787e-08,
"logits/chosen": 464.132568359375,
"logits/rejected": 543.2286376953125,
"logps/chosen": -12346.04296875,
"logps/rejected": -10363.9443359375,
"loss": 0.6944,
"rewards/accuracies": 0.4000000059604645,
"rewards/chosen": -12088.423828125,
"rewards/margins": -1945.2904052734375,
"rewards/rejected": -10143.1318359375,
"step": 1140
},
{
"epoch": 0.902668759811617,
"grad_norm": 0.00028081548767204913,
"learning_rate": 1.4305331537183384e-08,
"logits/chosen": 474.62298583984375,
"logits/rejected": 503.7117614746094,
"logps/chosen": -10857.63671875,
"logps/rejected": -10257.396484375,
"loss": 0.6944,
"rewards/accuracies": 0.44999998807907104,
"rewards/chosen": -10630.65234375,
"rewards/margins": -592.7437744140625,
"rewards/rejected": -10037.9091796875,
"step": 1150
},
{
"epoch": 0.9105180533751962,
"grad_norm": 0.0004423495668441553,
"learning_rate": 1.2109123822844653e-08,
"logits/chosen": 524.8532104492188,
"logits/rejected": 533.907958984375,
"logps/chosen": -11179.892578125,
"logps/rejected": -9216.802734375,
"loss": 0.6944,
"rewards/accuracies": 0.366666704416275,
"rewards/chosen": -10951.333984375,
"rewards/margins": -1930.469970703125,
"rewards/rejected": -9020.865234375,
"step": 1160
},
{
"epoch": 0.9183673469387755,
"grad_norm": 0.00019514550735900794,
"learning_rate": 1.0091680534213387e-08,
"logits/chosen": 615.3060302734375,
"logits/rejected": 558.8253784179688,
"logps/chosen": -12210.0439453125,
"logps/rejected": -12054.3017578125,
"loss": 0.6945,
"rewards/accuracies": 0.5083333253860474,
"rewards/chosen": -11967.0947265625,
"rewards/margins": -159.54014587402344,
"rewards/rejected": -11807.5546875,
"step": 1170
},
{
"epoch": 0.9262166405023547,
"grad_norm": 0.0004545153062548297,
"learning_rate": 8.254517690300944e-09,
"logits/chosen": 616.0506591796875,
"logits/rejected": 626.9619750976562,
"logps/chosen": -10846.5625,
"logps/rejected": -10076.8857421875,
"loss": 0.6944,
"rewards/accuracies": 0.42500001192092896,
"rewards/chosen": -10606.6162109375,
"rewards/margins": -746.5523681640625,
"rewards/rejected": -9860.064453125,
"step": 1180
},
{
"epoch": 0.9340659340659341,
"grad_norm": 0.003226387409113769,
"learning_rate": 6.599015837372907e-09,
"logits/chosen": 554.5615844726562,
"logits/rejected": 558.3009033203125,
"logps/chosen": -11654.806640625,
"logps/rejected": -10600.859375,
"loss": 0.6944,
"rewards/accuracies": 0.4333333373069763,
"rewards/chosen": -11405.919921875,
"rewards/margins": -1022.2127685546875,
"rewards/rejected": -10383.70703125,
"step": 1190
},
{
"epoch": 0.9419152276295133,
"grad_norm": 0.00016417329889322683,
"learning_rate": 5.126419011529992e-09,
"logits/chosen": 487.95513916015625,
"logits/rejected": 477.30352783203125,
"logps/chosen": -11990.833984375,
"logps/rejected": -10696.8125,
"loss": 0.6944,
"rewards/accuracies": 0.46666663885116577,
"rewards/chosen": -11735.171875,
"rewards/margins": -1257.026123046875,
"rewards/rejected": -10478.146484375,
"step": 1200
},
{
"epoch": 0.9497645211930926,
"grad_norm": 0.0005186075718613249,
"learning_rate": 3.837833803870177e-09,
"logits/chosen": 388.5582580566406,
"logits/rejected": 399.3382568359375,
"logps/chosen": -11644.5869140625,
"logps/rejected": -10613.4970703125,
"loss": 0.6944,
"rewards/accuracies": 0.5083333253860474,
"rewards/chosen": -11404.4013671875,
"rewards/margins": -1009.1047973632812,
"rewards/rejected": -10395.2958984375,
"step": 1210
},
{
"epoch": 0.957613814756672,
"grad_norm": 0.0006889943530268683,
"learning_rate": 2.734228528934679e-09,
"logits/chosen": 607.74072265625,
"logits/rejected": 482.1900939941406,
"logps/chosen": -13991.2314453125,
"logps/rejected": -11046.5498046875,
"loss": 0.6945,
"rewards/accuracies": 0.34166666865348816,
"rewards/chosen": -13694.462890625,
"rewards/margins": -2893.10888671875,
"rewards/rejected": -10801.353515625,
"step": 1220
},
{
"epoch": 0.9654631083202512,
"grad_norm": 0.0003228878623085867,
"learning_rate": 1.8164324970625645e-09,
"logits/chosen": 612.4022827148438,
"logits/rejected": 605.3236083984375,
"logps/chosen": -12401.9326171875,
"logps/rejected": -10077.7578125,
"loss": 0.6944,
"rewards/accuracies": 0.3499999940395355,
"rewards/chosen": -12141.349609375,
"rewards/margins": -2276.58935546875,
"rewards/rejected": -9864.7607421875,
"step": 1230
},
{
"epoch": 0.9733124018838305,
"grad_norm": 0.0004635193620610196,
"learning_rate": 1.0851353912008642e-09,
"logits/chosen": 548.4609375,
"logits/rejected": 406.30108642578125,
"logps/chosen": -10926.345703125,
"logps/rejected": -10544.0869140625,
"loss": 0.6944,
"rewards/accuracies": 0.4333333373069763,
"rewards/chosen": -10694.732421875,
"rewards/margins": -385.8048095703125,
"rewards/rejected": -10308.9296875,
"step": 1240
},
{
"epoch": 0.9811616954474097,
"grad_norm": 0.0005082408297896108,
"learning_rate": 5.408867486384471e-10,
"logits/chosen": 561.4332275390625,
"logits/rejected": 464.0692443847656,
"logps/chosen": -11016.3642578125,
"logps/rejected": -9608.521484375,
"loss": 0.6944,
"rewards/accuracies": 0.40833336114883423,
"rewards/chosen": -10786.0556640625,
"rewards/margins": -1366.96337890625,
"rewards/rejected": -9419.091796875,
"step": 1250
},
{
"epoch": 0.989010989010989,
"grad_norm": 0.0001467066278307736,
"learning_rate": 1.840955480532924e-10,
"logits/chosen": 427.8658142089844,
"logits/rejected": 491.8282775878906,
"logps/chosen": -10776.49609375,
"logps/rejected": -10325.6982421875,
"loss": 0.6945,
"rewards/accuracies": 0.491666704416275,
"rewards/chosen": -10544.3359375,
"rewards/margins": -434.962158203125,
"rewards/rejected": -10109.3740234375,
"step": 1260
},
{
"epoch": 0.9968602825745683,
"grad_norm": 0.0001355855815871487,
"learning_rate": 1.502990218302247e-11,
"logits/chosen": 581.5196533203125,
"logits/rejected": 462.2622985839844,
"logps/chosen": -10980.98828125,
"logps/rejected": -9310.0419921875,
"loss": 0.6945,
"rewards/accuracies": 0.42500001192092896,
"rewards/chosen": -10756.513671875,
"rewards/margins": -1632.776123046875,
"rewards/rejected": -9123.736328125,
"step": 1270
},
{
"epoch": 1.0,
"step": 1274,
"total_flos": 0.0,
"train_loss": 0.7359606589028375,
"train_runtime": 14766.5572,
"train_samples_per_second": 4.14,
"train_steps_per_second": 0.086
}
],
"logging_steps": 10,
"max_steps": 1274,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 6,
"trial_name": null,
"trial_params": null
}
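The JSON above is the standard `trainer_state.json` dump written by the Hugging Face `Trainer` during a preference-optimization run; each `log_history` entry records the training loss and reward statistics at a logging step, and the final entry holds the run summary. A minimal sketch for inspecting it (the local path `trainer_state.json` and the use of matplotlib are assumptions for illustration, not part of the checkpoint itself):

```python
import json

import matplotlib.pyplot as plt

# Load the trainer state dumped by the Hugging Face Trainer.
# The path is an assumption; point it at wherever trainer_state.json lives.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step logging entries; the final summary entry
# uses "train_loss" rather than "loss", so filtering on "loss" drops it.
logs = [e for e in state["log_history"] if "loss" in e]

steps = [e["step"] for e in logs]
loss = [e["loss"] for e in logs]
margins = [e["rewards/margins"] for e in logs]

# Plot training loss and reward margins against the optimizer step.
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(8, 6))
ax1.plot(steps, loss)
ax1.set_ylabel("loss")
ax2.plot(steps, margins)
ax2.set_ylabel("rewards/margins")
ax2.set_xlabel("step")
fig.tight_layout()
plt.show()
```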