{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 1668,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.001199040767386091,
"grad_norm": 25.642412537629156,
"learning_rate": 1.99203187250996e-09,
"logits/chosen": -2.515625,
"logits/rejected": -2.4375,
"logps/chosen": -260.0,
"logps/rejected": -251.0,
"loss": 0.6914,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.011990407673860911,
"grad_norm": 24.290562967872415,
"learning_rate": 1.99203187250996e-08,
"logits/chosen": -2.390625,
"logits/rejected": -2.421875,
"logps/chosen": -217.0,
"logps/rejected": -210.0,
"loss": 0.6933,
"rewards/accuracies": 0.2361111044883728,
"rewards/chosen": 0.0242919921875,
"rewards/margins": 0.009033203125,
"rewards/rejected": 0.01531982421875,
"step": 10
},
{
"epoch": 0.023980815347721823,
"grad_norm": 24.03209890621009,
"learning_rate": 3.98406374501992e-08,
"logits/chosen": -2.359375,
"logits/rejected": -2.390625,
"logps/chosen": -216.0,
"logps/rejected": -207.0,
"loss": 0.6914,
"rewards/accuracies": 0.3125,
"rewards/chosen": 0.0224609375,
"rewards/margins": 0.015625,
"rewards/rejected": 0.006866455078125,
"step": 20
},
{
"epoch": 0.03597122302158273,
"grad_norm": 24.100678886411075,
"learning_rate": 5.97609561752988e-08,
"logits/chosen": -2.421875,
"logits/rejected": -2.40625,
"logps/chosen": -188.0,
"logps/rejected": -200.0,
"loss": 0.697,
"rewards/accuracies": 0.23749999701976776,
"rewards/chosen": 0.0081787109375,
"rewards/margins": -0.0087890625,
"rewards/rejected": 0.016845703125,
"step": 30
},
{
"epoch": 0.047961630695443645,
"grad_norm": 22.19662351600711,
"learning_rate": 7.96812749003984e-08,
"logits/chosen": -2.375,
"logits/rejected": -2.375,
"logps/chosen": -205.0,
"logps/rejected": -192.0,
"loss": 0.6921,
"rewards/accuracies": 0.2750000059604645,
"rewards/chosen": 0.0081787109375,
"rewards/margins": 0.0018768310546875,
"rewards/rejected": 0.006256103515625,
"step": 40
},
{
"epoch": 0.05995203836930456,
"grad_norm": 22.91225992894245,
"learning_rate": 9.9601593625498e-08,
"logits/chosen": -2.4375,
"logits/rejected": -2.453125,
"logps/chosen": -217.0,
"logps/rejected": -213.0,
"loss": 0.6927,
"rewards/accuracies": 0.23749999701976776,
"rewards/chosen": 0.0238037109375,
"rewards/margins": -0.0031280517578125,
"rewards/rejected": 0.02685546875,
"step": 50
},
{
"epoch": 0.07194244604316546,
"grad_norm": 22.915913925151774,
"learning_rate": 1.195219123505976e-07,
"logits/chosen": -2.375,
"logits/rejected": -2.40625,
"logps/chosen": -202.0,
"logps/rejected": -211.0,
"loss": 0.6965,
"rewards/accuracies": 0.16249999403953552,
"rewards/chosen": 0.0037384033203125,
"rewards/margins": -0.0037384033203125,
"rewards/rejected": 0.00750732421875,
"step": 60
},
{
"epoch": 0.08393285371702638,
"grad_norm": 24.635308516543407,
"learning_rate": 1.394422310756972e-07,
"logits/chosen": -2.390625,
"logits/rejected": -2.421875,
"logps/chosen": -214.0,
"logps/rejected": -204.0,
"loss": 0.693,
"rewards/accuracies": 0.20000000298023224,
"rewards/chosen": 0.01129150390625,
"rewards/margins": -0.0118408203125,
"rewards/rejected": 0.0230712890625,
"step": 70
},
{
"epoch": 0.09592326139088729,
"grad_norm": 23.422749201250696,
"learning_rate": 1.593625498007968e-07,
"logits/chosen": -2.421875,
"logits/rejected": -2.453125,
"logps/chosen": -211.0,
"logps/rejected": -224.0,
"loss": 0.6932,
"rewards/accuracies": 0.17499999701976776,
"rewards/chosen": 0.01251220703125,
"rewards/margins": -0.006256103515625,
"rewards/rejected": 0.018798828125,
"step": 80
},
{
"epoch": 0.1079136690647482,
"grad_norm": 25.550050248970773,
"learning_rate": 1.7928286852589642e-07,
"logits/chosen": -2.359375,
"logits/rejected": -2.390625,
"logps/chosen": -230.0,
"logps/rejected": -223.0,
"loss": 0.6919,
"rewards/accuracies": 0.21250000596046448,
"rewards/chosen": 0.0281982421875,
"rewards/margins": 0.001251220703125,
"rewards/rejected": 0.0269775390625,
"step": 90
},
{
"epoch": 0.11990407673860912,
"grad_norm": 25.761780120884907,
"learning_rate": 1.99203187250996e-07,
"logits/chosen": -2.375,
"logits/rejected": -2.453125,
"logps/chosen": -194.0,
"logps/rejected": -208.0,
"loss": 0.6914,
"rewards/accuracies": 0.26249998807907104,
"rewards/chosen": 0.010009765625,
"rewards/margins": -0.00250244140625,
"rewards/rejected": 0.0125732421875,
"step": 100
},
{
"epoch": 0.13189448441247004,
"grad_norm": 22.84334462455036,
"learning_rate": 2.191235059760956e-07,
"logits/chosen": -2.453125,
"logits/rejected": -2.46875,
"logps/chosen": -202.0,
"logps/rejected": -218.0,
"loss": 0.687,
"rewards/accuracies": 0.2874999940395355,
"rewards/chosen": 0.021240234375,
"rewards/margins": 0.0050048828125,
"rewards/rejected": 0.016357421875,
"step": 110
},
{
"epoch": 0.14388489208633093,
"grad_norm": 24.300426924539867,
"learning_rate": 2.390438247011952e-07,
"logits/chosen": -2.390625,
"logits/rejected": -2.375,
"logps/chosen": -207.0,
"logps/rejected": -189.0,
"loss": 0.6896,
"rewards/accuracies": 0.26249998807907104,
"rewards/chosen": 0.006256103515625,
"rewards/margins": 0.0106201171875,
"rewards/rejected": -0.004425048828125,
"step": 120
},
{
"epoch": 0.15587529976019185,
"grad_norm": 25.941365174437085,
"learning_rate": 2.589641434262948e-07,
"logits/chosen": -2.40625,
"logits/rejected": -2.375,
"logps/chosen": -212.0,
"logps/rejected": -204.0,
"loss": 0.6807,
"rewards/accuracies": 0.38749998807907104,
"rewards/chosen": 0.036376953125,
"rewards/margins": 0.0194091796875,
"rewards/rejected": 0.016845703125,
"step": 130
},
{
"epoch": 0.16786570743405277,
"grad_norm": 22.46827042788133,
"learning_rate": 2.788844621513944e-07,
"logits/chosen": -2.40625,
"logits/rejected": -2.46875,
"logps/chosen": -216.0,
"logps/rejected": -208.0,
"loss": 0.6813,
"rewards/accuracies": 0.375,
"rewards/chosen": 0.0281982421875,
"rewards/margins": 0.016845703125,
"rewards/rejected": 0.01129150390625,
"step": 140
},
{
"epoch": 0.17985611510791366,
"grad_norm": 22.828346012492343,
"learning_rate": 2.98804780876494e-07,
"logits/chosen": -2.40625,
"logits/rejected": -2.40625,
"logps/chosen": -214.0,
"logps/rejected": -217.0,
"loss": 0.6788,
"rewards/accuracies": 0.4000000059604645,
"rewards/chosen": 0.03759765625,
"rewards/margins": 0.0238037109375,
"rewards/rejected": 0.01373291015625,
"step": 150
},
{
"epoch": 0.19184652278177458,
"grad_norm": 25.091544876831573,
"learning_rate": 3.187250996015936e-07,
"logits/chosen": -2.359375,
"logits/rejected": -2.40625,
"logps/chosen": -206.0,
"logps/rejected": -215.0,
"loss": 0.6829,
"rewards/accuracies": 0.44999998807907104,
"rewards/chosen": 0.056396484375,
"rewards/margins": 0.0400390625,
"rewards/rejected": 0.0162353515625,
"step": 160
},
{
"epoch": 0.2038369304556355,
"grad_norm": 22.476144325987256,
"learning_rate": 3.386454183266932e-07,
"logits/chosen": -2.421875,
"logits/rejected": -2.421875,
"logps/chosen": -222.0,
"logps/rejected": -220.0,
"loss": 0.6707,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.0576171875,
"rewards/margins": 0.07275390625,
"rewards/rejected": -0.0150146484375,
"step": 170
},
{
"epoch": 0.2158273381294964,
"grad_norm": 22.738418341833743,
"learning_rate": 3.5856573705179284e-07,
"logits/chosen": -2.375,
"logits/rejected": -2.40625,
"logps/chosen": -211.0,
"logps/rejected": -214.0,
"loss": 0.6701,
"rewards/accuracies": 0.4749999940395355,
"rewards/chosen": 0.06640625,
"rewards/margins": 0.05322265625,
"rewards/rejected": 0.01318359375,
"step": 180
},
{
"epoch": 0.2278177458033573,
"grad_norm": 22.162554634157406,
"learning_rate": 3.784860557768924e-07,
"logits/chosen": -2.359375,
"logits/rejected": -2.4375,
"logps/chosen": -200.0,
"logps/rejected": -188.0,
"loss": 0.6715,
"rewards/accuracies": 0.5874999761581421,
"rewards/chosen": 0.08203125,
"rewards/margins": 0.0654296875,
"rewards/rejected": 0.0162353515625,
"step": 190
},
{
"epoch": 0.23980815347721823,
"grad_norm": 24.649798845932942,
"learning_rate": 3.98406374501992e-07,
"logits/chosen": -2.375,
"logits/rejected": -2.375,
"logps/chosen": -212.0,
"logps/rejected": -207.0,
"loss": 0.6712,
"rewards/accuracies": 0.48750001192092896,
"rewards/chosen": 0.033203125,
"rewards/margins": 0.050048828125,
"rewards/rejected": -0.016845703125,
"step": 200
},
{
"epoch": 0.2517985611510791,
"grad_norm": 19.334172790085294,
"learning_rate": 4.1832669322709163e-07,
"logits/chosen": -2.375,
"logits/rejected": -2.453125,
"logps/chosen": -205.0,
"logps/rejected": -213.0,
"loss": 0.6612,
"rewards/accuracies": 0.512499988079071,
"rewards/chosen": 0.048828125,
"rewards/margins": 0.07275390625,
"rewards/rejected": -0.0238037109375,
"step": 210
},
{
"epoch": 0.2637889688249401,
"grad_norm": 20.773278941923554,
"learning_rate": 4.382470119521912e-07,
"logits/chosen": -2.359375,
"logits/rejected": -2.390625,
"logps/chosen": -217.0,
"logps/rejected": -210.0,
"loss": 0.6522,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": -0.0162353515625,
"rewards/margins": 0.08251953125,
"rewards/rejected": -0.0986328125,
"step": 220
},
{
"epoch": 0.27577937649880097,
"grad_norm": 23.13141722026618,
"learning_rate": 4.581673306772908e-07,
"logits/chosen": -2.40625,
"logits/rejected": -2.390625,
"logps/chosen": -207.0,
"logps/rejected": -210.0,
"loss": 0.641,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.050048828125,
"rewards/margins": 0.1259765625,
"rewards/rejected": -0.1767578125,
"step": 230
},
{
"epoch": 0.28776978417266186,
"grad_norm": 25.417525122060646,
"learning_rate": 4.780876494023904e-07,
"logits/chosen": -2.328125,
"logits/rejected": -2.328125,
"logps/chosen": -220.0,
"logps/rejected": -205.0,
"loss": 0.6402,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": -0.06201171875,
"rewards/margins": 0.154296875,
"rewards/rejected": -0.2158203125,
"step": 240
},
{
"epoch": 0.2997601918465228,
"grad_norm": 22.97557618510118,
"learning_rate": 4.9800796812749e-07,
"logits/chosen": -2.34375,
"logits/rejected": -2.34375,
"logps/chosen": -196.0,
"logps/rejected": -205.0,
"loss": 0.6303,
"rewards/accuracies": 0.612500011920929,
"rewards/chosen": -0.251953125,
"rewards/margins": 0.1259765625,
"rewards/rejected": -0.37890625,
"step": 250
},
{
"epoch": 0.3117505995203837,
"grad_norm": 22.380902540882072,
"learning_rate": 4.980008884940026e-07,
"logits/chosen": -2.359375,
"logits/rejected": -2.359375,
"logps/chosen": -206.0,
"logps/rejected": -206.0,
"loss": 0.6271,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -0.203125,
"rewards/margins": 0.224609375,
"rewards/rejected": -0.427734375,
"step": 260
},
{
"epoch": 0.3237410071942446,
"grad_norm": 20.410713037919127,
"learning_rate": 4.957796534873389e-07,
"logits/chosen": -2.375,
"logits/rejected": -2.421875,
"logps/chosen": -211.0,
"logps/rejected": -206.0,
"loss": 0.6168,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -0.462890625,
"rewards/margins": 0.234375,
"rewards/rejected": -0.69921875,
"step": 270
},
{
"epoch": 0.33573141486810554,
"grad_norm": 19.858477875493026,
"learning_rate": 4.935584184806753e-07,
"logits/chosen": -2.390625,
"logits/rejected": -2.375,
"logps/chosen": -213.0,
"logps/rejected": -216.0,
"loss": 0.5864,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.8046875,
"rewards/margins": 0.310546875,
"rewards/rejected": -1.109375,
"step": 280
},
{
"epoch": 0.34772182254196643,
"grad_norm": 25.75186279410947,
"learning_rate": 4.913371834740116e-07,
"logits/chosen": -2.390625,
"logits/rejected": -2.390625,
"logps/chosen": -213.0,
"logps/rejected": -219.0,
"loss": 0.6046,
"rewards/accuracies": 0.5874999761581421,
"rewards/chosen": -1.015625,
"rewards/margins": 0.3046875,
"rewards/rejected": -1.328125,
"step": 290
},
{
"epoch": 0.3597122302158273,
"grad_norm": 20.582004189162635,
"learning_rate": 4.891159484673478e-07,
"logits/chosen": -2.34375,
"logits/rejected": -2.359375,
"logps/chosen": -219.0,
"logps/rejected": -223.0,
"loss": 0.6033,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.66796875,
"rewards/margins": 0.26171875,
"rewards/rejected": -0.9296875,
"step": 300
},
{
"epoch": 0.37170263788968827,
"grad_norm": 17.87508243629168,
"learning_rate": 4.868947134606841e-07,
"logits/chosen": -2.359375,
"logits/rejected": -2.421875,
"logps/chosen": -219.0,
"logps/rejected": -233.0,
"loss": 0.5365,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.87890625,
"rewards/margins": 0.6171875,
"rewards/rejected": -1.4921875,
"step": 310
},
{
"epoch": 0.38369304556354916,
"grad_norm": 22.10087172308854,
"learning_rate": 4.846734784540204e-07,
"logits/chosen": -2.375,
"logits/rejected": -2.4375,
"logps/chosen": -231.0,
"logps/rejected": -238.0,
"loss": 0.5703,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.8125,
"rewards/margins": 0.43359375,
"rewards/rejected": -1.25,
"step": 320
},
{
"epoch": 0.39568345323741005,
"grad_norm": 27.704111990126563,
"learning_rate": 4.824522434473567e-07,
"logits/chosen": -2.421875,
"logits/rejected": -2.421875,
"logps/chosen": -235.0,
"logps/rejected": -235.0,
"loss": 0.543,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -1.15625,
"rewards/margins": 0.490234375,
"rewards/rejected": -1.6484375,
"step": 330
},
{
"epoch": 0.407673860911271,
"grad_norm": 19.609485654347775,
"learning_rate": 4.80231008440693e-07,
"logits/chosen": -2.40625,
"logits/rejected": -2.421875,
"logps/chosen": -223.0,
"logps/rejected": -228.0,
"loss": 0.5288,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.8046875,
"rewards/margins": 0.61328125,
"rewards/rejected": -1.4140625,
"step": 340
},
{
"epoch": 0.4196642685851319,
"grad_norm": 25.341062357182967,
"learning_rate": 4.780097734340293e-07,
"logits/chosen": -2.375,
"logits/rejected": -2.3125,
"logps/chosen": -219.0,
"logps/rejected": -229.0,
"loss": 0.5653,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -0.8515625,
"rewards/margins": 0.546875,
"rewards/rejected": -1.3984375,
"step": 350
},
{
"epoch": 0.4316546762589928,
"grad_norm": 24.609756364283665,
"learning_rate": 4.757885384273656e-07,
"logits/chosen": -2.421875,
"logits/rejected": -2.40625,
"logps/chosen": -213.0,
"logps/rejected": -226.0,
"loss": 0.5599,
"rewards/accuracies": 0.6499999761581421,
"rewards/chosen": -0.70703125,
"rewards/margins": 0.453125,
"rewards/rejected": -1.15625,
"step": 360
},
{
"epoch": 0.44364508393285373,
"grad_norm": 20.448105072112956,
"learning_rate": 4.7356730342070187e-07,
"logits/chosen": -2.34375,
"logits/rejected": -2.40625,
"logps/chosen": -226.0,
"logps/rejected": -231.0,
"loss": 0.5373,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -1.171875,
"rewards/margins": 0.58984375,
"rewards/rejected": -1.7578125,
"step": 370
},
{
"epoch": 0.4556354916067146,
"grad_norm": 20.00137526211279,
"learning_rate": 4.713460684140382e-07,
"logits/chosen": -2.390625,
"logits/rejected": -2.359375,
"logps/chosen": -225.0,
"logps/rejected": -228.0,
"loss": 0.5482,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -1.4609375,
"rewards/margins": 0.474609375,
"rewards/rejected": -1.9375,
"step": 380
},
{
"epoch": 0.4676258992805755,
"grad_norm": 20.08046772545977,
"learning_rate": 4.691248334073745e-07,
"logits/chosen": -2.3125,
"logits/rejected": -2.375,
"logps/chosen": -222.0,
"logps/rejected": -232.0,
"loss": 0.5292,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -1.4453125,
"rewards/margins": 0.4609375,
"rewards/rejected": -1.90625,
"step": 390
},
{
"epoch": 0.47961630695443647,
"grad_norm": 24.656493214054557,
"learning_rate": 4.6690359840071075e-07,
"logits/chosen": -2.34375,
"logits/rejected": -2.296875,
"logps/chosen": -203.0,
"logps/rejected": -220.0,
"loss": 0.4912,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": -0.72265625,
"rewards/margins": 0.69140625,
"rewards/rejected": -1.4140625,
"step": 400
},
{
"epoch": 0.49160671462829736,
"grad_norm": 20.967973696578976,
"learning_rate": 4.646823633940471e-07,
"logits/chosen": -2.359375,
"logits/rejected": -2.453125,
"logps/chosen": -220.0,
"logps/rejected": -229.0,
"loss": 0.5305,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -0.74609375,
"rewards/margins": 0.58203125,
"rewards/rejected": -1.328125,
"step": 410
},
{
"epoch": 0.5035971223021583,
"grad_norm": 25.657618682397636,
"learning_rate": 4.6246112838738336e-07,
"logits/chosen": -2.328125,
"logits/rejected": -2.328125,
"logps/chosen": -217.0,
"logps/rejected": -217.0,
"loss": 0.5173,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -1.7109375,
"rewards/margins": 0.625,
"rewards/rejected": -2.328125,
"step": 420
},
{
"epoch": 0.5155875299760192,
"grad_norm": 22.796145877614705,
"learning_rate": 4.602398933807197e-07,
"logits/chosen": -2.3125,
"logits/rejected": -2.421875,
"logps/chosen": -221.0,
"logps/rejected": -254.0,
"loss": 0.5249,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -1.1015625,
"rewards/margins": 0.419921875,
"rewards/rejected": -1.5234375,
"step": 430
},
{
"epoch": 0.5275779376498801,
"grad_norm": 21.830523584389546,
"learning_rate": 4.5801865837405597e-07,
"logits/chosen": -2.3125,
"logits/rejected": -2.296875,
"logps/chosen": -231.0,
"logps/rejected": -234.0,
"loss": 0.4868,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.97265625,
"rewards/margins": 0.75,
"rewards/rejected": -1.7265625,
"step": 440
},
{
"epoch": 0.539568345323741,
"grad_norm": 22.863177511462844,
"learning_rate": 4.5579742336739224e-07,
"logits/chosen": -2.375,
"logits/rejected": -2.3125,
"logps/chosen": -217.0,
"logps/rejected": -206.0,
"loss": 0.5349,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -0.51953125,
"rewards/margins": 0.79296875,
"rewards/rejected": -1.3125,
"step": 450
},
{
"epoch": 0.5515587529976019,
"grad_norm": 22.106098891434247,
"learning_rate": 4.535761883607285e-07,
"logits/chosen": -2.34375,
"logits/rejected": -2.34375,
"logps/chosen": -242.0,
"logps/rejected": -229.0,
"loss": 0.4969,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -1.0078125,
"rewards/margins": 0.5234375,
"rewards/rejected": -1.53125,
"step": 460
},
{
"epoch": 0.5635491606714629,
"grad_norm": 21.83764765940895,
"learning_rate": 4.513549533540648e-07,
"logits/chosen": -2.34375,
"logits/rejected": -2.40625,
"logps/chosen": -221.0,
"logps/rejected": -229.0,
"loss": 0.4531,
"rewards/accuracies": 0.8125,
"rewards/chosen": -0.9609375,
"rewards/margins": 0.75390625,
"rewards/rejected": -1.7109375,
"step": 470
},
{
"epoch": 0.5755395683453237,
"grad_norm": 27.75012920313157,
"learning_rate": 4.491337183474012e-07,
"logits/chosen": -2.34375,
"logits/rejected": -2.328125,
"logps/chosen": -227.0,
"logps/rejected": -221.0,
"loss": 0.5074,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -1.234375,
"rewards/margins": 0.625,
"rewards/rejected": -1.8515625,
"step": 480
},
{
"epoch": 0.5875299760191847,
"grad_norm": 17.36030484793641,
"learning_rate": 4.4691248334073746e-07,
"logits/chosen": -2.359375,
"logits/rejected": -2.390625,
"logps/chosen": -225.0,
"logps/rejected": -239.0,
"loss": 0.48,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -1.375,
"rewards/margins": 0.73828125,
"rewards/rejected": -2.109375,
"step": 490
},
{
"epoch": 0.5995203836930456,
"grad_norm": 22.808492833261823,
"learning_rate": 4.4469124833407373e-07,
"logits/chosen": -2.328125,
"logits/rejected": -2.328125,
"logps/chosen": -214.0,
"logps/rejected": -214.0,
"loss": 0.5236,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -1.3984375,
"rewards/margins": 0.443359375,
"rewards/rejected": -1.84375,
"step": 500
},
{
"epoch": 0.6115107913669064,
"grad_norm": 23.84926176790092,
"learning_rate": 4.4247001332741e-07,
"logits/chosen": -2.3125,
"logits/rejected": -2.375,
"logps/chosen": -215.0,
"logps/rejected": -235.0,
"loss": 0.4798,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -0.96484375,
"rewards/margins": 0.76953125,
"rewards/rejected": -1.734375,
"step": 510
},
{
"epoch": 0.6235011990407674,
"grad_norm": 21.734949611768823,
"learning_rate": 4.402487783207463e-07,
"logits/chosen": -2.359375,
"logits/rejected": -2.375,
"logps/chosen": -215.0,
"logps/rejected": -238.0,
"loss": 0.437,
"rewards/accuracies": 0.8125,
"rewards/chosen": -1.0546875,
"rewards/margins": 0.9921875,
"rewards/rejected": -2.046875,
"step": 520
},
{
"epoch": 0.6354916067146283,
"grad_norm": 17.985205119836653,
"learning_rate": 4.380275433140826e-07,
"logits/chosen": -2.390625,
"logits/rejected": -2.453125,
"logps/chosen": -216.0,
"logps/rejected": -229.0,
"loss": 0.4438,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -1.0703125,
"rewards/margins": 1.0625,
"rewards/rejected": -2.125,
"step": 530
},
{
"epoch": 0.6474820143884892,
"grad_norm": 19.26308613682953,
"learning_rate": 4.358063083074189e-07,
"logits/chosen": -2.34375,
"logits/rejected": -2.359375,
"logps/chosen": -230.0,
"logps/rejected": -244.0,
"loss": 0.4505,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": -1.5,
"rewards/margins": 1.234375,
"rewards/rejected": -2.734375,
"step": 540
},
{
"epoch": 0.6594724220623501,
"grad_norm": 24.596146010502707,
"learning_rate": 4.335850733007552e-07,
"logits/chosen": -2.328125,
"logits/rejected": -2.359375,
"logps/chosen": -221.0,
"logps/rejected": -233.0,
"loss": 0.4943,
"rewards/accuracies": 0.75,
"rewards/chosen": -1.6796875,
"rewards/margins": 0.7421875,
"rewards/rejected": -2.421875,
"step": 550
},
{
"epoch": 0.6714628297362111,
"grad_norm": 15.013286046936686,
"learning_rate": 4.313638382940915e-07,
"logits/chosen": -2.3125,
"logits/rejected": -2.359375,
"logps/chosen": -217.0,
"logps/rejected": -236.0,
"loss": 0.4661,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": -0.9921875,
"rewards/margins": 0.984375,
"rewards/rejected": -1.9765625,
"step": 560
},
{
"epoch": 0.6834532374100719,
"grad_norm": 19.571343970012546,
"learning_rate": 4.291426032874278e-07,
"logits/chosen": -2.28125,
"logits/rejected": -2.34375,
"logps/chosen": -218.0,
"logps/rejected": -258.0,
"loss": 0.43,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -1.2265625,
"rewards/margins": 1.03125,
"rewards/rejected": -2.25,
"step": 570
},
{
"epoch": 0.6954436450839329,
"grad_norm": 20.377379214041834,
"learning_rate": 4.269213682807641e-07,
"logits/chosen": -2.34375,
"logits/rejected": -2.328125,
"logps/chosen": -227.0,
"logps/rejected": -221.0,
"loss": 0.4403,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -0.83984375,
"rewards/margins": 0.9375,
"rewards/rejected": -1.7734375,
"step": 580
},
{
"epoch": 0.7074340527577938,
"grad_norm": 25.88308379473588,
"learning_rate": 4.247001332741004e-07,
"logits/chosen": -2.328125,
"logits/rejected": -2.40625,
"logps/chosen": -219.0,
"logps/rejected": -239.0,
"loss": 0.4357,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": -0.78515625,
"rewards/margins": 1.0703125,
"rewards/rejected": -1.8515625,
"step": 590
},
{
"epoch": 0.7194244604316546,
"grad_norm": 21.223402057931814,
"learning_rate": 4.2247889826743666e-07,
"logits/chosen": -2.3125,
"logits/rejected": -2.34375,
"logps/chosen": -227.0,
"logps/rejected": -232.0,
"loss": 0.4865,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": -0.8515625,
"rewards/margins": 0.8125,
"rewards/rejected": -1.6640625,
"step": 600
},
{
"epoch": 0.7314148681055156,
"grad_norm": 26.413684307486818,
"learning_rate": 4.2025766326077294e-07,
"logits/chosen": -2.3125,
"logits/rejected": -2.375,
"logps/chosen": -222.0,
"logps/rejected": -231.0,
"loss": 0.4473,
"rewards/accuracies": 0.75,
"rewards/chosen": -1.453125,
"rewards/margins": 0.984375,
"rewards/rejected": -2.4375,
"step": 610
},
{
"epoch": 0.7434052757793765,
"grad_norm": 19.274016165683612,
"learning_rate": 4.1803642825410926e-07,
"logits/chosen": -2.25,
"logits/rejected": -2.296875,
"logps/chosen": -218.0,
"logps/rejected": -241.0,
"loss": 0.4574,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -1.2734375,
"rewards/margins": 0.94140625,
"rewards/rejected": -2.21875,
"step": 620
},
{
"epoch": 0.7553956834532374,
"grad_norm": 21.962129244766075,
"learning_rate": 4.158151932474456e-07,
"logits/chosen": -2.328125,
"logits/rejected": -2.421875,
"logps/chosen": -236.0,
"logps/rejected": -234.0,
"loss": 0.4202,
"rewards/accuracies": 0.8125,
"rewards/chosen": -1.546875,
"rewards/margins": 1.09375,
"rewards/rejected": -2.640625,
"step": 630
},
{
"epoch": 0.7673860911270983,
"grad_norm": 19.250420656857816,
"learning_rate": 4.1359395824078187e-07,
"logits/chosen": -2.328125,
"logits/rejected": -2.328125,
"logps/chosen": -231.0,
"logps/rejected": -256.0,
"loss": 0.423,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -2.109375,
"rewards/margins": 1.1640625,
"rewards/rejected": -3.28125,
"step": 640
},
{
"epoch": 0.7793764988009593,
"grad_norm": 17.26038839326138,
"learning_rate": 4.1137272323411815e-07,
"logits/chosen": -2.296875,
"logits/rejected": -2.3125,
"logps/chosen": -223.0,
"logps/rejected": -245.0,
"loss": 0.4144,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": -2.140625,
"rewards/margins": 1.140625,
"rewards/rejected": -3.28125,
"step": 650
},
{
"epoch": 0.7913669064748201,
"grad_norm": 22.725812386482403,
"learning_rate": 4.091514882274544e-07,
"logits/chosen": -2.328125,
"logits/rejected": -2.34375,
"logps/chosen": -224.0,
"logps/rejected": -223.0,
"loss": 0.4619,
"rewards/accuracies": 0.8125,
"rewards/chosen": -1.6328125,
"rewards/margins": 0.75,
"rewards/rejected": -2.390625,
"step": 660
},
{
"epoch": 0.8033573141486811,
"grad_norm": 26.346880341026353,
"learning_rate": 4.069302532207907e-07,
"logits/chosen": -2.34375,
"logits/rejected": -2.34375,
"logps/chosen": -234.0,
"logps/rejected": -233.0,
"loss": 0.4198,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -1.09375,
"rewards/margins": 1.2109375,
"rewards/rejected": -2.296875,
"step": 670
},
{
"epoch": 0.815347721822542,
"grad_norm": 16.341325440490397,
"learning_rate": 4.047090182141271e-07,
"logits/chosen": -2.375,
"logits/rejected": -2.328125,
"logps/chosen": -219.0,
"logps/rejected": -237.0,
"loss": 0.4215,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -1.828125,
"rewards/margins": 0.8359375,
"rewards/rejected": -2.65625,
"step": 680
},
{
"epoch": 0.8273381294964028,
"grad_norm": 19.597918931735652,
"learning_rate": 4.0248778320746336e-07,
"logits/chosen": -2.34375,
"logits/rejected": -2.421875,
"logps/chosen": -225.0,
"logps/rejected": -246.0,
"loss": 0.4179,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -1.6640625,
"rewards/margins": 1.046875,
"rewards/rejected": -2.71875,
"step": 690
},
{
"epoch": 0.8393285371702638,
"grad_norm": 16.19010210226658,
"learning_rate": 4.0026654820079964e-07,
"logits/chosen": -2.296875,
"logits/rejected": -2.296875,
"logps/chosen": -242.0,
"logps/rejected": -253.0,
"loss": 0.4064,
"rewards/accuracies": 0.862500011920929,
"rewards/chosen": -1.6875,
"rewards/margins": 1.1875,
"rewards/rejected": -2.875,
"step": 700
},
{
"epoch": 0.8513189448441247,
"grad_norm": 19.378154801231126,
"learning_rate": 3.980453131941359e-07,
"logits/chosen": -2.3125,
"logits/rejected": -2.3125,
"logps/chosen": -230.0,
"logps/rejected": -243.0,
"loss": 0.382,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -1.703125,
"rewards/margins": 1.234375,
"rewards/rejected": -2.9375,
"step": 710
},
{
"epoch": 0.8633093525179856,
"grad_norm": 17.875697953531226,
"learning_rate": 3.958240781874722e-07,
"logits/chosen": -2.265625,
"logits/rejected": -2.3125,
"logps/chosen": -230.0,
"logps/rejected": -231.0,
"loss": 0.3839,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -1.6171875,
"rewards/margins": 1.28125,
"rewards/rejected": -2.90625,
"step": 720
},
{
"epoch": 0.8752997601918465,
"grad_norm": 21.20250450740525,
"learning_rate": 3.936028431808085e-07,
"logits/chosen": -2.390625,
"logits/rejected": -2.34375,
"logps/chosen": -231.0,
"logps/rejected": -231.0,
"loss": 0.3574,
"rewards/accuracies": 0.887499988079071,
"rewards/chosen": -1.609375,
"rewards/margins": 1.421875,
"rewards/rejected": -3.03125,
"step": 730
},
{
"epoch": 0.8872901678657075,
"grad_norm": 17.46000299554747,
"learning_rate": 3.913816081741448e-07,
"logits/chosen": -2.328125,
"logits/rejected": -2.359375,
"logps/chosen": -216.0,
"logps/rejected": -237.0,
"loss": 0.3473,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -1.703125,
"rewards/margins": 1.625,
"rewards/rejected": -3.328125,
"step": 740
},
{
"epoch": 0.8992805755395683,
"grad_norm": 17.53738029583566,
"learning_rate": 3.8916037316748113e-07,
"logits/chosen": -2.296875,
"logits/rejected": -2.28125,
"logps/chosen": -222.0,
"logps/rejected": -232.0,
"loss": 0.371,
"rewards/accuracies": 0.8374999761581421,
"rewards/chosen": -2.09375,
"rewards/margins": 1.2421875,
"rewards/rejected": -3.328125,
"step": 750
},
{
"epoch": 0.9112709832134293,
"grad_norm": 13.845287693810857,
"learning_rate": 3.869391381608174e-07,
"logits/chosen": -2.3125,
"logits/rejected": -2.359375,
"logps/chosen": -227.0,
"logps/rejected": -239.0,
"loss": 0.3346,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -1.359375,
"rewards/margins": 1.28125,
"rewards/rejected": -2.640625,
"step": 760
},
{
"epoch": 0.9232613908872902,
"grad_norm": 18.53800658699909,
"learning_rate": 3.847179031541537e-07,
"logits/chosen": -2.265625,
"logits/rejected": -2.328125,
"logps/chosen": -225.0,
"logps/rejected": -227.0,
"loss": 0.3642,
"rewards/accuracies": 0.8500000238418579,
"rewards/chosen": -1.671875,
"rewards/margins": 1.3125,
"rewards/rejected": -2.984375,
"step": 770
},
{
"epoch": 0.935251798561151,
"grad_norm": 21.872485752073064,
"learning_rate": 3.8249666814749e-07,
"logits/chosen": -2.3125,
"logits/rejected": -2.3125,
"logps/chosen": -240.0,
"logps/rejected": -237.0,
"loss": 0.3391,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -2.015625,
"rewards/margins": 1.5078125,
"rewards/rejected": -3.53125,
"step": 780
},
{
"epoch": 0.947242206235012,
"grad_norm": 21.111212041972365,
"learning_rate": 3.802754331408263e-07,
"logits/chosen": -2.28125,
"logits/rejected": -2.296875,
"logps/chosen": -232.0,
"logps/rejected": -256.0,
"loss": 0.3679,
"rewards/accuracies": 0.7875000238418579,
"rewards/chosen": -2.296875,
"rewards/margins": 1.3203125,
"rewards/rejected": -3.625,
"step": 790
},
{
"epoch": 0.9592326139088729,
"grad_norm": 21.61033072214478,
"learning_rate": 3.7805419813416256e-07,
"logits/chosen": -2.28125,
"logits/rejected": -2.265625,
"logps/chosen": -226.0,
"logps/rejected": -241.0,
"loss": 0.3427,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -2.5625,
"rewards/margins": 1.4140625,
"rewards/rejected": -3.96875,
"step": 800
},
{
"epoch": 0.9712230215827338,
"grad_norm": 15.32449671679388,
"learning_rate": 3.7583296312749884e-07,
"logits/chosen": -2.234375,
"logits/rejected": -2.28125,
"logps/chosen": -234.0,
"logps/rejected": -252.0,
"loss": 0.3787,
"rewards/accuracies": 0.824999988079071,
"rewards/chosen": -2.71875,
"rewards/margins": 1.53125,
"rewards/rejected": -4.25,
"step": 810
},
{
"epoch": 0.9832134292565947,
"grad_norm": 19.67405128244237,
"learning_rate": 3.7361172812083517e-07,
"logits/chosen": -2.34375,
"logits/rejected": -2.28125,
"logps/chosen": -230.0,
"logps/rejected": -232.0,
"loss": 0.3254,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -1.9453125,
"rewards/margins": 1.46875,
"rewards/rejected": -3.421875,
"step": 820
},
{
"epoch": 0.9952038369304557,
"grad_norm": 21.005105323056593,
"learning_rate": 3.713904931141715e-07,
"logits/chosen": -2.296875,
"logits/rejected": -2.28125,
"logps/chosen": -237.0,
"logps/rejected": -239.0,
"loss": 0.2971,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -1.5234375,
"rewards/margins": 1.6328125,
"rewards/rejected": -3.15625,
"step": 830
},
{
"epoch": 1.0,
"eval_logits/chosen": -2.296875,
"eval_logits/rejected": -2.328125,
"eval_logps/chosen": -228.0,
"eval_logps/rejected": -236.0,
"eval_loss": 0.6048940420150757,
"eval_rewards/accuracies": 0.6617646813392639,
"eval_rewards/chosen": -1.9921875,
"eval_rewards/margins": 0.93359375,
"eval_rewards/rejected": -2.921875,
"eval_runtime": 19.9965,
"eval_samples_per_second": 20.053,
"eval_steps_per_second": 0.85,
"step": 834
},
{
"epoch": 1.0071942446043165,
"grad_norm": 18.731496491285114,
"learning_rate": 3.691692581075078e-07,
"logits/chosen": -2.359375,
"logits/rejected": -2.375,
"logps/chosen": -237.0,
"logps/rejected": -272.0,
"loss": 0.2766,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": -1.9453125,
"rewards/margins": 1.984375,
"rewards/rejected": -3.921875,
"step": 840
},
{
"epoch": 1.0191846522781776,
"grad_norm": 18.062862619034647,
"learning_rate": 3.6694802310084405e-07,
"logits/chosen": -2.265625,
"logits/rejected": -2.296875,
"logps/chosen": -226.0,
"logps/rejected": -246.0,
"loss": 0.2457,
"rewards/accuracies": 0.9375,
"rewards/chosen": -1.625,
"rewards/margins": 1.8046875,
"rewards/rejected": -3.4375,
"step": 850
},
{
"epoch": 1.0311750599520384,
"grad_norm": 18.43054843437252,
"learning_rate": 3.6472678809418033e-07,
"logits/chosen": -2.28125,
"logits/rejected": -2.34375,
"logps/chosen": -234.0,
"logps/rejected": -239.0,
"loss": 0.2445,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -1.7734375,
"rewards/margins": 1.984375,
"rewards/rejected": -3.765625,
"step": 860
},
{
"epoch": 1.0431654676258992,
"grad_norm": 14.792270635313406,
"learning_rate": 3.625055530875166e-07,
"logits/chosen": -2.328125,
"logits/rejected": -2.296875,
"logps/chosen": -239.0,
"logps/rejected": -258.0,
"loss": 0.2187,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": -2.046875,
"rewards/margins": 2.15625,
"rewards/rejected": -4.1875,
"step": 870
},
{
"epoch": 1.0551558752997603,
"grad_norm": 14.450058470084105,
"learning_rate": 3.6028431808085294e-07,
"logits/chosen": -2.25,
"logits/rejected": -2.265625,
"logps/chosen": -238.0,
"logps/rejected": -258.0,
"loss": 0.2253,
"rewards/accuracies": 0.9375,
"rewards/chosen": -1.984375,
"rewards/margins": 1.953125,
"rewards/rejected": -3.953125,
"step": 880
},
{
"epoch": 1.0671462829736211,
"grad_norm": 14.392869335197297,
"learning_rate": 3.5806308307418926e-07,
"logits/chosen": -2.375,
"logits/rejected": -2.390625,
"logps/chosen": -236.0,
"logps/rejected": -262.0,
"loss": 0.1993,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": -2.15625,
"rewards/margins": 2.25,
"rewards/rejected": -4.40625,
"step": 890
},
{
"epoch": 1.079136690647482,
"grad_norm": 11.360043739301632,
"learning_rate": 3.5584184806752554e-07,
"logits/chosen": -2.3125,
"logits/rejected": -2.3125,
"logps/chosen": -224.0,
"logps/rejected": -252.0,
"loss": 0.2037,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -1.8984375,
"rewards/margins": 2.390625,
"rewards/rejected": -4.28125,
"step": 900
},
{
"epoch": 1.091127098321343,
"grad_norm": 17.249700366406504,
"learning_rate": 3.536206130608618e-07,
"logits/chosen": -2.296875,
"logits/rejected": -2.328125,
"logps/chosen": -224.0,
"logps/rejected": -244.0,
"loss": 0.1878,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -1.921875,
"rewards/margins": 2.34375,
"rewards/rejected": -4.28125,
"step": 910
},
{
"epoch": 1.1031175059952039,
"grad_norm": 17.500202516480442,
"learning_rate": 3.513993780541981e-07,
"logits/chosen": -2.28125,
"logits/rejected": -2.375,
"logps/chosen": -203.0,
"logps/rejected": -246.0,
"loss": 0.2128,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -1.875,
"rewards/margins": 2.375,
"rewards/rejected": -4.25,
"step": 920
},
{
"epoch": 1.1151079136690647,
"grad_norm": 15.16644938505708,
"learning_rate": 3.491781430475344e-07,
"logits/chosen": -2.359375,
"logits/rejected": -2.40625,
"logps/chosen": -220.0,
"logps/rejected": -266.0,
"loss": 0.1957,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -1.4375,
"rewards/margins": 2.40625,
"rewards/rejected": -3.84375,
"step": 930
},
{
"epoch": 1.1270983213429258,
"grad_norm": 12.741517880568063,
"learning_rate": 3.469569080408707e-07,
"logits/chosen": -2.21875,
"logits/rejected": -2.234375,
"logps/chosen": -225.0,
"logps/rejected": -246.0,
"loss": 0.2034,
"rewards/accuracies": 0.9375,
"rewards/chosen": -2.296875,
"rewards/margins": 2.359375,
"rewards/rejected": -4.65625,
"step": 940
},
{
"epoch": 1.1390887290167866,
"grad_norm": 21.470206899673656,
"learning_rate": 3.4473567303420703e-07,
"logits/chosen": -2.296875,
"logits/rejected": -2.359375,
"logps/chosen": -235.0,
"logps/rejected": -256.0,
"loss": 0.1846,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": -2.34375,
"rewards/margins": 2.515625,
"rewards/rejected": -4.875,
"step": 950
},
{
"epoch": 1.1510791366906474,
"grad_norm": 17.20708415573527,
"learning_rate": 3.425144380275433e-07,
"logits/chosen": -2.265625,
"logits/rejected": -2.359375,
"logps/chosen": -245.0,
"logps/rejected": -276.0,
"loss": 0.2031,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -2.3125,
"rewards/margins": 2.5625,
"rewards/rejected": -4.875,
"step": 960
},
{
"epoch": 1.1630695443645085,
"grad_norm": 14.942195535135212,
"learning_rate": 3.402932030208796e-07,
"logits/chosen": -2.328125,
"logits/rejected": -2.3125,
"logps/chosen": -242.0,
"logps/rejected": -272.0,
"loss": 0.2032,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -2.234375,
"rewards/margins": 2.734375,
"rewards/rejected": -4.96875,
"step": 970
},
{
"epoch": 1.1750599520383693,
"grad_norm": 11.998202232926158,
"learning_rate": 3.380719680142159e-07,
"logits/chosen": -2.25,
"logits/rejected": -2.328125,
"logps/chosen": -226.0,
"logps/rejected": -248.0,
"loss": 0.1639,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -1.734375,
"rewards/margins": 2.6875,
"rewards/rejected": -4.40625,
"step": 980
},
{
"epoch": 1.1870503597122302,
"grad_norm": 12.340216646822865,
"learning_rate": 3.358507330075522e-07,
"logits/chosen": -2.25,
"logits/rejected": -2.28125,
"logps/chosen": -238.0,
"logps/rejected": -260.0,
"loss": 0.1902,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -2.75,
"rewards/margins": 2.296875,
"rewards/rejected": -5.0625,
"step": 990
},
{
"epoch": 1.1990407673860912,
"grad_norm": 10.240701799756478,
"learning_rate": 3.3362949800088847e-07,
"logits/chosen": -2.34375,
"logits/rejected": -2.28125,
"logps/chosen": -237.0,
"logps/rejected": -260.0,
"loss": 0.1614,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -1.859375,
"rewards/margins": 2.65625,
"rewards/rejected": -4.5,
"step": 1000
},
{
"epoch": 1.211031175059952,
"grad_norm": 16.753004063840088,
"learning_rate": 3.3140826299422474e-07,
"logits/chosen": -2.3125,
"logits/rejected": -2.390625,
"logps/chosen": -234.0,
"logps/rejected": -264.0,
"loss": 0.2139,
"rewards/accuracies": 0.9375,
"rewards/chosen": -2.15625,
"rewards/margins": 2.5625,
"rewards/rejected": -4.71875,
"step": 1010
},
{
"epoch": 1.223021582733813,
"grad_norm": 22.84859803236856,
"learning_rate": 3.291870279875611e-07,
"logits/chosen": -2.25,
"logits/rejected": -2.25,
"logps/chosen": -242.0,
"logps/rejected": -270.0,
"loss": 0.1757,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -2.359375,
"rewards/margins": 2.578125,
"rewards/rejected": -4.9375,
"step": 1020
},
{
"epoch": 1.235011990407674,
"grad_norm": 14.789129276628799,
"learning_rate": 3.269657929808974e-07,
"logits/chosen": -2.296875,
"logits/rejected": -2.296875,
"logps/chosen": -234.0,
"logps/rejected": -262.0,
"loss": 0.1719,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -2.078125,
"rewards/margins": 2.515625,
"rewards/rejected": -4.59375,
"step": 1030
},
{
"epoch": 1.2470023980815348,
"grad_norm": 21.148440474411384,
"learning_rate": 3.247445579742337e-07,
"logits/chosen": -2.265625,
"logits/rejected": -2.28125,
"logps/chosen": -228.0,
"logps/rejected": -266.0,
"loss": 0.1689,
"rewards/accuracies": 0.9375,
"rewards/chosen": -2.6875,
"rewards/margins": 2.546875,
"rewards/rejected": -5.25,
"step": 1040
},
{
"epoch": 1.2589928057553956,
"grad_norm": 19.272034515617282,
"learning_rate": 3.2252332296756996e-07,
"logits/chosen": -2.28125,
"logits/rejected": -2.328125,
"logps/chosen": -227.0,
"logps/rejected": -260.0,
"loss": 0.1591,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -2.65625,
"rewards/margins": 2.96875,
"rewards/rejected": -5.625,
"step": 1050
},
{
"epoch": 1.2709832134292567,
"grad_norm": 16.85198782317795,
"learning_rate": 3.2030208796090623e-07,
"logits/chosen": -2.28125,
"logits/rejected": -2.3125,
"logps/chosen": -229.0,
"logps/rejected": -251.0,
"loss": 0.2236,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": -1.765625,
"rewards/margins": 2.890625,
"rewards/rejected": -4.65625,
"step": 1060
},
{
"epoch": 1.2829736211031175,
"grad_norm": 15.836692596712956,
"learning_rate": 3.180808529542425e-07,
"logits/chosen": -2.25,
"logits/rejected": -2.28125,
"logps/chosen": -254.0,
"logps/rejected": -276.0,
"loss": 0.168,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": -2.671875,
"rewards/margins": 2.703125,
"rewards/rejected": -5.375,
"step": 1070
},
{
"epoch": 1.2949640287769784,
"grad_norm": 8.91604867948452,
"learning_rate": 3.1585961794757884e-07,
"logits/chosen": -2.25,
"logits/rejected": -2.28125,
"logps/chosen": -240.0,
"logps/rejected": -253.0,
"loss": 0.1711,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -1.59375,
"rewards/margins": 2.75,
"rewards/rejected": -4.34375,
"step": 1080
},
{
"epoch": 1.3069544364508392,
"grad_norm": 14.305189630485472,
"learning_rate": 3.1363838294091517e-07,
"logits/chosen": -2.25,
"logits/rejected": -2.28125,
"logps/chosen": -235.0,
"logps/rejected": -251.0,
"loss": 0.1415,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -1.640625,
"rewards/margins": 2.9375,
"rewards/rejected": -4.59375,
"step": 1090
},
{
"epoch": 1.3189448441247003,
"grad_norm": 12.352735099035222,
"learning_rate": 3.1141714793425145e-07,
"logits/chosen": -2.296875,
"logits/rejected": -2.28125,
"logps/chosen": -249.0,
"logps/rejected": -278.0,
"loss": 0.1572,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -2.1875,
"rewards/margins": 2.953125,
"rewards/rejected": -5.125,
"step": 1100
},
{
"epoch": 1.330935251798561,
"grad_norm": 14.924171575427302,
"learning_rate": 3.091959129275877e-07,
"logits/chosen": -2.265625,
"logits/rejected": -2.265625,
"logps/chosen": -215.0,
"logps/rejected": -241.0,
"loss": 0.1714,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": -1.9375,
"rewards/margins": 2.84375,
"rewards/rejected": -4.78125,
"step": 1110
},
{
"epoch": 1.3429256594724222,
"grad_norm": 17.499923046869892,
"learning_rate": 3.06974677920924e-07,
"logits/chosen": -2.296875,
"logits/rejected": -2.296875,
"logps/chosen": -254.0,
"logps/rejected": -278.0,
"loss": 0.1393,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -2.953125,
"rewards/margins": 2.640625,
"rewards/rejected": -5.59375,
"step": 1120
},
{
"epoch": 1.354916067146283,
"grad_norm": 16.158253954777205,
"learning_rate": 3.0475344291426033e-07,
"logits/chosen": -2.265625,
"logits/rejected": -2.3125,
"logps/chosen": -229.0,
"logps/rejected": -266.0,
"loss": 0.1656,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -2.1875,
"rewards/margins": 3.375,
"rewards/rejected": -5.5625,
"step": 1130
},
{
"epoch": 1.3669064748201438,
"grad_norm": 12.087846600129158,
"learning_rate": 3.025322079075966e-07,
"logits/chosen": -2.234375,
"logits/rejected": -2.296875,
"logps/chosen": -225.0,
"logps/rejected": -272.0,
"loss": 0.1966,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -1.96875,
"rewards/margins": 3.171875,
"rewards/rejected": -5.15625,
"step": 1140
},
{
"epoch": 1.3788968824940047,
"grad_norm": 17.576347022242885,
"learning_rate": 3.003109729009329e-07,
"logits/chosen": -2.25,
"logits/rejected": -2.28125,
"logps/chosen": -220.0,
"logps/rejected": -264.0,
"loss": 0.1847,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -1.8046875,
"rewards/margins": 2.703125,
"rewards/rejected": -4.5,
"step": 1150
},
{
"epoch": 1.3908872901678657,
"grad_norm": 12.233329087327826,
"learning_rate": 2.980897378942692e-07,
"logits/chosen": -2.28125,
"logits/rejected": -2.234375,
"logps/chosen": -235.0,
"logps/rejected": -262.0,
"loss": 0.1373,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": -2.34375,
"rewards/margins": 2.890625,
"rewards/rejected": -5.21875,
"step": 1160
},
{
"epoch": 1.4028776978417266,
"grad_norm": 14.731501359525842,
"learning_rate": 2.958685028876055e-07,
"logits/chosen": -2.265625,
"logits/rejected": -2.234375,
"logps/chosen": -236.0,
"logps/rejected": -260.0,
"loss": 0.1843,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -2.1875,
"rewards/margins": 2.625,
"rewards/rejected": -4.8125,
"step": 1170
},
{
"epoch": 1.4148681055155876,
"grad_norm": 10.33569347198834,
"learning_rate": 2.936472678809418e-07,
"logits/chosen": -2.3125,
"logits/rejected": -2.296875,
"logps/chosen": -246.0,
"logps/rejected": -266.0,
"loss": 0.1912,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -2.40625,
"rewards/margins": 2.640625,
"rewards/rejected": -5.0625,
"step": 1180
},
{
"epoch": 1.4268585131894485,
"grad_norm": 16.986111059013577,
"learning_rate": 2.914260328742781e-07,
"logits/chosen": -2.25,
"logits/rejected": -2.25,
"logps/chosen": -231.0,
"logps/rejected": -256.0,
"loss": 0.1611,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -2.3125,
"rewards/margins": 2.703125,
"rewards/rejected": -5.03125,
"step": 1190
},
{
"epoch": 1.4388489208633093,
"grad_norm": 25.435636014827384,
"learning_rate": 2.8920479786761437e-07,
"logits/chosen": -2.28125,
"logits/rejected": -2.296875,
"logps/chosen": -235.0,
"logps/rejected": -268.0,
"loss": 0.1512,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -2.28125,
"rewards/margins": 2.859375,
"rewards/rejected": -5.15625,
"step": 1200
},
{
"epoch": 1.4508393285371701,
"grad_norm": 13.56680412334536,
"learning_rate": 2.8698356286095065e-07,
"logits/chosen": -2.234375,
"logits/rejected": -2.265625,
"logps/chosen": -260.0,
"logps/rejected": -284.0,
"loss": 0.1255,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -3.171875,
"rewards/margins": 3.40625,
"rewards/rejected": -6.59375,
"step": 1210
},
{
"epoch": 1.4628297362110312,
"grad_norm": 8.141641110335339,
"learning_rate": 2.847623278542869e-07,
"logits/chosen": -2.25,
"logits/rejected": -2.3125,
"logps/chosen": -236.0,
"logps/rejected": -264.0,
"loss": 0.1428,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": -2.296875,
"rewards/margins": 3.03125,
"rewards/rejected": -5.34375,
"step": 1220
},
{
"epoch": 1.474820143884892,
"grad_norm": 21.115162166818017,
"learning_rate": 2.825410928476233e-07,
"logits/chosen": -2.1875,
"logits/rejected": -2.25,
"logps/chosen": -221.0,
"logps/rejected": -268.0,
"loss": 0.136,
"rewards/accuracies": 0.9375,
"rewards/chosen": -2.796875,
"rewards/margins": 3.09375,
"rewards/rejected": -5.90625,
"step": 1230
},
{
"epoch": 1.486810551558753,
"grad_norm": 18.47262881897785,
"learning_rate": 2.803198578409596e-07,
"logits/chosen": -2.25,
"logits/rejected": -2.25,
"logps/chosen": -228.0,
"logps/rejected": -260.0,
"loss": 0.1688,
"rewards/accuracies": 0.925000011920929,
"rewards/chosen": -1.9375,
"rewards/margins": 3.171875,
"rewards/rejected": -5.125,
"step": 1240
},
{
"epoch": 1.498800959232614,
"grad_norm": 7.863109211524176,
"learning_rate": 2.7809862283429586e-07,
"logits/chosen": -2.234375,
"logits/rejected": -2.296875,
"logps/chosen": -222.0,
"logps/rejected": -272.0,
"loss": 0.1298,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -2.046875,
"rewards/margins": 3.53125,
"rewards/rejected": -5.5625,
"step": 1250
},
{
"epoch": 1.5107913669064748,
"grad_norm": 12.978026445303877,
"learning_rate": 2.7587738782763214e-07,
"logits/chosen": -2.265625,
"logits/rejected": -2.328125,
"logps/chosen": -216.0,
"logps/rejected": -266.0,
"loss": 0.1403,
"rewards/accuracies": 0.9375,
"rewards/chosen": -2.40625,
"rewards/margins": 3.421875,
"rewards/rejected": -5.8125,
"step": 1260
},
{
"epoch": 1.5227817745803356,
"grad_norm": 15.921496272567383,
"learning_rate": 2.736561528209684e-07,
"logits/chosen": -2.28125,
"logits/rejected": -2.296875,
"logps/chosen": -234.0,
"logps/rejected": -274.0,
"loss": 0.1493,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -2.28125,
"rewards/margins": 3.140625,
"rewards/rejected": -5.40625,
"step": 1270
},
{
"epoch": 1.5347721822541966,
"grad_norm": 14.590593841694169,
"learning_rate": 2.7143491781430474e-07,
"logits/chosen": -2.21875,
"logits/rejected": -2.234375,
"logps/chosen": -220.0,
"logps/rejected": -268.0,
"loss": 0.1147,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -2.8125,
"rewards/margins": 3.078125,
"rewards/rejected": -5.875,
"step": 1280
},
{
"epoch": 1.5467625899280577,
"grad_norm": 16.15992031571578,
"learning_rate": 2.692136828076411e-07,
"logits/chosen": -2.296875,
"logits/rejected": -2.28125,
"logps/chosen": -242.0,
"logps/rejected": -274.0,
"loss": 0.1222,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -3.0625,
"rewards/margins": 3.453125,
"rewards/rejected": -6.53125,
"step": 1290
},
{
"epoch": 1.5587529976019185,
"grad_norm": 10.104750544983435,
"learning_rate": 2.6699244780097735e-07,
"logits/chosen": -2.296875,
"logits/rejected": -2.28125,
"logps/chosen": -236.0,
"logps/rejected": -266.0,
"loss": 0.1225,
"rewards/accuracies": 0.9375,
"rewards/chosen": -2.453125,
"rewards/margins": 3.34375,
"rewards/rejected": -5.8125,
"step": 1300
},
{
"epoch": 1.5707434052757794,
"grad_norm": 16.414889130607445,
"learning_rate": 2.6477121279431363e-07,
"logits/chosen": -2.265625,
"logits/rejected": -2.3125,
"logps/chosen": -227.0,
"logps/rejected": -270.0,
"loss": 0.1865,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -2.59375,
"rewards/margins": 3.421875,
"rewards/rejected": -6.03125,
"step": 1310
},
{
"epoch": 1.5827338129496402,
"grad_norm": 19.612192416903497,
"learning_rate": 2.625499777876499e-07,
"logits/chosen": -2.265625,
"logits/rejected": -2.28125,
"logps/chosen": -236.0,
"logps/rejected": -272.0,
"loss": 0.1157,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -3.484375,
"rewards/margins": 3.265625,
"rewards/rejected": -6.75,
"step": 1320
},
{
"epoch": 1.594724220623501,
"grad_norm": 14.340619052654011,
"learning_rate": 2.6032874278098623e-07,
"logits/chosen": -2.3125,
"logits/rejected": -2.296875,
"logps/chosen": -251.0,
"logps/rejected": -274.0,
"loss": 0.1074,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -2.421875,
"rewards/margins": 3.90625,
"rewards/rejected": -6.34375,
"step": 1330
},
{
"epoch": 1.6067146282973621,
"grad_norm": 10.947155370834658,
"learning_rate": 2.581075077743225e-07,
"logits/chosen": -2.28125,
"logits/rejected": -2.28125,
"logps/chosen": -242.0,
"logps/rejected": -268.0,
"loss": 0.1142,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -2.53125,
"rewards/margins": 3.640625,
"rewards/rejected": -6.1875,
"step": 1340
},
{
"epoch": 1.6187050359712232,
"grad_norm": 6.379407877957059,
"learning_rate": 2.558862727676588e-07,
"logits/chosen": -2.3125,
"logits/rejected": -2.296875,
"logps/chosen": -243.0,
"logps/rejected": -264.0,
"loss": 0.1175,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -2.546875,
"rewards/margins": 3.6875,
"rewards/rejected": -6.21875,
"step": 1350
},
{
"epoch": 1.630695443645084,
"grad_norm": 14.584427754669349,
"learning_rate": 2.536650377609951e-07,
"logits/chosen": -2.265625,
"logits/rejected": -2.3125,
"logps/chosen": -243.0,
"logps/rejected": -268.0,
"loss": 0.088,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -2.65625,
"rewards/margins": 3.515625,
"rewards/rejected": -6.1875,
"step": 1360
},
{
"epoch": 1.6426858513189448,
"grad_norm": 18.386633865745754,
"learning_rate": 2.514438027543314e-07,
"logits/chosen": -2.234375,
"logits/rejected": -2.203125,
"logps/chosen": -240.0,
"logps/rejected": -276.0,
"loss": 0.0843,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -2.375,
"rewards/margins": 4.1875,
"rewards/rejected": -6.5625,
"step": 1370
},
{
"epoch": 1.6546762589928057,
"grad_norm": 13.249684798008296,
"learning_rate": 2.492225677476677e-07,
"logits/chosen": -2.296875,
"logits/rejected": -2.328125,
"logps/chosen": -230.0,
"logps/rejected": -282.0,
"loss": 0.11,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -2.515625,
"rewards/margins": 3.46875,
"rewards/rejected": -5.96875,
"step": 1380
},
{
"epoch": 1.6666666666666665,
"grad_norm": 24.989358834998974,
"learning_rate": 2.47001332741004e-07,
"logits/chosen": -2.265625,
"logits/rejected": -2.3125,
"logps/chosen": -240.0,
"logps/rejected": -258.0,
"loss": 0.134,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -2.59375,
"rewards/margins": 3.3125,
"rewards/rejected": -5.90625,
"step": 1390
},
{
"epoch": 1.6786570743405276,
"grad_norm": 21.225948587869443,
"learning_rate": 2.447800977343403e-07,
"logits/chosen": -2.203125,
"logits/rejected": -2.21875,
"logps/chosen": -227.0,
"logps/rejected": -264.0,
"loss": 0.1539,
"rewards/accuracies": 0.9125000238418579,
"rewards/chosen": -2.671875,
"rewards/margins": 3.546875,
"rewards/rejected": -6.21875,
"step": 1400
},
{
"epoch": 1.6906474820143886,
"grad_norm": 22.09693803294425,
"learning_rate": 2.425588627276766e-07,
"logits/chosen": -2.265625,
"logits/rejected": -2.28125,
"logps/chosen": -232.0,
"logps/rejected": -274.0,
"loss": 0.1301,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -2.8125,
"rewards/margins": 3.59375,
"rewards/rejected": -6.40625,
"step": 1410
},
{
"epoch": 1.7026378896882495,
"grad_norm": 13.568164124099978,
"learning_rate": 2.403376277210129e-07,
"logits/chosen": -2.25,
"logits/rejected": -2.296875,
"logps/chosen": -245.0,
"logps/rejected": -286.0,
"loss": 0.0997,
"rewards/accuracies": 1.0,
"rewards/chosen": -3.734375,
"rewards/margins": 3.609375,
"rewards/rejected": -7.34375,
"step": 1420
},
{
"epoch": 1.7146282973621103,
"grad_norm": 12.536257234153933,
"learning_rate": 2.3811639271434916e-07,
"logits/chosen": -2.265625,
"logits/rejected": -2.25,
"logps/chosen": -243.0,
"logps/rejected": -274.0,
"loss": 0.1365,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -2.765625,
"rewards/margins": 3.515625,
"rewards/rejected": -6.28125,
"step": 1430
},
{
"epoch": 1.7266187050359711,
"grad_norm": 17.74910297920457,
"learning_rate": 2.3589515770768546e-07,
"logits/chosen": -2.296875,
"logits/rejected": -2.296875,
"logps/chosen": -256.0,
"logps/rejected": -276.0,
"loss": 0.1436,
"rewards/accuracies": 0.8999999761581421,
"rewards/chosen": -3.3125,
"rewards/margins": 3.53125,
"rewards/rejected": -6.84375,
"step": 1440
},
{
"epoch": 1.738609112709832,
"grad_norm": 27.091464814087082,
"learning_rate": 2.3367392270102177e-07,
"logits/chosen": -2.25,
"logits/rejected": -2.28125,
"logps/chosen": -235.0,
"logps/rejected": -298.0,
"loss": 0.1031,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -2.640625,
"rewards/margins": 4.1875,
"rewards/rejected": -6.84375,
"step": 1450
},
{
"epoch": 1.750599520383693,
"grad_norm": 14.870002835228663,
"learning_rate": 2.3145268769435804e-07,
"logits/chosen": -2.265625,
"logits/rejected": -2.203125,
"logps/chosen": -247.0,
"logps/rejected": -278.0,
"loss": 0.1465,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -3.046875,
"rewards/margins": 3.609375,
"rewards/rejected": -6.65625,
"step": 1460
},
{
"epoch": 1.762589928057554,
"grad_norm": 17.480506529993356,
"learning_rate": 2.2923145268769435e-07,
"logits/chosen": -2.25,
"logits/rejected": -2.265625,
"logps/chosen": -223.0,
"logps/rejected": -264.0,
"loss": 0.0896,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -2.484375,
"rewards/margins": 4.3125,
"rewards/rejected": -6.78125,
"step": 1470
},
{
"epoch": 1.774580335731415,
"grad_norm": 6.540878635676034,
"learning_rate": 2.2701021768103065e-07,
"logits/chosen": -2.25,
"logits/rejected": -2.25,
"logps/chosen": -236.0,
"logps/rejected": -278.0,
"loss": 0.1107,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -2.703125,
"rewards/margins": 3.984375,
"rewards/rejected": -6.6875,
"step": 1480
},
{
"epoch": 1.7865707434052758,
"grad_norm": 16.51964209338177,
"learning_rate": 2.2478898267436695e-07,
"logits/chosen": -2.28125,
"logits/rejected": -2.265625,
"logps/chosen": -241.0,
"logps/rejected": -284.0,
"loss": 0.0963,
"rewards/accuracies": 1.0,
"rewards/chosen": -3.671875,
"rewards/margins": 3.71875,
"rewards/rejected": -7.375,
"step": 1490
},
{
"epoch": 1.7985611510791366,
"grad_norm": 17.145067233621255,
"learning_rate": 2.2256774766770323e-07,
"logits/chosen": -2.1875,
"logits/rejected": -2.171875,
"logps/chosen": -234.0,
"logps/rejected": -255.0,
"loss": 0.1016,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -2.71875,
"rewards/margins": 3.953125,
"rewards/rejected": -6.6875,
"step": 1500
},
{
"epoch": 1.8105515587529974,
"grad_norm": 9.418365551008751,
"learning_rate": 2.2034651266103953e-07,
"logits/chosen": -2.234375,
"logits/rejected": -2.234375,
"logps/chosen": -216.0,
"logps/rejected": -266.0,
"loss": 0.0902,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -2.65625,
"rewards/margins": 3.875,
"rewards/rejected": -6.53125,
"step": 1510
},
{
"epoch": 1.8225419664268585,
"grad_norm": 8.454143934854207,
"learning_rate": 2.1812527765437583e-07,
"logits/chosen": -2.265625,
"logits/rejected": -2.25,
"logps/chosen": -247.0,
"logps/rejected": -278.0,
"loss": 0.1036,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": -2.875,
"rewards/margins": 3.75,
"rewards/rejected": -6.625,
"step": 1520
},
{
"epoch": 1.8345323741007196,
"grad_norm": 23.877830813468428,
"learning_rate": 2.159040426477121e-07,
"logits/chosen": -2.28125,
"logits/rejected": -2.3125,
"logps/chosen": -249.0,
"logps/rejected": -300.0,
"loss": 0.0974,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -3.625,
"rewards/margins": 3.953125,
"rewards/rejected": -7.5625,
"step": 1530
},
{
"epoch": 1.8465227817745804,
"grad_norm": 17.59044264644789,
"learning_rate": 2.1368280764104841e-07,
"logits/chosen": -2.171875,
"logits/rejected": -2.296875,
"logps/chosen": -242.0,
"logps/rejected": -300.0,
"loss": 0.0921,
"rewards/accuracies": 1.0,
"rewards/chosen": -3.734375,
"rewards/margins": 3.984375,
"rewards/rejected": -7.71875,
"step": 1540
},
{
"epoch": 1.8585131894484412,
"grad_norm": 7.53671069585816,
"learning_rate": 2.1146157263438472e-07,
"logits/chosen": -2.265625,
"logits/rejected": -2.265625,
"logps/chosen": -237.0,
"logps/rejected": -278.0,
"loss": 0.0677,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -2.734375,
"rewards/margins": 4.1875,
"rewards/rejected": -6.9375,
"step": 1550
},
{
"epoch": 1.870503597122302,
"grad_norm": 15.114334538388968,
"learning_rate": 2.09240337627721e-07,
"logits/chosen": -2.28125,
"logits/rejected": -2.25,
"logps/chosen": -245.0,
"logps/rejected": -260.0,
"loss": 0.1064,
"rewards/accuracies": 0.9624999761581421,
"rewards/chosen": -3.0,
"rewards/margins": 3.78125,
"rewards/rejected": -6.78125,
"step": 1560
},
{
"epoch": 1.882494004796163,
"grad_norm": 24.921093039405555,
"learning_rate": 2.070191026210573e-07,
"logits/chosen": -2.25,
"logits/rejected": -2.296875,
"logps/chosen": -246.0,
"logps/rejected": -274.0,
"loss": 0.1109,
"rewards/accuracies": 0.949999988079071,
"rewards/chosen": -2.6875,
"rewards/margins": 3.46875,
"rewards/rejected": -6.15625,
"step": 1570
},
{
"epoch": 1.894484412470024,
"grad_norm": 17.229671646251834,
"learning_rate": 2.047978676143936e-07,
"logits/chosen": -2.203125,
"logits/rejected": -2.21875,
"logps/chosen": -251.0,
"logps/rejected": -276.0,
"loss": 0.0787,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -2.609375,
"rewards/margins": 3.875,
"rewards/rejected": -6.5,
"step": 1580
},
{
"epoch": 1.906474820143885,
"grad_norm": 10.939803601324265,
"learning_rate": 2.025766326077299e-07,
"logits/chosen": -2.265625,
"logits/rejected": -2.265625,
"logps/chosen": -235.0,
"logps/rejected": -284.0,
"loss": 0.0871,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -2.515625,
"rewards/margins": 4.125,
"rewards/rejected": -6.625,
"step": 1590
},
{
"epoch": 1.9184652278177459,
"grad_norm": 16.05247518882454,
"learning_rate": 2.0035539760106618e-07,
"logits/chosen": -2.265625,
"logits/rejected": -2.296875,
"logps/chosen": -241.0,
"logps/rejected": -300.0,
"loss": 0.0611,
"rewards/accuracies": 1.0,
"rewards/chosen": -2.5625,
"rewards/margins": 4.78125,
"rewards/rejected": -7.34375,
"step": 1600
},
{
"epoch": 1.9304556354916067,
"grad_norm": 9.855796335839392,
"learning_rate": 1.9813416259440246e-07,
"logits/chosen": -2.234375,
"logits/rejected": -2.28125,
"logps/chosen": -245.0,
"logps/rejected": -278.0,
"loss": 0.0889,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -3.625,
"rewards/margins": 3.921875,
"rewards/rejected": -7.53125,
"step": 1610
},
{
"epoch": 1.9424460431654675,
"grad_norm": 7.909249841156511,
"learning_rate": 1.9591292758773879e-07,
"logits/chosen": -2.25,
"logits/rejected": -2.296875,
"logps/chosen": -235.0,
"logps/rejected": -298.0,
"loss": 0.1069,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -3.59375,
"rewards/margins": 4.40625,
"rewards/rejected": -8.0,
"step": 1620
},
{
"epoch": 1.9544364508393284,
"grad_norm": 10.195924724786721,
"learning_rate": 1.9369169258107506e-07,
"logits/chosen": -2.328125,
"logits/rejected": -2.234375,
"logps/chosen": -251.0,
"logps/rejected": -280.0,
"loss": 0.0671,
"rewards/accuracies": 0.9750000238418579,
"rewards/chosen": -3.296875,
"rewards/margins": 4.0625,
"rewards/rejected": -7.375,
"step": 1630
},
{
"epoch": 1.9664268585131894,
"grad_norm": 4.981347556467333,
"learning_rate": 1.9147045757441137e-07,
"logits/chosen": -2.265625,
"logits/rejected": -2.296875,
"logps/chosen": -232.0,
"logps/rejected": -286.0,
"loss": 0.0626,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -3.40625,
"rewards/margins": 4.03125,
"rewards/rejected": -7.4375,
"step": 1640
},
{
"epoch": 1.9784172661870505,
"grad_norm": 8.780872639436975,
"learning_rate": 1.8924922256774767e-07,
"logits/chosen": -2.25,
"logits/rejected": -2.1875,
"logps/chosen": -248.0,
"logps/rejected": -276.0,
"loss": 0.0836,
"rewards/accuracies": 0.987500011920929,
"rewards/chosen": -3.84375,
"rewards/margins": 3.953125,
"rewards/rejected": -7.8125,
"step": 1650
},
{
"epoch": 1.9904076738609113,
"grad_norm": 13.586444001383855,
"learning_rate": 1.8702798756108395e-07,
"logits/chosen": -2.34375,
"logits/rejected": -2.265625,
"logps/chosen": -253.0,
"logps/rejected": -282.0,
"loss": 0.0676,
"rewards/accuracies": 1.0,
"rewards/chosen": -3.046875,
"rewards/margins": 4.21875,
"rewards/rejected": -7.25,
"step": 1660
},
{
"epoch": 2.0,
"eval_logits/chosen": -2.265625,
"eval_logits/rejected": -2.296875,
"eval_logps/chosen": -255.0,
"eval_logps/rejected": -270.0,
"eval_loss": 0.8723406195640564,
"eval_rewards/accuracies": 0.6397058963775635,
"eval_rewards/chosen": -4.71875,
"eval_rewards/margins": 1.59375,
"eval_rewards/rejected": -6.3125,
"eval_runtime": 20.0713,
"eval_samples_per_second": 19.979,
"eval_steps_per_second": 0.847,
"step": 1668
}
],
"logging_steps": 10,
"max_steps": 2502,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}