zephyr-7b-dpo-full / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 100,
"global_step": 478,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0020920502092050207,
"grad_norm": 18.792079300993226,
"learning_rate": 1.0416666666666666e-08,
"logits/chosen": -3.359375,
"logits/rejected": -3.34375,
"logps/chosen": -320.0,
"logps/rejected": -380.0,
"loss": 1.3828,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.02092050209205021,
"grad_norm": 18.23398541151873,
"learning_rate": 1.0416666666666667e-07,
"logits/chosen": -3.171875,
"logits/rejected": -3.15625,
"logps/chosen": -284.0,
"logps/rejected": -268.0,
"loss": 1.3841,
"rewards/accuracies": 0.2083333283662796,
"rewards/chosen": 0.00051116943359375,
"rewards/margins": 0.000637054443359375,
"rewards/rejected": -0.00012111663818359375,
"step": 10
},
{
"epoch": 0.04184100418410042,
"grad_norm": 17.016957918520955,
"learning_rate": 2.0833333333333333e-07,
"logits/chosen": -3.203125,
"logits/rejected": -3.203125,
"logps/chosen": -308.0,
"logps/rejected": -316.0,
"loss": 1.3834,
"rewards/accuracies": 0.26249998807907104,
"rewards/chosen": -0.000522613525390625,
"rewards/margins": 0.0001621246337890625,
"rewards/rejected": -0.000690460205078125,
"step": 20
},
{
"epoch": 0.06276150627615062,
"grad_norm": 16.880895386487456,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": -3.25,
"logits/rejected": -3.234375,
"logps/chosen": -324.0,
"logps/rejected": -282.0,
"loss": 1.3779,
"rewards/accuracies": 0.5249999761581421,
"rewards/chosen": 0.0010528564453125,
"rewards/margins": 0.008056640625,
"rewards/rejected": -0.006988525390625,
"step": 30
},
{
"epoch": 0.08368200836820083,
"grad_norm": 17.084942061574367,
"learning_rate": 4.1666666666666667e-07,
"logits/chosen": -3.078125,
"logits/rejected": -3.140625,
"logps/chosen": -288.0,
"logps/rejected": -276.0,
"loss": 1.3604,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -0.007110595703125,
"rewards/margins": 0.038330078125,
"rewards/rejected": -0.04541015625,
"step": 40
},
{
"epoch": 0.10460251046025104,
"grad_norm": 16.90050268313775,
"learning_rate": 4.999733114418725e-07,
"logits/chosen": -3.078125,
"logits/rejected": -3.0,
"logps/chosen": -326.0,
"logps/rejected": -332.0,
"loss": 1.3289,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.047607421875,
"rewards/margins": 0.0478515625,
"rewards/rejected": -0.09521484375,
"step": 50
},
{
"epoch": 0.12552301255230125,
"grad_norm": 20.78289142989757,
"learning_rate": 4.990398100856366e-07,
"logits/chosen": -3.15625,
"logits/rejected": -3.09375,
"logps/chosen": -300.0,
"logps/rejected": -338.0,
"loss": 1.2877,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -0.091796875,
"rewards/margins": 0.11279296875,
"rewards/rejected": -0.205078125,
"step": 60
},
{
"epoch": 0.14644351464435146,
"grad_norm": 23.133166135835843,
"learning_rate": 4.967775735898179e-07,
"logits/chosen": -2.96875,
"logits/rejected": -3.03125,
"logps/chosen": -298.0,
"logps/rejected": -308.0,
"loss": 1.2374,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.0888671875,
"rewards/margins": 0.1982421875,
"rewards/rejected": -0.287109375,
"step": 70
},
{
"epoch": 0.16736401673640167,
"grad_norm": 38.62320509226835,
"learning_rate": 4.931986719649298e-07,
"logits/chosen": -3.109375,
"logits/rejected": -3.125,
"logps/chosen": -376.0,
"logps/rejected": -336.0,
"loss": 1.2072,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.27734375,
"rewards/margins": 0.26953125,
"rewards/rejected": -0.546875,
"step": 80
},
{
"epoch": 0.18828451882845187,
"grad_norm": 40.482824838074976,
"learning_rate": 4.883222001996351e-07,
"logits/chosen": -3.0625,
"logits/rejected": -3.0625,
"logps/chosen": -350.0,
"logps/rejected": -398.0,
"loss": 1.1208,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -0.66015625,
"rewards/margins": 0.5625,
"rewards/rejected": -1.2265625,
"step": 90
},
{
"epoch": 0.20920502092050208,
"grad_norm": 35.10500983971475,
"learning_rate": 4.821741763807186e-07,
"logits/chosen": -2.921875,
"logits/rejected": -3.015625,
"logps/chosen": -354.0,
"logps/rejected": -406.0,
"loss": 1.1291,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -0.6328125,
"rewards/margins": 0.6015625,
"rewards/rejected": -1.234375,
"step": 100
},
{
"epoch": 0.20920502092050208,
"eval_logits/chosen": -2.96875,
"eval_logits/rejected": -2.953125,
"eval_logps/chosen": -368.0,
"eval_logps/rejected": -422.0,
"eval_loss": 0.5672109127044678,
"eval_rewards/accuracies": 0.71484375,
"eval_rewards/chosen": -0.73828125,
"eval_rewards/margins": 0.546875,
"eval_rewards/rejected": -1.28125,
"eval_runtime": 40.1054,
"eval_samples_per_second": 49.869,
"eval_steps_per_second": 0.798,
"step": 100
},
{
"epoch": 0.2301255230125523,
"grad_norm": 49.983830380259825,
"learning_rate": 4.747874028753375e-07,
"logits/chosen": -2.9375,
"logits/rejected": -2.984375,
"logps/chosen": -418.0,
"logps/rejected": -422.0,
"loss": 1.1272,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.84375,
"rewards/margins": 0.56640625,
"rewards/rejected": -1.4140625,
"step": 110
},
{
"epoch": 0.2510460251046025,
"grad_norm": 44.36112825504322,
"learning_rate": 4.662012913161997e-07,
"logits/chosen": -2.828125,
"logits/rejected": -2.875,
"logps/chosen": -380.0,
"logps/rejected": -418.0,
"loss": 1.0984,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.6640625,
"rewards/margins": 0.6484375,
"rewards/rejected": -1.3125,
"step": 120
},
{
"epoch": 0.2719665271966527,
"grad_norm": 46.661481140072915,
"learning_rate": 4.5646165232345103e-07,
"logits/chosen": -2.8125,
"logits/rejected": -2.890625,
"logps/chosen": -404.0,
"logps/rejected": -448.0,
"loss": 1.0745,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -0.9765625,
"rewards/margins": 0.61328125,
"rewards/rejected": -1.5859375,
"step": 130
},
{
"epoch": 0.2928870292887029,
"grad_norm": 42.823235443382885,
"learning_rate": 4.456204510851956e-07,
"logits/chosen": -2.84375,
"logits/rejected": -2.875,
"logps/chosen": -408.0,
"logps/rejected": -452.0,
"loss": 1.0635,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": -0.6875,
"rewards/margins": 0.6484375,
"rewards/rejected": -1.3359375,
"step": 140
},
{
"epoch": 0.3138075313807531,
"grad_norm": 59.20252738056763,
"learning_rate": 4.337355301007335e-07,
"logits/chosen": -2.828125,
"logits/rejected": -2.78125,
"logps/chosen": -406.0,
"logps/rejected": -452.0,
"loss": 1.0733,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -0.93359375,
"rewards/margins": 0.55078125,
"rewards/rejected": -1.484375,
"step": 150
},
{
"epoch": 0.33472803347280333,
"grad_norm": 51.1296977483052,
"learning_rate": 4.2087030056579986e-07,
"logits/chosen": -2.8125,
"logits/rejected": -2.796875,
"logps/chosen": -376.0,
"logps/rejected": -440.0,
"loss": 1.066,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -0.796875,
"rewards/margins": 0.83984375,
"rewards/rejected": -1.6328125,
"step": 160
},
{
"epoch": 0.35564853556485354,
"grad_norm": 55.991618044626925,
"learning_rate": 4.070934040463998e-07,
"logits/chosen": -2.640625,
"logits/rejected": -2.609375,
"logps/chosen": -374.0,
"logps/rejected": -412.0,
"loss": 1.082,
"rewards/accuracies": 0.706250011920929,
"rewards/chosen": -1.0078125,
"rewards/margins": 0.609375,
"rewards/rejected": -1.609375,
"step": 170
},
{
"epoch": 0.37656903765690375,
"grad_norm": 50.525931210242035,
"learning_rate": 3.9247834624635404e-07,
"logits/chosen": -2.625,
"logits/rejected": -2.765625,
"logps/chosen": -358.0,
"logps/rejected": -398.0,
"loss": 1.0064,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -0.7890625,
"rewards/margins": 0.69921875,
"rewards/rejected": -1.484375,
"step": 180
},
{
"epoch": 0.39748953974895396,
"grad_norm": 46.856348860317034,
"learning_rate": 3.7710310482256523e-07,
"logits/chosen": -2.671875,
"logits/rejected": -2.59375,
"logps/chosen": -390.0,
"logps/rejected": -448.0,
"loss": 1.0301,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": -0.8515625,
"rewards/margins": 0.65234375,
"rewards/rejected": -1.5078125,
"step": 190
},
{
"epoch": 0.41841004184100417,
"grad_norm": 106.94783278420675,
"learning_rate": 3.610497133404795e-07,
"logits/chosen": -2.546875,
"logits/rejected": -2.640625,
"logps/chosen": -368.0,
"logps/rejected": -430.0,
"loss": 1.0723,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -0.828125,
"rewards/margins": 0.73828125,
"rewards/rejected": -1.5703125,
"step": 200
},
{
"epoch": 0.41841004184100417,
"eval_logits/chosen": -2.6875,
"eval_logits/rejected": -2.65625,
"eval_logps/chosen": -376.0,
"eval_logps/rejected": -460.0,
"eval_loss": 0.5170312523841858,
"eval_rewards/accuracies": 0.765625,
"eval_rewards/chosen": -0.8203125,
"eval_rewards/margins": 0.83203125,
"eval_rewards/rejected": -1.65625,
"eval_runtime": 39.9201,
"eval_samples_per_second": 50.1,
"eval_steps_per_second": 0.802,
"step": 200
},
{
"epoch": 0.4393305439330544,
"grad_norm": 56.92194462169853,
"learning_rate": 3.4440382358952115e-07,
"logits/chosen": -2.671875,
"logits/rejected": -2.640625,
"logps/chosen": -402.0,
"logps/rejected": -438.0,
"loss": 1.0592,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.8828125,
"rewards/margins": 0.734375,
"rewards/rejected": -1.6171875,
"step": 210
},
{
"epoch": 0.4602510460251046,
"grad_norm": 91.8785262604961,
"learning_rate": 3.272542485937368e-07,
"logits/chosen": -2.515625,
"logits/rejected": -2.59375,
"logps/chosen": -386.0,
"logps/rejected": -454.0,
"loss": 1.036,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -0.96484375,
"rewards/margins": 0.81640625,
"rewards/rejected": -1.78125,
"step": 220
},
{
"epoch": 0.4811715481171548,
"grad_norm": 51.218599770252155,
"learning_rate": 3.096924887558854e-07,
"logits/chosen": -2.484375,
"logits/rejected": -2.53125,
"logps/chosen": -376.0,
"logps/rejected": -448.0,
"loss": 1.0678,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": -1.046875,
"rewards/margins": 0.79296875,
"rewards/rejected": -1.8359375,
"step": 230
},
{
"epoch": 0.502092050209205,
"grad_norm": 52.33600440489297,
"learning_rate": 2.9181224366319943e-07,
"logits/chosen": -2.65625,
"logits/rejected": -2.546875,
"logps/chosen": -372.0,
"logps/rejected": -428.0,
"loss": 1.0064,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -0.80859375,
"rewards/margins": 0.6953125,
"rewards/rejected": -1.5,
"step": 240
},
{
"epoch": 0.5230125523012552,
"grad_norm": 51.27684803998907,
"learning_rate": 2.7370891215954565e-07,
"logits/chosen": -2.625,
"logits/rejected": -2.5625,
"logps/chosen": -424.0,
"logps/rejected": -478.0,
"loss": 1.0087,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -1.0390625,
"rewards/margins": 0.8828125,
"rewards/rejected": -1.921875,
"step": 250
},
{
"epoch": 0.5439330543933054,
"grad_norm": 63.264740099851814,
"learning_rate": 2.55479083351317e-07,
"logits/chosen": -2.625,
"logits/rejected": -2.65625,
"logps/chosen": -424.0,
"logps/rejected": -472.0,
"loss": 1.0101,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": -1.09375,
"rewards/margins": 0.875,
"rewards/rejected": -1.96875,
"step": 260
},
{
"epoch": 0.5648535564853556,
"grad_norm": 43.895031684867064,
"learning_rate": 2.3722002126275822e-07,
"logits/chosen": -2.59375,
"logits/rejected": -2.59375,
"logps/chosen": -396.0,
"logps/rejected": -438.0,
"loss": 1.0423,
"rewards/accuracies": 0.706250011920929,
"rewards/chosen": -0.8984375,
"rewards/margins": 0.68359375,
"rewards/rejected": -1.5859375,
"step": 270
},
{
"epoch": 0.5857740585774058,
"grad_norm": 53.26137168286982,
"learning_rate": 2.19029145890313e-07,
"logits/chosen": -2.546875,
"logits/rejected": -2.5625,
"logps/chosen": -374.0,
"logps/rejected": -426.0,
"loss": 1.0536,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.81640625,
"rewards/margins": 0.80078125,
"rewards/rejected": -1.6171875,
"step": 280
},
{
"epoch": 0.606694560669456,
"grad_norm": 57.71809127266035,
"learning_rate": 2.0100351342479216e-07,
"logits/chosen": -2.640625,
"logits/rejected": -2.609375,
"logps/chosen": -386.0,
"logps/rejected": -450.0,
"loss": 1.0334,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -1.1171875,
"rewards/margins": 0.75,
"rewards/rejected": -1.859375,
"step": 290
},
{
"epoch": 0.6276150627615062,
"grad_norm": 59.26080657165494,
"learning_rate": 1.8323929841460178e-07,
"logits/chosen": -2.640625,
"logits/rejected": -2.609375,
"logps/chosen": -422.0,
"logps/rejected": -460.0,
"loss": 0.9917,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -0.94140625,
"rewards/margins": 0.81640625,
"rewards/rejected": -1.7578125,
"step": 300
},
{
"epoch": 0.6276150627615062,
"eval_logits/chosen": -2.625,
"eval_logits/rejected": -2.59375,
"eval_logps/chosen": -372.0,
"eval_logps/rejected": -464.0,
"eval_loss": 0.5082695484161377,
"eval_rewards/accuracies": 0.75390625,
"eval_rewards/chosen": -0.77734375,
"eval_rewards/margins": 0.92578125,
"eval_rewards/rejected": -1.703125,
"eval_runtime": 39.9557,
"eval_samples_per_second": 50.055,
"eval_steps_per_second": 0.801,
"step": 300
},
{
"epoch": 0.6485355648535565,
"grad_norm": 63.68800654846809,
"learning_rate": 1.6583128063291573e-07,
"logits/chosen": -2.421875,
"logits/rejected": -2.53125,
"logps/chosen": -416.0,
"logps/rejected": -452.0,
"loss": 0.9939,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.94140625,
"rewards/margins": 0.7734375,
"rewards/rejected": -1.71875,
"step": 310
},
{
"epoch": 0.6694560669456067,
"grad_norm": 65.12912368302251,
"learning_rate": 1.488723393865766e-07,
"logits/chosen": -2.59375,
"logits/rejected": -2.546875,
"logps/chosen": -412.0,
"logps/rejected": -444.0,
"loss": 0.97,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.95703125,
"rewards/margins": 0.80078125,
"rewards/rejected": -1.7578125,
"step": 320
},
{
"epoch": 0.6903765690376569,
"grad_norm": 64.88040587462643,
"learning_rate": 1.3245295796480788e-07,
"logits/chosen": -2.625,
"logits/rejected": -2.578125,
"logps/chosen": -388.0,
"logps/rejected": -452.0,
"loss": 0.9984,
"rewards/accuracies": 0.668749988079071,
"rewards/chosen": -0.91796875,
"rewards/margins": 0.67578125,
"rewards/rejected": -1.59375,
"step": 330
},
{
"epoch": 0.7112970711297071,
"grad_norm": 97.36761933497387,
"learning_rate": 1.1666074087171627e-07,
"logits/chosen": -2.65625,
"logits/rejected": -2.546875,
"logps/chosen": -400.0,
"logps/rejected": -490.0,
"loss": 0.9862,
"rewards/accuracies": 0.8125,
"rewards/chosen": -0.80859375,
"rewards/margins": 0.9921875,
"rewards/rejected": -1.796875,
"step": 340
},
{
"epoch": 0.7322175732217573,
"grad_norm": 55.29816737345383,
"learning_rate": 1.0157994641835734e-07,
"logits/chosen": -2.453125,
"logits/rejected": -2.484375,
"logps/chosen": -390.0,
"logps/rejected": -452.0,
"loss": 0.9503,
"rewards/accuracies": 0.768750011920929,
"rewards/chosen": -1.0,
"rewards/margins": 0.87109375,
"rewards/rejected": -1.875,
"step": 350
},
{
"epoch": 0.7531380753138075,
"grad_norm": 50.20493217819224,
"learning_rate": 8.729103716819111e-08,
"logits/chosen": -2.625,
"logits/rejected": -2.46875,
"logps/chosen": -438.0,
"logps/rejected": -486.0,
"loss": 1.0227,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -1.09375,
"rewards/margins": 0.921875,
"rewards/rejected": -2.015625,
"step": 360
},
{
"epoch": 0.7740585774058577,
"grad_norm": 52.540692008100564,
"learning_rate": 7.387025063449081e-08,
"logits/chosen": -2.46875,
"logits/rejected": -2.4375,
"logps/chosen": -420.0,
"logps/rejected": -448.0,
"loss": 1.0167,
"rewards/accuracies": 0.71875,
"rewards/chosen": -1.2109375,
"rewards/margins": 0.6796875,
"rewards/rejected": -1.890625,
"step": 370
},
{
"epoch": 0.7949790794979079,
"grad_norm": 52.1938302087835,
"learning_rate": 6.138919252022435e-08,
"logits/chosen": -2.296875,
"logits/rejected": -2.34375,
"logps/chosen": -390.0,
"logps/rejected": -506.0,
"loss": 1.0047,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -1.296875,
"rewards/margins": 0.94921875,
"rewards/rejected": -2.25,
"step": 380
},
{
"epoch": 0.8158995815899581,
"grad_norm": 58.62340593152117,
"learning_rate": 4.991445467064689e-08,
"logits/chosen": -2.515625,
"logits/rejected": -2.46875,
"logps/chosen": -442.0,
"logps/rejected": -500.0,
"loss": 0.9798,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -1.046875,
"rewards/margins": 0.85546875,
"rewards/rejected": -1.8984375,
"step": 390
},
{
"epoch": 0.8368200836820083,
"grad_norm": 59.42878062470151,
"learning_rate": 3.9507259776993954e-08,
"logits/chosen": -2.515625,
"logits/rejected": -2.453125,
"logps/chosen": -404.0,
"logps/rejected": -490.0,
"loss": 1.012,
"rewards/accuracies": 0.75,
"rewards/chosen": -1.09375,
"rewards/margins": 0.89453125,
"rewards/rejected": -1.984375,
"step": 400
},
{
"epoch": 0.8368200836820083,
"eval_logits/chosen": -2.515625,
"eval_logits/rejected": -2.46875,
"eval_logps/chosen": -386.0,
"eval_logps/rejected": -484.0,
"eval_loss": 0.4995781183242798,
"eval_rewards/accuracies": 0.78125,
"eval_rewards/chosen": -0.91015625,
"eval_rewards/margins": 0.984375,
"eval_rewards/rejected": -1.890625,
"eval_runtime": 39.9442,
"eval_samples_per_second": 50.07,
"eval_steps_per_second": 0.801,
"step": 400
},
{
"epoch": 0.8577405857740585,
"grad_norm": 57.94077111648529,
"learning_rate": 3.022313472693447e-08,
"logits/chosen": -2.5625,
"logits/rejected": -2.53125,
"logps/chosen": -418.0,
"logps/rejected": -468.0,
"loss": 1.0227,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": -1.0,
"rewards/margins": 0.84375,
"rewards/rejected": -1.84375,
"step": 410
},
{
"epoch": 0.8786610878661087,
"grad_norm": 54.220495002656904,
"learning_rate": 2.2111614344599684e-08,
"logits/chosen": -2.421875,
"logits/rejected": -2.484375,
"logps/chosen": -442.0,
"logps/rejected": -502.0,
"loss": 0.9755,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -1.0234375,
"rewards/margins": 0.859375,
"rewards/rejected": -1.8828125,
"step": 420
},
{
"epoch": 0.899581589958159,
"grad_norm": 57.28244651782191,
"learning_rate": 1.521597710086439e-08,
"logits/chosen": -2.375,
"logits/rejected": -2.265625,
"logps/chosen": -418.0,
"logps/rejected": -480.0,
"loss": 0.9483,
"rewards/accuracies": 0.800000011920929,
"rewards/chosen": -1.0703125,
"rewards/margins": 0.91015625,
"rewards/rejected": -1.9765625,
"step": 430
},
{
"epoch": 0.9205020920502092,
"grad_norm": 63.185522298582946,
"learning_rate": 9.57301420397924e-09,
"logits/chosen": -2.484375,
"logits/rejected": -2.4375,
"logps/chosen": -408.0,
"logps/rejected": -460.0,
"loss": 0.9976,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": -0.9140625,
"rewards/margins": 0.796875,
"rewards/rejected": -1.7109375,
"step": 440
},
{
"epoch": 0.9414225941422594,
"grad_norm": 72.06238722374589,
"learning_rate": 5.212833302556258e-09,
"logits/chosen": -2.484375,
"logits/rejected": -2.515625,
"logps/chosen": -420.0,
"logps/rejected": -510.0,
"loss": 0.9918,
"rewards/accuracies": 0.6937500238418579,
"rewards/chosen": -1.0,
"rewards/margins": 0.8125,
"rewards/rejected": -1.8125,
"step": 450
},
{
"epoch": 0.9623430962343096,
"grad_norm": 63.02015092631578,
"learning_rate": 2.158697848236607e-09,
"logits/chosen": -2.484375,
"logits/rejected": -2.453125,
"logps/chosen": -396.0,
"logps/rejected": -440.0,
"loss": 0.9981,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": -0.95703125,
"rewards/margins": 0.82421875,
"rewards/rejected": -1.78125,
"step": 460
},
{
"epoch": 0.9832635983263598,
"grad_norm": 58.50751073465125,
"learning_rate": 4.269029751107489e-10,
"logits/chosen": -2.4375,
"logits/rejected": -2.4375,
"logps/chosen": -404.0,
"logps/rejected": -498.0,
"loss": 0.9687,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -0.93359375,
"rewards/margins": 0.8984375,
"rewards/rejected": -1.8359375,
"step": 470
},
{
"epoch": 1.0,
"step": 478,
"total_flos": 0.0,
"train_loss": 1.0759161147113625,
"train_runtime": 3593.5482,
"train_samples_per_second": 17.012,
"train_steps_per_second": 0.133
}
],
"logging_steps": 10,
"max_steps": 478,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
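
The state above follows the standard Hugging Face Trainer layout: per-step training rows in "log_history" carry a "loss" key plus the DPO reward statistics, eval rows carry the "eval_"-prefixed variants, and the final row holds aggregate run stats. As a minimal sketch (the file path and the choice of columns are illustrative, not part of the saved state), the log can be summarized with the standard library alone:

    # Minimal sketch: summarize the DPO training log recorded in trainer_state.json.
    # Assumes the file is in the current directory; adjust the path as needed.
    import json

    with open("trainer_state.json") as f:
        state = json.load(f)

    # Training rows have "loss"; eval rows have "eval_loss"; the final summary row has neither.
    train_rows = [r for r in state["log_history"] if "loss" in r]
    eval_rows = [r for r in state["log_history"] if "eval_loss" in r]

    print(f"{'step':>6} {'loss':>8} {'margin':>8} {'accuracy':>9}")
    for r in train_rows:
        print(f"{r['step']:>6} {r['loss']:>8.4f} "
              f"{r['rewards/margins']:>8.4f} {r['rewards/accuracies']:>9.3f}")

    for r in eval_rows:
        print(f"eval @ step {r['step']}: loss={r['eval_loss']:.4f}, "
              f"margin={r['eval_rewards/margins']:.3f}, "
              f"acc={r['eval_rewards/accuracies']:.3f}")

Run against this file, such a script would show the reward margin growing from ~0.0 at step 1 to ~0.9 by step 470 while the eval loss falls from 0.567 (step 100) to 0.500 (step 400), matching the values logged above.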