Qwen2-57B-A14B-SFT-Step-DPO / trainer_state.json
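A minimal sketch, assuming this file is saved locally as "trainer_state.json" (the filename and the printed fields are illustrative, not part of the original log), for summarizing the per-step DPO metrics recorded in log_history below:

import json

# Illustrative only: load the trainer state and print the DPO metrics
# logged at each optimizer step (loss, reward margin, reward accuracy).
with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    # Skip any summary entries that lack the per-step DPO fields.
    if "loss" not in entry or "rewards/margins" not in entry:
        continue
    print(
        f"step {entry['step']:>4}  "
        f"epoch {entry['epoch']:.3f}  "
        f"loss {entry['loss']:.4f}  "
        f"margin {entry['rewards/margins']:.3f}  "
        f"acc {entry['rewards/accuracies']:.2f}"
    )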
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.9493670886075947,
"eval_steps": 1,
"global_step": 156,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02531645569620253,
"grad_norm": 57.114834738699926,
"learning_rate": 3.125e-08,
"logits/chosen": 3.3286685943603516,
"logits/rejected": 3.4021615982055664,
"logps/chosen": -32.21625900268555,
"logps/rejected": -38.764957427978516,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.05063291139240506,
"grad_norm": 55.56377943215762,
"learning_rate": 6.25e-08,
"logits/chosen": 2.647796630859375,
"logits/rejected": 2.733036994934082,
"logps/chosen": -24.994104385375977,
"logps/rejected": -29.78559112548828,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 2
},
{
"epoch": 0.0759493670886076,
"grad_norm": 61.65658878594894,
"learning_rate": 9.375e-08,
"logits/chosen": 2.33656644821167,
"logits/rejected": 2.518265962600708,
"logps/chosen": -28.91141128540039,
"logps/rejected": -35.79951477050781,
"loss": 0.7163,
"rewards/accuracies": 0.4375,
"rewards/chosen": -0.046910468488931656,
"rewards/margins": -0.0570392943918705,
"rewards/rejected": 0.01012883335351944,
"step": 3
},
{
"epoch": 0.10126582278481013,
"grad_norm": 54.332498272912595,
"learning_rate": 1.25e-07,
"logits/chosen": 2.9658162593841553,
"logits/rejected": 3.0012216567993164,
"logps/chosen": -29.552040100097656,
"logps/rejected": -34.8863639831543,
"loss": 0.7065,
"rewards/accuracies": 0.4375,
"rewards/chosen": -0.047019001096487045,
"rewards/margins": -0.010130930691957474,
"rewards/rejected": -0.03688807412981987,
"step": 4
},
{
"epoch": 0.12658227848101267,
"grad_norm": 55.132631513210136,
"learning_rate": 1.5624999999999999e-07,
"logits/chosen": 3.340583086013794,
"logits/rejected": 3.325289487838745,
"logps/chosen": -29.905162811279297,
"logps/rejected": -38.20769500732422,
"loss": 0.7069,
"rewards/accuracies": 0.4375,
"rewards/chosen": -0.06609320640563965,
"rewards/margins": -0.07786056399345398,
"rewards/rejected": 0.01176736131310463,
"step": 5
},
{
"epoch": 0.1518987341772152,
"grad_norm": 57.59086219970169,
"learning_rate": 1.875e-07,
"logits/chosen": 2.2882869243621826,
"logits/rejected": 2.7739901542663574,
"logps/chosen": -23.262107849121094,
"logps/rejected": -38.5238151550293,
"loss": 0.7044,
"rewards/accuracies": 0.4375,
"rewards/chosen": -0.004146900027990341,
"rewards/margins": -0.010677304118871689,
"rewards/rejected": 0.006530404556542635,
"step": 6
},
{
"epoch": 0.17721518987341772,
"grad_norm": 51.18146497947048,
"learning_rate": 2.1875e-07,
"logits/chosen": 2.7440547943115234,
"logits/rejected": 2.5763754844665527,
"logps/chosen": -29.953731536865234,
"logps/rejected": -33.62974166870117,
"loss": 0.7102,
"rewards/accuracies": 0.375,
"rewards/chosen": -0.038678932934999466,
"rewards/margins": -0.042138125747442245,
"rewards/rejected": 0.0034591909497976303,
"step": 7
},
{
"epoch": 0.20253164556962025,
"grad_norm": 53.68918501282459,
"learning_rate": 2.5e-07,
"logits/chosen": 3.2939491271972656,
"logits/rejected": 3.003464698791504,
"logps/chosen": -28.833343505859375,
"logps/rejected": -23.717449188232422,
"loss": 0.6967,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.013088438659906387,
"rewards/margins": 0.03873240202665329,
"rewards/rejected": -0.025643955916166306,
"step": 8
},
{
"epoch": 0.22784810126582278,
"grad_norm": 56.10725202078908,
"learning_rate": 2.8125e-07,
"logits/chosen": 2.243440866470337,
"logits/rejected": 2.4725394248962402,
"logps/chosen": -22.95738983154297,
"logps/rejected": -27.134145736694336,
"loss": 0.684,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.04334143549203873,
"rewards/margins": 0.07354923337697983,
"rewards/rejected": -0.03020780347287655,
"step": 9
},
{
"epoch": 0.25316455696202533,
"grad_norm": 52.307052055168896,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": 3.2282490730285645,
"logits/rejected": 3.332897424697876,
"logps/chosen": -26.25811004638672,
"logps/rejected": -33.90528869628906,
"loss": 0.69,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.05821816250681877,
"rewards/margins": 0.11382907629013062,
"rewards/rejected": -0.05561092123389244,
"step": 10
},
{
"epoch": 0.27848101265822783,
"grad_norm": 51.178561062412925,
"learning_rate": 3.4375e-07,
"logits/chosen": 3.0024123191833496,
"logits/rejected": 2.8223249912261963,
"logps/chosen": -25.597423553466797,
"logps/rejected": -27.973861694335938,
"loss": 0.6937,
"rewards/accuracies": 0.375,
"rewards/chosen": -0.006550817750394344,
"rewards/margins": 0.020815890282392502,
"rewards/rejected": -0.02736670896410942,
"step": 11
},
{
"epoch": 0.3037974683544304,
"grad_norm": 55.45441942378304,
"learning_rate": 3.75e-07,
"logits/chosen": 2.8794424533843994,
"logits/rejected": 2.8852243423461914,
"logps/chosen": -24.754030227661133,
"logps/rejected": -37.79888916015625,
"loss": 0.6747,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.028544973582029343,
"rewards/margins": 0.1005675345659256,
"rewards/rejected": -0.12911249697208405,
"step": 12
},
{
"epoch": 0.3291139240506329,
"grad_norm": 53.09392363916735,
"learning_rate": 4.0625e-07,
"logits/chosen": 2.4571850299835205,
"logits/rejected": 2.484537363052368,
"logps/chosen": -25.80780601501465,
"logps/rejected": -29.53820037841797,
"loss": 0.6911,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.03155774995684624,
"rewards/margins": 0.07387223094701767,
"rewards/rejected": -0.04231448099017143,
"step": 13
},
{
"epoch": 0.35443037974683544,
"grad_norm": 52.83620354330049,
"learning_rate": 4.375e-07,
"logits/chosen": 2.509875535964966,
"logits/rejected": 2.7940757274627686,
"logps/chosen": -23.630701065063477,
"logps/rejected": -30.94167709350586,
"loss": 0.6538,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.0901968702673912,
"rewards/margins": 0.12045808881521225,
"rewards/rejected": -0.030261218547821045,
"step": 14
},
{
"epoch": 0.379746835443038,
"grad_norm": 54.43440442749398,
"learning_rate": 4.6874999999999996e-07,
"logits/chosen": 2.544405460357666,
"logits/rejected": 2.4906702041625977,
"logps/chosen": -31.145732879638672,
"logps/rejected": -35.83900451660156,
"loss": 0.6699,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.015418028458952904,
"rewards/margins": 0.1095210611820221,
"rewards/rejected": -0.09410304576158524,
"step": 15
},
{
"epoch": 0.4050632911392405,
"grad_norm": 56.228300664828,
"learning_rate": 5e-07,
"logits/chosen": 2.400423049926758,
"logits/rejected": 2.620924949645996,
"logps/chosen": -19.128610610961914,
"logps/rejected": -33.12566375732422,
"loss": 0.6456,
"rewards/accuracies": 0.5,
"rewards/chosen": -0.02211858704686165,
"rewards/margins": 0.07731227576732635,
"rewards/rejected": -0.0994308590888977,
"step": 16
},
{
"epoch": 0.43037974683544306,
"grad_norm": 55.89193538913802,
"learning_rate": 4.999370587356267e-07,
"logits/chosen": 2.4614853858947754,
"logits/rejected": 2.2287514209747314,
"logps/chosen": -30.090726852416992,
"logps/rejected": -36.996307373046875,
"loss": 0.6714,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.010743052698671818,
"rewards/margins": 0.19052858650684357,
"rewards/rejected": -0.17978551983833313,
"step": 17
},
{
"epoch": 0.45569620253164556,
"grad_norm": 47.67611457430014,
"learning_rate": 4.997482666353286e-07,
"logits/chosen": 3.0267255306243896,
"logits/rejected": 3.2108960151672363,
"logps/chosen": -24.398426055908203,
"logps/rejected": -36.53913879394531,
"loss": 0.6385,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.06351403892040253,
"rewards/margins": 0.35269594192504883,
"rewards/rejected": -0.2891818583011627,
"step": 18
},
{
"epoch": 0.4810126582278481,
"grad_norm": 52.87018041987955,
"learning_rate": 4.99433718761614e-07,
"logits/chosen": 1.927664041519165,
"logits/rejected": 2.412555694580078,
"logps/chosen": -23.279611587524414,
"logps/rejected": -35.23127746582031,
"loss": 0.6107,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.028891097754240036,
"rewards/margins": 0.2483980357646942,
"rewards/rejected": -0.21950694918632507,
"step": 19
},
{
"epoch": 0.5063291139240507,
"grad_norm": 48.602746524439674,
"learning_rate": 4.989935734988097e-07,
"logits/chosen": 2.6588048934936523,
"logits/rejected": 2.663198232650757,
"logps/chosen": -27.881851196289062,
"logps/rejected": -40.07163619995117,
"loss": 0.6026,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.008863937109708786,
"rewards/margins": 0.33766672015190125,
"rewards/rejected": -0.32880276441574097,
"step": 20
},
{
"epoch": 0.5316455696202531,
"grad_norm": 54.16428877770723,
"learning_rate": 4.984280524733107e-07,
"logits/chosen": 2.961024284362793,
"logits/rejected": 2.982067584991455,
"logps/chosen": -33.65000915527344,
"logps/rejected": -44.283145904541016,
"loss": 0.6255,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.0565052255988121,
"rewards/margins": 0.3312607407569885,
"rewards/rejected": -0.387766033411026,
"step": 21
},
{
"epoch": 0.5569620253164557,
"grad_norm": 45.46343620685162,
"learning_rate": 4.977374404419837e-07,
"logits/chosen": 2.731034278869629,
"logits/rejected": 2.8560142517089844,
"logps/chosen": -28.184062957763672,
"logps/rejected": -29.667505264282227,
"loss": 0.5798,
"rewards/accuracies": 0.6875,
"rewards/chosen": 0.03637414053082466,
"rewards/margins": 0.30975764989852905,
"rewards/rejected": -0.2733834683895111,
"step": 22
},
{
"epoch": 0.5822784810126582,
"grad_norm": 44.12365829762132,
"learning_rate": 4.969220851487844e-07,
"logits/chosen": 2.936208963394165,
"logits/rejected": 2.651503086090088,
"logps/chosen": -33.56488037109375,
"logps/rejected": -41.19189453125,
"loss": 0.5772,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.06098739802837372,
"rewards/margins": 0.4615795612335205,
"rewards/rejected": -0.4005921483039856,
"step": 23
},
{
"epoch": 0.6075949367088608,
"grad_norm": 45.017464152313536,
"learning_rate": 4.959823971496574e-07,
"logits/chosen": 2.6883764266967773,
"logits/rejected": 2.8039445877075195,
"logps/chosen": -25.476293563842773,
"logps/rejected": -27.924108505249023,
"loss": 0.5669,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.003826950676739216,
"rewards/margins": 0.374502956867218,
"rewards/rejected": -0.37067604064941406,
"step": 24
},
{
"epoch": 0.6329113924050633,
"grad_norm": 42.27141207917208,
"learning_rate": 4.949188496058089e-07,
"logits/chosen": 2.4697136878967285,
"logits/rejected": 2.4843673706054688,
"logps/chosen": -29.330278396606445,
"logps/rejected": -34.346717834472656,
"loss": 0.5643,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.015232307836413383,
"rewards/margins": 0.2725878059864044,
"rewards/rejected": -0.28782013058662415,
"step": 25
},
{
"epoch": 0.6582278481012658,
"grad_norm": 43.50860864037927,
"learning_rate": 4.937319780454559e-07,
"logits/chosen": 2.4729130268096924,
"logits/rejected": 2.728221893310547,
"logps/chosen": -15.063579559326172,
"logps/rejected": -29.431615829467773,
"loss": 0.5485,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.010220827534794807,
"rewards/margins": 0.6472443342208862,
"rewards/rejected": -0.6370234489440918,
"step": 26
},
{
"epoch": 0.6835443037974683,
"grad_norm": 44.10326676433438,
"learning_rate": 4.924223800941717e-07,
"logits/chosen": 2.506294012069702,
"logits/rejected": 2.611067295074463,
"logps/chosen": -27.957015991210938,
"logps/rejected": -34.36131286621094,
"loss": 0.5338,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.09378986805677414,
"rewards/margins": 0.7992368936538696,
"rewards/rejected": -0.7054470777511597,
"step": 27
},
{
"epoch": 0.7088607594936709,
"grad_norm": 42.53652604653026,
"learning_rate": 4.909907151739633e-07,
"logits/chosen": 2.370758295059204,
"logits/rejected": 2.3593924045562744,
"logps/chosen": -24.23151969909668,
"logps/rejected": -28.833358764648438,
"loss": 0.5266,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.015818338841199875,
"rewards/margins": 0.35423219203948975,
"rewards/rejected": -0.3384138345718384,
"step": 28
},
{
"epoch": 0.7341772151898734,
"grad_norm": 43.92462963258578,
"learning_rate": 4.894377041712326e-07,
"logits/chosen": 2.85575532913208,
"logits/rejected": 2.9239468574523926,
"logps/chosen": -24.336238861083984,
"logps/rejected": -32.19173049926758,
"loss": 0.5788,
"rewards/accuracies": 0.625,
"rewards/chosen": -0.1497105062007904,
"rewards/margins": 0.41176801919937134,
"rewards/rejected": -0.5614784955978394,
"step": 29
},
{
"epoch": 0.759493670886076,
"grad_norm": 45.52462540230979,
"learning_rate": 4.877641290737883e-07,
"logits/chosen": 2.577241897583008,
"logits/rejected": 2.6502695083618164,
"logps/chosen": -27.839168548583984,
"logps/rejected": -36.543941497802734,
"loss": 0.5261,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.1546507328748703,
"rewards/margins": 1.282554268836975,
"rewards/rejected": -1.4372049570083618,
"step": 30
},
{
"epoch": 0.7848101265822784,
"grad_norm": 40.84962297403327,
"learning_rate": 4.859708325770919e-07,
"logits/chosen": 2.821880340576172,
"logits/rejected": 2.7289583683013916,
"logps/chosen": -32.00937271118164,
"logps/rejected": -32.9297981262207,
"loss": 0.5013,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.05743463337421417,
"rewards/margins": 0.7840094566345215,
"rewards/rejected": -0.8414440155029297,
"step": 31
},
{
"epoch": 0.810126582278481,
"grad_norm": 42.08408504733762,
"learning_rate": 4.840587176599343e-07,
"logits/chosen": 2.684711456298828,
"logits/rejected": 3.0184218883514404,
"logps/chosen": -29.447864532470703,
"logps/rejected": -45.021995544433594,
"loss": 0.5126,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.03239244967699051,
"rewards/margins": 1.4539411067962646,
"rewards/rejected": -1.4215487241744995,
"step": 32
},
{
"epoch": 0.8354430379746836,
"grad_norm": 39.28461553560508,
"learning_rate": 4.820287471297597e-07,
"logits/chosen": 2.2539427280426025,
"logits/rejected": 2.2139289379119873,
"logps/chosen": -28.69852066040039,
"logps/rejected": -31.549945831298828,
"loss": 0.5182,
"rewards/accuracies": 0.875,
"rewards/chosen": -0.13107189536094666,
"rewards/margins": 1.036413311958313,
"rewards/rejected": -1.167485237121582,
"step": 33
},
{
"epoch": 0.8607594936708861,
"grad_norm": 40.986833656434555,
"learning_rate": 4.798819431378626e-07,
"logits/chosen": 2.653698444366455,
"logits/rejected": 2.910414218902588,
"logps/chosen": -21.828556060791016,
"logps/rejected": -38.98440170288086,
"loss": 0.4764,
"rewards/accuracies": 0.75,
"rewards/chosen": 0.08699348568916321,
"rewards/margins": 0.9170618057250977,
"rewards/rejected": -0.8300682306289673,
"step": 34
},
{
"epoch": 0.8860759493670886,
"grad_norm": 44.21046799920892,
"learning_rate": 4.776193866647039e-07,
"logits/chosen": 2.2878611087799072,
"logits/rejected": 2.6529476642608643,
"logps/chosen": -32.880462646484375,
"logps/rejected": -48.58502197265625,
"loss": 0.5087,
"rewards/accuracies": 0.8125,
"rewards/chosen": -0.1624097228050232,
"rewards/margins": 0.6843761205673218,
"rewards/rejected": -0.8467859029769897,
"step": 35
},
{
"epoch": 0.9113924050632911,
"grad_norm": 37.027834114234714,
"learning_rate": 4.752422169756047e-07,
"logits/chosen": 1.9958323240280151,
"logits/rejected": 2.1072514057159424,
"logps/chosen": -25.151729583740234,
"logps/rejected": -34.56462478637695,
"loss": 0.448,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.035374294966459274,
"rewards/margins": 0.7525378465652466,
"rewards/rejected": -0.7171635627746582,
"step": 36
},
{
"epoch": 0.9367088607594937,
"grad_norm": 42.723620077832024,
"learning_rate": 4.7275163104709194e-07,
"logits/chosen": 2.223971128463745,
"logits/rejected": 2.2867026329040527,
"logps/chosen": -28.47149658203125,
"logps/rejected": -41.30598831176758,
"loss": 0.4752,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.04645688831806183,
"rewards/margins": 0.7158762812614441,
"rewards/rejected": -0.7623331546783447,
"step": 37
},
{
"epoch": 0.9620253164556962,
"grad_norm": 37.84394079738309,
"learning_rate": 4.7014888296418447e-07,
"logits/chosen": 2.7294678688049316,
"logits/rejected": 2.7926061153411865,
"logps/chosen": -27.44596290588379,
"logps/rejected": -37.265743255615234,
"loss": 0.4448,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.049863241612911224,
"rewards/margins": 1.077908992767334,
"rewards/rejected": -1.0280457735061646,
"step": 38
},
{
"epoch": 0.9873417721518988,
"grad_norm": 37.542515394049275,
"learning_rate": 4.6743528328892384e-07,
"logits/chosen": 2.9349310398101807,
"logits/rejected": 2.7823617458343506,
"logps/chosen": -28.974937438964844,
"logps/rejected": -31.127620697021484,
"loss": 0.4247,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.06356163322925568,
"rewards/margins": 0.8182165622711182,
"rewards/rejected": -0.7546550035476685,
"step": 39
},
{
"epoch": 1.0126582278481013,
"grad_norm": 34.306915155273884,
"learning_rate": 4.646121984004665e-07,
"logits/chosen": 2.93210768699646,
"logits/rejected": 2.952573299407959,
"logps/chosen": -22.667604446411133,
"logps/rejected": -33.4359130859375,
"loss": 0.4023,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.15648899972438812,
"rewards/margins": 1.7330085039138794,
"rewards/rejected": -1.5765196084976196,
"step": 40
},
{
"epoch": 1.0379746835443038,
"grad_norm": 34.583417240102314,
"learning_rate": 4.6168104980707103e-07,
"logits/chosen": 1.8353512287139893,
"logits/rejected": 1.8640844821929932,
"logps/chosen": -30.39456558227539,
"logps/rejected": -39.79166793823242,
"loss": 0.3791,
"rewards/accuracies": 0.9375,
"rewards/chosen": -0.04578995332121849,
"rewards/margins": 1.216768741607666,
"rewards/rejected": -1.2625586986541748,
"step": 41
},
{
"epoch": 1.0632911392405062,
"grad_norm": 34.082479241604844,
"learning_rate": 4.5864331343032565e-07,
"logits/chosen": 1.8534090518951416,
"logits/rejected": 2.3674142360687256,
"logps/chosen": -24.82038116455078,
"logps/rejected": -36.29607009887695,
"loss": 0.3826,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.017678476870059967,
"rewards/margins": 1.1888235807418823,
"rewards/rejected": -1.1711452007293701,
"step": 42
},
{
"epoch": 1.0886075949367089,
"grad_norm": 34.56856221568917,
"learning_rate": 4.555005188619775e-07,
"logits/chosen": 2.3347442150115967,
"logits/rejected": 2.896552562713623,
"logps/chosen": -27.94430923461914,
"logps/rejected": -56.70208740234375,
"loss": 0.3719,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.003501923754811287,
"rewards/margins": 1.5741243362426758,
"rewards/rejected": -1.5706223249435425,
"step": 43
},
{
"epoch": 1.1139240506329113,
"grad_norm": 31.370937274347394,
"learning_rate": 4.5225424859373684e-07,
"logits/chosen": 2.6383707523345947,
"logits/rejected": 2.5485527515411377,
"logps/chosen": -19.298486709594727,
"logps/rejected": -25.37928009033203,
"loss": 0.3744,
"rewards/accuracies": 0.8125,
"rewards/chosen": 0.22318463027477264,
"rewards/margins": 0.6971991658210754,
"rewards/rejected": -0.474014550447464,
"step": 44
},
{
"epoch": 1.139240506329114,
"grad_norm": 34.10062660077868,
"learning_rate": 4.489061372204452e-07,
"logits/chosen": 2.376051664352417,
"logits/rejected": 2.3887717723846436,
"logps/chosen": -27.828815460205078,
"logps/rejected": -35.97361755371094,
"loss": 0.3561,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.13765862584114075,
"rewards/margins": 0.8190426826477051,
"rewards/rejected": -0.6813840866088867,
"step": 45
},
{
"epoch": 1.1645569620253164,
"grad_norm": 34.550138211560416,
"learning_rate": 4.4545787061700746e-07,
"logits/chosen": 1.5758471488952637,
"logits/rejected": 1.6474454402923584,
"logps/chosen": -26.35508155822754,
"logps/rejected": -33.66876983642578,
"loss": 0.3738,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.11953898519277573,
"rewards/margins": 1.4261627197265625,
"rewards/rejected": -1.3066238164901733,
"step": 46
},
{
"epoch": 1.189873417721519,
"grad_norm": 32.97226834077475,
"learning_rate": 4.4191118508950277e-07,
"logits/chosen": 2.352506160736084,
"logits/rejected": 2.5829250812530518,
"logps/chosen": -30.464948654174805,
"logps/rejected": -48.602386474609375,
"loss": 0.3773,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.28972989320755005,
"rewards/margins": 1.533113956451416,
"rewards/rejected": -1.2433841228485107,
"step": 47
},
{
"epoch": 1.2151898734177216,
"grad_norm": 31.173818641433147,
"learning_rate": 4.3826786650090273e-07,
"logits/chosen": 2.6230416297912598,
"logits/rejected": 2.9404265880584717,
"logps/chosen": -26.442947387695312,
"logps/rejected": -40.41877746582031,
"loss": 0.3218,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.13079282641410828,
"rewards/margins": 1.8220040798187256,
"rewards/rejected": -1.6912113428115845,
"step": 48
},
{
"epoch": 1.240506329113924,
"grad_norm": 31.65330353079338,
"learning_rate": 4.345297493718352e-07,
"logits/chosen": 2.0961148738861084,
"logits/rejected": 2.31225848197937,
"logps/chosen": -28.76830291748047,
"logps/rejected": -41.471527099609375,
"loss": 0.324,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.17320382595062256,
"rewards/margins": 1.8003501892089844,
"rewards/rejected": -1.6271462440490723,
"step": 49
},
{
"epoch": 1.2658227848101267,
"grad_norm": 32.0517210362914,
"learning_rate": 4.3069871595684787e-07,
"logits/chosen": 2.477304458618164,
"logits/rejected": 2.607898473739624,
"logps/chosen": -25.64235496520996,
"logps/rejected": -30.789039611816406,
"loss": 0.3365,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.04965633898973465,
"rewards/margins": 1.5173263549804688,
"rewards/rejected": -1.4676700830459595,
"step": 50
},
{
"epoch": 1.2911392405063291,
"grad_norm": 34.75297133689012,
"learning_rate": 4.2677669529663686e-07,
"logits/chosen": 2.4322314262390137,
"logits/rejected": 2.3749780654907227,
"logps/chosen": -28.2196102142334,
"logps/rejected": -36.66654968261719,
"loss": 0.376,
"rewards/accuracies": 0.875,
"rewards/chosen": -0.014162863604724407,
"rewards/margins": 1.2780933380126953,
"rewards/rejected": -1.292256236076355,
"step": 51
},
{
"epoch": 1.3164556962025316,
"grad_norm": 30.679462903169394,
"learning_rate": 4.227656622467162e-07,
"logits/chosen": 2.16550350189209,
"logits/rejected": 2.483671188354492,
"logps/chosen": -27.87743377685547,
"logps/rejected": -35.79537582397461,
"loss": 0.328,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.016814690083265305,
"rewards/margins": 0.7990709543228149,
"rewards/rejected": -0.8158857226371765,
"step": 52
},
{
"epoch": 1.3417721518987342,
"grad_norm": 33.362419000257866,
"learning_rate": 4.186676364830186e-07,
"logits/chosen": 1.6940243244171143,
"logits/rejected": 1.8324474096298218,
"logps/chosen": -22.575485229492188,
"logps/rejected": -37.813812255859375,
"loss": 0.3899,
"rewards/accuracies": 0.9375,
"rewards/chosen": -0.18489199876785278,
"rewards/margins": 1.3045916557312012,
"rewards/rejected": -1.4894835948944092,
"step": 53
},
{
"epoch": 1.3670886075949367,
"grad_norm": 34.27040807040801,
"learning_rate": 4.1448468148492814e-07,
"logits/chosen": 2.4027748107910156,
"logits/rejected": 2.6370065212249756,
"logps/chosen": -25.746681213378906,
"logps/rejected": -39.262149810791016,
"loss": 0.3439,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.14556269347667694,
"rewards/margins": 1.5188615322113037,
"rewards/rejected": -1.664424180984497,
"step": 54
},
{
"epoch": 1.3924050632911391,
"grad_norm": 30.395014985650786,
"learning_rate": 4.10218903496256e-07,
"logits/chosen": 1.9827089309692383,
"logits/rejected": 2.0387563705444336,
"logps/chosen": -23.929346084594727,
"logps/rejected": -31.56798553466797,
"loss": 0.3347,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.012682847678661346,
"rewards/margins": 1.1076587438583374,
"rewards/rejected": -1.0949759483337402,
"step": 55
},
{
"epoch": 1.4177215189873418,
"grad_norm": 30.07232120551146,
"learning_rate": 4.058724504646834e-07,
"logits/chosen": 2.144460916519165,
"logits/rejected": 2.3883056640625,
"logps/chosen": -29.003910064697266,
"logps/rejected": -48.30259704589844,
"loss": 0.3069,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.12483976036310196,
"rewards/margins": 2.0643203258514404,
"rewards/rejected": -1.9394805431365967,
"step": 56
},
{
"epoch": 1.4430379746835442,
"grad_norm": 29.37804816237709,
"learning_rate": 4.0144751096020497e-07,
"logits/chosen": 2.4025626182556152,
"logits/rejected": 2.6997711658477783,
"logps/chosen": -20.084449768066406,
"logps/rejected": -41.12477493286133,
"loss": 0.2932,
"rewards/accuracies": 0.8125,
"rewards/chosen": -0.05090276896953583,
"rewards/margins": 1.5857845544815063,
"rewards/rejected": -1.6366872787475586,
"step": 57
},
{
"epoch": 1.4683544303797469,
"grad_norm": 28.707949914480356,
"learning_rate": 3.9694631307311825e-07,
"logits/chosen": 1.7879891395568848,
"logits/rejected": 1.9085421562194824,
"logps/chosen": -24.52581214904785,
"logps/rejected": -42.650333404541016,
"loss": 0.2848,
"rewards/accuracies": 0.875,
"rewards/chosen": -0.09958541393280029,
"rewards/margins": 1.8922325372695923,
"rewards/rejected": -1.991817831993103,
"step": 58
},
{
"epoch": 1.4936708860759493,
"grad_norm": 30.2249376141746,
"learning_rate": 3.92371123292113e-07,
"logits/chosen": 2.3484342098236084,
"logits/rejected": 2.6477420330047607,
"logps/chosen": -25.54952621459961,
"logps/rejected": -42.36656951904297,
"loss": 0.2884,
"rewards/accuracies": 0.9375,
"rewards/chosen": -0.025862978771328926,
"rewards/margins": 2.4160983562469482,
"rewards/rejected": -2.4419612884521484,
"step": 59
},
{
"epoch": 1.518987341772152,
"grad_norm": 29.710974044035055,
"learning_rate": 3.877242453630256e-07,
"logits/chosen": 2.434805154800415,
"logits/rejected": 2.456228256225586,
"logps/chosen": -20.521100997924805,
"logps/rejected": -28.69353485107422,
"loss": 0.3295,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.3444966673851013,
"rewards/margins": 2.016552448272705,
"rewards/rejected": -1.672055959701538,
"step": 60
},
{
"epoch": 1.5443037974683544,
"grad_norm": 33.402955887536194,
"learning_rate": 3.8300801912883414e-07,
"logits/chosen": 2.511925220489502,
"logits/rejected": 2.43487286567688,
"logps/chosen": -30.91568946838379,
"logps/rejected": -38.48870086669922,
"loss": 0.297,
"rewards/accuracies": 0.9375,
"rewards/chosen": -0.009598510339856148,
"rewards/margins": 1.2743103504180908,
"rewards/rejected": -1.2839089632034302,
"step": 61
},
{
"epoch": 1.5696202531645569,
"grad_norm": 34.221583251712744,
"learning_rate": 3.7822481935147655e-07,
"logits/chosen": 2.2030210494995117,
"logits/rejected": 2.4195468425750732,
"logps/chosen": -26.423166275024414,
"logps/rejected": -36.79461669921875,
"loss": 0.3281,
"rewards/accuracies": 0.8125,
"rewards/chosen": -0.2358216643333435,
"rewards/margins": 1.8056204319000244,
"rewards/rejected": -2.0414421558380127,
"step": 62
},
{
"epoch": 1.5949367088607596,
"grad_norm": 27.39193509195394,
"learning_rate": 3.7337705451608667e-07,
"logits/chosen": 2.442267894744873,
"logits/rejected": 2.6541049480438232,
"logps/chosen": -24.891948699951172,
"logps/rejected": -40.318904876708984,
"loss": 0.2564,
"rewards/accuracies": 0.9375,
"rewards/chosen": -0.08293318003416061,
"rewards/margins": 1.7536163330078125,
"rewards/rejected": -1.8365494012832642,
"step": 63
},
{
"epoch": 1.620253164556962,
"grad_norm": 32.95811212676831,
"learning_rate": 3.6846716561824967e-07,
"logits/chosen": 2.404311180114746,
"logits/rejected": 2.50986385345459,
"logps/chosen": -24.17348861694336,
"logps/rejected": -35.517120361328125,
"loss": 0.3643,
"rewards/accuracies": 0.9375,
"rewards/chosen": -0.00808901246637106,
"rewards/margins": 1.9171310663223267,
"rewards/rejected": -1.925220251083374,
"step": 64
},
{
"epoch": 1.6455696202531644,
"grad_norm": 31.89102457068134,
"learning_rate": 3.634976249348867e-07,
"logits/chosen": 1.8697429895401,
"logits/rejected": 1.9369537830352783,
"logps/chosen": -26.27237319946289,
"logps/rejected": -43.584659576416016,
"loss": 0.3047,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.018543783575296402,
"rewards/margins": 1.540452480316162,
"rewards/rejected": -1.5219086408615112,
"step": 65
},
{
"epoch": 1.6708860759493671,
"grad_norm": 29.55969704854734,
"learning_rate": 3.584709347793895e-07,
"logits/chosen": 2.12164044380188,
"logits/rejected": 2.12618350982666,
"logps/chosen": -19.538429260253906,
"logps/rejected": -34.374759674072266,
"loss": 0.3159,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.1228712797164917,
"rewards/margins": 1.97737455368042,
"rewards/rejected": -1.8545031547546387,
"step": 66
},
{
"epoch": 1.6962025316455698,
"grad_norm": 29.63374295371732,
"learning_rate": 3.5338962624163016e-07,
"logits/chosen": 2.3299381732940674,
"logits/rejected": 2.5256855487823486,
"logps/chosen": -24.86815643310547,
"logps/rejected": -32.63747024536133,
"loss": 0.304,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.14013400673866272,
"rewards/margins": 1.9178043603897095,
"rewards/rejected": -2.05793833732605,
"step": 67
},
{
"epoch": 1.721518987341772,
"grad_norm": 29.816270030062956,
"learning_rate": 3.482562579134809e-07,
"logits/chosen": 2.947877883911133,
"logits/rejected": 2.919872760772705,
"logps/chosen": -24.169422149658203,
"logps/rejected": -35.171504974365234,
"loss": 0.2947,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.18604925274848938,
"rewards/margins": 2.742734432220459,
"rewards/rejected": -2.556685209274292,
"step": 68
},
{
"epoch": 1.7468354430379747,
"grad_norm": 28.204359911989563,
"learning_rate": 3.430734146004863e-07,
"logits/chosen": 2.15493106842041,
"logits/rejected": 2.3170015811920166,
"logps/chosen": -22.2800350189209,
"logps/rejected": -35.93194580078125,
"loss": 0.2844,
"rewards/accuracies": 0.875,
"rewards/chosen": -0.17889335751533508,
"rewards/margins": 2.3182311058044434,
"rewards/rejected": -2.497124433517456,
"step": 69
},
{
"epoch": 1.7721518987341773,
"grad_norm": 26.787627596958465,
"learning_rate": 3.378437060203357e-07,
"logits/chosen": 2.5400497913360596,
"logits/rejected": 2.5072226524353027,
"logps/chosen": -21.618453979492188,
"logps/rejected": -33.122589111328125,
"loss": 0.2766,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.05331309512257576,
"rewards/margins": 2.7737677097320557,
"rewards/rejected": -2.720454692840576,
"step": 70
},
{
"epoch": 1.7974683544303798,
"grad_norm": 31.205788121930055,
"learning_rate": 3.325697654887918e-07,
"logits/chosen": 1.4359736442565918,
"logits/rejected": 1.0823308229446411,
"logps/chosen": -26.589662551879883,
"logps/rejected": -38.18647766113281,
"loss": 0.2822,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.2071526050567627,
"rewards/margins": 2.4934191703796387,
"rewards/rejected": -2.286266565322876,
"step": 71
},
{
"epoch": 1.8227848101265822,
"grad_norm": 29.788415172955876,
"learning_rate": 3.272542485937368e-07,
"logits/chosen": 2.800675868988037,
"logits/rejected": 2.9707438945770264,
"logps/chosen": -25.928173065185547,
"logps/rejected": -38.25574493408203,
"loss": 0.2955,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.0182593185454607,
"rewards/margins": 2.1046228408813477,
"rewards/rejected": -2.0863637924194336,
"step": 72
},
{
"epoch": 1.8481012658227849,
"grad_norm": 31.197362620805283,
"learning_rate": 3.218998318580043e-07,
"logits/chosen": 2.2215137481689453,
"logits/rejected": 2.1676788330078125,
"logps/chosen": -23.131969451904297,
"logps/rejected": -34.484920501708984,
"loss": 0.276,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.19679217040538788,
"rewards/margins": 2.1039047241210938,
"rewards/rejected": -1.9071124792099,
"step": 73
},
{
"epoch": 1.8734177215189873,
"grad_norm": 27.14206740038897,
"learning_rate": 3.1650921139166874e-07,
"logits/chosen": 2.0885462760925293,
"logits/rejected": 2.1695892810821533,
"logps/chosen": -24.332992553710938,
"logps/rejected": -39.35213088989258,
"loss": 0.2598,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.13339604437351227,
"rewards/margins": 2.6932549476623535,
"rewards/rejected": -2.559858798980713,
"step": 74
},
{
"epoch": 1.8987341772151898,
"grad_norm": 31.93987675909966,
"learning_rate": 3.110851015344735e-07,
"logits/chosen": 2.1219115257263184,
"logits/rejected": 2.1965231895446777,
"logps/chosen": -25.27581024169922,
"logps/rejected": -34.003536224365234,
"loss": 0.3142,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.1961415410041809,
"rewards/margins": 2.4378857612609863,
"rewards/rejected": -2.24174427986145,
"step": 75
},
{
"epoch": 1.9240506329113924,
"grad_norm": 29.021478328579693,
"learning_rate": 3.056302334890786e-07,
"logits/chosen": 1.8746654987335205,
"logits/rejected": 2.389371871948242,
"logps/chosen": -22.446157455444336,
"logps/rejected": -39.833770751953125,
"loss": 0.2671,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.18668973445892334,
"rewards/margins": 3.256779909133911,
"rewards/rejected": -3.0700900554656982,
"step": 76
},
{
"epoch": 1.9493670886075949,
"grad_norm": 31.861473137885586,
"learning_rate": 3.001473539458182e-07,
"logits/chosen": 2.0852279663085938,
"logits/rejected": 2.2871861457824707,
"logps/chosen": -27.48174285888672,
"logps/rejected": -38.38823699951172,
"loss": 0.2623,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.12376511096954346,
"rewards/margins": 2.879195213317871,
"rewards/rejected": -2.755429983139038,
"step": 77
},
{
"epoch": 1.9746835443037973,
"grad_norm": 30.580581160948984,
"learning_rate": 2.9463922369965915e-07,
"logits/chosen": 2.183192253112793,
"logits/rejected": 1.8703649044036865,
"logps/chosen": -25.529539108276367,
"logps/rejected": -38.35371780395508,
"loss": 0.2812,
"rewards/accuracies": 0.875,
"rewards/chosen": -0.09511762112379074,
"rewards/margins": 1.8313283920288086,
"rewards/rejected": -1.9264459609985352,
"step": 78
},
{
"epoch": 2.0,
"grad_norm": 28.28932497860918,
"learning_rate": 2.8910861626005773e-07,
"logits/chosen": 2.1175851821899414,
"logits/rejected": 2.7737534046173096,
"logps/chosen": -22.948108673095703,
"logps/rejected": -54.805484771728516,
"loss": 0.2737,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.07266075909137726,
"rewards/margins": 2.9845314025878906,
"rewards/rejected": -3.057192087173462,
"step": 79
},
{
"epoch": 2.0253164556962027,
"grad_norm": 22.84435575128268,
"learning_rate": 2.8355831645441387e-07,
"logits/chosen": 2.129598617553711,
"logits/rejected": 2.214559316635132,
"logps/chosen": -33.85695266723633,
"logps/rejected": -40.15086364746094,
"loss": 0.2116,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.04292801395058632,
"rewards/margins": 2.009500741958618,
"rewards/rejected": -2.052428722381592,
"step": 80
},
{
"epoch": 2.050632911392405,
"grad_norm": 22.68444850198704,
"learning_rate": 2.7799111902582693e-07,
"logits/chosen": 2.512767791748047,
"logits/rejected": 2.5582704544067383,
"logps/chosen": -25.28483009338379,
"logps/rejected": -36.232017517089844,
"loss": 0.2011,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.14101354777812958,
"rewards/margins": 2.3776185512542725,
"rewards/rejected": -2.236604690551758,
"step": 81
},
{
"epoch": 2.0759493670886076,
"grad_norm": 23.440024916384186,
"learning_rate": 2.7240982722585837e-07,
"logits/chosen": 1.941485047340393,
"logits/rejected": 2.076082944869995,
"logps/chosen": -25.91452407836914,
"logps/rejected": -34.40084457397461,
"loss": 0.2306,
"rewards/accuracies": 0.9375,
"rewards/chosen": -0.1705533266067505,
"rewards/margins": 1.7886393070220947,
"rewards/rejected": -1.9591926336288452,
"step": 82
},
{
"epoch": 2.1012658227848102,
"grad_norm": 24.39463073368113,
"learning_rate": 2.6681725140300995e-07,
"logits/chosen": 1.5756129026412964,
"logits/rejected": 1.9195802211761475,
"logps/chosen": -21.2039737701416,
"logps/rejected": -43.85890579223633,
"loss": 0.2048,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.012569274753332138,
"rewards/margins": 3.1985719203948975,
"rewards/rejected": -3.2111411094665527,
"step": 83
},
{
"epoch": 2.1265822784810124,
"grad_norm": 21.14214956646525,
"learning_rate": 2.6121620758762875e-07,
"logits/chosen": 2.5736641883850098,
"logits/rejected": 2.7494866847991943,
"logps/chosen": -27.16839027404785,
"logps/rejected": -37.205440521240234,
"loss": 0.1933,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.010486233979463577,
"rewards/margins": 2.571336269378662,
"rewards/rejected": -2.560849905014038,
"step": 84
},
{
"epoch": 2.151898734177215,
"grad_norm": 21.59512651806526,
"learning_rate": 2.5560951607395126e-07,
"logits/chosen": 2.4533567428588867,
"logits/rejected": 2.3579037189483643,
"logps/chosen": -28.007740020751953,
"logps/rejected": -36.33196258544922,
"loss": 0.192,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.04641330614686012,
"rewards/margins": 1.9481606483459473,
"rewards/rejected": -1.901747465133667,
"step": 85
},
{
"epoch": 2.1772151898734178,
"grad_norm": 24.879344750048478,
"learning_rate": 2.5e-07,
"logits/chosen": 2.1627023220062256,
"logits/rejected": 2.059532403945923,
"logps/chosen": -32.905269622802734,
"logps/rejected": -31.235652923583984,
"loss": 0.2379,
"rewards/accuracies": 0.9375,
"rewards/chosen": -0.02136234939098358,
"rewards/margins": 1.5867081880569458,
"rewards/rejected": -1.6080706119537354,
"step": 86
},
{
"epoch": 2.2025316455696204,
"grad_norm": 22.69000503277292,
"learning_rate": 2.4439048392604877e-07,
"logits/chosen": 2.3072502613067627,
"logits/rejected": 2.6580121517181396,
"logps/chosen": -23.412647247314453,
"logps/rejected": -42.0001106262207,
"loss": 0.2005,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.16801214218139648,
"rewards/margins": 2.332923412322998,
"rewards/rejected": -2.5009355545043945,
"step": 87
},
{
"epoch": 2.2278481012658227,
"grad_norm": 24.693555115896494,
"learning_rate": 2.3878379241237134e-07,
"logits/chosen": 2.4124932289123535,
"logits/rejected": 2.2819831371307373,
"logps/chosen": -25.710378646850586,
"logps/rejected": -32.972537994384766,
"loss": 0.2211,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.0028405077755451202,
"rewards/margins": 2.1923816204071045,
"rewards/rejected": -2.1895413398742676,
"step": 88
},
{
"epoch": 2.2531645569620253,
"grad_norm": 22.755579101890042,
"learning_rate": 2.3318274859699008e-07,
"logits/chosen": 2.006254196166992,
"logits/rejected": 2.4240238666534424,
"logps/chosen": -26.35239028930664,
"logps/rejected": -42.78788757324219,
"loss": 0.2437,
"rewards/accuracies": 0.875,
"rewards/chosen": -0.381151020526886,
"rewards/margins": 2.395555019378662,
"rewards/rejected": -2.7767062187194824,
"step": 89
},
{
"epoch": 2.278481012658228,
"grad_norm": 23.73790882415418,
"learning_rate": 2.2759017277414164e-07,
"logits/chosen": 2.482501745223999,
"logits/rejected": 2.456974744796753,
"logps/chosen": -32.156795501708984,
"logps/rejected": -35.610713958740234,
"loss": 0.2328,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.08202718943357468,
"rewards/margins": 1.4416414499282837,
"rewards/rejected": -1.523668646812439,
"step": 90
},
{
"epoch": 2.3037974683544302,
"grad_norm": 22.47212835812806,
"learning_rate": 2.2200888097417302e-07,
"logits/chosen": 2.5853986740112305,
"logits/rejected": 2.6601390838623047,
"logps/chosen": -33.39643859863281,
"logps/rejected": -43.778709411621094,
"loss": 0.1828,
"rewards/accuracies": 0.9375,
"rewards/chosen": -0.010579407215118408,
"rewards/margins": 2.473262310028076,
"rewards/rejected": -2.483841896057129,
"step": 91
},
{
"epoch": 2.329113924050633,
"grad_norm": 22.509363583762184,
"learning_rate": 2.164416835455862e-07,
"logits/chosen": 2.3972582817077637,
"logits/rejected": 2.534813642501831,
"logps/chosen": -22.962604522705078,
"logps/rejected": -37.55415344238281,
"loss": 0.2213,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.0858815610408783,
"rewards/margins": 2.756964683532715,
"rewards/rejected": -2.842846155166626,
"step": 92
},
{
"epoch": 2.3544303797468356,
"grad_norm": 22.253217260948635,
"learning_rate": 2.1089138373994222e-07,
"logits/chosen": 2.153351306915283,
"logits/rejected": 2.3578603267669678,
"logps/chosen": -22.811275482177734,
"logps/rejected": -39.044639587402344,
"loss": 0.1953,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.2400507628917694,
"rewards/margins": 2.045787811279297,
"rewards/rejected": -2.2858386039733887,
"step": 93
},
{
"epoch": 2.379746835443038,
"grad_norm": 23.13742394622883,
"learning_rate": 2.0536077630034085e-07,
"logits/chosen": 1.7368831634521484,
"logits/rejected": 1.9436362981796265,
"logps/chosen": -26.176815032958984,
"logps/rejected": -40.83625411987305,
"loss": 0.2061,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.39153048396110535,
"rewards/margins": 2.8905537128448486,
"rewards/rejected": -2.499023199081421,
"step": 94
},
{
"epoch": 2.4050632911392404,
"grad_norm": 21.94757471208971,
"learning_rate": 1.998526460541818e-07,
"logits/chosen": 2.3502449989318848,
"logits/rejected": 2.3544671535491943,
"logps/chosen": -26.833280563354492,
"logps/rejected": -37.404605865478516,
"loss": 0.1962,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.0008228495717048645,
"rewards/margins": 2.2684872150421143,
"rewards/rejected": -2.2676644325256348,
"step": 95
},
{
"epoch": 2.430379746835443,
"grad_norm": 20.124080148253146,
"learning_rate": 1.9436976651092142e-07,
"logits/chosen": 1.1564311981201172,
"logits/rejected": 1.8700743913650513,
"logps/chosen": -25.664215087890625,
"logps/rejected": -42.25444030761719,
"loss": 0.1803,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.013121239840984344,
"rewards/margins": 2.5192627906799316,
"rewards/rejected": -2.5061416625976562,
"step": 96
},
{
"epoch": 2.4556962025316453,
"grad_norm": 22.336736253170702,
"learning_rate": 1.8891489846552644e-07,
"logits/chosen": 2.3158211708068848,
"logits/rejected": 2.718118667602539,
"logps/chosen": -23.182619094848633,
"logps/rejected": -41.77816390991211,
"loss": 0.2237,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.010374765843153,
"rewards/margins": 2.8450756072998047,
"rewards/rejected": -2.8347012996673584,
"step": 97
},
{
"epoch": 2.481012658227848,
"grad_norm": 21.95104798778307,
"learning_rate": 1.8349078860833124e-07,
"logits/chosen": 2.2794296741485596,
"logits/rejected": 2.176607847213745,
"logps/chosen": -33.21621322631836,
"logps/rejected": -38.940025329589844,
"loss": 0.1762,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.013390496373176575,
"rewards/margins": 2.8369154930114746,
"rewards/rejected": -2.8503060340881348,
"step": 98
},
{
"epoch": 2.5063291139240507,
"grad_norm": 22.129805040414322,
"learning_rate": 1.781001681419957e-07,
"logits/chosen": 1.8799982070922852,
"logits/rejected": 2.1679162979125977,
"logps/chosen": -20.39841079711914,
"logps/rejected": -40.38523864746094,
"loss": 0.2032,
"rewards/accuracies": 0.875,
"rewards/chosen": 0.04029037803411484,
"rewards/margins": 2.2781383991241455,
"rewards/rejected": -2.2378478050231934,
"step": 99
},
{
"epoch": 2.5316455696202533,
"grad_norm": 18.828604860640993,
"learning_rate": 1.7274575140626315e-07,
"logits/chosen": 2.4270401000976562,
"logits/rejected": 2.768435478210449,
"logps/chosen": -28.433048248291016,
"logps/rejected": -47.266197204589844,
"loss": 0.1502,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.2632801830768585,
"rewards/margins": 2.7442736625671387,
"rewards/rejected": -3.007554054260254,
"step": 100
},
{
"epoch": 2.5569620253164556,
"grad_norm": 20.880074717956166,
"learning_rate": 1.674302345112083e-07,
"logits/chosen": 1.9880956411361694,
"logits/rejected": 2.2008445262908936,
"logps/chosen": -24.262271881103516,
"logps/rejected": -45.60854721069336,
"loss": 0.2089,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.04222775995731354,
"rewards/margins": 3.01908802986145,
"rewards/rejected": -2.9768600463867188,
"step": 101
},
{
"epoch": 2.5822784810126582,
"grad_norm": 21.838535925363054,
"learning_rate": 1.621562939796643e-07,
"logits/chosen": 2.205430746078491,
"logits/rejected": 2.4456026554107666,
"logps/chosen": -24.633563995361328,
"logps/rejected": -37.809478759765625,
"loss": 0.2187,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.2103780210018158,
"rewards/margins": 2.4951250553131104,
"rewards/rejected": -2.2847468852996826,
"step": 102
},
{
"epoch": 2.607594936708861,
"grad_norm": 22.277589914673204,
"learning_rate": 1.569265853995137e-07,
"logits/chosen": 1.4377330541610718,
"logits/rejected": 1.4871635437011719,
"logps/chosen": -24.512638092041016,
"logps/rejected": -37.595733642578125,
"loss": 0.1964,
"rewards/accuracies": 0.9375,
"rewards/chosen": -0.3236038088798523,
"rewards/margins": 2.621075391769409,
"rewards/rejected": -2.9446792602539062,
"step": 103
},
{
"epoch": 2.632911392405063,
"grad_norm": 21.07481872439906,
"learning_rate": 1.517437420865191e-07,
"logits/chosen": 1.6525717973709106,
"logits/rejected": 2.0419435501098633,
"logps/chosen": -24.287813186645508,
"logps/rejected": -43.26752853393555,
"loss": 0.1873,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.2492070347070694,
"rewards/margins": 3.5699872970581055,
"rewards/rejected": -3.3207802772521973,
"step": 104
},
{
"epoch": 2.6582278481012658,
"grad_norm": 20.677444088444823,
"learning_rate": 1.4661037375836987e-07,
"logits/chosen": 2.6604502201080322,
"logits/rejected": 2.587705373764038,
"logps/chosen": -25.429487228393555,
"logps/rejected": -36.87110137939453,
"loss": 0.1855,
"rewards/accuracies": 0.9375,
"rewards/chosen": -0.07871632277965546,
"rewards/margins": 2.1279850006103516,
"rewards/rejected": -2.2067012786865234,
"step": 105
},
{
"epoch": 2.6835443037974684,
"grad_norm": 23.5751426913938,
"learning_rate": 1.4152906522061047e-07,
"logits/chosen": 2.1078615188598633,
"logits/rejected": 2.089411735534668,
"logps/chosen": -25.450345993041992,
"logps/rejected": -33.47675323486328,
"loss": 0.1944,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.11998379230499268,
"rewards/margins": 2.037046432495117,
"rewards/rejected": -1.917062759399414,
"step": 106
},
{
"epoch": 2.708860759493671,
"grad_norm": 22.767643882863975,
"learning_rate": 1.365023750651133e-07,
"logits/chosen": 1.7534555196762085,
"logits/rejected": 2.2760560512542725,
"logps/chosen": -27.695234298706055,
"logps/rejected": -46.436767578125,
"loss": 0.1638,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.030735721811652184,
"rewards/margins": 3.136977434158325,
"rewards/rejected": -3.1677134037017822,
"step": 107
},
{
"epoch": 2.7341772151898733,
"grad_norm": 20.58426575505033,
"learning_rate": 1.3153283438175034e-07,
"logits/chosen": 2.3597166538238525,
"logits/rejected": 2.5851235389709473,
"logps/chosen": -23.936431884765625,
"logps/rejected": -47.1025390625,
"loss": 0.1785,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.1647489368915558,
"rewards/margins": 3.3675107955932617,
"rewards/rejected": -3.532259941101074,
"step": 108
},
{
"epoch": 2.759493670886076,
"grad_norm": 21.349116101829402,
"learning_rate": 1.2662294548391328e-07,
"logits/chosen": 1.9581003189086914,
"logits/rejected": 1.994449496269226,
"logps/chosen": -28.851770401000977,
"logps/rejected": -39.374019622802734,
"loss": 0.1772,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.023888343945145607,
"rewards/margins": 2.033907890319824,
"rewards/rejected": -2.0100197792053223,
"step": 109
},
{
"epoch": 2.7848101265822782,
"grad_norm": 20.523600951939002,
"learning_rate": 1.2177518064852348e-07,
"logits/chosen": 2.149280071258545,
"logits/rejected": 2.338773012161255,
"logps/chosen": -24.151710510253906,
"logps/rejected": -44.250370025634766,
"loss": 0.197,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.008578717708587646,
"rewards/margins": 2.208198308944702,
"rewards/rejected": -2.1996195316314697,
"step": 110
},
{
"epoch": 2.810126582278481,
"grad_norm": 21.793053109415474,
"learning_rate": 1.1699198087116588e-07,
"logits/chosen": 1.9670238494873047,
"logits/rejected": 2.2674317359924316,
"logps/chosen": -19.423011779785156,
"logps/rejected": -43.466705322265625,
"loss": 0.1912,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.0942595824599266,
"rewards/margins": 2.482712984085083,
"rewards/rejected": -2.576972484588623,
"step": 111
},
{
"epoch": 2.8354430379746836,
"grad_norm": 18.7267582468967,
"learning_rate": 1.1227575463697439e-07,
"logits/chosen": 2.125753402709961,
"logits/rejected": 2.3489747047424316,
"logps/chosen": -24.716629028320312,
"logps/rejected": -35.32937240600586,
"loss": 0.1497,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.06388971209526062,
"rewards/margins": 2.4911532402038574,
"rewards/rejected": -2.5550432205200195,
"step": 112
},
{
"epoch": 2.8607594936708862,
"grad_norm": 21.953907512144703,
"learning_rate": 1.0762887670788701e-07,
"logits/chosen": 1.75847327709198,
"logits/rejected": 1.766452431678772,
"logps/chosen": -33.99777603149414,
"logps/rejected": -49.58311080932617,
"loss": 0.1887,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.09381230175495148,
"rewards/margins": 3.5883729457855225,
"rewards/rejected": -3.682185649871826,
"step": 113
},
{
"epoch": 2.8860759493670884,
"grad_norm": 21.263544038283367,
"learning_rate": 1.0305368692688174e-07,
"logits/chosen": 1.9139025211334229,
"logits/rejected": 1.8650161027908325,
"logps/chosen": -17.992523193359375,
"logps/rejected": -38.388519287109375,
"loss": 0.1854,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.09737363457679749,
"rewards/margins": 3.4405698776245117,
"rewards/rejected": -3.3431966304779053,
"step": 114
},
{
"epoch": 2.911392405063291,
"grad_norm": 19.070922554256914,
"learning_rate": 9.855248903979505e-08,
"logits/chosen": 2.087536096572876,
"logits/rejected": 2.118912935256958,
"logps/chosen": -30.79559326171875,
"logps/rejected": -47.3471565246582,
"loss": 0.1693,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.07975523173809052,
"rewards/margins": 3.129413366317749,
"rewards/rejected": -3.2091684341430664,
"step": 115
},
{
"epoch": 2.9367088607594938,
"grad_norm": 21.46961371171142,
"learning_rate": 9.412754953531663e-08,
"logits/chosen": 1.7869651317596436,
"logits/rejected": 1.881705641746521,
"logps/chosen": -24.043615341186523,
"logps/rejected": -38.01569366455078,
"loss": 0.1852,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.10924415290355682,
"rewards/margins": 2.661862373352051,
"rewards/rejected": -2.5526182651519775,
"step": 116
},
{
"epoch": 2.962025316455696,
"grad_norm": 19.929759669575105,
"learning_rate": 8.978109650374396e-08,
"logits/chosen": 1.7048578262329102,
"logits/rejected": 1.9161901473999023,
"logps/chosen": -26.87795639038086,
"logps/rejected": -42.6606330871582,
"loss": 0.1589,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.13115011155605316,
"rewards/margins": 3.121182441711426,
"rewards/rejected": -2.990032196044922,
"step": 117
},
{
"epoch": 2.9873417721518987,
"grad_norm": 25.118856703851232,
"learning_rate": 8.551531851507185e-08,
"logits/chosen": 2.3607137203216553,
"logits/rejected": 2.5181257724761963,
"logps/chosen": -30.176773071289062,
"logps/rejected": -44.625003814697266,
"loss": 0.224,
"rewards/accuracies": 0.875,
"rewards/chosen": -0.30477192997932434,
"rewards/margins": 2.4585647583007812,
"rewards/rejected": -2.763336658477783,
"step": 118
},
{
"epoch": 3.0126582278481013,
"grad_norm": 24.215057475486347,
"learning_rate": 8.133236351698142e-08,
"logits/chosen": 1.6108707189559937,
"logits/rejected": 1.7527028322219849,
"logps/chosen": -18.345888137817383,
"logps/rejected": -42.66941833496094,
"loss": 0.2033,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.02492370456457138,
"rewards/margins": 3.3315927982330322,
"rewards/rejected": -3.306669235229492,
"step": 119
},
{
"epoch": 3.037974683544304,
"grad_norm": 18.940438234622814,
"learning_rate": 7.723433775328384e-08,
"logits/chosen": 2.0556139945983887,
"logits/rejected": 2.3014981746673584,
"logps/chosen": -21.665407180786133,
"logps/rejected": -41.777896881103516,
"loss": 0.1836,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.17341355979442596,
"rewards/margins": 2.674577236175537,
"rewards/rejected": -2.5011634826660156,
"step": 120
},
{
"epoch": 3.0632911392405062,
"grad_norm": 18.297883681094888,
"learning_rate": 7.322330470336313e-08,
"logits/chosen": 2.301410675048828,
"logits/rejected": 2.2454984188079834,
"logps/chosen": -23.815502166748047,
"logps/rejected": -41.840091705322266,
"loss": 0.1505,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.02547144889831543,
"rewards/margins": 3.206540584564209,
"rewards/rejected": -3.1810688972473145,
"step": 121
},
{
"epoch": 3.088607594936709,
"grad_norm": 18.507140827349733,
"learning_rate": 6.930128404315214e-08,
"logits/chosen": 2.0005404949188232,
"logits/rejected": 2.2419371604919434,
"logps/chosen": -23.189014434814453,
"logps/rejected": -44.778133392333984,
"loss": 0.1697,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.21837717294692993,
"rewards/margins": 2.497182846069336,
"rewards/rejected": -2.715559959411621,
"step": 122
},
{
"epoch": 3.1139240506329116,
"grad_norm": 20.458085524449526,
"learning_rate": 6.547025062816486e-08,
"logits/chosen": 2.5110156536102295,
"logits/rejected": 2.835902452468872,
"logps/chosen": -21.34715461730957,
"logps/rejected": -42.06149673461914,
"loss": 0.1769,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.2778359651565552,
"rewards/margins": 3.1256825923919678,
"rewards/rejected": -2.847846508026123,
"step": 123
},
{
"epoch": 3.1392405063291138,
"grad_norm": 19.657986671859284,
"learning_rate": 6.173213349909728e-08,
"logits/chosen": 2.4499258995056152,
"logits/rejected": 2.4804461002349854,
"logps/chosen": -24.912944793701172,
"logps/rejected": -38.67371368408203,
"loss": 0.172,
"rewards/accuracies": 0.9375,
"rewards/chosen": -0.06854649633169174,
"rewards/margins": 2.4865663051605225,
"rewards/rejected": -2.5551130771636963,
"step": 124
},
{
"epoch": 3.1645569620253164,
"grad_norm": 18.87674548749799,
"learning_rate": 5.808881491049722e-08,
"logits/chosen": 1.3738226890563965,
"logits/rejected": 1.6148171424865723,
"logps/chosen": -25.101566314697266,
"logps/rejected": -40.51878356933594,
"loss": 0.1786,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.33920496702194214,
"rewards/margins": 2.1792898178100586,
"rewards/rejected": -2.5184948444366455,
"step": 125
},
{
"epoch": 3.189873417721519,
"grad_norm": 20.113483471649342,
"learning_rate": 5.454212938299255e-08,
"logits/chosen": 2.0692553520202637,
"logits/rejected": 2.1005959510803223,
"logps/chosen": -26.763628005981445,
"logps/rejected": -37.69628143310547,
"loss": 0.1624,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.04393099620938301,
"rewards/margins": 2.375842809677124,
"rewards/rejected": -2.419773817062378,
"step": 126
},
{
"epoch": 3.2151898734177213,
"grad_norm": 19.33193488722329,
"learning_rate": 5.109386277955477e-08,
"logits/chosen": 1.9354777336120605,
"logits/rejected": 2.3222734928131104,
"logps/chosen": -22.444690704345703,
"logps/rejected": -42.732322692871094,
"loss": 0.1615,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.1692490577697754,
"rewards/margins": 2.950117349624634,
"rewards/rejected": -2.7808682918548584,
"step": 127
},
{
"epoch": 3.240506329113924,
"grad_norm": 17.657011863002797,
"learning_rate": 4.774575140626316e-08,
"logits/chosen": 1.7825671434402466,
"logits/rejected": 2.0206806659698486,
"logps/chosen": -26.314062118530273,
"logps/rejected": -44.959896087646484,
"loss": 0.1292,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.18020519614219666,
"rewards/margins": 3.4240498542785645,
"rewards/rejected": -3.243844747543335,
"step": 128
},
{
"epoch": 3.2658227848101267,
"grad_norm": 17.36688426753045,
"learning_rate": 4.449948113802254e-08,
"logits/chosen": 1.963316559791565,
"logits/rejected": 2.073986530303955,
"logps/chosen": -29.986591339111328,
"logps/rejected": -46.73766326904297,
"loss": 0.1476,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.1175435334444046,
"rewards/margins": 3.3601157665252686,
"rewards/rejected": -3.477659225463867,
"step": 129
},
{
"epoch": 3.291139240506329,
"grad_norm": 17.66376104354918,
"learning_rate": 4.1356686569674335e-08,
"logits/chosen": 1.4700018167495728,
"logits/rejected": 1.7809115648269653,
"logps/chosen": -27.53030014038086,
"logps/rejected": -43.54850387573242,
"loss": 0.1642,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.1137082502245903,
"rewards/margins": 2.9248647689819336,
"rewards/rejected": -2.811156749725342,
"step": 130
},
{
"epoch": 3.3164556962025316,
"grad_norm": 19.797305830292228,
"learning_rate": 3.831895019292897e-08,
"logits/chosen": 1.8914071321487427,
"logits/rejected": 1.8628309965133667,
"logps/chosen": -25.410337448120117,
"logps/rejected": -42.06156921386719,
"loss": 0.1525,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.044954825192689896,
"rewards/margins": 2.8561882972717285,
"rewards/rejected": -2.9011433124542236,
"step": 131
},
{
"epoch": 3.3417721518987342,
"grad_norm": 18.851413557062596,
"learning_rate": 3.538780159953347e-08,
"logits/chosen": 2.0251123905181885,
"logits/rejected": 2.210965156555176,
"logps/chosen": -20.657623291015625,
"logps/rejected": -45.97637939453125,
"loss": 0.1488,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.21129505336284637,
"rewards/margins": 3.6020712852478027,
"rewards/rejected": -3.3907763957977295,
"step": 132
},
{
"epoch": 3.367088607594937,
"grad_norm": 18.383406639630245,
"learning_rate": 3.256471671107616e-08,
"logits/chosen": 1.5468469858169556,
"logits/rejected": 1.8942471742630005,
"logps/chosen": -34.759246826171875,
"logps/rejected": -54.94669723510742,
"loss": 0.161,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.08364612609148026,
"rewards/margins": 3.432290554046631,
"rewards/rejected": -3.348644495010376,
"step": 133
},
{
"epoch": 3.392405063291139,
"grad_norm": 18.442661691122066,
"learning_rate": 2.98511170358155e-08,
"logits/chosen": 1.9410444498062134,
"logits/rejected": 1.9713021516799927,
"logps/chosen": -33.468505859375,
"logps/rejected": -40.28277587890625,
"loss": 0.1517,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.1791667342185974,
"rewards/margins": 2.8069636821746826,
"rewards/rejected": -2.986130475997925,
"step": 134
},
{
"epoch": 3.4177215189873418,
"grad_norm": 19.123016466489233,
"learning_rate": 2.724836895290805e-08,
"logits/chosen": 2.172494649887085,
"logits/rejected": 2.3771188259124756,
"logps/chosen": -18.799209594726562,
"logps/rejected": -27.414329528808594,
"loss": 0.1574,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.05184905230998993,
"rewards/margins": 1.7008121013641357,
"rewards/rejected": -1.648963212966919,
"step": 135
},
{
"epoch": 3.4430379746835444,
"grad_norm": 19.389448097902036,
"learning_rate": 2.475778302439524e-08,
"logits/chosen": 1.5435059070587158,
"logits/rejected": 2.0342299938201904,
"logps/chosen": -23.09898567199707,
"logps/rejected": -41.35944366455078,
"loss": 0.1673,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.01025979220867157,
"rewards/margins": 3.0842089653015137,
"rewards/rejected": -3.094468832015991,
"step": 136
},
{
"epoch": 3.4683544303797467,
"grad_norm": 18.168150870329598,
"learning_rate": 2.2380613335296033e-08,
"logits/chosen": 1.5661985874176025,
"logits/rejected": 1.6321345567703247,
"logps/chosen": -21.557811737060547,
"logps/rejected": -37.444087982177734,
"loss": 0.1457,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.32843151688575745,
"rewards/margins": 3.217196464538574,
"rewards/rejected": -2.888765335083008,
"step": 137
},
{
"epoch": 3.4936708860759493,
"grad_norm": 18.10048373008008,
"learning_rate": 2.0118056862137354e-08,
"logits/chosen": 2.2138497829437256,
"logits/rejected": 2.3827362060546875,
"logps/chosen": -25.050493240356445,
"logps/rejected": -40.5790901184082,
"loss": 0.1574,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.05445308983325958,
"rewards/margins": 2.6866414546966553,
"rewards/rejected": -2.7410943508148193,
"step": 138
},
{
"epoch": 3.518987341772152,
"grad_norm": 17.844608513376855,
"learning_rate": 1.797125287024029e-08,
"logits/chosen": 2.252528667449951,
"logits/rejected": 2.517775297164917,
"logps/chosen": -25.718944549560547,
"logps/rejected": -40.703636169433594,
"loss": 0.1633,
"rewards/accuracies": 0.9375,
"rewards/chosen": -0.23676270246505737,
"rewards/margins": 2.2277328968048096,
"rewards/rejected": -2.4644956588745117,
"step": 139
},
{
"epoch": 3.5443037974683547,
"grad_norm": 19.82721673855706,
"learning_rate": 1.5941282340065697e-08,
"logits/chosen": 2.1378254890441895,
"logits/rejected": 2.3837673664093018,
"logps/chosen": -21.759735107421875,
"logps/rejected": -35.555789947509766,
"loss": 0.1725,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.32154813408851624,
"rewards/margins": 2.5137619972229004,
"rewards/rejected": -2.192214012145996,
"step": 140
},
{
"epoch": 3.569620253164557,
"grad_norm": 19.687984330411137,
"learning_rate": 1.4029167422908105e-08,
"logits/chosen": 1.8691318035125732,
"logits/rejected": 1.951183557510376,
"logps/chosen": -22.21922492980957,
"logps/rejected": -38.082176208496094,
"loss": 0.1587,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.12926092743873596,
"rewards/margins": 3.5831055641174316,
"rewards/rejected": -3.4538450241088867,
"step": 141
},
{
"epoch": 3.5949367088607596,
"grad_norm": 19.210992875859024,
"learning_rate": 1.2235870926211616e-08,
"logits/chosen": 1.878167748451233,
"logits/rejected": 2.1185381412506104,
"logps/chosen": -25.917736053466797,
"logps/rejected": -45.47472381591797,
"loss": 0.1359,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.0160946324467659,
"rewards/margins": 3.645671844482422,
"rewards/rejected": -3.629577398300171,
"step": 142
},
{
"epoch": 3.620253164556962,
"grad_norm": 20.116883252259765,
"learning_rate": 1.0562295828767387e-08,
"logits/chosen": 1.2207794189453125,
"logits/rejected": 1.1344801187515259,
"logps/chosen": -23.36668586730957,
"logps/rejected": -31.341829299926758,
"loss": 0.1841,
"rewards/accuracies": 0.9375,
"rewards/chosen": -0.16667915880680084,
"rewards/margins": 1.5721771717071533,
"rewards/rejected": -1.7388561964035034,
"step": 143
},
{
"epoch": 3.6455696202531644,
"grad_norm": 19.720241233310634,
"learning_rate": 9.009284826036689e-09,
"logits/chosen": 2.4453952312469482,
"logits/rejected": 2.662018299102783,
"logps/chosen": -22.387048721313477,
"logps/rejected": -36.92119216918945,
"loss": 0.1617,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.031851306557655334,
"rewards/margins": 2.5965640544891357,
"rewards/rejected": -2.5647125244140625,
"step": 144
},
{
"epoch": 3.670886075949367,
"grad_norm": 17.86661138635714,
"learning_rate": 7.577619905828281e-09,
"logits/chosen": 2.745617628097534,
"logits/rejected": 2.844001531600952,
"logps/chosen": -26.597410202026367,
"logps/rejected": -36.82072448730469,
"loss": 0.142,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.17600667476654053,
"rewards/margins": 2.553907632827759,
"rewards/rejected": -2.7299141883850098,
"step": 145
},
{
"epoch": 3.6962025316455698,
"grad_norm": 16.0477696571449,
"learning_rate": 6.268021954544095e-09,
"logits/chosen": 2.1746973991394043,
"logits/rejected": 2.2613565921783447,
"logps/chosen": -32.3970947265625,
"logps/rejected": -40.7056884765625,
"loss": 0.1372,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.09012872725725174,
"rewards/margins": 4.017242431640625,
"rewards/rejected": -4.1073713302612305,
"step": 146
},
{
"epoch": 3.721518987341772,
"grad_norm": 18.112204362761403,
"learning_rate": 5.08115039419113e-09,
"logits/chosen": 2.2588939666748047,
"logits/rejected": 2.217837333679199,
"logps/chosen": -23.21999740600586,
"logps/rejected": -38.22819519042969,
"loss": 0.1574,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.07647465169429779,
"rewards/margins": 3.384047031402588,
"rewards/rejected": -3.460521697998047,
"step": 147
},
{
"epoch": 3.7468354430379747,
"grad_norm": 19.146595468250382,
"learning_rate": 4.0176028503425826e-09,
"logits/chosen": 2.2898073196411133,
"logits/rejected": 2.612645149230957,
"logps/chosen": -21.40774917602539,
"logps/rejected": -39.644142150878906,
"loss": 0.1575,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.14267057180404663,
"rewards/margins": 2.3787121772766113,
"rewards/rejected": -2.23604154586792,
"step": 148
},
{
"epoch": 3.7721518987341773,
"grad_norm": 16.066181981705437,
"learning_rate": 3.077914851215585e-09,
"logits/chosen": 2.0213255882263184,
"logits/rejected": 2.22232723236084,
"logps/chosen": -20.719486236572266,
"logps/rejected": -33.82309341430664,
"loss": 0.1448,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.04364713281393051,
"rewards/margins": 2.438537836074829,
"rewards/rejected": -2.482184886932373,
"step": 149
},
{
"epoch": 3.7974683544303796,
"grad_norm": 20.168906914662397,
"learning_rate": 2.2625595580163247e-09,
"logits/chosen": 1.8664175271987915,
"logits/rejected": 2.135633945465088,
"logps/chosen": -24.14510726928711,
"logps/rejected": -39.59516906738281,
"loss": 0.1844,
"rewards/accuracies": 0.875,
"rewards/chosen": -0.004626031965017319,
"rewards/margins": 2.253840446472168,
"rewards/rejected": -2.2584664821624756,
"step": 150
},
{
"epoch": 3.8227848101265822,
"grad_norm": 21.962898348700495,
"learning_rate": 1.5719475266893489e-09,
"logits/chosen": 1.7901225090026855,
"logits/rejected": 1.9218878746032715,
"logps/chosen": -22.082956314086914,
"logps/rejected": -43.000667572021484,
"loss": 0.192,
"rewards/accuracies": 0.9375,
"rewards/chosen": 0.07910831272602081,
"rewards/margins": 2.6802215576171875,
"rewards/rejected": -2.6011130809783936,
"step": 151
},
{
"epoch": 3.848101265822785,
"grad_norm": 16.511033653339577,
"learning_rate": 1.0064265011902328e-09,
"logits/chosen": 2.4072799682617188,
"logits/rejected": 2.4377200603485107,
"logps/chosen": -25.803665161132812,
"logps/rejected": -40.92607879638672,
"loss": 0.1549,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.035745151340961456,
"rewards/margins": 3.1956663131713867,
"rewards/rejected": -3.159921169281006,
"step": 152
},
{
"epoch": 3.8734177215189876,
"grad_norm": 18.636452488465395,
"learning_rate": 5.662812383859794e-10,
"logits/chosen": 2.361091136932373,
"logits/rejected": 2.3107473850250244,
"logps/chosen": -27.91613006591797,
"logps/rejected": -39.84597396850586,
"loss": 0.1544,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.1522400677204132,
"rewards/margins": 3.036912441253662,
"rewards/rejected": -3.189152240753174,
"step": 153
},
{
"epoch": 3.8987341772151898,
"grad_norm": 17.763028400462222,
"learning_rate": 2.5173336467135263e-10,
"logits/chosen": 2.0824742317199707,
"logits/rejected": 2.3250155448913574,
"logps/chosen": -23.416114807128906,
"logps/rejected": -38.985511779785156,
"loss": 0.1542,
"rewards/accuracies": 0.9375,
"rewards/chosen": -0.06766586005687714,
"rewards/margins": 2.1996846199035645,
"rewards/rejected": -2.267350673675537,
"step": 154
},
{
"epoch": 3.9240506329113924,
"grad_norm": 19.457837238018815,
"learning_rate": 6.294126437336733e-11,
"logits/chosen": 2.00872540473938,
"logits/rejected": 2.4769511222839355,
"logps/chosen": -24.827199935913086,
"logps/rejected": -44.62668991088867,
"loss": 0.1614,
"rewards/accuracies": 1.0,
"rewards/chosen": -0.19776426255702972,
"rewards/margins": 2.720571756362915,
"rewards/rejected": -2.9183361530303955,
"step": 155
},
{
"epoch": 3.9493670886075947,
"grad_norm": 19.565085246341297,
"learning_rate": 0.0,
"logits/chosen": 1.8402495384216309,
"logits/rejected": 2.033839464187622,
"logps/chosen": -28.689044952392578,
"logps/rejected": -46.74669647216797,
"loss": 0.1636,
"rewards/accuracies": 1.0,
"rewards/chosen": 0.04416623339056969,
"rewards/margins": 3.22959041595459,
"rewards/rejected": -3.185424327850342,
"step": 156
},
{
"epoch": 3.9493670886075947,
"step": 156,
"total_flos": 0.0,
"train_loss": 0.3210896818110576,
"train_runtime": 45702.9058,
"train_samples_per_second": 0.442,
"train_steps_per_second": 0.003
}
],
"logging_steps": 1,
"max_steps": 156,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 200,
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}