{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 1.6, |
|
"eval_steps": 5, |
|
"global_step": 130, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.024615384615384615, |
|
"grad_norm": 51.137206373851456, |
|
"learning_rate": 5.88235294117647e-08, |
|
"logits/chosen": -0.5265347361564636, |
|
"logits/rejected": -0.5229614973068237, |
|
"logps/chosen": -40.1543083190918, |
|
"logps/rejected": -40.267845153808594, |
|
"loss": 0.6931, |
|
"rewards/accuracies": 0.0, |
|
"rewards/chosen": 0.0, |
|
"rewards/margins": 0.0, |
|
"rewards/rejected": 0.0, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.04923076923076923, |
|
"grad_norm": 53.426708668382766, |
|
"learning_rate": 1.176470588235294e-07, |
|
"logits/chosen": -0.5090410113334656, |
|
"logits/rejected": -0.502047061920166, |
|
"logps/chosen": -45.320037841796875, |
|
"logps/rejected": -42.6551399230957, |
|
"loss": 0.6918, |
|
"rewards/accuracies": 0.453125, |
|
"rewards/chosen": 0.014461740851402283, |
|
"rewards/margins": 0.0072987377643585205, |
|
"rewards/rejected": 0.007163003087043762, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.06153846153846154, |
|
"eval_logits/chosen": -0.49676236510276794, |
|
"eval_logits/rejected": -0.4936961829662323, |
|
"eval_logps/chosen": -45.20378875732422, |
|
"eval_logps/rejected": -42.88367462158203, |
|
"eval_loss": 0.6931462287902832, |
|
"eval_rewards/accuracies": 0.4623287618160248, |
|
"eval_rewards/chosen": 0.0008219562005251646, |
|
"eval_rewards/margins": 0.0035426486283540726, |
|
"eval_rewards/rejected": -0.002720692427828908, |
|
"eval_runtime": 400.2137, |
|
"eval_samples_per_second": 4.333, |
|
"eval_steps_per_second": 0.182, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.07384615384615385, |
|
"grad_norm": 47.768106379014085, |
|
"learning_rate": 1.764705882352941e-07, |
|
"logits/chosen": -0.4896703064441681, |
|
"logits/rejected": -0.48678484559059143, |
|
"logps/chosen": -47.16059875488281, |
|
"logps/rejected": -35.45697021484375, |
|
"loss": 0.6958, |
|
"rewards/accuracies": 0.484375, |
|
"rewards/chosen": 0.007428005337715149, |
|
"rewards/margins": 0.012338653206825256, |
|
"rewards/rejected": -0.004910647869110107, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.09846153846153846, |
|
"grad_norm": 39.51012410260543, |
|
"learning_rate": 2.352941176470588e-07, |
|
"logits/chosen": -0.4996076822280884, |
|
"logits/rejected": -0.49669766426086426, |
|
"logps/chosen": -47.94187545776367, |
|
"logps/rejected": -41.556766510009766, |
|
"loss": 0.6914, |
|
"rewards/accuracies": 0.5625, |
|
"rewards/chosen": 0.018062740564346313, |
|
"rewards/margins": 0.025705993175506592, |
|
"rewards/rejected": -0.007643252611160278, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.12307692307692308, |
|
"grad_norm": 39.08254813329663, |
|
"learning_rate": 2.941176470588235e-07, |
|
"logits/chosen": -0.49092355370521545, |
|
"logits/rejected": -0.48874035477638245, |
|
"logps/chosen": -50.65516662597656, |
|
"logps/rejected": -30.806886672973633, |
|
"loss": 0.6842, |
|
"rewards/accuracies": 0.515625, |
|
"rewards/chosen": 0.016676604747772217, |
|
"rewards/margins": 0.011630743741989136, |
|
"rewards/rejected": 0.005045861005783081, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.12307692307692308, |
|
"eval_logits/chosen": -0.49970191717147827, |
|
"eval_logits/rejected": -0.4963047206401825, |
|
"eval_logps/chosen": -45.12245178222656, |
|
"eval_logps/rejected": -42.913116455078125, |
|
"eval_loss": 0.6740441918373108, |
|
"eval_rewards/accuracies": 0.5410959124565125, |
|
"eval_rewards/chosen": 0.041489191353321075, |
|
"eval_rewards/margins": 0.058929312974214554, |
|
"eval_rewards/rejected": -0.017440123483538628, |
|
"eval_runtime": 398.3897, |
|
"eval_samples_per_second": 4.353, |
|
"eval_steps_per_second": 0.183, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.1476923076923077, |
|
"grad_norm": 38.09131172735928, |
|
"learning_rate": 3.529411764705882e-07, |
|
"logits/chosen": -0.48407822847366333, |
|
"logits/rejected": -0.4791830778121948, |
|
"logps/chosen": -45.49330139160156, |
|
"logps/rejected": -42.8103141784668, |
|
"loss": 0.6725, |
|
"rewards/accuracies": 0.609375, |
|
"rewards/chosen": 0.04486130177974701, |
|
"rewards/margins": 0.06311255693435669, |
|
"rewards/rejected": -0.01825125515460968, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.1723076923076923, |
|
"grad_norm": 35.41525379746354, |
|
"learning_rate": 4.117647058823529e-07, |
|
"logits/chosen": -0.5208038091659546, |
|
"logits/rejected": -0.5175592303276062, |
|
"logps/chosen": -40.418609619140625, |
|
"logps/rejected": -34.53902816772461, |
|
"loss": 0.6501, |
|
"rewards/accuracies": 0.546875, |
|
"rewards/chosen": 0.11583159863948822, |
|
"rewards/margins": 0.07396885752677917, |
|
"rewards/rejected": 0.041862741112709045, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.18461538461538463, |
|
"eval_logits/chosen": -0.4988827705383301, |
|
"eval_logits/rejected": -0.4956616461277008, |
|
"eval_logps/chosen": -44.772987365722656, |
|
"eval_logps/rejected": -42.95915985107422, |
|
"eval_loss": 0.6234598159790039, |
|
"eval_rewards/accuracies": 0.6404109597206116, |
|
"eval_rewards/chosen": 0.2162197083234787, |
|
"eval_rewards/margins": 0.25668132305145264, |
|
"eval_rewards/rejected": -0.040461596101522446, |
|
"eval_runtime": 398.8168, |
|
"eval_samples_per_second": 4.348, |
|
"eval_steps_per_second": 0.183, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.19692307692307692, |
|
"grad_norm": 33.20243289262748, |
|
"learning_rate": 4.705882352941176e-07, |
|
"logits/chosen": -0.49464067816734314, |
|
"logits/rejected": -0.4926251173019409, |
|
"logps/chosen": -41.81456756591797, |
|
"logps/rejected": -34.730552673339844, |
|
"loss": 0.6139, |
|
"rewards/accuracies": 0.734375, |
|
"rewards/chosen": 0.22246630489826202, |
|
"rewards/margins": 0.1789122372865677, |
|
"rewards/rejected": 0.043554067611694336, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.22153846153846155, |
|
"grad_norm": 31.414401498072234, |
|
"learning_rate": 4.99941324504621e-07, |
|
"logits/chosen": -0.5083720684051514, |
|
"logits/rejected": -0.5054486393928528, |
|
"logps/chosen": -40.7899169921875, |
|
"logps/rejected": -41.80813217163086, |
|
"loss": 0.6038, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": 0.22346775233745575, |
|
"rewards/margins": 0.36676231026649475, |
|
"rewards/rejected": -0.143294557929039, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.24615384615384617, |
|
"grad_norm": 26.94475877791274, |
|
"learning_rate": 4.99472085783721e-07, |
|
"logits/chosen": -0.5102497935295105, |
|
"logits/rejected": -0.5054219961166382, |
|
"logps/chosen": -38.036109924316406, |
|
"logps/rejected": -41.01247787475586, |
|
"loss": 0.5437, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": 0.5671411752700806, |
|
"rewards/margins": 0.6098884344100952, |
|
"rewards/rejected": -0.04274718463420868, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.24615384615384617, |
|
"eval_logits/chosen": -0.4962932765483856, |
|
"eval_logits/rejected": -0.49319401383399963, |
|
"eval_logps/chosen": -43.80841827392578, |
|
"eval_logps/rejected": -42.96743392944336, |
|
"eval_loss": 0.5607677698135376, |
|
"eval_rewards/accuracies": 0.6952054500579834, |
|
"eval_rewards/chosen": 0.6985027194023132, |
|
"eval_rewards/margins": 0.7430992722511292, |
|
"eval_rewards/rejected": -0.04459657892584801, |
|
"eval_runtime": 398.3125, |
|
"eval_samples_per_second": 4.353, |
|
"eval_steps_per_second": 0.183, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.27076923076923076, |
|
"grad_norm": 24.12367725777412, |
|
"learning_rate": 4.985344892885899e-07, |
|
"logits/chosen": -0.5035347938537598, |
|
"logits/rejected": -0.5017133951187134, |
|
"logps/chosen": -42.291107177734375, |
|
"logps/rejected": -43.623016357421875, |
|
"loss": 0.5756, |
|
"rewards/accuracies": 0.734375, |
|
"rewards/chosen": 0.6317887306213379, |
|
"rewards/margins": 0.7202781438827515, |
|
"rewards/rejected": -0.08848930895328522, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.2953846153846154, |
|
"grad_norm": 24.794972937394068, |
|
"learning_rate": 4.971302952586796e-07, |
|
"logits/chosen": -0.5023949146270752, |
|
"logits/rejected": -0.49643266201019287, |
|
"logps/chosen": -39.85929489135742, |
|
"logps/rejected": -46.9812126159668, |
|
"loss": 0.5219, |
|
"rewards/accuracies": 0.84375, |
|
"rewards/chosen": 0.8194392919540405, |
|
"rewards/margins": 1.2944234609603882, |
|
"rewards/rejected": -0.4749842882156372, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.3076923076923077, |
|
"eval_logits/chosen": -0.4914375841617584, |
|
"eval_logits/rejected": -0.4883214831352234, |
|
"eval_logps/chosen": -43.38737869262695, |
|
"eval_logps/rejected": -43.16010284423828, |
|
"eval_loss": 0.516257107257843, |
|
"eval_rewards/accuracies": 0.7397260069847107, |
|
"eval_rewards/chosen": 0.9090243577957153, |
|
"eval_rewards/margins": 1.0499579906463623, |
|
"eval_rewards/rejected": -0.14093351364135742, |
|
"eval_runtime": 397.9655, |
|
"eval_samples_per_second": 4.357, |
|
"eval_steps_per_second": 0.183, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"grad_norm": 25.854991033804026, |
|
"learning_rate": 4.952621399215597e-07, |
|
"logits/chosen": -0.49028468132019043, |
|
"logits/rejected": -0.48677563667297363, |
|
"logps/chosen": -39.38325500488281, |
|
"logps/rejected": -30.141273498535156, |
|
"loss": 0.5432, |
|
"rewards/accuracies": 0.765625, |
|
"rewards/chosen": 0.9173061847686768, |
|
"rewards/margins": 0.6880545020103455, |
|
"rewards/rejected": 0.2292517125606537, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.3446153846153846, |
|
"grad_norm": 22.546249636399228, |
|
"learning_rate": 4.929335305436764e-07, |
|
"logits/chosen": -0.4758816063404083, |
|
"logits/rejected": -0.47193384170532227, |
|
"logps/chosen": -37.941322326660156, |
|
"logps/rejected": -36.9672966003418, |
|
"loss": 0.4989, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": 0.9205312132835388, |
|
"rewards/margins": 0.96444171667099, |
|
"rewards/rejected": -0.0439104288816452, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.36923076923076925, |
|
"grad_norm": 25.277496467379798, |
|
"learning_rate": 4.901488388458247e-07, |
|
"logits/chosen": -0.47848933935165405, |
|
"logits/rejected": -0.4744369089603424, |
|
"logps/chosen": -42.54041290283203, |
|
"logps/rejected": -32.181236267089844, |
|
"loss": 0.5002, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": 0.949561595916748, |
|
"rewards/margins": 0.7569789886474609, |
|
"rewards/rejected": 0.19258266687393188, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.36923076923076925, |
|
"eval_logits/chosen": -0.48240911960601807, |
|
"eval_logits/rejected": -0.47941771149635315, |
|
"eval_logps/chosen": -42.97114944458008, |
|
"eval_logps/rejected": -43.50387191772461, |
|
"eval_loss": 0.4467584788799286, |
|
"eval_rewards/accuracies": 0.75, |
|
"eval_rewards/chosen": 1.1171396970748901, |
|
"eval_rewards/margins": 1.429957389831543, |
|
"eval_rewards/rejected": -0.3128177523612976, |
|
"eval_runtime": 399.6945, |
|
"eval_samples_per_second": 4.338, |
|
"eval_steps_per_second": 0.183, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.39384615384615385, |
|
"grad_norm": 21.934080715012698, |
|
"learning_rate": 4.869132927957006e-07, |
|
"logits/chosen": -0.47030550241470337, |
|
"logits/rejected": -0.4669092893600464, |
|
"logps/chosen": -39.564884185791016, |
|
"logps/rejected": -46.268707275390625, |
|
"loss": 0.4561, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": 1.1370558738708496, |
|
"rewards/margins": 1.5885730981826782, |
|
"rewards/rejected": -0.45151710510253906, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.41846153846153844, |
|
"grad_norm": 17.9530062267105, |
|
"learning_rate": 4.832329667929376e-07, |
|
"logits/chosen": -0.48381492495536804, |
|
"logits/rejected": -0.47740432620048523, |
|
"logps/chosen": -37.79485321044922, |
|
"logps/rejected": -41.405555725097656, |
|
"loss": 0.3974, |
|
"rewards/accuracies": 0.890625, |
|
"rewards/chosen": 1.0800038576126099, |
|
"rewards/margins": 1.8046053647994995, |
|
"rewards/rejected": -0.7246013879776001, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.4307692307692308, |
|
"eval_logits/chosen": -0.4790378212928772, |
|
"eval_logits/rejected": -0.4759667217731476, |
|
"eval_logps/chosen": -42.852088928222656, |
|
"eval_logps/rejected": -43.939449310302734, |
|
"eval_loss": 0.3961867392063141, |
|
"eval_rewards/accuracies": 0.7602739930152893, |
|
"eval_rewards/chosen": 1.1766709089279175, |
|
"eval_rewards/margins": 1.7072778940200806, |
|
"eval_rewards/rejected": -0.5306068062782288, |
|
"eval_runtime": 402.2584, |
|
"eval_samples_per_second": 4.311, |
|
"eval_steps_per_second": 0.181, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.4430769230769231, |
|
"grad_norm": 18.01450637894319, |
|
"learning_rate": 4.791147702650565e-07, |
|
"logits/chosen": -0.4862886071205139, |
|
"logits/rejected": -0.48092207312583923, |
|
"logps/chosen": -40.004295349121094, |
|
"logps/rejected": -39.387386322021484, |
|
"loss": 0.4067, |
|
"rewards/accuracies": 0.84375, |
|
"rewards/chosen": 1.3134092092514038, |
|
"rewards/margins": 1.8237063884735107, |
|
"rewards/rejected": -0.5102972984313965, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.4676923076923077, |
|
"grad_norm": 18.54804560370934, |
|
"learning_rate": 4.745664346957361e-07, |
|
"logits/chosen": -0.46789872646331787, |
|
"logits/rejected": -0.4679996967315674, |
|
"logps/chosen": -42.09858322143555, |
|
"logps/rejected": -31.129802703857422, |
|
"loss": 0.3702, |
|
"rewards/accuracies": 0.796875, |
|
"rewards/chosen": 1.3026843070983887, |
|
"rewards/margins": 1.3334376811981201, |
|
"rewards/rejected": -0.030753374099731445, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.49230769230769234, |
|
"grad_norm": 13.439566896592863, |
|
"learning_rate": 4.695964991097616e-07, |
|
"logits/chosen": -0.449634313583374, |
|
"logits/rejected": -0.4447551965713501, |
|
"logps/chosen": -36.52521514892578, |
|
"logps/rejected": -42.37124252319336, |
|
"loss": 0.3514, |
|
"rewards/accuracies": 0.859375, |
|
"rewards/chosen": 1.1417609453201294, |
|
"rewards/margins": 1.7916193008422852, |
|
"rewards/rejected": -0.6498584747314453, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.49230769230769234, |
|
"eval_logits/chosen": -0.47975781559944153, |
|
"eval_logits/rejected": -0.47623777389526367, |
|
"eval_logps/chosen": -42.85760498046875, |
|
"eval_logps/rejected": -44.426246643066406, |
|
"eval_loss": 0.35519111156463623, |
|
"eval_rewards/accuracies": 0.7876712083816528, |
|
"eval_rewards/chosen": 1.1739110946655273, |
|
"eval_rewards/margins": 1.9479175806045532, |
|
"eval_rewards/rejected": -0.7740064859390259, |
|
"eval_runtime": 398.0514, |
|
"eval_samples_per_second": 4.356, |
|
"eval_steps_per_second": 0.183, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.5169230769230769, |
|
"grad_norm": 14.688902217061264, |
|
"learning_rate": 4.642142940418973e-07, |
|
"logits/chosen": -0.5011200904846191, |
|
"logits/rejected": -0.49806085228919983, |
|
"logps/chosen": -37.53756332397461, |
|
"logps/rejected": -43.31938171386719, |
|
"loss": 0.3629, |
|
"rewards/accuracies": 0.828125, |
|
"rewards/chosen": 1.0989573001861572, |
|
"rewards/margins": 1.9275836944580078, |
|
"rewards/rejected": -0.8286263346672058, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.5415384615384615, |
|
"grad_norm": 14.193796961040897, |
|
"learning_rate": 4.5842992401978256e-07, |
|
"logits/chosen": -0.4911167323589325, |
|
"logits/rejected": -0.4909789264202118, |
|
"logps/chosen": -46.16688537597656, |
|
"logps/rejected": -35.1140251159668, |
|
"loss": 0.316, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": 1.0063791275024414, |
|
"rewards/margins": 1.4864487648010254, |
|
"rewards/rejected": -0.48006969690322876, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.5538461538461539, |
|
"eval_logits/chosen": -0.472987562417984, |
|
"eval_logits/rejected": -0.46926581859588623, |
|
"eval_logps/chosen": -42.71507263183594, |
|
"eval_logps/rejected": -45.037986755371094, |
|
"eval_loss": 0.31506600975990295, |
|
"eval_rewards/accuracies": 0.801369845867157, |
|
"eval_rewards/chosen": 1.2451775074005127, |
|
"eval_rewards/margins": 2.3250503540039062, |
|
"eval_rewards/rejected": -1.0798726081848145, |
|
"eval_runtime": 398.07, |
|
"eval_samples_per_second": 4.356, |
|
"eval_steps_per_second": 0.183, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.5661538461538461, |
|
"grad_norm": 11.94835713175047, |
|
"learning_rate": 4.5225424859373684e-07, |
|
"logits/chosen": -0.48655006289482117, |
|
"logits/rejected": -0.4802987575531006, |
|
"logps/chosen": -45.382694244384766, |
|
"logps/rejected": -34.198219299316406, |
|
"loss": 0.2964, |
|
"rewards/accuracies": 0.828125, |
|
"rewards/chosen": 1.3039376735687256, |
|
"rewards/margins": 1.7199229001998901, |
|
"rewards/rejected": -0.4159852862358093, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 0.5907692307692308, |
|
"grad_norm": 11.665118422994077, |
|
"learning_rate": 4.456988619490889e-07, |
|
"logits/chosen": -0.4525223672389984, |
|
"logits/rejected": -0.4487767517566681, |
|
"logps/chosen": -42.471214294433594, |
|
"logps/rejected": -37.61225128173828, |
|
"loss": 0.2878, |
|
"rewards/accuracies": 0.828125, |
|
"rewards/chosen": 1.1848539113998413, |
|
"rewards/margins": 1.884321928024292, |
|
"rewards/rejected": -0.6994677782058716, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.6153846153846154, |
|
"grad_norm": 11.208056217338184, |
|
"learning_rate": 4.3877607113930516e-07, |
|
"logits/chosen": -0.4693542420864105, |
|
"logits/rejected": -0.46645450592041016, |
|
"logps/chosen": -38.541446685791016, |
|
"logps/rejected": -28.433433532714844, |
|
"loss": 0.2786, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 1.3120430707931519, |
|
"rewards/margins": 1.601030945777893, |
|
"rewards/rejected": -0.2889878749847412, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.6153846153846154, |
|
"eval_logits/chosen": -0.46347275376319885, |
|
"eval_logits/rejected": -0.45981672406196594, |
|
"eval_logps/chosen": -42.53354263305664, |
|
"eval_logps/rejected": -45.56825256347656, |
|
"eval_loss": 0.28788629174232483, |
|
"eval_rewards/accuracies": 0.818493127822876, |
|
"eval_rewards/chosen": 1.335942029953003, |
|
"eval_rewards/margins": 2.6809515953063965, |
|
"eval_rewards/rejected": -1.345009684562683, |
|
"eval_runtime": 398.8881, |
|
"eval_samples_per_second": 4.347, |
|
"eval_steps_per_second": 0.183, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"grad_norm": 12.527059053107184, |
|
"learning_rate": 4.314988729807827e-07, |
|
"logits/chosen": -0.46812835335731506, |
|
"logits/rejected": -0.4670564532279968, |
|
"logps/chosen": -39.73173904418945, |
|
"logps/rejected": -31.038818359375, |
|
"loss": 0.277, |
|
"rewards/accuracies": 0.84375, |
|
"rewards/chosen": 1.1246474981307983, |
|
"rewards/margins": 1.668458342552185, |
|
"rewards/rejected": -0.5438107252120972, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 0.6646153846153846, |
|
"grad_norm": 8.553726428740159, |
|
"learning_rate": 4.238809296526846e-07, |
|
"logits/chosen": -0.4466942548751831, |
|
"logits/rejected": -0.4432934820652008, |
|
"logps/chosen": -40.79918670654297, |
|
"logps/rejected": -43.690181732177734, |
|
"loss": 0.2475, |
|
"rewards/accuracies": 0.8125, |
|
"rewards/chosen": 1.295045256614685, |
|
"rewards/margins": 2.5195388793945312, |
|
"rewards/rejected": -1.2244938611984253, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.676923076923077, |
|
"eval_logits/chosen": -0.4640098512172699, |
|
"eval_logits/rejected": -0.4600638151168823, |
|
"eval_logps/chosen": -42.25896453857422, |
|
"eval_logps/rejected": -45.97068786621094, |
|
"eval_loss": 0.26564928889274597, |
|
"eval_rewards/accuracies": 0.818493127822876, |
|
"eval_rewards/chosen": 1.47323477268219, |
|
"eval_rewards/margins": 3.019458293914795, |
|
"eval_rewards/rejected": -1.5462236404418945, |
|
"eval_runtime": 397.9539, |
|
"eval_samples_per_second": 4.357, |
|
"eval_steps_per_second": 0.183, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.6892307692307692, |
|
"grad_norm": 10.956104922328452, |
|
"learning_rate": 4.159365430476261e-07, |
|
"logits/chosen": -0.45851805806159973, |
|
"logits/rejected": -0.4554572105407715, |
|
"logps/chosen": -39.361473083496094, |
|
"logps/rejected": -40.478187561035156, |
|
"loss": 0.2571, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 1.2780680656433105, |
|
"rewards/margins": 2.710892915725708, |
|
"rewards/rejected": -1.4328248500823975, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 0.7138461538461538, |
|
"grad_norm": 8.998450490495424, |
|
"learning_rate": 4.076806279213655e-07, |
|
"logits/chosen": -0.44970858097076416, |
|
"logits/rejected": -0.4487365484237671, |
|
"logps/chosen": -39.6558723449707, |
|
"logps/rejected": -30.797224044799805, |
|
"loss": 0.2377, |
|
"rewards/accuracies": 0.859375, |
|
"rewards/chosen": 1.423360824584961, |
|
"rewards/margins": 2.117220163345337, |
|
"rewards/rejected": -0.6938591003417969, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.7384615384615385, |
|
"grad_norm": 7.997921172873259, |
|
"learning_rate": 3.991286838919086e-07, |
|
"logits/chosen": -0.45197734236717224, |
|
"logits/rejected": -0.4494423270225525, |
|
"logps/chosen": -39.821556091308594, |
|
"logps/rejected": -30.172922134399414, |
|
"loss": 0.2308, |
|
"rewards/accuracies": 0.84375, |
|
"rewards/chosen": 1.3537344932556152, |
|
"rewards/margins": 2.035463571548462, |
|
"rewards/rejected": -0.6817290186882019, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.7384615384615385, |
|
"eval_logits/chosen": -0.4599209725856781, |
|
"eval_logits/rejected": -0.45611828565597534, |
|
"eval_logps/chosen": -41.97780990600586, |
|
"eval_logps/rejected": -46.275150299072266, |
|
"eval_loss": 0.25006040930747986, |
|
"eval_rewards/accuracies": 0.8219178318977356, |
|
"eval_rewards/chosen": 1.6138089895248413, |
|
"eval_rewards/margins": 3.3122646808624268, |
|
"eval_rewards/rejected": -1.698456048965454, |
|
"eval_runtime": 400.364, |
|
"eval_samples_per_second": 4.331, |
|
"eval_steps_per_second": 0.182, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.7630769230769231, |
|
"grad_norm": 8.82980837299946, |
|
"learning_rate": 3.902967663405956e-07, |
|
"logits/chosen": -0.4859851002693176, |
|
"logits/rejected": -0.48067888617515564, |
|
"logps/chosen": -41.49687194824219, |
|
"logps/rejected": -52.49277114868164, |
|
"loss": 0.2392, |
|
"rewards/accuracies": 0.84375, |
|
"rewards/chosen": 1.802024483680725, |
|
"rewards/margins": 3.978426456451416, |
|
"rewards/rejected": -2.1764023303985596, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 0.7876923076923077, |
|
"grad_norm": 9.426495089553443, |
|
"learning_rate": 3.8120145626980015e-07, |
|
"logits/chosen": -0.4271140694618225, |
|
"logits/rejected": -0.42757901549339294, |
|
"logps/chosen": -42.58659744262695, |
|
"logps/rejected": -34.0533561706543, |
|
"loss": 0.2168, |
|
"rewards/accuracies": 0.828125, |
|
"rewards/chosen": 1.4099901914596558, |
|
"rewards/margins": 2.444286346435547, |
|
"rewards/rejected": -1.0342963933944702, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"eval_logits/chosen": -0.45980003476142883, |
|
"eval_logits/rejected": -0.4559204876422882, |
|
"eval_logps/chosen": -41.840850830078125, |
|
"eval_logps/rejected": -46.55259323120117, |
|
"eval_loss": 0.24060946702957153, |
|
"eval_rewards/accuracies": 0.8287671208381653, |
|
"eval_rewards/chosen": 1.6822888851165771, |
|
"eval_rewards/margins": 3.519465208053589, |
|
"eval_rewards/rejected": -1.8371765613555908, |
|
"eval_runtime": 400.9472, |
|
"eval_samples_per_second": 4.325, |
|
"eval_steps_per_second": 0.182, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.8123076923076923, |
|
"grad_norm": 7.070039356412563, |
|
"learning_rate": 3.718598291738298e-07, |
|
"logits/chosen": -0.44447261095046997, |
|
"logits/rejected": -0.44296732544898987, |
|
"logps/chosen": -37.90412902832031, |
|
"logps/rejected": -36.56926727294922, |
|
"loss": 0.2129, |
|
"rewards/accuracies": 0.828125, |
|
"rewards/chosen": 1.5699365139007568, |
|
"rewards/margins": 2.4697201251983643, |
|
"rewards/rejected": -0.899783730506897, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.8369230769230769, |
|
"grad_norm": 7.560682128141536, |
|
"learning_rate": 3.622894229814698e-07, |
|
"logits/chosen": -0.4488967955112457, |
|
"logits/rejected": -0.44460177421569824, |
|
"logps/chosen": -38.6946907043457, |
|
"logps/rejected": -46.51150894165039, |
|
"loss": 0.2148, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 1.4161229133605957, |
|
"rewards/margins": 3.171121835708618, |
|
"rewards/rejected": -1.754999041557312, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 0.8615384615384616, |
|
"grad_norm": 8.217469156292612, |
|
"learning_rate": 3.52508205130354e-07, |
|
"logits/chosen": -0.45258235931396484, |
|
"logits/rejected": -0.44472819566726685, |
|
"logps/chosen": -41.79392623901367, |
|
"logps/rejected": -49.990150451660156, |
|
"loss": 0.2015, |
|
"rewards/accuracies": 0.90625, |
|
"rewards/chosen": 1.6353414058685303, |
|
"rewards/margins": 4.414577960968018, |
|
"rewards/rejected": -2.779236316680908, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.8615384615384616, |
|
"eval_logits/chosen": -0.45598042011260986, |
|
"eval_logits/rejected": -0.4519526958465576, |
|
"eval_logps/chosen": -41.811824798583984, |
|
"eval_logps/rejected": -46.88448715209961, |
|
"eval_loss": 0.23113983869552612, |
|
"eval_rewards/accuracies": 0.835616409778595, |
|
"eval_rewards/chosen": 1.696802020072937, |
|
"eval_rewards/margins": 3.699925661087036, |
|
"eval_rewards/rejected": -2.0031237602233887, |
|
"eval_runtime": 397.814, |
|
"eval_samples_per_second": 4.359, |
|
"eval_steps_per_second": 0.184, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.8861538461538462, |
|
"grad_norm": 9.912800042727127, |
|
"learning_rate": 3.4253453883497864e-07, |
|
"logits/chosen": -0.46372586488723755, |
|
"logits/rejected": -0.45979729294776917, |
|
"logps/chosen": -38.839900970458984, |
|
"logps/rejected": -39.225364685058594, |
|
"loss": 0.1988, |
|
"rewards/accuracies": 0.890625, |
|
"rewards/chosen": 1.1858875751495361, |
|
"rewards/margins": 3.046271562576294, |
|
"rewards/rejected": -1.8603839874267578, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.9107692307692308, |
|
"grad_norm": 8.369939820458308, |
|
"learning_rate": 3.323871486116851e-07, |
|
"logits/chosen": -0.4712873697280884, |
|
"logits/rejected": -0.4673464894294739, |
|
"logps/chosen": -38.399444580078125, |
|
"logps/rejected": -31.575439453125, |
|
"loss": 0.2334, |
|
"rewards/accuracies": 0.859375, |
|
"rewards/chosen": 1.5223883390426636, |
|
"rewards/margins": 2.529682159423828, |
|
"rewards/rejected": -1.0072938203811646, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 0.9230769230769231, |
|
"eval_logits/chosen": -0.4527950584888458, |
|
"eval_logits/rejected": -0.44881972670555115, |
|
"eval_logps/chosen": -41.899085998535156, |
|
"eval_logps/rejected": -47.179649353027344, |
|
"eval_loss": 0.2244771271944046, |
|
"eval_rewards/accuracies": 0.8424657583236694, |
|
"eval_rewards/chosen": 1.6531713008880615, |
|
"eval_rewards/margins": 3.803877830505371, |
|
"eval_rewards/rejected": -2.1507065296173096, |
|
"eval_runtime": 398.9461, |
|
"eval_samples_per_second": 4.346, |
|
"eval_steps_per_second": 0.183, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.9353846153846154, |
|
"grad_norm": 9.041284945416068, |
|
"learning_rate": 3.220850851253377e-07, |
|
"logits/chosen": -0.48120999336242676, |
|
"logits/rejected": -0.476688027381897, |
|
"logps/chosen": -41.46022415161133, |
|
"logps/rejected": -46.435829162597656, |
|
"loss": 0.232, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 1.5242944955825806, |
|
"rewards/margins": 3.8246002197265625, |
|
"rewards/rejected": -2.3003056049346924, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 0.96, |
|
"grad_norm": 5.93400233545759, |
|
"learning_rate": 3.1164768942369053e-07, |
|
"logits/chosen": -0.4919479787349701, |
|
"logits/rejected": -0.4866538941860199, |
|
"logps/chosen": -35.453460693359375, |
|
"logps/rejected": -63.363746643066406, |
|
"loss": 0.2211, |
|
"rewards/accuracies": 0.84375, |
|
"rewards/chosen": 1.4180715084075928, |
|
"rewards/margins": 4.756035804748535, |
|
"rewards/rejected": -3.3379647731781006, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 0.9846153846153847, |
|
"grad_norm": 6.808355501392862, |
|
"learning_rate": 3.010945566265912e-07, |
|
"logits/chosen": -0.4714447855949402, |
|
"logits/rejected": -0.46636244654655457, |
|
"logps/chosen": -44.541507720947266, |
|
"logps/rejected": -33.366180419921875, |
|
"loss": 0.2075, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 1.5286719799041748, |
|
"rewards/margins": 2.761920928955078, |
|
"rewards/rejected": -1.233249306678772, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.9846153846153847, |
|
"eval_logits/chosen": -0.45233336091041565, |
|
"eval_logits/rejected": -0.44827964901924133, |
|
"eval_logps/chosen": -41.91486740112305, |
|
"eval_logps/rejected": -47.406375885009766, |
|
"eval_loss": 0.2178829461336136, |
|
"eval_rewards/accuracies": 0.8458904027938843, |
|
"eval_rewards/chosen": 1.645279884338379, |
|
"eval_rewards/margins": 3.909348964691162, |
|
"eval_rewards/rejected": -2.2640695571899414, |
|
"eval_runtime": 398.4802, |
|
"eval_samples_per_second": 4.352, |
|
"eval_steps_per_second": 0.183, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 1.0092307692307692, |
|
"grad_norm": 7.772486195689839, |
|
"learning_rate": 2.9044549913819124e-07, |
|
"logits/chosen": -0.4541618227958679, |
|
"logits/rejected": -0.4516042470932007, |
|
"logps/chosen": -42.46653747558594, |
|
"logps/rejected": -42.07926940917969, |
|
"loss": 0.1903, |
|
"rewards/accuracies": 0.890625, |
|
"rewards/chosen": 1.5551599264144897, |
|
"rewards/margins": 3.8793962001800537, |
|
"rewards/rejected": -2.3242363929748535, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 1.0338461538461539, |
|
"grad_norm": 5.782441199522606, |
|
"learning_rate": 2.797205094512266e-07, |
|
"logits/chosen": -0.43424034118652344, |
|
"logits/rejected": -0.42985162138938904, |
|
"logps/chosen": -41.127593994140625, |
|
"logps/rejected": -45.52932357788086, |
|
"loss": 0.1885, |
|
"rewards/accuracies": 0.890625, |
|
"rewards/chosen": 1.399495005607605, |
|
"rewards/margins": 3.8588943481445312, |
|
"rewards/rejected": -2.4593992233276367, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 1.0461538461538462, |
|
"eval_logits/chosen": -0.45614686608314514, |
|
"eval_logits/rejected": -0.45181941986083984, |
|
"eval_logps/chosen": -41.860015869140625, |
|
"eval_logps/rejected": -47.55991744995117, |
|
"eval_loss": 0.21262364089488983, |
|
"eval_rewards/accuracies": 0.8493150472640991, |
|
"eval_rewards/chosen": 1.6727051734924316, |
|
"eval_rewards/margins": 4.013547897338867, |
|
"eval_rewards/rejected": -2.3408427238464355, |
|
"eval_runtime": 401.4695, |
|
"eval_samples_per_second": 4.319, |
|
"eval_steps_per_second": 0.182, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 1.0584615384615386, |
|
"grad_norm": 4.97735971019092, |
|
"learning_rate": 2.6893972261320264e-07, |
|
"logits/chosen": -0.42422690987586975, |
|
"logits/rejected": -0.4213142693042755, |
|
"logps/chosen": -39.9366340637207, |
|
"logps/rejected": -40.46057891845703, |
|
"loss": 0.1749, |
|
"rewards/accuracies": 0.953125, |
|
"rewards/chosen": 1.7523412704467773, |
|
"rewards/margins": 3.447618007659912, |
|
"rewards/rejected": -1.6952769756317139, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 1.083076923076923, |
|
"grad_norm": 5.4006356704238465, |
|
"learning_rate": 2.5812337842494516e-07, |
|
"logits/chosen": -0.4536130726337433, |
|
"logits/rejected": -0.45070070028305054, |
|
"logps/chosen": -41.13835906982422, |
|
"logps/rejected": -36.39931106567383, |
|
"loss": 0.1732, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 1.473576545715332, |
|
"rewards/margins": 3.01824688911438, |
|
"rewards/rejected": -1.5446703433990479, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 1.1076923076923078, |
|
"grad_norm": 5.007052371554692, |
|
"learning_rate": 2.4729178344249006e-07, |
|
"logits/chosen": -0.45735257863998413, |
|
"logits/rejected": -0.45181047916412354, |
|
"logps/chosen": -39.739837646484375, |
|
"logps/rejected": -40.88286590576172, |
|
"loss": 0.1838, |
|
"rewards/accuracies": 0.859375, |
|
"rewards/chosen": 1.5848816633224487, |
|
"rewards/margins": 3.2226974964141846, |
|
"rewards/rejected": -1.6378157138824463, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 1.1076923076923078, |
|
"eval_logits/chosen": -0.44746431708335876, |
|
"eval_logits/rejected": -0.44344425201416016, |
|
"eval_logps/chosen": -41.768062591552734, |
|
"eval_logps/rejected": -47.707096099853516, |
|
"eval_loss": 0.20802097022533417, |
|
"eval_rewards/accuracies": 0.8527397513389587, |
|
"eval_rewards/chosen": 1.7186800241470337, |
|
"eval_rewards/margins": 4.133108615875244, |
|
"eval_rewards/rejected": -2.414428472518921, |
|
"eval_runtime": 398.368, |
|
"eval_samples_per_second": 4.353, |
|
"eval_steps_per_second": 0.183, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 1.1323076923076922, |
|
"grad_norm": 5.422550680740592, |
|
"learning_rate": 2.3646527285364563e-07, |
|
"logits/chosen": -0.45245465636253357, |
|
"logits/rejected": -0.4461210072040558, |
|
"logps/chosen": -40.16954040527344, |
|
"logps/rejected": -40.06535339355469, |
|
"loss": 0.1908, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 1.9867323637008667, |
|
"rewards/margins": 3.7591819763183594, |
|
"rewards/rejected": -1.7724494934082031, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 1.156923076923077, |
|
"grad_norm": 5.242542300523203, |
|
"learning_rate": 2.256641723008026e-07, |
|
"logits/chosen": -0.4320615530014038, |
|
"logits/rejected": -0.4263082444667816, |
|
"logps/chosen": -37.380733489990234, |
|
"logps/rejected": -34.89535903930664, |
|
"loss": 0.2062, |
|
"rewards/accuracies": 0.859375, |
|
"rewards/chosen": 1.6694892644882202, |
|
"rewards/margins": 3.161728858947754, |
|
"rewards/rejected": -1.4922394752502441, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 1.1692307692307693, |
|
"eval_logits/chosen": -0.4461810886859894, |
|
"eval_logits/rejected": -0.44209808111190796, |
|
"eval_logps/chosen": -41.719635009765625, |
|
"eval_logps/rejected": -47.83554458618164, |
|
"eval_loss": 0.20408523082733154, |
|
"eval_rewards/accuracies": 0.8595890402793884, |
|
"eval_rewards/chosen": 1.7428948879241943, |
|
"eval_rewards/margins": 4.221549987792969, |
|
"eval_rewards/rejected": -2.4786548614501953, |
|
"eval_runtime": 399.3763, |
|
"eval_samples_per_second": 4.342, |
|
"eval_steps_per_second": 0.183, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 1.1815384615384614, |
|
"grad_norm": 5.9451928875167805, |
|
"learning_rate": 2.1490875972166393e-07, |
|
"logits/chosen": -0.471175879240036, |
|
"logits/rejected": -0.46583351492881775, |
|
"logps/chosen": -34.772342681884766, |
|
"logps/rejected": -39.63105392456055, |
|
"loss": 0.1776, |
|
"rewards/accuracies": 0.84375, |
|
"rewards/chosen": 1.5616707801818848, |
|
"rewards/margins": 3.4220144748687744, |
|
"rewards/rejected": -1.8603436946868896, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 1.2061538461538461, |
|
"grad_norm": 5.4858414240825075, |
|
"learning_rate": 2.0421922727953595e-07, |
|
"logits/chosen": -0.4286724925041199, |
|
"logits/rejected": -0.4263175427913666, |
|
"logps/chosen": -36.57586669921875, |
|
"logps/rejected": -31.930763244628906, |
|
"loss": 0.1655, |
|
"rewards/accuracies": 0.84375, |
|
"rewards/chosen": 1.753572940826416, |
|
"rewards/margins": 2.9586939811706543, |
|
"rewards/rejected": -1.2051211595535278, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 1.2307692307692308, |
|
"grad_norm": 5.720592020809117, |
|
"learning_rate": 1.9361564345465145e-07, |
|
"logits/chosen": -0.4527323842048645, |
|
"logits/rejected": -0.44913169741630554, |
|
"logps/chosen": -34.954219818115234, |
|
"logps/rejected": -37.62247085571289, |
|
"loss": 0.1794, |
|
"rewards/accuracies": 0.84375, |
|
"rewards/chosen": 1.6976581811904907, |
|
"rewards/margins": 3.37191104888916, |
|
"rewards/rejected": -1.674253225326538, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 1.2307692307692308, |
|
"eval_logits/chosen": -0.446783185005188, |
|
"eval_logits/rejected": -0.44245585799217224, |
|
"eval_logps/chosen": -41.71480178833008, |
|
"eval_logps/rejected": -47.994571685791016, |
|
"eval_loss": 0.20066353678703308, |
|
"eval_rewards/accuracies": 0.8561643958091736, |
|
"eval_rewards/chosen": 1.745313048362732, |
|
"eval_rewards/margins": 4.30348014831543, |
|
"eval_rewards/rejected": -2.558166742324829, |
|
"eval_runtime": 400.8371, |
|
"eval_samples_per_second": 4.326, |
|
"eval_steps_per_second": 0.182, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 1.2553846153846153, |
|
"grad_norm": 5.495318529151337, |
|
"learning_rate": 1.8311791536769483e-07, |
|
"logits/chosen": -0.4154907763004303, |
|
"logits/rejected": -0.41147375106811523, |
|
"logps/chosen": -37.79768753051758, |
|
"logps/rejected": -48.32915496826172, |
|
"loss": 0.1734, |
|
"rewards/accuracies": 0.90625, |
|
"rewards/chosen": 1.4379969835281372, |
|
"rewards/margins": 4.379321098327637, |
|
"rewards/rejected": -2.94132399559021, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 1.28, |
|
"grad_norm": 5.371136709550833, |
|
"learning_rate": 1.7274575140626315e-07, |
|
"logits/chosen": -0.44362062215805054, |
|
"logits/rejected": -0.4401937425136566, |
|
"logps/chosen": -38.68498992919922, |
|
"logps/rejected": -41.304710388183594, |
|
"loss": 0.1857, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 1.9046180248260498, |
|
"rewards/margins": 3.721491813659668, |
|
"rewards/rejected": -1.8168736696243286, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 1.2923076923076924, |
|
"eval_logits/chosen": -0.44231978058815, |
|
"eval_logits/rejected": -0.43791159987449646, |
|
"eval_logps/chosen": -41.76345443725586, |
|
"eval_logps/rejected": -48.174468994140625, |
|
"eval_loss": 0.1971779763698578, |
|
"eval_rewards/accuracies": 0.8595890402793884, |
|
"eval_rewards/chosen": 1.7209877967834473, |
|
"eval_rewards/margins": 4.369105339050293, |
|
"eval_rewards/rejected": -2.6481170654296875, |
|
"eval_runtime": 400.535, |
|
"eval_samples_per_second": 4.329, |
|
"eval_steps_per_second": 0.182, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 1.3046153846153845, |
|
"grad_norm": 5.888788259237849, |
|
"learning_rate": 1.6251862422442788e-07, |
|
"logits/chosen": -0.4513101577758789, |
|
"logits/rejected": -0.4436649978160858, |
|
"logps/chosen": -40.830169677734375, |
|
"logps/rejected": -56.550262451171875, |
|
"loss": 0.1686, |
|
"rewards/accuracies": 0.890625, |
|
"rewards/chosen": 1.6578915119171143, |
|
"rewards/margins": 4.946527481079102, |
|
"rewards/rejected": -3.2886364459991455, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 1.3292307692307692, |
|
"grad_norm": 6.612486006770989, |
|
"learning_rate": 1.5245573418486135e-07, |
|
"logits/chosen": -0.44994574785232544, |
|
"logits/rejected": -0.4477950930595398, |
|
"logps/chosen": -38.80488586425781, |
|
"logps/rejected": -41.393211364746094, |
|
"loss": 0.1875, |
|
"rewards/accuracies": 0.84375, |
|
"rewards/chosen": 1.4876614809036255, |
|
"rewards/margins": 3.6893627643585205, |
|
"rewards/rejected": -2.2017014026641846, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 1.353846153846154, |
|
"grad_norm": 4.656660129291058, |
|
"learning_rate": 1.4257597331216208e-07, |
|
"logits/chosen": -0.4406875669956207, |
|
"logits/rejected": -0.43555599451065063, |
|
"logps/chosen": -42.75905990600586, |
|
"logps/rejected": -54.37929153442383, |
|
"loss": 0.1647, |
|
"rewards/accuracies": 0.9375, |
|
"rewards/chosen": 1.7431519031524658, |
|
"rewards/margins": 5.2682318687438965, |
|
"rewards/rejected": -3.5250799655914307, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 1.353846153846154, |
|
"eval_logits/chosen": -0.44183987379074097, |
|
"eval_logits/rejected": -0.4375387728214264, |
|
"eval_logps/chosen": -41.78536605834961, |
|
"eval_logps/rejected": -48.30013656616211, |
|
"eval_loss": 0.19461464881896973, |
|
"eval_rewards/accuracies": 0.8595890402793884, |
|
"eval_rewards/chosen": 1.710031509399414, |
|
"eval_rewards/margins": 4.420979976654053, |
|
"eval_rewards/rejected": -2.7109484672546387, |
|
"eval_runtime": 398.8533, |
|
"eval_samples_per_second": 4.347, |
|
"eval_steps_per_second": 0.183, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 1.3784615384615384, |
|
"grad_norm": 4.429475200922326, |
|
"learning_rate": 1.328978898250525e-07, |
|
"logits/chosen": -0.45591893792152405, |
|
"logits/rejected": -0.45284152030944824, |
|
"logps/chosen": -37.928653717041016, |
|
"logps/rejected": -38.99214172363281, |
|
"loss": 0.1596, |
|
"rewards/accuracies": 0.953125, |
|
"rewards/chosen": 1.702134609222412, |
|
"rewards/margins": 3.827345371246338, |
|
"rewards/rejected": -2.125210762023926, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 1.403076923076923, |
|
"grad_norm": 4.278591337865169, |
|
"learning_rate": 1.234396533140365e-07, |
|
"logits/chosen": -0.42062410712242126, |
|
"logits/rejected": -0.41781002283096313, |
|
"logps/chosen": -41.77610778808594, |
|
"logps/rejected": -48.5487060546875, |
|
"loss": 0.1524, |
|
"rewards/accuracies": 0.890625, |
|
"rewards/chosen": 1.599095106124878, |
|
"rewards/margins": 4.177494049072266, |
|
"rewards/rejected": -2.578399181365967, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 1.4153846153846155, |
|
"eval_logits/chosen": -0.44146084785461426, |
|
"eval_logits/rejected": -0.437191367149353, |
|
"eval_logps/chosen": -41.83814239501953, |
|
"eval_logps/rejected": -48.426334381103516, |
|
"eval_loss": 0.1936241090297699, |
|
"eval_rewards/accuracies": 0.8561643958091736, |
|
"eval_rewards/chosen": 1.6836414337158203, |
|
"eval_rewards/margins": 4.457688331604004, |
|
"eval_rewards/rejected": -2.7740468978881836, |
|
"eval_runtime": 398.1677, |
|
"eval_samples_per_second": 4.355, |
|
"eval_steps_per_second": 0.183, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 1.4276923076923076, |
|
"grad_norm": 4.8647599997437805, |
|
"learning_rate": 1.1421902062989178e-07, |
|
"logits/chosen": -0.44749611616134644, |
|
"logits/rejected": -0.4411754608154297, |
|
"logps/chosen": -43.03426742553711, |
|
"logps/rejected": -60.566200256347656, |
|
"loss": 0.1551, |
|
"rewards/accuracies": 0.90625, |
|
"rewards/chosen": 1.691102147102356, |
|
"rewards/margins": 5.743535041809082, |
|
"rewards/rejected": -4.052433013916016, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 1.4523076923076923, |
|
"grad_norm": 4.32184296596162, |
|
"learning_rate": 1.0525330254703788e-07, |
|
"logits/chosen": -0.4085264205932617, |
|
"logits/rejected": -0.4047048091888428, |
|
"logps/chosen": -40.241764068603516, |
|
"logps/rejected": -48.12974166870117, |
|
"loss": 0.1681, |
|
"rewards/accuracies": 0.796875, |
|
"rewards/chosen": 1.730395793914795, |
|
"rewards/margins": 4.292797088623047, |
|
"rewards/rejected": -2.5624003410339355, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 1.476923076923077, |
|
"grad_norm": 6.544542585214649, |
|
"learning_rate": 9.655933126436563e-08, |
|
"logits/chosen": -0.4505607783794403, |
|
"logits/rejected": -0.44797009229660034, |
|
"logps/chosen": -42.54741668701172, |
|
"logps/rejected": -34.47932052612305, |
|
"loss": 0.1668, |
|
"rewards/accuracies": 0.90625, |
|
"rewards/chosen": 1.5434067249298096, |
|
"rewards/margins": 3.265448570251465, |
|
"rewards/rejected": -1.7220419645309448, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 1.476923076923077, |
|
"eval_logits/chosen": -0.4385281801223755, |
|
"eval_logits/rejected": -0.43417835235595703, |
|
"eval_logps/chosen": -41.83414077758789, |
|
"eval_logps/rejected": -48.51993179321289, |
|
"eval_loss": 0.19258952140808105, |
|
"eval_rewards/accuracies": 0.8527397513389587, |
|
"eval_rewards/chosen": 1.6856427192687988, |
|
"eval_rewards/margins": 4.5064897537231445, |
|
"eval_rewards/rejected": -2.8208470344543457, |
|
"eval_runtime": 398.4093, |
|
"eval_samples_per_second": 4.352, |
|
"eval_steps_per_second": 0.183, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 1.5015384615384615, |
|
"grad_norm": 3.8817405874572923, |
|
"learning_rate": 8.81534288045431e-08, |
|
"logits/chosen": -0.4345233738422394, |
|
"logits/rejected": -0.432235985994339, |
|
"logps/chosen": -38.9439697265625, |
|
"logps/rejected": -40.60162353515625, |
|
"loss": 0.155, |
|
"rewards/accuracies": 0.875, |
|
"rewards/chosen": 1.4539457559585571, |
|
"rewards/margins": 3.9496073722839355, |
|
"rewards/rejected": -2.4956612586975098, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 1.5261538461538462, |
|
"grad_norm": 4.9773505616027425, |
|
"learning_rate": 8.005137637112302e-08, |
|
"logits/chosen": -0.42935946583747864, |
|
"logits/rejected": -0.4225008487701416, |
|
"logps/chosen": -40.33964157104492, |
|
"logps/rejected": -37.91059875488281, |
|
"loss": 0.1685, |
|
"rewards/accuracies": 0.90625, |
|
"rewards/chosen": 1.3645392656326294, |
|
"rewards/margins": 3.7918734550476074, |
|
"rewards/rejected": -2.4273343086242676, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 1.5384615384615383, |
|
"eval_logits/chosen": -0.4404720067977905, |
|
"eval_logits/rejected": -0.4361659288406372, |
|
"eval_logps/chosen": -41.75288391113281, |
|
"eval_logps/rejected": -48.5331916809082, |
|
"eval_loss": 0.19096876680850983, |
|
"eval_rewards/accuracies": 0.8561643958091736, |
|
"eval_rewards/chosen": 1.7262725830078125, |
|
"eval_rewards/margins": 4.553750991821289, |
|
"eval_rewards/rejected": -2.8274781703948975, |
|
"eval_runtime": 397.8296, |
|
"eval_samples_per_second": 4.359, |
|
"eval_steps_per_second": 0.183, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 1.5507692307692307, |
|
"grad_norm": 3.816285562997914, |
|
"learning_rate": 7.226838472098237e-08, |
|
"logits/chosen": -0.4305260181427002, |
|
"logits/rejected": -0.427184522151947, |
|
"logps/chosen": -38.50707244873047, |
|
"logps/rejected": -31.813642501831055, |
|
"loss": 0.1785, |
|
"rewards/accuracies": 0.84375, |
|
"rewards/chosen": 1.653688669204712, |
|
"rewards/margins": 3.0635831356048584, |
|
"rewards/rejected": -1.4098942279815674, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 1.5753846153846154, |
|
"grad_norm": 5.070564484576485, |
|
"learning_rate": 6.481906560771524e-08, |
|
"logits/chosen": -0.45029404759407043, |
|
"logits/rejected": -0.44603830575942993, |
|
"logps/chosen": -40.4008674621582, |
|
"logps/rejected": -34.62392044067383, |
|
"loss": 0.162, |
|
"rewards/accuracies": 0.890625, |
|
"rewards/chosen": 1.847829818725586, |
|
"rewards/margins": 3.5869626998901367, |
|
"rewards/rejected": -1.7391327619552612, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"grad_norm": 4.291727231427144, |
|
"learning_rate": 5.771740434959277e-08, |
|
"logits/chosen": -0.43359375, |
|
"logits/rejected": -0.4286433160305023, |
|
"logps/chosen": -38.0141716003418, |
|
"logps/rejected": -41.48249053955078, |
|
"loss": 0.1637, |
|
"rewards/accuracies": 0.828125, |
|
"rewards/chosen": 1.5690267086029053, |
|
"rewards/margins": 3.8614985942840576, |
|
"rewards/rejected": -2.2924716472625732, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 1.6, |
|
"eval_logits/chosen": -0.43712207674980164, |
|
"eval_logits/rejected": -0.43287885189056396, |
|
"eval_logps/chosen": -41.706233978271484, |
|
"eval_logps/rejected": -48.57079315185547, |
|
"eval_loss": 0.18982651829719543, |
|
"eval_rewards/accuracies": 0.8595890402793884, |
|
"eval_rewards/chosen": 1.7495962381362915, |
|
"eval_rewards/margins": 4.5958757400512695, |
|
"eval_rewards/rejected": -2.8462791442871094, |
|
"eval_runtime": 399.3255, |
|
"eval_samples_per_second": 4.342, |
|
"eval_steps_per_second": 0.183, |
|
"step": 130 |
|
} |
|
], |
|
"logging_steps": 2, |
|
"max_steps": 162, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 2, |
|
"save_steps": 10, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": false |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 0.0, |
|
"train_batch_size": 4, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |