{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9988998899889989,
  "eval_steps": 100,
  "global_step": 454,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 1.0869565217391303e-08,
      "logits/chosen": -3.1411595344543457,
      "logits/rejected": -3.165358543395996,
      "logps/chosen": -646.6878662109375,
      "logps/rejected": -403.175048828125,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.02,
      "learning_rate": 1.0869565217391303e-07,
      "logits/chosen": -3.141064167022705,
      "logits/rejected": -3.1610403060913086,
      "logps/chosen": -401.3539123535156,
      "logps/rejected": -352.9660339355469,
      "loss": 0.6892,
      "rewards/accuracies": 0.5277777910232544,
      "rewards/chosen": 0.00452665239572525,
      "rewards/margins": 0.008447266183793545,
      "rewards/rejected": -0.003920613322407007,
      "step": 10
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.1739130434782607e-07,
      "logits/chosen": -3.1017208099365234,
      "logits/rejected": -3.138807773590088,
      "logps/chosen": -500.6651916503906,
      "logps/rejected": -493.1768493652344,
      "loss": 0.575,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": 0.0947389230132103,
      "rewards/margins": 0.26824384927749634,
      "rewards/rejected": -0.17350491881370544,
      "step": 20
    },
    {
      "epoch": 0.07,
      "learning_rate": 3.260869565217391e-07,
      "logits/chosen": -3.1067874431610107,
      "logits/rejected": -3.1405322551727295,
      "logps/chosen": -480.84271240234375,
      "logps/rejected": -494.9310607910156,
      "loss": 0.2943,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": 0.1479349583387375,
      "rewards/margins": 1.3809252977371216,
      "rewards/rejected": -1.2329903841018677,
      "step": 30
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.3478260869565214e-07,
      "logits/chosen": -3.101059913635254,
      "logits/rejected": -3.1369175910949707,
      "logps/chosen": -513.6007690429688,
      "logps/rejected": -482.24749755859375,
      "loss": 0.1427,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": -0.13470908999443054,
      "rewards/margins": 3.3313612937927246,
      "rewards/rejected": -3.4660701751708984,
      "step": 40
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.998814299283415e-07,
      "logits/chosen": -3.0929319858551025,
      "logits/rejected": -3.12284779548645,
      "logps/chosen": -425.8465881347656,
      "logps/rejected": -536.5078125,
      "loss": 0.1082,
      "rewards/accuracies": 0.96875,
      "rewards/chosen": -0.6660862565040588,
      "rewards/margins": 4.888504981994629,
      "rewards/rejected": -5.554592132568359,
      "step": 50
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.985488079432037e-07,
      "logits/chosen": -3.074950933456421,
      "logits/rejected": -3.1008801460266113,
      "logps/chosen": -448.5619201660156,
      "logps/rejected": -580.5223388671875,
      "loss": 0.0652,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -0.6906077861785889,
      "rewards/margins": 6.580750465393066,
      "rewards/rejected": -7.271357536315918,
      "step": 60
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.957432749209755e-07,
      "logits/chosen": -3.050081491470337,
      "logits/rejected": -3.0804741382598877,
      "logps/chosen": -531.9468994140625,
      "logps/rejected": -544.5992431640625,
      "loss": 0.0433,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -0.3599981665611267,
      "rewards/margins": 7.281102180480957,
      "rewards/rejected": -7.6411004066467285,
      "step": 70
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.91481456572267e-07,
      "logits/chosen": -3.0391383171081543,
      "logits/rejected": -3.078916072845459,
      "logps/chosen": -484.3565368652344,
      "logps/rejected": -530.3445434570312,
      "loss": 0.0331,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -0.578689455986023,
      "rewards/margins": 8.04452896118164,
      "rewards/rejected": -8.62321662902832,
      "step": 80
    },
    {
      "epoch": 0.2,
      "learning_rate": 4.857886086178193e-07,
      "logits/chosen": -3.023233652114868,
      "logits/rejected": -3.063034772872925,
      "logps/chosen": -486.5849609375,
      "logps/rejected": -520.8024291992188,
      "loss": 0.0323,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": -0.9276763200759888,
      "rewards/margins": 8.495800971984863,
      "rewards/rejected": -9.423477172851562,
      "step": 90
    },
    {
      "epoch": 0.22,
      "learning_rate": 4.786984671220053e-07,
      "logits/chosen": -3.0411081314086914,
      "logits/rejected": -3.0742766857147217,
      "logps/chosen": -500.1527404785156,
      "logps/rejected": -571.0709838867188,
      "loss": 0.03,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -0.9822515249252319,
      "rewards/margins": 9.350059509277344,
      "rewards/rejected": -10.332311630249023,
      "step": 100
    },
    {
      "epoch": 0.22,
      "eval_logits/chosen": -3.0315322875976562,
      "eval_logits/rejected": -3.0753097534179688,
      "eval_logps/chosen": -508.1890563964844,
      "eval_logps/rejected": -524.89404296875,
      "eval_loss": 0.025990812107920647,
      "eval_rewards/accuracies": 0.9913366436958313,
      "eval_rewards/chosen": -0.9739615321159363,
      "eval_rewards/margins": 8.889510154724121,
      "eval_rewards/rejected": -9.863470077514648,
      "eval_runtime": 1403.5729,
      "eval_samples_per_second": 4.605,
      "eval_steps_per_second": 0.144,
      "step": 100
    },
    {
      "epoch": 0.24,
      "learning_rate": 4.702530485714461e-07,
      "logits/chosen": -3.0334174633026123,
      "logits/rejected": -3.067246913909912,
      "logps/chosen": -483.8785095214844,
      "logps/rejected": -502.6128845214844,
      "loss": 0.022,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -0.67408287525177,
      "rewards/margins": 8.81060791015625,
      "rewards/rejected": -9.484691619873047,
      "step": 110
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.605024008834863e-07,
      "logits/chosen": -3.015190601348877,
      "logits/rejected": -3.060077667236328,
      "logps/chosen": -545.3342895507812,
      "logps/rejected": -669.9097290039062,
      "loss": 0.025,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -0.818019688129425,
      "rewards/margins": 9.867290496826172,
      "rewards/rejected": -10.685310363769531,
      "step": 120
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.495043068200599e-07,
      "logits/chosen": -3.0233702659606934,
      "logits/rejected": -3.066175937652588,
      "logps/chosen": -504.80859375,
      "logps/rejected": -587.1600341796875,
      "loss": 0.0278,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": -1.0258172750473022,
      "rewards/margins": 9.689732551574707,
      "rewards/rejected": -10.715550422668457,
      "step": 130
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.373239415645323e-07,
      "logits/chosen": -3.010415554046631,
      "logits/rejected": -3.0565664768218994,
      "logps/chosen": -552.8411254882812,
      "logps/rejected": -613.7688598632812,
      "loss": 0.0255,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -1.403742790222168,
      "rewards/margins": 10.52885627746582,
      "rewards/rejected": -11.932600021362305,
      "step": 140
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.2403348649073167e-07,
      "logits/chosen": -3.017612934112549,
      "logits/rejected": -3.050736665725708,
      "logps/chosen": -428.8033752441406,
      "logps/rejected": -508.48492431640625,
      "loss": 0.0141,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -1.4764821529388428,
      "rewards/margins": 9.42202091217041,
      "rewards/rejected": -10.898504257202148,
      "step": 150
    },
    {
      "epoch": 0.35,
      "learning_rate": 4.097117014129903e-07,
      "logits/chosen": -3.011472225189209,
      "logits/rejected": -3.0492606163024902,
      "logps/chosen": -469.72100830078125,
      "logps/rejected": -583.81787109375,
      "loss": 0.0165,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.3786561489105225,
      "rewards/margins": 10.20110034942627,
      "rewards/rejected": -11.579755783081055,
      "step": 160
    },
    {
      "epoch": 0.37,
      "learning_rate": 3.944434578520628e-07,
      "logits/chosen": -3.000749111175537,
      "logits/rejected": -3.0481386184692383,
      "logps/chosen": -480.67205810546875,
      "logps/rejected": -470.3484802246094,
      "loss": 0.0168,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.5696184635162354,
      "rewards/margins": 10.106954574584961,
      "rewards/rejected": -11.676572799682617,
      "step": 170
    },
    {
      "epoch": 0.4,
      "learning_rate": 3.7831923608280514e-07,
      "logits/chosen": -2.9912686347961426,
      "logits/rejected": -3.0422778129577637,
      "logps/chosen": -507.1240234375,
      "logps/rejected": -651.5630493164062,
      "loss": 0.0218,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -1.5567066669464111,
      "rewards/margins": 11.426664352416992,
      "rewards/rejected": -12.983370780944824,
      "step": 180
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.614345889441346e-07,
      "logits/chosen": -2.9957118034362793,
      "logits/rejected": -3.0390748977661133,
      "logps/chosen": -489.7295837402344,
      "logps/rejected": -562.1702880859375,
      "loss": 0.0134,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -1.4575971364974976,
      "rewards/margins": 10.89696979522705,
      "rewards/rejected": -12.354567527770996,
      "step": 190
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.4388957558875316e-07,
      "logits/chosen": -2.993417739868164,
      "logits/rejected": -3.037677526473999,
      "logps/chosen": -493.8095703125,
      "logps/rejected": -655.7631225585938,
      "loss": 0.0184,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -1.747943639755249,
      "rewards/margins": 11.710515022277832,
      "rewards/rejected": -13.458457946777344,
      "step": 200
    },
    {
      "epoch": 0.44,
      "eval_logits/chosen": -2.987339735031128,
      "eval_logits/rejected": -3.0349130630493164,
      "eval_logps/chosen": -515.6115112304688,
      "eval_logps/rejected": -551.03173828125,
      "eval_loss": 0.016356179490685463,
      "eval_rewards/accuracies": 0.9925742745399475,
      "eval_rewards/chosen": -1.716205358505249,
      "eval_rewards/margins": 10.761037826538086,
      "eval_rewards/rejected": -12.47724437713623,
      "eval_runtime": 1400.2317,
      "eval_samples_per_second": 4.616,
      "eval_steps_per_second": 0.144,
      "step": 200
    },
    {
      "epoch": 0.46,
      "learning_rate": 3.2578816852826086e-07,
      "logits/chosen": -2.9862663745880127,
      "logits/rejected": -3.0257186889648438,
      "logps/chosen": -524.8349609375,
      "logps/rejected": -609.6709594726562,
      "loss": 0.0122,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -1.566489338874817,
      "rewards/margins": 11.642340660095215,
      "rewards/rejected": -13.208829879760742,
      "step": 210
    },
    {
      "epoch": 0.48,
      "learning_rate": 3.072376374875335e-07,
      "logits/chosen": -2.9962568283081055,
      "logits/rejected": -3.0320611000061035,
      "logps/chosen": -542.3546142578125,
      "logps/rejected": -589.4747314453125,
      "loss": 0.0151,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -1.8075675964355469,
      "rewards/margins": 11.104395866394043,
      "rewards/rejected": -12.911964416503906,
      "step": 220
    },
    {
      "epoch": 0.51,
      "learning_rate": 2.883479137196714e-07,
      "logits/chosen": -2.978184700012207,
      "logits/rejected": -3.021023750305176,
      "logps/chosen": -529.6023559570312,
      "logps/rejected": -625.8490600585938,
      "loss": 0.0172,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -1.9884437322616577,
      "rewards/margins": 11.636505126953125,
      "rewards/rejected": -13.624948501586914,
      "step": 230
    },
    {
      "epoch": 0.53,
      "learning_rate": 2.6923093854861593e-07,
      "logits/chosen": -2.9831950664520264,
      "logits/rejected": -3.0283496379852295,
      "logps/chosen": -533.4076538085938,
      "logps/rejected": -628.0453491210938,
      "loss": 0.0116,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -2.244208574295044,
      "rewards/margins": 11.818167686462402,
      "rewards/rejected": -14.062376022338867,
      "step": 240
    },
    {
      "epoch": 0.55,
      "learning_rate": 2.5e-07,
      "logits/chosen": -2.977255344390869,
      "logits/rejected": -3.029664993286133,
      "logps/chosen": -528.9208984375,
      "logps/rejected": -582.897216796875,
      "loss": 0.0166,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.21980357170105,
      "rewards/margins": 11.299016952514648,
      "rewards/rejected": -13.518819808959961,
      "step": 250
    },
    {
      "epoch": 0.57,
      "learning_rate": 2.3076906145138405e-07,
      "logits/chosen": -2.9897754192352295,
      "logits/rejected": -3.0283143520355225,
      "logps/chosen": -511.42620849609375,
      "logps/rejected": -596.39208984375,
      "loss": 0.0136,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -2.4839510917663574,
      "rewards/margins": 11.071741104125977,
      "rewards/rejected": -13.555691719055176,
      "step": 260
    },
    {
      "epoch": 0.59,
      "learning_rate": 2.1165208628032861e-07,
      "logits/chosen": -3.0000545978546143,
      "logits/rejected": -3.036176919937134,
      "logps/chosen": -471.12225341796875,
      "logps/rejected": -500.8485412597656,
      "loss": 0.0206,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -1.9776273965835571,
      "rewards/margins": 10.890663146972656,
      "rewards/rejected": -12.86828899383545,
      "step": 270
    },
    {
      "epoch": 0.62,
      "learning_rate": 1.9276236251246653e-07,
      "logits/chosen": -2.978591203689575,
      "logits/rejected": -3.018188714981079,
      "logps/chosen": -539.3682861328125,
      "logps/rejected": -672.0533447265625,
      "loss": 0.0161,
      "rewards/accuracies": 0.981249988079071,
      "rewards/chosen": -1.7842662334442139,
      "rewards/margins": 12.722532272338867,
      "rewards/rejected": -14.506797790527344,
      "step": 280
    },
    {
      "epoch": 0.64,
      "learning_rate": 1.742118314717391e-07,
      "logits/chosen": -2.9845988750457764,
      "logits/rejected": -3.0236034393310547,
      "logps/chosen": -565.0050659179688,
      "logps/rejected": -644.7886962890625,
      "loss": 0.0148,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.4795279502868652,
      "rewards/margins": 12.094808578491211,
      "rewards/rejected": -14.574336051940918,
      "step": 290
    },
    {
      "epoch": 0.66,
      "learning_rate": 1.5611042441124687e-07,
      "logits/chosen": -2.9855191707611084,
      "logits/rejected": -3.032355308532715,
      "logps/chosen": -595.8463134765625,
      "logps/rejected": -628.7684326171875,
      "loss": 0.0121,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.1751441955566406,
      "rewards/margins": 12.155618667602539,
      "rewards/rejected": -14.33076286315918,
      "step": 300
    },
    {
      "epoch": 0.66,
      "eval_logits/chosen": -2.9834606647491455,
      "eval_logits/rejected": -3.0325381755828857,
      "eval_logps/chosen": -519.0242309570312,
      "eval_logps/rejected": -563.0778198242188,
      "eval_loss": 0.014188007451593876,
      "eval_rewards/accuracies": 0.9938119053840637,
      "eval_rewards/chosen": -2.0574822425842285,
      "eval_rewards/margins": 11.624366760253906,
      "eval_rewards/rejected": -13.681849479675293,
      "eval_runtime": 1405.0119,
      "eval_samples_per_second": 4.6,
      "eval_steps_per_second": 0.144,
      "step": 300
    },
    {
      "epoch": 0.68,
      "learning_rate": 1.3856541105586545e-07,
      "logits/chosen": -2.987239122390747,
      "logits/rejected": -3.023975133895874,
      "logps/chosen": -528.511962890625,
      "logps/rejected": -649.2040405273438,
      "loss": 0.0098,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.0592288970947266,
      "rewards/margins": 12.022753715515137,
      "rewards/rejected": -14.081982612609863,
      "step": 310
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.2168076391719489e-07,
      "logits/chosen": -2.980776309967041,
      "logits/rejected": -3.024582624435425,
      "logps/chosen": -506.5166015625,
      "logps/rejected": -565.2005615234375,
      "loss": 0.0134,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.1642065048217773,
      "rewards/margins": 11.513936042785645,
      "rewards/rejected": -13.678143501281738,
      "step": 320
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.0555654214793722e-07,
      "logits/chosen": -2.9802050590515137,
      "logits/rejected": -3.0230603218078613,
      "logps/chosen": -486.35089111328125,
      "logps/rejected": -552.8853759765625,
      "loss": 0.0125,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -2.041994571685791,
      "rewards/margins": 12.138368606567383,
      "rewards/rejected": -14.1803617477417,
      "step": 330
    },
    {
      "epoch": 0.75,
      "learning_rate": 9.028829858700973e-08,
      "logits/chosen": -2.96352219581604,
      "logits/rejected": -3.0177297592163086,
      "logps/chosen": -572.4364013671875,
      "logps/rejected": -632.8357543945312,
      "loss": 0.0167,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -2.2510409355163574,
      "rewards/margins": 12.086463928222656,
      "rewards/rejected": -14.337506294250488,
      "step": 340
    },
    {
      "epoch": 0.77,
      "learning_rate": 7.596651350926836e-08,
      "logits/chosen": -2.9755184650421143,
      "logits/rejected": -3.0235707759857178,
      "logps/chosen": -524.8353271484375,
      "logps/rejected": -652.7691040039062,
      "loss": 0.0146,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.302769184112549,
      "rewards/margins": 12.383835792541504,
      "rewards/rejected": -14.686604499816895,
      "step": 350
    },
    {
      "epoch": 0.79,
      "learning_rate": 6.267605843546767e-08,
      "logits/chosen": -2.9833624362945557,
      "logits/rejected": -3.0215892791748047,
      "logps/chosen": -511.0621032714844,
      "logps/rejected": -656.1917724609375,
      "loss": 0.0112,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.3068997859954834,
      "rewards/margins": 12.16324234008789,
      "rewards/rejected": -14.47014331817627,
      "step": 360
    },
    {
      "epoch": 0.81,
      "learning_rate": 5.049569317994012e-08,
      "logits/chosen": -2.974331855773926,
      "logits/rejected": -3.0232813358306885,
      "logps/chosen": -505.38720703125,
      "logps/rejected": -592.7947998046875,
      "loss": 0.0179,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -2.487161636352539,
      "rewards/margins": 11.847845077514648,
      "rewards/rejected": -14.33500862121582,
      "step": 370
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.9497599116513705e-08,
      "logits/chosen": -2.9868783950805664,
      "logits/rejected": -3.0321686267852783,
      "logps/chosen": -536.3109130859375,
      "logps/rejected": -620.6571044921875,
      "loss": 0.0134,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -2.161980152130127,
      "rewards/margins": 11.987706184387207,
      "rewards/rejected": -14.149685859680176,
      "step": 380
    },
    {
      "epoch": 0.86,
      "learning_rate": 2.974695142855388e-08,
      "logits/chosen": -2.9927778244018555,
      "logits/rejected": -3.0283336639404297,
      "logps/chosen": -508.8233947753906,
      "logps/rejected": -651.0311279296875,
      "loss": 0.0099,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.224902629852295,
      "rewards/margins": 12.823506355285645,
      "rewards/rejected": -15.048408508300781,
      "step": 390
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.1301532877994742e-08,
      "logits/chosen": -2.975757122039795,
      "logits/rejected": -3.0192081928253174,
      "logps/chosen": -527.3839111328125,
      "logps/rejected": -575.6035766601562,
      "loss": 0.0198,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.4137749671936035,
      "rewards/margins": 11.909402847290039,
      "rewards/rejected": -14.323179244995117,
      "step": 400
    },
    {
      "epoch": 0.88,
      "eval_logits/chosen": -2.980133056640625,
      "eval_logits/rejected": -3.029346466064453,
      "eval_logps/chosen": -519.8800659179688,
      "eval_logps/rejected": -565.1162719726562,
      "eval_loss": 0.013889539055526257,
      "eval_rewards/accuracies": 0.9950494766235352,
      "eval_rewards/chosen": -2.1430556774139404,
      "eval_rewards/margins": 11.742639541625977,
      "eval_rewards/rejected": -13.885696411132812,
      "eval_runtime": 1399.6154,
      "eval_samples_per_second": 4.618,
      "eval_steps_per_second": 0.144,
      "step": 400
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.4211391382180637e-08,
      "logits/chosen": -2.9858908653259277,
      "logits/rejected": -3.024933338165283,
      "logps/chosen": -524.462646484375,
      "logps/rejected": -595.0488891601562,
      "loss": 0.0157,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -2.5176589488983154,
      "rewards/margins": 11.485154151916504,
      "rewards/rejected": -14.002812385559082,
      "step": 410
    },
    {
      "epoch": 0.92,
      "learning_rate": 8.518543427732949e-09,
      "logits/chosen": -2.978140354156494,
      "logits/rejected": -3.022719621658325,
      "logps/chosen": -536.0445556640625,
      "logps/rejected": -647.928955078125,
      "loss": 0.0152,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.0772526264190674,
      "rewards/margins": 12.510821342468262,
      "rewards/rejected": -14.58807373046875,
      "step": 420
    },
    {
      "epoch": 0.95,
      "learning_rate": 4.256725079024553e-09,
      "logits/chosen": -2.9991843700408936,
      "logits/rejected": -3.040334463119507,
      "logps/chosen": -480.5723571777344,
      "logps/rejected": -583.2156982421875,
      "loss": 0.0174,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.4928371906280518,
      "rewards/margins": 11.378791809082031,
      "rewards/rejected": -13.87162971496582,
      "step": 430
    },
    {
      "epoch": 0.97,
      "learning_rate": 1.4511920567963908e-09,
      "logits/chosen": -2.968715190887451,
      "logits/rejected": -3.013766288757324,
      "logps/chosen": -563.4217529296875,
      "logps/rejected": -659.7140502929688,
      "loss": 0.0212,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.8111333847045898,
      "rewards/margins": 12.884384155273438,
      "rewards/rejected": -14.695515632629395,
      "step": 440
    },
    {
      "epoch": 0.99,
      "learning_rate": 1.1857007165852472e-10,
      "logits/chosen": -2.9924755096435547,
      "logits/rejected": -3.0349550247192383,
      "logps/chosen": -521.3587646484375,
      "logps/rejected": -614.9534912109375,
      "loss": 0.0103,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.195035934448242,
      "rewards/margins": 12.315847396850586,
      "rewards/rejected": -14.510884284973145,
      "step": 450
    },
    {
      "epoch": 1.0,
      "step": 454,
      "total_flos": 0.0,
      "train_loss": 0.05705668801843857,
      "train_runtime": 30574.286,
      "train_samples_per_second": 1.902,
      "train_steps_per_second": 0.015
    }
  ],
  "logging_steps": 10,
  "max_steps": 454,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}