{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.968,
  "eval_steps": 100,
  "global_step": 248,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 2.0000000000000002e-07,
      "logits/chosen": 0.2908777594566345,
      "logits/rejected": 0.2437899112701416,
      "logps/chosen": -201.0093536376953,
      "logps/rejected": -168.08958435058594,
      "loss": 0.0011,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16,
      "learning_rate": 2.0000000000000003e-06,
      "logits/chosen": 0.1902419924736023,
      "logits/rejected": 0.1756160408258438,
      "logps/chosen": -197.62713623046875,
      "logps/rejected": -158.33740234375,
      "loss": 0.0012,
      "rewards/accuracies": 0.3680555522441864,
      "rewards/chosen": -0.0008540299604646862,
      "rewards/margins": 0.0005007751169614494,
      "rewards/rejected": -0.0013548050774261355,
      "step": 10
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.000000000000001e-06,
      "logits/chosen": -0.013944757170975208,
      "logits/rejected": 0.14085523784160614,
      "logps/chosen": -181.885009765625,
      "logps/rejected": -141.39883422851562,
      "loss": 0.0011,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": -0.002464529126882553,
      "rewards/margins": 0.0010917274048551917,
      "rewards/rejected": -0.003556256415322423,
      "step": 20
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.993800445762451e-06,
      "logits/chosen": 0.13207468390464783,
      "logits/rejected": 0.1876787692308426,
      "logps/chosen": -170.35769653320312,
      "logps/rejected": -137.86196899414062,
      "loss": 0.0012,
      "rewards/accuracies": 0.39375001192092896,
      "rewards/chosen": -0.0012240457581356168,
      "rewards/margins": 0.00047588563757017255,
      "rewards/rejected": -0.0016999313374981284,
      "step": 30
    },
    {
      "epoch": 0.64,
      "learning_rate": 4.944388344834205e-06,
      "logits/chosen": 0.10822992026805878,
      "logits/rejected": 0.14006157219409943,
      "logps/chosen": -189.98333740234375,
      "logps/rejected": -149.42813110351562,
      "loss": 0.0011,
      "rewards/accuracies": 0.48124998807907104,
      "rewards/chosen": 0.0006204573437571526,
      "rewards/margins": 0.002387461019679904,
      "rewards/rejected": -0.0017670036759227514,
      "step": 40
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.8465431931347904e-06,
      "logits/chosen": 0.19466236233711243,
      "logits/rejected": 0.2593666613101959,
      "logps/chosen": -200.7144012451172,
      "logps/rejected": -158.9434814453125,
      "loss": 0.0011,
      "rewards/accuracies": 0.4437499940395355,
      "rewards/chosen": 0.0003794836229644716,
      "rewards/margins": 0.0014064621645957232,
      "rewards/rejected": -0.0010269784834235907,
      "step": 50
    },
    {
      "epoch": 0.96,
      "learning_rate": 4.702203692102539e-06,
      "logits/chosen": 0.12232597917318344,
      "logits/rejected": 0.16534267365932465,
      "logps/chosen": -186.4813995361328,
      "logps/rejected": -143.80760192871094,
      "loss": 0.001,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 0.0033827635925263166,
      "rewards/margins": 0.0028825027402490377,
      "rewards/rejected": 0.0005002607358619571,
      "step": 60
    },
    {
      "epoch": 1.12,
      "learning_rate": 4.514229781074239e-06,
      "logits/chosen": 0.1178612932562828,
      "logits/rejected": 0.13041642308235168,
      "logps/chosen": -209.6604766845703,
      "logps/rejected": -166.01910400390625,
      "loss": 0.0011,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": 0.001676331041380763,
      "rewards/margins": 0.002085132524371147,
      "rewards/rejected": -0.0004088011628482491,
      "step": 70
    },
    {
      "epoch": 1.28,
      "learning_rate": 4.286345970517195e-06,
      "logits/chosen": 0.08851321786642075,
      "logits/rejected": 0.14383965730667114,
      "logps/chosen": -195.59994506835938,
      "logps/rejected": -156.7967071533203,
      "loss": 0.0011,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 0.0038293059915304184,
      "rewards/margins": 0.0034115822054445744,
      "rewards/rejected": 0.0004177238733973354,
      "step": 80
    },
    {
      "epoch": 1.44,
      "learning_rate": 4.023067544670082e-06,
      "logits/chosen": 0.03590596467256546,
      "logits/rejected": 0.12467700242996216,
      "logps/chosen": -194.0576629638672,
      "logps/rejected": -146.26744079589844,
      "loss": 0.0011,
      "rewards/accuracies": 0.48124998807907104,
      "rewards/chosen": 0.002118715550750494,
      "rewards/margins": 0.002578428713604808,
      "rewards/rejected": -0.0004597128718160093,
      "step": 90
    },
    {
      "epoch": 1.6,
      "learning_rate": 3.7296110958116845e-06,
      "logits/chosen": 0.14186295866966248,
      "logits/rejected": 0.1782732456922531,
      "logps/chosen": -181.44393920898438,
      "logps/rejected": -144.7650146484375,
      "loss": 0.001,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": 0.0033587156794965267,
      "rewards/margins": 0.0031067118979990482,
      "rewards/rejected": 0.0002520036359783262,
      "step": 100
    },
    {
      "epoch": 1.6,
      "eval_logits/chosen": -0.04001983627676964,
      "eval_logits/rejected": 0.05830775573849678,
      "eval_logps/chosen": -307.1775207519531,
      "eval_logps/rejected": -279.2533874511719,
      "eval_loss": 0.0017920270329341292,
      "eval_rewards/accuracies": 0.47850000858306885,
      "eval_rewards/chosen": -0.003455465892329812,
      "eval_rewards/margins": -0.0011921715922653675,
      "eval_rewards/rejected": -0.002263294532895088,
      "eval_runtime": 412.7782,
      "eval_samples_per_second": 4.845,
      "eval_steps_per_second": 1.211,
      "step": 100
    },
    {
      "epoch": 1.76,
      "learning_rate": 3.4117911628292944e-06,
      "logits/chosen": 0.1313779205083847,
      "logits/rejected": 0.07039429247379303,
      "logps/chosen": -194.87823486328125,
      "logps/rejected": -160.7736358642578,
      "loss": 0.001,
      "rewards/accuracies": 0.45625001192092896,
      "rewards/chosen": 0.005443253554403782,
      "rewards/margins": 0.004203255753964186,
      "rewards/rejected": 0.0012399973347783089,
      "step": 110
    },
    {
      "epoch": 1.92,
      "learning_rate": 3.075905022087675e-06,
      "logits/chosen": 0.12658381462097168,
      "logits/rejected": 0.23149845004081726,
      "logps/chosen": -178.28390502929688,
      "logps/rejected": -143.67715454101562,
      "loss": 0.001,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.004203868098556995,
      "rewards/margins": 0.004237414337694645,
      "rewards/rejected": -3.354633372509852e-05,
      "step": 120
    },
    {
      "epoch": 2.08,
      "learning_rate": 2.728607913349464e-06,
      "logits/chosen": 0.15758894383907318,
      "logits/rejected": 0.2553151249885559,
      "logps/chosen": -181.8848114013672,
      "logps/rejected": -135.99990844726562,
      "loss": 0.001,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 0.009758567437529564,
      "rewards/margins": 0.006436445750296116,
      "rewards/rejected": 0.0033221219200640917,
      "step": 130
    },
    {
      "epoch": 2.24,
      "learning_rate": 2.376781173017589e-06,
      "logits/chosen": 0.1286291778087616,
      "logits/rejected": 0.17975321412086487,
      "logps/chosen": -187.51390075683594,
      "logps/rejected": -150.4195556640625,
      "loss": 0.0009,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.007837988436222076,
      "rewards/margins": 0.007232234813272953,
      "rewards/rejected": 0.0006057542050257325,
      "step": 140
    },
    {
      "epoch": 2.4,
      "learning_rate": 2.0273958875043877e-06,
      "logits/chosen": 0.15904466807842255,
      "logits/rejected": 0.0839221328496933,
      "logps/chosen": -204.20040893554688,
      "logps/rejected": -167.21383666992188,
      "loss": 0.001,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.006473573390394449,
      "rewards/margins": 0.005708726122975349,
      "rewards/rejected": 0.0007648469763807952,
      "step": 150
    },
    {
      "epoch": 2.56,
      "learning_rate": 1.6873747682962393e-06,
      "logits/chosen": 0.024461204186081886,
      "logits/rejected": 0.16477885842323303,
      "logps/chosen": -176.70254516601562,
      "logps/rejected": -132.0756072998047,
      "loss": 0.0009,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.010805860161781311,
      "rewards/margins": 0.007954512722790241,
      "rewards/rejected": 0.002851347904652357,
      "step": 160
    },
    {
      "epoch": 2.72,
      "learning_rate": 1.363454985517803e-06,
      "logits/chosen": 0.07506619393825531,
      "logits/rejected": 0.18663427233695984,
      "logps/chosen": -182.42135620117188,
      "logps/rejected": -143.20504760742188,
      "loss": 0.0009,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.010290954262018204,
      "rewards/margins": 0.008597767911851406,
      "rewards/rejected": 0.0016931863501667976,
      "step": 170
    },
    {
      "epoch": 2.88,
      "learning_rate": 1.062054677808238e-06,
      "logits/chosen": 0.1575741171836853,
      "logits/rejected": 0.14560917019844055,
      "logps/chosen": -210.0597686767578,
      "logps/rejected": -157.42813110351562,
      "loss": 0.0008,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.011623604223132133,
      "rewards/margins": 0.008374488912522793,
      "rewards/rejected": 0.0032491139136254787,
      "step": 180
    },
    {
      "epoch": 3.04,
      "learning_rate": 7.891457834794711e-07,
      "logits/chosen": 0.21218101680278778,
      "logits/rejected": 0.16236719489097595,
      "logps/chosen": -174.3014373779297,
      "logps/rejected": -142.51658630371094,
      "loss": 0.001,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.008685302920639515,
      "rewards/margins": 0.005553290247917175,
      "rewards/rejected": 0.0031320122070610523,
      "step": 190
    },
    {
      "epoch": 3.2,
      "learning_rate": 5.501357126768117e-07,
      "logits/chosen": 0.043852098286151886,
      "logits/rejected": 0.14689919352531433,
      "logps/chosen": -192.6407012939453,
      "logps/rejected": -153.15914916992188,
      "loss": 0.0009,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": 0.010076276957988739,
      "rewards/margins": 0.0075560063123703,
      "rewards/rejected": 0.0025202720426023006,
      "step": 200
    },
    {
      "epoch": 3.2,
      "eval_logits/chosen": -0.05530371889472008,
      "eval_logits/rejected": 0.045542649924755096,
      "eval_logps/chosen": -307.6504211425781,
      "eval_logps/rejected": -279.6910400390625,
      "eval_loss": 0.0019263506401330233,
      "eval_rewards/accuracies": 0.45649999380111694,
      "eval_rewards/chosen": -0.008184487000107765,
      "eval_rewards/margins": -0.0015445188619196415,
      "eval_rewards/rejected": -0.006639969069510698,
      "eval_runtime": 412.4498,
      "eval_samples_per_second": 4.849,
      "eval_steps_per_second": 1.212,
      "step": 200
    },
    {
      "epoch": 3.36,
      "learning_rate": 3.4976020508682345e-07,
      "logits/chosen": 0.14779892563819885,
      "logits/rejected": 0.2323513776063919,
      "logps/chosen": -182.7720489501953,
      "logps/rejected": -142.44332885742188,
      "loss": 0.0009,
      "rewards/accuracies": 0.543749988079071,
      "rewards/chosen": 0.009827367961406708,
      "rewards/margins": 0.00690328236669302,
      "rewards/rejected": 0.0029240844305604696,
      "step": 210
    },
    {
      "epoch": 3.52,
      "learning_rate": 1.9198949610721273e-07,
      "logits/chosen": 0.1586412489414215,
      "logits/rejected": 0.13802394270896912,
      "logps/chosen": -198.50106811523438,
      "logps/rejected": -160.4164581298828,
      "loss": 0.0009,
      "rewards/accuracies": 0.4937500059604645,
      "rewards/chosen": 0.010713080875575542,
      "rewards/margins": 0.006927688606083393,
      "rewards/rejected": 0.0037853927351534367,
      "step": 220
    },
    {
      "epoch": 3.68,
      "learning_rate": 7.994965069994143e-08,
      "logits/chosen": 0.07671042531728745,
      "logits/rejected": 0.1526561677455902,
      "logps/chosen": -182.83004760742188,
      "logps/rejected": -152.03683471679688,
      "loss": 0.0009,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 0.00937131978571415,
      "rewards/margins": 0.006407036446034908,
      "rewards/rejected": 0.002964283572509885,
      "step": 230
    },
    {
      "epoch": 3.84,
      "learning_rate": 1.5860623616664183e-08,
      "logits/chosen": 0.07526861876249313,
      "logits/rejected": 0.0892513170838356,
      "logps/chosen": -178.50074768066406,
      "logps/rejected": -141.47157287597656,
      "loss": 0.001,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": 0.012702028267085552,
      "rewards/margins": 0.005834975745528936,
      "rewards/rejected": 0.006867053918540478,
      "step": 240
    },
    {
      "epoch": 3.97,
      "step": 248,
      "total_flos": 0.0,
      "train_loss": 0.0009980735903222775,
      "train_runtime": 2694.3955,
      "train_samples_per_second": 1.485,
      "train_steps_per_second": 0.092
    }
  ],
  "logging_steps": 10,
  "max_steps": 248,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}