{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9984,
  "eval_steps": 100,
  "global_step": 156,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 3.125e-07,
      "logits/chosen": -2.749149799346924,
      "logits/rejected": -2.5982635021209717,
      "logps/chosen": -351.88592529296875,
      "logps/rejected": -339.1416931152344,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06,
      "learning_rate": 3.125e-06,
      "logits/chosen": -2.634215831756592,
      "logits/rejected": -2.565009593963623,
      "logps/chosen": -264.03338623046875,
      "logps/rejected": -276.0550842285156,
      "loss": 0.6919,
      "rewards/accuracies": 0.5277777910232544,
      "rewards/chosen": -0.004038373474031687,
      "rewards/margins": 0.00274546816945076,
      "rewards/rejected": -0.006783841177821159,
      "step": 10
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.989935734988098e-06,
      "logits/chosen": -2.6059346199035645,
      "logits/rejected": -2.5347509384155273,
      "logps/chosen": -289.2115173339844,
      "logps/rejected": -285.11651611328125,
      "loss": 0.6797,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": -0.030138880014419556,
      "rewards/margins": 0.022945497184991837,
      "rewards/rejected": -0.05308438092470169,
      "step": 20
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.8776412907378845e-06,
      "logits/chosen": -2.549204111099243,
      "logits/rejected": -2.4668586254119873,
      "logps/chosen": -296.6357727050781,
      "logps/rejected": -286.880615234375,
      "loss": 0.6602,
      "rewards/accuracies": 0.671875,
      "rewards/chosen": -0.08948583900928497,
      "rewards/margins": 0.06929855048656464,
      "rewards/rejected": -0.1587844043970108,
      "step": 30
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.646121984004666e-06,
      "logits/chosen": -2.607750177383423,
      "logits/rejected": -2.4750921726226807,
      "logps/chosen": -298.0829162597656,
      "logps/rejected": -306.82696533203125,
      "loss": 0.6514,
      "rewards/accuracies": 0.659375011920929,
      "rewards/chosen": -0.19320181012153625,
      "rewards/margins": 0.12534096837043762,
      "rewards/rejected": -0.3185427784919739,
      "step": 40
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.3069871595684795e-06,
      "logits/chosen": -2.520982265472412,
      "logits/rejected": -2.4599428176879883,
      "logps/chosen": -296.30560302734375,
      "logps/rejected": -325.0945129394531,
      "loss": 0.6459,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.12883062660694122,
      "rewards/margins": 0.13393087685108185,
      "rewards/rejected": -0.26276153326034546,
      "step": 50
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.8772424536302565e-06,
      "logits/chosen": -2.5419609546661377,
      "logits/rejected": -2.4526138305664062,
      "logps/chosen": -303.03753662109375,
      "logps/rejected": -309.76702880859375,
      "loss": 0.6405,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.13351231813430786,
      "rewards/margins": 0.12426233291625977,
      "rewards/rejected": -0.2577746510505676,
      "step": 60
    },
    {
      "epoch": 0.45,
      "learning_rate": 3.3784370602033572e-06,
      "logits/chosen": -2.4560115337371826,
      "logits/rejected": -2.3811163902282715,
      "logps/chosen": -344.1382141113281,
      "logps/rejected": -355.2807922363281,
      "loss": 0.6231,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.3589150905609131,
      "rewards/margins": 0.1809873878955841,
      "rewards/rejected": -0.5399025678634644,
      "step": 70
    },
    {
      "epoch": 0.51,
      "learning_rate": 2.835583164544139e-06,
      "logits/chosen": -2.4101650714874268,
      "logits/rejected": -2.2786593437194824,
      "logps/chosen": -342.35260009765625,
      "logps/rejected": -337.49755859375,
      "loss": 0.6147,
      "rewards/accuracies": 0.653124988079071,
      "rewards/chosen": -0.3759569525718689,
      "rewards/margins": 0.1979154795408249,
      "rewards/rejected": -0.573872447013855,
      "step": 80
    },
    {
      "epoch": 0.58,
      "learning_rate": 2.2759017277414165e-06,
      "logits/chosen": -2.2483856678009033,
      "logits/rejected": -2.258373260498047,
      "logps/chosen": -337.4295959472656,
      "logps/rejected": -386.83465576171875,
      "loss": 0.5889,
      "rewards/accuracies": 0.6968749761581421,
      "rewards/chosen": -0.4705071449279785,
      "rewards/margins": 0.3142710328102112,
      "rewards/rejected": -0.7847781181335449,
      "step": 90
    },
    {
      "epoch": 0.64,
      "learning_rate": 1.7274575140626318e-06,
      "logits/chosen": -2.268360137939453,
      "logits/rejected": -2.165742874145508,
      "logps/chosen": -334.42974853515625,
      "logps/rejected": -348.5438232421875,
      "loss": 0.5962,
      "rewards/accuracies": 0.684374988079071,
      "rewards/chosen": -0.475590318441391,
      "rewards/margins": 0.3093096613883972,
      "rewards/rejected": -0.7849000692367554,
      "step": 100
    },
    {
      "epoch": 0.64,
      "eval_logits/chosen": -2.2236928939819336,
      "eval_logits/rejected": -2.0692262649536133,
      "eval_logps/chosen": -337.8192443847656,
      "eval_logps/rejected": -356.7066955566406,
      "eval_loss": 0.594050407409668,
      "eval_rewards/accuracies": 0.7020000219345093,
      "eval_rewards/chosen": -0.43988561630249023,
      "eval_rewards/margins": 0.31074145436286926,
      "eval_rewards/rejected": -0.7506271600723267,
      "eval_runtime": 690.4847,
      "eval_samples_per_second": 2.897,
      "eval_steps_per_second": 0.362,
      "step": 100
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.217751806485235e-06,
      "logits/chosen": -2.207293748855591,
      "logits/rejected": -1.979222059249878,
      "logps/chosen": -342.3262634277344,
      "logps/rejected": -362.3468933105469,
      "loss": 0.5798,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.526713490486145,
      "rewards/margins": 0.35861119627952576,
      "rewards/rejected": -0.8853246569633484,
      "step": 110
    },
    {
      "epoch": 0.77,
      "learning_rate": 7.723433775328385e-07,
      "logits/chosen": -2.0455784797668457,
      "logits/rejected": -1.9951608180999756,
      "logps/chosen": -329.8724670410156,
      "logps/rejected": -370.48577880859375,
      "loss": 0.5914,
      "rewards/accuracies": 0.671875,
      "rewards/chosen": -0.56132572889328,
      "rewards/margins": 0.291836678981781,
      "rewards/rejected": -0.853162407875061,
      "step": 120
    },
    {
      "epoch": 0.83,
      "learning_rate": 4.1356686569674344e-07,
      "logits/chosen": -2.083890438079834,
      "logits/rejected": -1.9531793594360352,
      "logps/chosen": -333.07171630859375,
      "logps/rejected": -376.0746765136719,
      "loss": 0.5891,
      "rewards/accuracies": 0.715624988079071,
      "rewards/chosen": -0.44948244094848633,
      "rewards/margins": 0.361866295337677,
      "rewards/rejected": -0.8113487362861633,
      "step": 130
    },
    {
      "epoch": 0.9,
      "learning_rate": 1.59412823400657e-07,
      "logits/chosen": -2.076089382171631,
      "logits/rejected": -1.8212101459503174,
      "logps/chosen": -350.77374267578125,
      "logps/rejected": -360.770263671875,
      "loss": 0.5677,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.4118824005126953,
      "rewards/margins": 0.38872456550598145,
      "rewards/rejected": -0.8006070256233215,
      "step": 140
    },
    {
      "epoch": 0.96,
      "learning_rate": 2.262559558016325e-08,
      "logits/chosen": -2.0566818714141846,
      "logits/rejected": -1.8309047222137451,
      "logps/chosen": -340.4973449707031,
      "logps/rejected": -353.11956787109375,
      "loss": 0.5863,
      "rewards/accuracies": 0.721875011920929,
      "rewards/chosen": -0.43235865235328674,
      "rewards/margins": 0.37298649549484253,
      "rewards/rejected": -0.8053451776504517,
      "step": 150
    },
    {
      "epoch": 1.0,
      "step": 156,
      "total_flos": 0.0,
      "train_loss": 0.6200787784197391,
      "train_runtime": 14536.4272,
      "train_samples_per_second": 1.376,
      "train_steps_per_second": 0.011
    }
  ],
  "logging_steps": 10,
  "max_steps": 156,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}