{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.99581589958159,
  "eval_steps": 500,
  "global_step": 119,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 4.166666666666666e-08,
      "logits/chosen": -2.6852197647094727,
      "logits/rejected": -2.6903719902038574,
      "logps/chosen": -263.7275390625,
      "logps/rejected": -230.14215087890625,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.1666666666666667e-07,
      "logits/chosen": -2.7384653091430664,
      "logits/rejected": -2.737618923187256,
      "logps/chosen": -278.2864990234375,
      "logps/rejected": -254.0323944091797,
      "loss": 0.6927,
      "rewards/accuracies": 0.4722222089767456,
      "rewards/chosen": 0.0012941197492182255,
      "rewards/margins": 0.00029929642914794385,
      "rewards/rejected": 0.000994823407381773,
      "step": 10
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.931352528237397e-07,
      "logits/chosen": -2.8008759021759033,
      "logits/rejected": -2.7767581939697266,
      "logps/chosen": -285.1484375,
      "logps/rejected": -270.07513427734375,
      "loss": 0.6848,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": 0.019194435328245163,
      "rewards/margins": 0.015600068494677544,
      "rewards/rejected": 0.0035943654365837574,
      "step": 20
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.658920803689553e-07,
      "logits/chosen": -2.7777934074401855,
      "logits/rejected": -2.7765698432922363,
      "logps/chosen": -259.2841796875,
      "logps/rejected": -247.3909454345703,
      "loss": 0.6659,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": 0.011103945784270763,
      "rewards/margins": 0.06937336921691895,
      "rewards/rejected": -0.058269429951906204,
      "step": 30
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.201712553872657e-07,
      "logits/chosen": -2.7775871753692627,
      "logits/rejected": -2.7704668045043945,
      "logps/chosen": -282.95001220703125,
      "logps/rejected": -281.3163146972656,
      "loss": 0.6464,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.08944134414196014,
      "rewards/margins": 0.11117666959762573,
      "rewards/rejected": -0.20061802864074707,
      "step": 40
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.598859066780754e-07,
      "logits/chosen": -2.800177574157715,
      "logits/rejected": -2.8010470867156982,
      "logps/chosen": -316.843505859375,
      "logps/rejected": -291.880126953125,
      "loss": 0.6165,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.18551835417747498,
      "rewards/margins": 0.25780096650123596,
      "rewards/rejected": -0.44331932067871094,
      "step": 50
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.9019570347986706e-07,
      "logits/chosen": -2.77445125579834,
      "logits/rejected": -2.755255937576294,
      "logps/chosen": -318.5345764160156,
      "logps/rejected": -289.75677490234375,
      "loss": 0.6159,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -0.2439030408859253,
      "rewards/margins": 0.2745346128940582,
      "rewards/rejected": -0.5184376239776611,
      "step": 60
    },
    {
      "epoch": 0.59,
      "learning_rate": 2.1706525253979534e-07,
      "logits/chosen": -2.769376277923584,
      "logits/rejected": -2.741185188293457,
      "logps/chosen": -317.98175048828125,
      "logps/rejected": -279.78143310546875,
      "loss": 0.6032,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.35874003171920776,
      "rewards/margins": 0.24173276126384735,
      "rewards/rejected": -0.6004728078842163,
      "step": 70
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.4675360263490295e-07,
      "logits/chosen": -2.7139642238616943,
      "logits/rejected": -2.6934971809387207,
      "logps/chosen": -261.20501708984375,
      "logps/rejected": -310.49908447265625,
      "loss": 0.6016,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.349643737077713,
      "rewards/margins": 0.3635331690311432,
      "rewards/rejected": -0.7131768465042114,
      "step": 80
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.527854855097224e-08,
      "logits/chosen": -2.7756199836730957,
      "logits/rejected": -2.76503586769104,
      "logps/chosen": -308.0489501953125,
      "logps/rejected": -295.3426208496094,
      "loss": 0.5751,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.3851245045661926,
      "rewards/margins": 0.27352777123451233,
      "rewards/rejected": -0.6586521863937378,
      "step": 90
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.790158337517127e-08,
      "logits/chosen": -2.7100021839141846,
      "logits/rejected": -2.685563325881958,
      "logps/chosen": -298.96319580078125,
      "logps/rejected": -354.50439453125,
      "loss": 0.5944,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.4978106915950775,
      "rewards/margins": 0.2795547842979431,
      "rewards/rejected": -0.777365505695343,
      "step": 100
    },
    {
      "epoch": 0.92,
      "learning_rate": 8.677580722139671e-09,
      "logits/chosen": -2.7827987670898438,
      "logits/rejected": -2.761164665222168,
      "logps/chosen": -340.18597412109375,
      "logps/rejected": -337.910400390625,
      "loss": 0.5949,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.42213040590286255,
      "rewards/margins": 0.35163527727127075,
      "rewards/rejected": -0.7737656831741333,
      "step": 110
    },
    {
      "epoch": 1.0,
      "step": 119,
      "total_flos": 0.0,
      "train_loss": 0.6248922909007353,
      "train_runtime": 1986.9995,
      "train_samples_per_second": 7.691,
      "train_steps_per_second": 0.06
    }
  ],
  "logging_steps": 10,
  "max_steps": 119,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}