{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016736401673640166,
      "grad_norm": 12.20743074581383,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": -2.825530767440796,
      "logits/rejected": -2.756951332092285,
      "logps/chosen": -210.2147979736328,
      "logps/pi_response": -115.76622009277344,
      "logps/ref_response": -115.76622009277344,
      "logps/rejected": -234.01425170898438,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16736401673640167,
      "grad_norm": 12.223946579272333,
      "learning_rate": 4.930057285201027e-07,
      "logits/chosen": -2.7576725482940674,
      "logits/rejected": -2.7086384296417236,
      "logps/chosen": -221.15676879882812,
      "logps/pi_response": -116.39059448242188,
      "logps/ref_response": -115.38680267333984,
      "logps/rejected": -261.13031005859375,
      "loss": 0.6862,
      "rewards/accuracies": 0.5416666865348816,
      "rewards/chosen": -0.04135013371706009,
      "rewards/margins": 0.019138270989060402,
      "rewards/rejected": -0.06048841029405594,
      "step": 10
    },
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 27.23026162056322,
      "learning_rate": 4.187457503795526e-07,
      "logits/chosen": -2.7793827056884766,
      "logits/rejected": -2.7369754314422607,
      "logps/chosen": -245.52627563476562,
      "logps/pi_response": -110.24317932128906,
      "logps/ref_response": -113.51313781738281,
      "logps/rejected": -298.79180908203125,
      "loss": 0.6346,
      "rewards/accuracies": 0.6468750238418579,
      "rewards/chosen": -0.2135976105928421,
      "rewards/margins": 0.2609703838825226,
      "rewards/rejected": -0.4745679795742035,
      "step": 20
    },
    {
      "epoch": 0.502092050209205,
      "grad_norm": 16.564474644022212,
      "learning_rate": 2.8691164100062034e-07,
      "logits/chosen": -2.765554428100586,
      "logits/rejected": -2.7183127403259277,
      "logps/chosen": -278.768798828125,
      "logps/pi_response": -138.74417114257812,
      "logps/ref_response": -120.3040542602539,
      "logps/rejected": -346.51177978515625,
      "loss": 0.5838,
      "rewards/accuracies": 0.690625011920929,
      "rewards/chosen": -0.47783130407333374,
      "rewards/margins": 0.4638592302799225,
      "rewards/rejected": -0.9416905641555786,
      "step": 30
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 16.782748799564995,
      "learning_rate": 1.4248369943086995e-07,
      "logits/chosen": -2.691978693008423,
      "logits/rejected": -2.649599552154541,
      "logps/chosen": -283.6568298339844,
      "logps/pi_response": -144.93222045898438,
      "logps/ref_response": -112.8275375366211,
      "logps/rejected": -357.76116943359375,
      "loss": 0.5652,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.563094437122345,
      "rewards/margins": 0.47530126571655273,
      "rewards/rejected": -1.0383957624435425,
      "step": 40
    },
    {
      "epoch": 0.8368200836820083,
      "grad_norm": 17.23293020250368,
      "learning_rate": 3.473909705816111e-08,
      "logits/chosen": -2.67022705078125,
      "logits/rejected": -2.62345290184021,
      "logps/chosen": -279.62060546875,
      "logps/pi_response": -149.0137481689453,
      "logps/ref_response": -111.8585433959961,
      "logps/rejected": -333.55364990234375,
      "loss": 0.5589,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.5371103882789612,
      "rewards/margins": 0.4536103308200836,
      "rewards/rejected": -0.9907207489013672,
      "step": 50
    },
    {
      "epoch": 0.9874476987447699,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.5968084577786721,
      "train_runtime": 2657.7029,
      "train_samples_per_second": 5.75,
      "train_steps_per_second": 0.022
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}