|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 8.0,
  "eval_steps": 500,
  "global_step": 2544,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.9968553459119497,
      "grad_norm": 0.7093676328659058,
      "learning_rate": 1.7507861635220126e-05,
      "loss": 0.4971,
      "step": 317
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6170967741935484,
      "eval_loss": 0.2514326870441437,
      "eval_runtime": 1.427,
      "eval_samples_per_second": 2172.377,
      "eval_steps_per_second": 45.55,
      "step": 318
    },
    {
      "epoch": 1.9937106918238994,
      "grad_norm": 0.5753037333488464,
      "learning_rate": 1.5015723270440253e-05,
      "loss": 0.2041,
      "step": 634
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.8416129032258064,
      "eval_loss": 0.11213530600070953,
      "eval_runtime": 1.4434,
      "eval_samples_per_second": 2147.677,
      "eval_steps_per_second": 45.032,
      "step": 636
    },
    {
      "epoch": 2.990566037735849,
      "grad_norm": 0.503776490688324,
      "learning_rate": 1.2523584905660378e-05,
      "loss": 0.1194,
      "step": 951
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.8858064516129033,
      "eval_loss": 0.06934015452861786,
      "eval_runtime": 1.442,
      "eval_samples_per_second": 2149.749,
      "eval_steps_per_second": 45.075,
      "step": 954
    },
    {
      "epoch": 3.9874213836477987,
      "grad_norm": 0.45865872502326965,
      "learning_rate": 1.0031446540880504e-05,
      "loss": 0.0864,
      "step": 1268
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9045161290322581,
      "eval_loss": 0.05072392523288727,
      "eval_runtime": 1.4463,
      "eval_samples_per_second": 2143.474,
      "eval_steps_per_second": 44.944,
      "step": 1272
    },
    {
      "epoch": 4.984276729559748,
      "grad_norm": 0.35762810707092285,
      "learning_rate": 7.539308176100629e-06,
      "loss": 0.07,
      "step": 1585
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.9125806451612903,
      "eval_loss": 0.04198624566197395,
      "eval_runtime": 1.6456,
      "eval_samples_per_second": 1883.86,
      "eval_steps_per_second": 39.5,
      "step": 1590
    },
    {
      "epoch": 5.981132075471698,
      "grad_norm": 0.4083423316478729,
      "learning_rate": 5.047169811320756e-06,
      "loss": 0.0612,
      "step": 1902
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.9183870967741935,
      "eval_loss": 0.03691146895289421,
      "eval_runtime": 1.6409,
      "eval_samples_per_second": 1889.151,
      "eval_steps_per_second": 39.611,
      "step": 1908
    },
    {
      "epoch": 6.977987421383648,
      "grad_norm": 0.2706115245819092,
      "learning_rate": 2.555031446540881e-06,
      "loss": 0.0564,
      "step": 2219
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.9229032258064516,
      "eval_loss": 0.03457155451178551,
      "eval_runtime": 1.6398,
      "eval_samples_per_second": 1890.51,
      "eval_steps_per_second": 39.64,
      "step": 2226
    },
    {
      "epoch": 7.9748427672955975,
      "grad_norm": 0.29390910267829895,
      "learning_rate": 6.289308176100629e-08,
      "loss": 0.0541,
      "step": 2536
    }
  ],
  "logging_steps": 317,
  "max_steps": 2544,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 1000000000.0,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 660694498184616.0,
  "train_batch_size": 48,
  "trial_name": null,
  "trial_params": {
    "alpha": 0.4600551411814572,
    "num_train_epochs": 8,
    "temperature": 4
  }
}
|
|