{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.802816901408451,
  "eval_steps": 54,
  "global_step": 810,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.25,
      "grad_norm": 1.6524436473846436,
      "learning_rate": 1.267605633802817e-05,
      "loss": 1.8571,
      "step": 54
    },
    {
      "epoch": 0.25,
      "eval_loss": 1.815904140472412,
      "eval_runtime": 71.1199,
      "eval_samples_per_second": 3.164,
      "eval_steps_per_second": 0.801,
      "step": 54
    },
    {
      "epoch": 0.51,
      "grad_norm": 2.144040584564209,
      "learning_rate": 2.535211267605634e-05,
      "loss": 1.6784,
      "step": 108
    },
    {
      "epoch": 0.51,
      "eval_loss": 1.5669676065444946,
      "eval_runtime": 71.0969,
      "eval_samples_per_second": 3.165,
      "eval_steps_per_second": 0.802,
      "step": 108
    },
    {
      "epoch": 0.76,
      "grad_norm": 2.36417293548584,
      "learning_rate": 3.802816901408451e-05,
      "loss": 1.5266,
      "step": 162
    },
    {
      "epoch": 0.76,
      "eval_loss": 1.497390627861023,
      "eval_runtime": 71.0941,
      "eval_samples_per_second": 3.165,
      "eval_steps_per_second": 0.802,
      "step": 162
    },
    {
      "epoch": 1.01,
      "grad_norm": 2.723508358001709,
      "learning_rate": 4.992175273865415e-05,
      "loss": 1.4987,
      "step": 216
    },
    {
      "epoch": 1.01,
      "eval_loss": 1.4736177921295166,
      "eval_runtime": 71.0941,
      "eval_samples_per_second": 3.165,
      "eval_steps_per_second": 0.802,
      "step": 216
    },
    {
      "epoch": 1.27,
      "grad_norm": 2.092134952545166,
      "learning_rate": 4.85133020344288e-05,
      "loss": 1.4088,
      "step": 270
    },
    {
      "epoch": 1.27,
      "eval_loss": 1.4523909091949463,
      "eval_runtime": 71.0899,
      "eval_samples_per_second": 3.165,
      "eval_steps_per_second": 0.802,
      "step": 270
    },
    {
      "epoch": 1.52,
      "grad_norm": 2.478315830230713,
      "learning_rate": 4.710485133020345e-05,
      "loss": 1.4383,
      "step": 324
    },
    {
      "epoch": 1.52,
      "eval_loss": 1.4438430070877075,
      "eval_runtime": 71.0889,
      "eval_samples_per_second": 3.165,
      "eval_steps_per_second": 0.802,
      "step": 324
    },
    {
      "epoch": 1.77,
      "grad_norm": 2.811476707458496,
      "learning_rate": 4.569640062597809e-05,
      "loss": 1.4606,
      "step": 378
    },
    {
      "epoch": 1.77,
      "eval_loss": 1.4346896409988403,
      "eval_runtime": 71.0897,
      "eval_samples_per_second": 3.165,
      "eval_steps_per_second": 0.802,
      "step": 378
    },
    {
      "epoch": 2.03,
      "grad_norm": 3.4677302837371826,
      "learning_rate": 4.428794992175274e-05,
      "loss": 1.4029,
      "step": 432
    },
    {
      "epoch": 2.03,
      "eval_loss": 1.431531548500061,
      "eval_runtime": 71.0955,
      "eval_samples_per_second": 3.165,
      "eval_steps_per_second": 0.802,
      "step": 432
    },
    {
      "epoch": 2.28,
      "grad_norm": 2.5842223167419434,
      "learning_rate": 4.287949921752739e-05,
      "loss": 1.35,
      "step": 486
    },
    {
      "epoch": 2.28,
      "eval_loss": 1.430802583694458,
      "eval_runtime": 71.0806,
      "eval_samples_per_second": 3.165,
      "eval_steps_per_second": 0.802,
      "step": 486
    },
    {
      "epoch": 2.54,
      "grad_norm": 2.942049980163574,
      "learning_rate": 4.1471048513302035e-05,
      "loss": 1.3749,
      "step": 540
    },
    {
      "epoch": 2.54,
      "eval_loss": 1.4312644004821777,
      "eval_runtime": 71.1092,
      "eval_samples_per_second": 3.164,
      "eval_steps_per_second": 0.802,
      "step": 540
    },
    {
      "epoch": 2.79,
      "grad_norm": 2.2633845806121826,
      "learning_rate": 4.0062597809076686e-05,
      "loss": 1.3596,
      "step": 594
    },
    {
      "epoch": 2.79,
      "eval_loss": 1.4272420406341553,
      "eval_runtime": 71.1027,
      "eval_samples_per_second": 3.164,
      "eval_steps_per_second": 0.802,
      "step": 594
    },
    {
      "epoch": 3.04,
      "grad_norm": 2.9916341304779053,
      "learning_rate": 3.865414710485133e-05,
      "loss": 1.3655,
      "step": 648
    },
    {
      "epoch": 3.04,
      "eval_loss": 1.429884433746338,
      "eval_runtime": 71.0733,
      "eval_samples_per_second": 3.166,
      "eval_steps_per_second": 0.802,
      "step": 648
    },
    {
      "epoch": 3.3,
      "grad_norm": 3.2689390182495117,
      "learning_rate": 3.724569640062598e-05,
      "loss": 1.3184,
      "step": 702
    },
    {
      "epoch": 3.3,
      "eval_loss": 1.4284456968307495,
      "eval_runtime": 71.0941,
      "eval_samples_per_second": 3.165,
      "eval_steps_per_second": 0.802,
      "step": 702
    },
    {
      "epoch": 3.55,
      "grad_norm": 2.7613720893859863,
      "learning_rate": 3.5837245696400626e-05,
      "loss": 1.3084,
      "step": 756
    },
    {
      "epoch": 3.55,
      "eval_loss": 1.4279944896697998,
      "eval_runtime": 71.0944,
      "eval_samples_per_second": 3.165,
      "eval_steps_per_second": 0.802,
      "step": 756
    },
    {
      "epoch": 3.8,
      "grad_norm": 2.9932751655578613,
      "learning_rate": 3.442879499217527e-05,
      "loss": 1.3247,
      "step": 810
    },
    {
      "epoch": 3.8,
      "eval_loss": 1.4245632886886597,
      "eval_runtime": 71.0816,
      "eval_samples_per_second": 3.165,
      "eval_steps_per_second": 0.802,
      "step": 810
    }
  ],
  "logging_steps": 54,
  "max_steps": 2130,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 54,
  "total_flos": 7.901132314617446e+16,
  "train_batch_size": 6,
  "trial_name": null,
  "trial_params": null
}