{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.10301976691777735,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 4.9996726480206315e-05,
      "loss": 0.7422,
      "step": 100
    },
    {
      "epoch": 0.01,
      "learning_rate": 4.9986906778099784e-05,
      "loss": 0.6673,
      "step": 200
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.9970543465279565e-05,
      "loss": 0.6749,
      "step": 300
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.994764082699591e-05,
      "loss": 0.6481,
      "step": 400
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.991820486102801e-05,
      "loss": 0.6505,
      "step": 500
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.9882243276113245e-05,
      "loss": 0.6708,
      "step": 600
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.983976548992841e-05,
      "loss": 0.6596,
      "step": 700
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.9790782626623436e-05,
      "loss": 0.6657,
      "step": 800
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.97353075139081e-05,
      "loss": 0.659,
      "step": 900
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.9673354679692785e-05,
      "loss": 0.6519,
      "step": 1000
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.960494034828381e-05,
      "loss": 0.638,
      "step": 1100
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.9530082436134614e-05,
      "loss": 0.629,
      "step": 1200
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.944880054715378e-05,
      "loss": 0.6464,
      "step": 1300
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.9361115967571094e-05,
      "loss": 0.642,
      "step": 1400
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.926705166036311e-05,
      "loss": 0.6431,
      "step": 1500
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.916663225923953e-05,
      "loss": 0.6465,
      "step": 1600
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.905988406219217e-05,
      "loss": 0.6439,
      "step": 1700
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.8946835024607885e-05,
      "loss": 0.652,
      "step": 1800
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.8827514751947656e-05,
      "loss": 0.6376,
      "step": 1900
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.8701954491993426e-05,
      "loss": 0.6357,
      "step": 2000
    }
  ],
  "logging_steps": 100,
  "max_steps": 19413,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 2000,
  "total_flos": 4.5346633176597135e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}