|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 32.0,
  "eval_steps": 500,
  "global_step": 9184,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "learning_rate": 0.000484375,
      "loss": 3.0823,
      "step": 287
    },
    {
      "epoch": 2.0,
      "learning_rate": 0.00046875,
      "loss": 2.7242,
      "step": 574
    },
    {
      "epoch": 3.0,
      "learning_rate": 0.000453125,
      "loss": 2.5348,
      "step": 861
    },
    {
      "epoch": 4.0,
      "learning_rate": 0.0004375,
      "loss": 2.4455,
      "step": 1148
    },
    {
      "epoch": 5.0,
      "learning_rate": 0.000421875,
      "loss": 2.3794,
      "step": 1435
    },
    {
      "epoch": 6.0,
      "learning_rate": 0.00040625000000000004,
      "loss": 2.3375,
      "step": 1722
    },
    {
      "epoch": 7.0,
      "learning_rate": 0.000390625,
      "loss": 2.3262,
      "step": 2009
    },
    {
      "epoch": 8.0,
      "learning_rate": 0.000375,
      "loss": 2.3114,
      "step": 2296
    },
    {
      "epoch": 9.0,
      "learning_rate": 0.000359375,
      "loss": 2.2921,
      "step": 2583
    },
    {
      "epoch": 10.0,
      "learning_rate": 0.00034375,
      "loss": 2.2918,
      "step": 2870
    },
    {
      "epoch": 11.0,
      "learning_rate": 0.000328125,
      "loss": 2.2578,
      "step": 3157
    },
    {
      "epoch": 12.0,
      "learning_rate": 0.0003125,
      "loss": 2.2693,
      "step": 3444
    },
    {
      "epoch": 13.0,
      "learning_rate": 0.000296875,
      "loss": 2.2594,
      "step": 3731
    },
    {
      "epoch": 14.0,
      "learning_rate": 0.00028125000000000003,
      "loss": 2.2555,
      "step": 4018
    },
    {
      "epoch": 15.0,
      "learning_rate": 0.000265625,
      "loss": 2.2481,
      "step": 4305
    },
    {
      "epoch": 16.0,
      "learning_rate": 0.00025,
      "loss": 2.2468,
      "step": 4592
    },
    {
      "epoch": 17.0,
      "learning_rate": 0.000234375,
      "loss": 2.248,
      "step": 4879
    },
    {
      "epoch": 18.0,
      "learning_rate": 0.00021875,
      "loss": 2.2435,
      "step": 5166
    },
    {
      "epoch": 19.0,
      "learning_rate": 0.00020312500000000002,
      "loss": 2.2319,
      "step": 5453
    },
    {
      "epoch": 20.0,
      "learning_rate": 0.0001875,
      "loss": 2.2303,
      "step": 5740
    },
    {
      "epoch": 21.0,
      "learning_rate": 0.000171875,
      "loss": 2.2215,
      "step": 6027
    },
    {
      "epoch": 22.0,
      "learning_rate": 0.00015625,
      "loss": 2.2256,
      "step": 6314
    },
    {
      "epoch": 23.0,
      "learning_rate": 0.00014062500000000002,
      "loss": 2.2257,
      "step": 6601
    },
    {
      "epoch": 24.0,
      "learning_rate": 0.000125,
      "loss": 2.2275,
      "step": 6888
    },
    {
      "epoch": 25.0,
      "learning_rate": 0.000109375,
      "loss": 2.2225,
      "step": 7175
    },
    {
      "epoch": 26.0,
      "learning_rate": 9.375e-05,
      "loss": 2.2166,
      "step": 7462
    },
    {
      "epoch": 27.0,
      "learning_rate": 7.8125e-05,
      "loss": 2.2174,
      "step": 7749
    },
    {
      "epoch": 28.0,
      "learning_rate": 6.25e-05,
      "loss": 2.2188,
      "step": 8036
    },
    {
      "epoch": 29.0,
      "learning_rate": 4.6875e-05,
      "loss": 2.2143,
      "step": 8323
    },
    {
      "epoch": 30.0,
      "learning_rate": 3.125e-05,
      "loss": 2.2171,
      "step": 8610
    },
    {
      "epoch": 31.0,
      "learning_rate": 1.5625e-05,
      "loss": 2.2168,
      "step": 8897
    },
    {
      "epoch": 32.0,
      "learning_rate": 0.0,
      "loss": 2.2152,
      "step": 9184
    }
  ],
  "logging_steps": 500,
  "max_steps": 9184,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 32,
  "save_steps": 500,
  "total_flos": 0.0,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}
|
|