{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 1074,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14,
      "learning_rate": 1.9068901303538176e-05,
      "loss": 0.749,
      "step": 50
    },
    {
      "epoch": 0.28,
      "learning_rate": 1.813780260707635e-05,
      "loss": 0.3832,
      "step": 100
    },
    {
      "epoch": 0.42,
      "learning_rate": 1.7206703910614527e-05,
      "loss": 0.272,
      "step": 150
    },
    {
      "epoch": 0.56,
      "learning_rate": 1.62756052141527e-05,
      "loss": 0.2612,
      "step": 200
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.5344506517690876e-05,
      "loss": 0.2773,
      "step": 250
    },
    {
      "epoch": 0.84,
      "learning_rate": 1.4413407821229052e-05,
      "loss": 0.2311,
      "step": 300
    },
    {
      "epoch": 0.98,
      "learning_rate": 1.3482309124767226e-05,
      "loss": 0.2838,
      "step": 350
    },
    {
      "epoch": 1.12,
      "learning_rate": 1.25512104283054e-05,
      "loss": 0.2267,
      "step": 400
    },
    {
      "epoch": 1.26,
      "learning_rate": 1.1620111731843577e-05,
      "loss": 0.2023,
      "step": 450
    },
    {
      "epoch": 1.4,
      "learning_rate": 1.0689013035381753e-05,
      "loss": 0.2424,
      "step": 500
    },
    {
      "epoch": 1.4,
      "eval_accuracy": 0.9444093493367025,
      "eval_loss": 0.22674749791622162,
      "eval_runtime": 3.5901,
      "eval_samples_per_second": 440.936,
      "eval_steps_per_second": 27.576,
      "step": 500
    },
    {
      "epoch": 1.54,
      "learning_rate": 9.757914338919926e-06,
      "loss": 0.1837,
      "step": 550
    },
    {
      "epoch": 1.68,
      "learning_rate": 8.826815642458101e-06,
      "loss": 0.2126,
      "step": 600
    },
    {
      "epoch": 1.82,
      "learning_rate": 7.895716945996277e-06,
      "loss": 0.163,
      "step": 650
    },
    {
      "epoch": 1.96,
      "learning_rate": 6.964618249534451e-06,
      "loss": 0.1756,
      "step": 700
    },
    {
      "epoch": 2.09,
      "learning_rate": 6.033519553072626e-06,
      "loss": 0.1463,
      "step": 750
    },
    {
      "epoch": 2.23,
      "learning_rate": 5.102420856610801e-06,
      "loss": 0.1406,
      "step": 800
    },
    {
      "epoch": 2.37,
      "learning_rate": 4.171322160148976e-06,
      "loss": 0.0934,
      "step": 850
    },
    {
      "epoch": 2.51,
      "learning_rate": 3.240223463687151e-06,
      "loss": 0.1488,
      "step": 900
    },
    {
      "epoch": 2.65,
      "learning_rate": 2.3091247672253262e-06,
      "loss": 0.1865,
      "step": 950
    },
    {
      "epoch": 2.79,
      "learning_rate": 1.378026070763501e-06,
      "loss": 0.1406,
      "step": 1000
    },
    {
      "epoch": 2.79,
      "eval_accuracy": 0.9494630448515476,
      "eval_loss": 0.22929790616035461,
      "eval_runtime": 3.5929,
      "eval_samples_per_second": 440.593,
      "eval_steps_per_second": 27.554,
      "step": 1000
    },
    {
      "epoch": 2.93,
      "learning_rate": 4.46927374301676e-07,
      "loss": 0.1183,
      "step": 1050
    },
    {
      "epoch": 3.0,
      "step": 1074,
      "total_flos": 3331498423874952.0,
      "train_loss": 0.23038989738379112,
      "train_runtime": 429.1494,
      "train_samples_per_second": 79.874,
      "train_steps_per_second": 2.503
    }
  ],
  "logging_steps": 50,
  "max_steps": 1074,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 3331498423874952.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}