{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "global_step": 2040,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.49,
      "learning_rate": 6.824120603015075e-06,
      "loss": 1.2787,
      "step": 100
    },
    {
      "epoch": 0.49,
      "eval_accuracy": 0.48663100600242615,
      "eval_loss": 1.1127219200134277,
      "eval_runtime": 2.143,
      "eval_samples_per_second": 174.524,
      "eval_steps_per_second": 11.199,
      "step": 100
    },
    {
      "epoch": 0.98,
      "learning_rate": 6.472361809045226e-06,
      "loss": 1.089,
      "step": 200
    },
    {
      "epoch": 0.98,
      "eval_accuracy": 0.7139037251472473,
      "eval_loss": 0.9668397903442383,
      "eval_runtime": 2.1362,
      "eval_samples_per_second": 175.077,
      "eval_steps_per_second": 11.235,
      "step": 200
    },
    {
      "epoch": 1.47,
      "learning_rate": 6.1206030150753765e-06,
      "loss": 0.9134,
      "step": 300
    },
    {
      "epoch": 1.47,
      "eval_accuracy": 0.7834224700927734,
      "eval_loss": 0.8720457553863525,
      "eval_runtime": 2.1335,
      "eval_samples_per_second": 175.301,
      "eval_steps_per_second": 11.249,
      "step": 300
    },
    {
      "epoch": 1.96,
      "learning_rate": 5.7688442211055275e-06,
      "loss": 0.8618,
      "step": 400
    },
    {
      "epoch": 1.96,
      "eval_accuracy": 0.7941176295280457,
      "eval_loss": 0.772637665271759,
      "eval_runtime": 2.1363,
      "eval_samples_per_second": 175.072,
      "eval_steps_per_second": 11.235,
      "step": 400
    },
    {
      "epoch": 2.45,
      "learning_rate": 5.4170854271356785e-06,
      "loss": 0.686,
      "step": 500
    },
    {
      "epoch": 2.45,
      "eval_accuracy": 0.8208556175231934,
      "eval_loss": 0.7337380647659302,
      "eval_runtime": 2.1375,
      "eval_samples_per_second": 174.975,
      "eval_steps_per_second": 11.228,
      "step": 500
    },
    {
      "epoch": 2.94,
      "learning_rate": 5.0653266331658295e-06,
      "loss": 0.6333,
      "step": 600
    },
    {
      "epoch": 2.94,
      "eval_accuracy": 0.8235294222831726,
      "eval_loss": 0.7350101470947266,
      "eval_runtime": 2.1343,
      "eval_samples_per_second": 175.234,
      "eval_steps_per_second": 11.245,
      "step": 600
    },
    {
      "epoch": 3.43,
      "learning_rate": 4.71356783919598e-06,
      "loss": 0.5765,
      "step": 700
    },
    {
      "epoch": 3.43,
      "eval_accuracy": 0.8235294222831726,
      "eval_loss": 0.7560638785362244,
      "eval_runtime": 2.1248,
      "eval_samples_per_second": 176.019,
      "eval_steps_per_second": 11.295,
      "step": 700
    },
    {
      "epoch": 3.92,
      "learning_rate": 4.361809045226131e-06,
      "loss": 0.5502,
      "step": 800
    },
    {
      "epoch": 3.92,
      "eval_accuracy": 0.8475936055183411,
      "eval_loss": 0.727317750453949,
      "eval_runtime": 2.1509,
      "eval_samples_per_second": 173.882,
      "eval_steps_per_second": 11.158,
      "step": 800
    },
    {
      "epoch": 4.41,
      "learning_rate": 4.010050251256282e-06,
      "loss": 0.5049,
      "step": 900
    },
    {
      "epoch": 4.41,
      "eval_accuracy": 0.8101603984832764,
      "eval_loss": 0.8136795163154602,
      "eval_runtime": 2.1343,
      "eval_samples_per_second": 175.233,
      "eval_steps_per_second": 11.245,
      "step": 900
    },
    {
      "epoch": 4.9,
      "learning_rate": 3.6582914572864323e-06,
      "loss": 0.4695,
      "step": 1000
    },
    {
      "epoch": 4.9,
      "eval_accuracy": 0.8288770318031311,
      "eval_loss": 0.7581244707107544,
      "eval_runtime": 2.1366,
      "eval_samples_per_second": 175.041,
      "eval_steps_per_second": 11.233,
      "step": 1000
    },
    {
      "epoch": 5.39,
      "learning_rate": 3.306532663316583e-06,
      "loss": 0.4657,
      "step": 1100
    },
    {
      "epoch": 5.39,
      "eval_accuracy": 0.8048128485679626,
      "eval_loss": 0.8404039144515991,
      "eval_runtime": 2.1382,
      "eval_samples_per_second": 174.912,
      "eval_steps_per_second": 11.224,
      "step": 1100
    },
    {
      "epoch": 5.88,
      "learning_rate": 2.9547738693467334e-06,
      "loss": 0.4549,
      "step": 1200
    },
    {
      "epoch": 5.88,
      "eval_accuracy": 0.8368983864784241,
      "eval_loss": 0.7800447940826416,
      "eval_runtime": 2.143,
      "eval_samples_per_second": 174.523,
      "eval_steps_per_second": 11.199,
      "step": 1200
    },
    {
      "epoch": 6.37,
      "learning_rate": 2.6030150753768844e-06,
      "loss": 0.4305,
      "step": 1300
    },
    {
      "epoch": 6.37,
      "eval_accuracy": 0.8235294222831726,
      "eval_loss": 0.8575029969215393,
      "eval_runtime": 2.1375,
      "eval_samples_per_second": 174.967,
      "eval_steps_per_second": 11.228,
      "step": 1300
    },
    {
      "epoch": 6.86,
      "learning_rate": 2.251256281407035e-06,
      "loss": 0.4209,
      "step": 1400
    },
    {
      "epoch": 6.86,
      "eval_accuracy": 0.8101603984832764,
      "eval_loss": 0.8572390675544739,
      "eval_runtime": 2.1484,
      "eval_samples_per_second": 174.083,
      "eval_steps_per_second": 11.171,
      "step": 1400
    },
    {
      "epoch": 7.35,
      "learning_rate": 1.899497487437186e-06,
      "loss": 0.3983,
      "step": 1500
    },
    {
      "epoch": 7.35,
      "eval_accuracy": 0.8315507769584656,
      "eval_loss": 0.8391810655593872,
      "eval_runtime": 2.1399,
      "eval_samples_per_second": 174.771,
      "eval_steps_per_second": 11.215,
      "step": 1500
    },
    {
      "epoch": 7.84,
      "learning_rate": 1.5477386934673368e-06,
      "loss": 0.4139,
      "step": 1600
    },
    {
      "epoch": 7.84,
      "eval_accuracy": 0.8208556175231934,
      "eval_loss": 0.8151516318321228,
      "eval_runtime": 2.1644,
      "eval_samples_per_second": 172.797,
      "eval_steps_per_second": 11.089,
      "step": 1600
    },
    {
      "epoch": 8.33,
      "learning_rate": 1.1959798994974873e-06,
      "loss": 0.393,
      "step": 1700
    },
    {
      "epoch": 8.33,
      "eval_accuracy": 0.8288770318031311,
      "eval_loss": 0.8261328935623169,
      "eval_runtime": 2.171,
      "eval_samples_per_second": 172.27,
      "eval_steps_per_second": 11.055,
      "step": 1700
    },
    {
      "epoch": 8.82,
      "learning_rate": 8.442211055276381e-07,
      "loss": 0.3979,
      "step": 1800
    },
    {
      "epoch": 8.82,
      "eval_accuracy": 0.8235294222831726,
      "eval_loss": 0.8327566385269165,
      "eval_runtime": 2.1392,
      "eval_samples_per_second": 174.828,
      "eval_steps_per_second": 11.219,
      "step": 1800
    },
    {
      "epoch": 9.31,
      "learning_rate": 4.924623115577889e-07,
      "loss": 0.3928,
      "step": 1900
    },
    {
      "epoch": 9.31,
      "eval_accuracy": 0.8208556175231934,
      "eval_loss": 0.8364331126213074,
      "eval_runtime": 2.138,
      "eval_samples_per_second": 174.931,
      "eval_steps_per_second": 11.226,
      "step": 1900
    },
    {
      "epoch": 9.8,
      "learning_rate": 1.4070351758793969e-07,
      "loss": 0.3848,
      "step": 2000
    },
    {
      "epoch": 9.8,
      "eval_accuracy": 0.8235294222831726,
      "eval_loss": 0.8322352170944214,
      "eval_runtime": 2.142,
      "eval_samples_per_second": 174.602,
      "eval_steps_per_second": 11.204,
      "step": 2000
    },
    {
      "epoch": 10.0,
      "step": 2040,
      "total_flos": 1.517670726457344e+16,
      "train_loss": 0.5818920486113605,
      "train_runtime": 787.9964,
      "train_samples_per_second": 41.333,
      "train_steps_per_second": 2.589
    }
  ],
  "max_steps": 2040,
  "num_train_epochs": 10,
  "total_flos": 1.517670726457344e+16,
  "trial_name": null,
  "trial_params": null
}