{
  "best_metric": 49.03147699757869,
  "best_model_checkpoint": "./checkpoint-400",
  "epoch": 11.894117647058824,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.24,
      "learning_rate": 2.666666666666667e-06,
      "loss": 4.3134,
      "step": 10
    },
    {
      "epoch": 0.47,
      "learning_rate": 6e-06,
      "loss": 2.9329,
      "step": 20
    },
    {
      "epoch": 0.71,
      "learning_rate": 9.333333333333334e-06,
      "loss": 2.0584,
      "step": 30
    },
    {
      "epoch": 0.94,
      "learning_rate": 9.829787234042554e-06,
      "loss": 1.566,
      "step": 40
    },
    {
      "epoch": 1.19,
      "learning_rate": 9.617021276595745e-06,
      "loss": 1.3777,
      "step": 50
    },
    {
      "epoch": 1.42,
      "learning_rate": 9.404255319148937e-06,
      "loss": 1.1469,
      "step": 60
    },
    {
      "epoch": 1.66,
      "learning_rate": 9.191489361702128e-06,
      "loss": 1.0638,
      "step": 70
    },
    {
      "epoch": 1.89,
      "learning_rate": 8.97872340425532e-06,
      "loss": 0.9974,
      "step": 80
    },
    {
      "epoch": 2.14,
      "learning_rate": 8.765957446808512e-06,
      "loss": 0.9615,
      "step": 90
    },
    {
      "epoch": 2.38,
      "learning_rate": 8.553191489361703e-06,
      "loss": 0.8262,
      "step": 100
    },
    {
      "epoch": 2.38,
      "eval_cer": 149.33157314260887,
      "eval_loss": 0.8188337683677673,
      "eval_runtime": 459.9395,
      "eval_samples_per_second": 1.113,
      "eval_steps_per_second": 0.139,
      "eval_wer": 146.85230024213075,
      "step": 100
    },
    {
      "epoch": 2.61,
      "learning_rate": 8.340425531914894e-06,
      "loss": 0.7986,
      "step": 110
    },
    {
      "epoch": 2.85,
      "learning_rate": 8.127659574468085e-06,
      "loss": 0.7707,
      "step": 120
    },
    {
      "epoch": 3.09,
      "learning_rate": 7.914893617021278e-06,
      "loss": 0.7623,
      "step": 130
    },
    {
      "epoch": 3.33,
      "learning_rate": 7.702127659574469e-06,
      "loss": 0.6451,
      "step": 140
    },
    {
      "epoch": 3.56,
      "learning_rate": 7.48936170212766e-06,
      "loss": 0.6205,
      "step": 150
    },
    {
      "epoch": 3.8,
      "learning_rate": 7.2765957446808524e-06,
      "loss": 0.6097,
      "step": 160
    },
    {
      "epoch": 4.05,
      "learning_rate": 7.0638297872340434e-06,
      "loss": 0.6217,
      "step": 170
    },
    {
      "epoch": 4.28,
      "learning_rate": 6.8510638297872344e-06,
      "loss": 0.4584,
      "step": 180
    },
    {
      "epoch": 4.52,
      "learning_rate": 6.6382978723404254e-06,
      "loss": 0.4929,
      "step": 190
    },
    {
      "epoch": 4.75,
      "learning_rate": 6.425531914893618e-06,
      "loss": 0.4843,
      "step": 200
    },
    {
      "epoch": 4.75,
      "eval_cer": 22.099862972494236,
      "eval_loss": 0.6699215173721313,
      "eval_runtime": 1062.3991,
      "eval_samples_per_second": 0.482,
      "eval_steps_per_second": 0.06,
      "eval_wer": 52.66343825665859,
      "step": 200
    },
    {
      "epoch": 4.99,
      "learning_rate": 6.212765957446809e-06,
      "loss": 0.4678,
      "step": 210
    },
    {
      "epoch": 5.24,
      "learning_rate": 6e-06,
      "loss": 0.4014,
      "step": 220
    },
    {
      "epoch": 5.47,
      "learning_rate": 5.787234042553191e-06,
      "loss": 0.3926,
      "step": 230
    },
    {
      "epoch": 5.71,
      "learning_rate": 5.574468085106384e-06,
      "loss": 0.3806,
      "step": 240
    },
    {
      "epoch": 5.94,
      "learning_rate": 5.361702127659575e-06,
      "loss": 0.3855,
      "step": 250
    },
    {
      "epoch": 6.19,
      "learning_rate": 5.148936170212766e-06,
      "loss": 0.3478,
      "step": 260
    },
    {
      "epoch": 6.42,
      "learning_rate": 4.936170212765958e-06,
      "loss": 0.3078,
      "step": 270
    },
    {
      "epoch": 6.66,
      "learning_rate": 4.7234042553191496e-06,
      "loss": 0.3147,
      "step": 280
    },
    {
      "epoch": 6.89,
      "learning_rate": 4.5106382978723406e-06,
      "loss": 0.3213,
      "step": 290
    },
    {
      "epoch": 7.14,
      "learning_rate": 4.297872340425532e-06,
      "loss": 0.287,
      "step": 300
    },
    {
      "epoch": 7.14,
      "eval_cer": 20.677784833394604,
      "eval_loss": 0.6913720369338989,
      "eval_runtime": 1003.603,
      "eval_samples_per_second": 0.51,
      "eval_steps_per_second": 0.064,
      "eval_wer": 50.13619854721549,
      "step": 300
    },
    {
      "epoch": 7.38,
      "learning_rate": 4.085106382978723e-06,
      "loss": 0.2602,
      "step": 310
    },
    {
      "epoch": 7.61,
      "learning_rate": 3.872340425531915e-06,
      "loss": 0.2612,
      "step": 320
    },
    {
      "epoch": 7.85,
      "learning_rate": 3.6595744680851063e-06,
      "loss": 0.2618,
      "step": 330
    },
    {
      "epoch": 8.09,
      "learning_rate": 3.446808510638298e-06,
      "loss": 0.2494,
      "step": 340
    },
    {
      "epoch": 8.33,
      "learning_rate": 3.23404255319149e-06,
      "loss": 0.2123,
      "step": 350
    },
    {
      "epoch": 8.56,
      "learning_rate": 3.021276595744681e-06,
      "loss": 0.2173,
      "step": 360
    },
    {
      "epoch": 8.8,
      "learning_rate": 2.808510638297873e-06,
      "loss": 0.2093,
      "step": 370
    },
    {
      "epoch": 9.05,
      "learning_rate": 2.595744680851064e-06,
      "loss": 0.2209,
      "step": 380
    },
    {
      "epoch": 9.28,
      "learning_rate": 2.3829787234042557e-06,
      "loss": 0.1742,
      "step": 390
    },
    {
      "epoch": 9.52,
      "learning_rate": 2.170212765957447e-06,
      "loss": 0.1828,
      "step": 400
    },
    {
      "epoch": 9.52,
      "eval_cer": 20.460546104742487,
      "eval_loss": 0.7369570136070251,
      "eval_runtime": 1004.0546,
      "eval_samples_per_second": 0.51,
      "eval_steps_per_second": 0.064,
      "eval_wer": 49.03147699757869,
      "step": 400
    },
    {
      "epoch": 9.75,
      "learning_rate": 1.9574468085106385e-06,
      "loss": 0.1902,
      "step": 410
    },
    {
      "epoch": 9.99,
      "learning_rate": 1.74468085106383e-06,
      "loss": 0.1809,
      "step": 420
    },
    {
      "epoch": 10.24,
      "learning_rate": 1.5319148936170214e-06,
      "loss": 0.1705,
      "step": 430
    },
    {
      "epoch": 10.47,
      "learning_rate": 1.3191489361702128e-06,
      "loss": 0.1593,
      "step": 440
    },
    {
      "epoch": 10.71,
      "learning_rate": 1.1063829787234042e-06,
      "loss": 0.1596,
      "step": 450
    },
    {
      "epoch": 10.94,
      "learning_rate": 8.936170212765959e-07,
      "loss": 0.161,
      "step": 460
    },
    {
      "epoch": 11.19,
      "learning_rate": 6.808510638297873e-07,
      "loss": 0.1594,
      "step": 470
    },
    {
      "epoch": 11.42,
      "learning_rate": 4.6808510638297873e-07,
      "loss": 0.1436,
      "step": 480
    },
    {
      "epoch": 11.66,
      "learning_rate": 2.553191489361702e-07,
      "loss": 0.1517,
      "step": 490
    },
    {
      "epoch": 11.89,
      "learning_rate": 4.2553191489361707e-08,
      "loss": 0.1493,
      "step": 500
    },
    {
      "epoch": 11.89,
      "eval_cer": 20.8064570034424,
      "eval_loss": 0.7532366514205933,
      "eval_runtime": 925.6199,
      "eval_samples_per_second": 0.553,
      "eval_steps_per_second": 0.069,
      "eval_wer": 49.583837772397096,
      "step": 500
    }
  ],
  "max_steps": 500,
  "num_train_epochs": 12,
  "total_flos": 9.30774491652096e+18,
  "trial_name": null,
  "trial_params": null
}