{
  "best_metric": 21.483863040629096,
  "best_model_checkpoint": "./att1/checkpoint-11000",
  "epoch": 1.4468333333333334,
  "global_step": 11000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "learning_rate": 9.9e-06,
      "loss": 0.6056,
      "step": 500
    },
    {
      "epoch": 0.08,
      "learning_rate": 9.569565217391306e-06,
      "loss": 0.5195,
      "step": 1000
    },
    {
      "epoch": 0.08,
      "eval_loss": 0.4892498850822449,
      "eval_runtime": 5699.9901,
      "eval_samples_per_second": 0.585,
      "eval_steps_per_second": 0.037,
      "eval_wer": 26.316759501965926,
      "step": 1000
    },
    {
      "epoch": 0.12,
      "learning_rate": 9.134782608695652e-06,
      "loss": 0.494,
      "step": 1500
    },
    {
      "epoch": 0.17,
      "learning_rate": 8.700000000000001e-06,
      "loss": 0.4744,
      "step": 2000
    },
    {
      "epoch": 0.17,
      "eval_loss": 0.45853909850120544,
      "eval_runtime": 5781.8818,
      "eval_samples_per_second": 0.576,
      "eval_steps_per_second": 0.036,
      "eval_wer": 25.868283093053734,
      "step": 2000
    },
    {
      "epoch": 0.21,
      "learning_rate": 8.26608695652174e-06,
      "loss": 0.4652,
      "step": 2500
    },
    {
      "epoch": 0.25,
      "learning_rate": 7.831304347826087e-06,
      "loss": 0.4449,
      "step": 3000
    },
    {
      "epoch": 0.25,
      "eval_loss": 0.4347614645957947,
      "eval_runtime": 5767.4751,
      "eval_samples_per_second": 0.578,
      "eval_steps_per_second": 0.036,
      "eval_wer": 24.18086500655308,
      "step": 3000
    },
    {
      "epoch": 0.29,
      "learning_rate": 7.396521739130436e-06,
      "loss": 0.4431,
      "step": 3500
    },
    {
      "epoch": 0.33,
      "learning_rate": 6.961739130434784e-06,
      "loss": 0.4267,
      "step": 4000
    },
    {
      "epoch": 0.33,
      "eval_loss": 0.41802194714546204,
      "eval_runtime": 5718.7598,
      "eval_samples_per_second": 0.583,
      "eval_steps_per_second": 0.037,
      "eval_wer": 23.638188073394495,
      "step": 4000
    },
    {
      "epoch": 0.38,
      "learning_rate": 6.526956521739131e-06,
      "loss": 0.4231,
      "step": 4500
    },
    {
      "epoch": 0.42,
      "learning_rate": 6.092173913043479e-06,
      "loss": 0.4163,
      "step": 5000
    },
    {
      "epoch": 0.42,
      "eval_loss": 0.4041951596736908,
      "eval_runtime": 6102.971,
      "eval_samples_per_second": 0.546,
      "eval_steps_per_second": 0.034,
      "eval_wer": 23.937172346002622,
      "step": 5000
    },
    {
      "epoch": 0.46,
      "learning_rate": 5.6573913043478265e-06,
      "loss": 0.4094,
      "step": 5500
    },
    {
      "epoch": 1.03,
      "learning_rate": 5.222608695652175e-06,
      "loss": 0.3171,
      "step": 6000
    },
    {
      "epoch": 1.03,
      "eval_loss": 0.4081740379333496,
      "eval_runtime": 5663.9498,
      "eval_samples_per_second": 0.588,
      "eval_steps_per_second": 0.037,
      "eval_wer": 23.087319790301443,
      "step": 6000
    },
    {
      "epoch": 1.07,
      "learning_rate": 4.787826086956522e-06,
      "loss": 0.273,
      "step": 6500
    },
    {
      "epoch": 1.11,
      "learning_rate": 4.35304347826087e-06,
      "loss": 0.2763,
      "step": 7000
    },
    {
      "epoch": 1.11,
      "eval_loss": 0.4000445604324341,
      "eval_runtime": 5952.601,
      "eval_samples_per_second": 0.56,
      "eval_steps_per_second": 0.035,
      "eval_wer": 22.94601900393185,
      "step": 7000
    },
    {
      "epoch": 1.16,
      "learning_rate": 3.918260869565217e-06,
      "loss": 0.2727,
      "step": 7500
    },
    {
      "epoch": 1.2,
      "learning_rate": 3.4843478260869567e-06,
      "loss": 0.279,
      "step": 8000
    },
    {
      "epoch": 1.2,
      "eval_loss": 0.39390885829925537,
      "eval_runtime": 5726.1575,
      "eval_samples_per_second": 0.582,
      "eval_steps_per_second": 0.036,
      "eval_wer": 22.493446920052424,
      "step": 8000
    },
    {
      "epoch": 1.24,
      "learning_rate": 3.049565217391305e-06,
      "loss": 0.2759,
      "step": 8500
    },
    {
      "epoch": 1.28,
      "learning_rate": 2.6147826086956524e-06,
      "loss": 0.2841,
      "step": 9000
    },
    {
      "epoch": 1.28,
      "eval_loss": 0.38072848320007324,
      "eval_runtime": 6058.2634,
      "eval_samples_per_second": 0.55,
      "eval_steps_per_second": 0.034,
      "eval_wer": 22.52211664482307,
      "step": 9000
    },
    {
      "epoch": 1.32,
      "learning_rate": 2.1800000000000003e-06,
      "loss": 0.2712,
      "step": 9500
    },
    {
      "epoch": 1.36,
      "learning_rate": 1.745217391304348e-06,
      "loss": 0.27,
      "step": 10000
    },
    {
      "epoch": 1.36,
      "eval_loss": 0.37836796045303345,
      "eval_runtime": 5966.0536,
      "eval_samples_per_second": 0.558,
      "eval_steps_per_second": 0.035,
      "eval_wer": 21.907765399737876,
      "step": 10000
    },
    {
      "epoch": 1.41,
      "learning_rate": 1.311304347826087e-06,
      "loss": 0.2869,
      "step": 10500
    },
    {
      "epoch": 1.45,
      "learning_rate": 8.773913043478262e-07,
      "loss": 0.2646,
      "step": 11000
    },
    {
      "epoch": 1.45,
      "eval_loss": 0.3694765865802765,
      "eval_runtime": 5884.5805,
      "eval_samples_per_second": 0.566,
      "eval_steps_per_second": 0.036,
      "eval_wer": 21.483863040629096,
      "step": 11000
    }
  ],
  "max_steps": 12000,
  "num_train_epochs": 9223372036854775807,
  "total_flos": 3.736950850289664e+20,
  "trial_name": null,
  "trial_params": null
}