|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.4784151389710232,
  "global_step": 5000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 4.9546619357382224e-05,
      "loss": 7.0936,
      "step": 100
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.905381431105855e-05,
      "loss": 4.7571,
      "step": 200
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.856100926473487e-05,
      "loss": 4.3059,
      "step": 300
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.80682042184112e-05,
      "loss": 3.9402,
      "step": 400
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.7575399172087526e-05,
      "loss": 4.5039,
      "step": 500
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.708259412576385e-05,
      "loss": 5.0274,
      "step": 600
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.658978907944018e-05,
      "loss": 4.4826,
      "step": 700
    },
    {
      "epoch": 0.24,
      "learning_rate": 4.60969840331165e-05,
      "loss": 4.2794,
      "step": 800
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.560417898679283e-05,
      "loss": 4.1005,
      "step": 900
    },
    {
      "epoch": 0.3,
      "learning_rate": 4.5111373940469154e-05,
      "loss": 3.6633,
      "step": 1000
    },
    {
      "epoch": 0.3,
      "eval_cer": 1.0385071558472365,
      "eval_loss": 3.3380627632141113,
      "eval_runtime": 108.6612,
      "eval_samples_per_second": 13.832,
      "eval_steps_per_second": 3.46,
      "step": 1000
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.461856889414548e-05,
      "loss": 3.2821,
      "step": 1100
    },
    {
      "epoch": 0.35,
      "learning_rate": 4.41257638478218e-05,
      "loss": 3.1914,
      "step": 1200
    },
    {
      "epoch": 0.38,
      "learning_rate": 4.363295880149813e-05,
      "loss": 3.0884,
      "step": 1300
    },
    {
      "epoch": 0.41,
      "learning_rate": 4.3140153755174456e-05,
      "loss": 2.9599,
      "step": 1400
    },
    {
      "epoch": 0.44,
      "learning_rate": 4.264734870885078e-05,
      "loss": 2.8756,
      "step": 1500
    },
    {
      "epoch": 0.47,
      "learning_rate": 4.215454366252711e-05,
      "loss": 2.7076,
      "step": 1600
    },
    {
      "epoch": 0.5,
      "learning_rate": 4.166173861620343e-05,
      "loss": 2.6674,
      "step": 1700
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.116893356987976e-05,
      "loss": 2.5175,
      "step": 1800
    },
    {
      "epoch": 0.56,
      "learning_rate": 4.0676128523556084e-05,
      "loss": 2.5046,
      "step": 1900
    },
    {
      "epoch": 0.59,
      "learning_rate": 4.018332347723241e-05,
      "loss": 2.3915,
      "step": 2000
    },
    {
      "epoch": 0.59,
      "eval_cer": 0.6388866924962442,
      "eval_loss": 2.358058214187622,
      "eval_runtime": 91.2055,
      "eval_samples_per_second": 16.479,
      "eval_steps_per_second": 4.123,
      "step": 2000
    },
    {
      "epoch": 0.62,
      "learning_rate": 3.969051843090874e-05,
      "loss": 2.2984,
      "step": 2100
    },
    {
      "epoch": 0.65,
      "learning_rate": 3.919771338458506e-05,
      "loss": 2.2398,
      "step": 2200
    },
    {
      "epoch": 0.68,
      "learning_rate": 3.8704908338261386e-05,
      "loss": 2.1845,
      "step": 2300
    },
    {
      "epoch": 0.71,
      "learning_rate": 3.821210329193771e-05,
      "loss": 2.0596,
      "step": 2400
    },
    {
      "epoch": 0.74,
      "learning_rate": 3.771929824561404e-05,
      "loss": 1.9458,
      "step": 2500
    },
    {
      "epoch": 0.77,
      "learning_rate": 3.722649319929036e-05,
      "loss": 1.8901,
      "step": 2600
    },
    {
      "epoch": 0.8,
      "learning_rate": 3.673368815296669e-05,
      "loss": 1.8351,
      "step": 2700
    },
    {
      "epoch": 0.83,
      "learning_rate": 3.6240883106643014e-05,
      "loss": 1.6854,
      "step": 2800
    },
    {
      "epoch": 0.86,
      "learning_rate": 3.574807806031934e-05,
      "loss": 1.6954,
      "step": 2900
    },
    {
      "epoch": 0.89,
      "learning_rate": 3.525527301399567e-05,
      "loss": 1.5061,
      "step": 3000
    },
    {
      "epoch": 0.89,
      "eval_cer": 0.47789989720882425,
      "eval_loss": 1.4634768962860107,
      "eval_runtime": 93.897,
      "eval_samples_per_second": 16.007,
      "eval_steps_per_second": 4.004,
      "step": 3000
    },
    {
      "epoch": 0.92,
      "learning_rate": 3.476246796767199e-05,
      "loss": 1.5024,
      "step": 3100
    },
    {
      "epoch": 0.95,
      "learning_rate": 3.4269662921348316e-05,
      "loss": 1.5311,
      "step": 3200
    },
    {
      "epoch": 0.98,
      "learning_rate": 3.377685787502464e-05,
      "loss": 1.334,
      "step": 3300
    },
    {
      "epoch": 1.01,
      "learning_rate": 3.328405282870097e-05,
      "loss": 1.2053,
      "step": 3400
    },
    {
      "epoch": 1.03,
      "learning_rate": 3.27912477823773e-05,
      "loss": 1.1355,
      "step": 3500
    },
    {
      "epoch": 1.06,
      "learning_rate": 3.229844273605362e-05,
      "loss": 1.0514,
      "step": 3600
    },
    {
      "epoch": 1.09,
      "learning_rate": 3.1805637689729944e-05,
      "loss": 0.9352,
      "step": 3700
    },
    {
      "epoch": 1.12,
      "learning_rate": 3.131283264340627e-05,
      "loss": 0.8822,
      "step": 3800
    },
    {
      "epoch": 1.15,
      "learning_rate": 3.08200275970826e-05,
      "loss": 0.9451,
      "step": 3900
    },
    {
      "epoch": 1.18,
      "learning_rate": 3.0327222550758922e-05,
      "loss": 0.8043,
      "step": 4000
    },
    {
      "epoch": 1.18,
      "eval_cer": 0.34268996599984186,
      "eval_loss": 0.826034426689148,
      "eval_runtime": 95.3049,
      "eval_samples_per_second": 15.77,
      "eval_steps_per_second": 3.945,
      "step": 4000
    },
    {
      "epoch": 1.21,
      "learning_rate": 2.9834417504435245e-05,
      "loss": 0.8479,
      "step": 4100
    },
    {
      "epoch": 1.24,
      "learning_rate": 2.9341612458111573e-05,
      "loss": 0.7237,
      "step": 4200
    },
    {
      "epoch": 1.27,
      "learning_rate": 2.88488074117879e-05,
      "loss": 0.6919,
      "step": 4300
    },
    {
      "epoch": 1.3,
      "learning_rate": 2.8356002365464223e-05,
      "loss": 0.6698,
      "step": 4400
    },
    {
      "epoch": 1.33,
      "learning_rate": 2.786319731914055e-05,
      "loss": 0.5657,
      "step": 4500
    },
    {
      "epoch": 1.36,
      "learning_rate": 2.7370392272816874e-05,
      "loss": 0.6165,
      "step": 4600
    },
    {
      "epoch": 1.39,
      "learning_rate": 2.68775872264932e-05,
      "loss": 0.6387,
      "step": 4700
    },
    {
      "epoch": 1.42,
      "learning_rate": 2.6384782180169525e-05,
      "loss": 0.5372,
      "step": 4800
    },
    {
      "epoch": 1.45,
      "learning_rate": 2.5891977133845852e-05,
      "loss": 0.5254,
      "step": 4900
    },
    {
      "epoch": 1.48,
      "learning_rate": 2.539917208752218e-05,
      "loss": 0.46,
      "step": 5000
    },
    {
      "epoch": 1.48,
      "eval_cer": 0.22416383332015496,
      "eval_loss": 0.5074146389961243,
      "eval_runtime": 96.0964,
      "eval_samples_per_second": 15.641,
      "eval_steps_per_second": 3.913,
      "step": 5000
    }
  ],
  "max_steps": 10146,
  "num_train_epochs": 3,
  "total_flos": 3.621264453794857e+18,
  "trial_name": null,
  "trial_params": null
}
|
|