|
{
  "best_metric": 6.531481563650885,
  "best_model_checkpoint": "whisper-stakwork-crypto/checkpoint-150",
  "epoch": 38.0025,
  "global_step": 800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 3.0000000000000004e-08,
      "loss": 0.8208,
      "step": 10
    },
    {
      "epoch": 0.03,
      "learning_rate": 8e-08,
      "loss": 0.8146,
      "step": 20
    },
    {
      "epoch": 1.01,
      "learning_rate": 1.3e-07,
      "loss": 0.7898,
      "step": 30
    },
    {
      "epoch": 1.02,
      "learning_rate": 1.8e-07,
      "loss": 0.7761,
      "step": 40
    },
    {
      "epoch": 2.01,
      "learning_rate": 2.3e-07,
      "loss": 0.7348,
      "step": 50
    },
    {
      "epoch": 2.01,
      "eval_loss": 0.7377644777297974,
      "eval_runtime": 118.0826,
      "eval_samples_per_second": 2.32,
      "eval_steps_per_second": 0.152,
      "eval_wer": 9.30913608732307,
      "step": 50
    },
    {
      "epoch": 2.02,
      "learning_rate": 2.8e-07,
      "loss": 0.7129,
      "step": 60
    },
    {
      "epoch": 3.01,
      "learning_rate": 3.3e-07,
      "loss": 0.6506,
      "step": 70
    },
    {
      "epoch": 3.02,
      "learning_rate": 3.8000000000000007e-07,
      "loss": 0.6193,
      "step": 80
    },
    {
      "epoch": 4.01,
      "learning_rate": 4.3e-07,
      "loss": 0.574,
      "step": 90
    },
    {
      "epoch": 4.02,
      "learning_rate": 4.800000000000001e-07,
      "loss": 0.5276,
      "step": 100
    },
    {
      "epoch": 4.02,
      "eval_loss": 0.5290005207061768,
      "eval_runtime": 121.5574,
      "eval_samples_per_second": 2.254,
      "eval_steps_per_second": 0.148,
      "eval_wer": 8.612503882504326,
      "step": 100
    },
    {
      "epoch": 5.01,
      "learning_rate": 5.3e-07,
      "loss": 0.4652,
      "step": 110
    },
    {
      "epoch": 5.02,
      "learning_rate": 5.8e-07,
      "loss": 0.4235,
      "step": 120
    },
    {
      "epoch": 6.0,
      "learning_rate": 6.3e-07,
      "loss": 0.3972,
      "step": 130
    },
    {
      "epoch": 6.02,
      "learning_rate": 6.8e-07,
      "loss": 0.3675,
      "step": 140
    },
    {
      "epoch": 7.0,
      "learning_rate": 7.300000000000001e-07,
      "loss": 0.3585,
      "step": 150
    },
    {
      "epoch": 7.0,
      "eval_loss": 0.3874845504760742,
      "eval_runtime": 130.7132,
      "eval_samples_per_second": 2.096,
      "eval_steps_per_second": 0.138,
      "eval_wer": 6.531481563650885,
      "step": 150
    },
    {
      "epoch": 7.02,
      "learning_rate": 7.8e-07,
      "loss": 0.3306,
      "step": 160
    },
    {
      "epoch": 8.0,
      "learning_rate": 8.3e-07,
      "loss": 0.3255,
      "step": 170
    },
    {
      "epoch": 8.02,
      "learning_rate": 8.8e-07,
      "loss": 0.307,
      "step": 180
    },
    {
      "epoch": 9.0,
      "learning_rate": 9.3e-07,
      "loss": 0.3035,
      "step": 190
    },
    {
      "epoch": 9.01,
      "learning_rate": 9.8e-07,
      "loss": 0.2924,
      "step": 200
    },
    {
      "epoch": 9.01,
      "eval_loss": 0.35476040840148926,
      "eval_runtime": 120.7053,
      "eval_samples_per_second": 2.27,
      "eval_steps_per_second": 0.149,
      "eval_wer": 6.677907441096862,
      "step": 200
    },
    {
      "epoch": 9.03,
      "learning_rate": 1.03e-06,
      "loss": 0.2826,
      "step": 210
    },
    {
      "epoch": 10.01,
      "learning_rate": 1.08e-06,
      "loss": 0.267,
      "step": 220
    },
    {
      "epoch": 10.03,
      "learning_rate": 1.13e-06,
      "loss": 0.27,
      "step": 230
    },
    {
      "epoch": 11.01,
      "learning_rate": 1.18e-06,
      "loss": 0.2492,
      "step": 240
    },
    {
      "epoch": 11.02,
      "learning_rate": 1.2299999999999999e-06,
      "loss": 0.2506,
      "step": 250
    },
    {
      "epoch": 11.02,
      "eval_loss": 0.33637768030166626,
      "eval_runtime": 118.1569,
      "eval_samples_per_second": 2.319,
      "eval_steps_per_second": 0.152,
      "eval_wer": 6.788836136131694,
      "step": 250
    },
    {
      "epoch": 12.01,
      "learning_rate": 1.28e-06,
      "loss": 0.2263,
      "step": 260
    },
    {
      "epoch": 12.02,
      "learning_rate": 1.3300000000000002e-06,
      "loss": 0.23,
      "step": 270
    },
    {
      "epoch": 13.01,
      "learning_rate": 1.3800000000000001e-06,
      "loss": 0.2102,
      "step": 280
    },
    {
      "epoch": 13.02,
      "learning_rate": 1.43e-06,
      "loss": 0.2093,
      "step": 290
    },
    {
      "epoch": 14.01,
      "learning_rate": 1.48e-06,
      "loss": 0.1946,
      "step": 300
    },
    {
      "epoch": 14.01,
      "eval_loss": 0.32616764307022095,
      "eval_runtime": 119.3365,
      "eval_samples_per_second": 2.296,
      "eval_steps_per_second": 0.151,
      "eval_wer": 7.148245108044549,
      "step": 300
    },
    {
      "epoch": 14.02,
      "learning_rate": 1.53e-06,
      "loss": 0.1848,
      "step": 310
    },
    {
      "epoch": 15.01,
      "learning_rate": 1.58e-06,
      "loss": 0.1752,
      "step": 320
    },
    {
      "epoch": 15.02,
      "learning_rate": 1.63e-06,
      "loss": 0.1659,
      "step": 330
    },
    {
      "epoch": 16.0,
      "learning_rate": 1.6800000000000002e-06,
      "loss": 0.1579,
      "step": 340
    },
    {
      "epoch": 16.02,
      "learning_rate": 1.73e-06,
      "loss": 0.1411,
      "step": 350
    },
    {
      "epoch": 16.02,
      "eval_loss": 0.33289629220962524,
      "eval_runtime": 117.7955,
      "eval_samples_per_second": 2.326,
      "eval_steps_per_second": 0.153,
      "eval_wer": 7.210365177264055,
      "step": 350
    },
    {
      "epoch": 17.0,
      "learning_rate": 1.7800000000000001e-06,
      "loss": 0.1374,
      "step": 360
    },
    {
      "epoch": 17.02,
      "learning_rate": 1.83e-06,
      "loss": 0.1223,
      "step": 370
    },
    {
      "epoch": 18.0,
      "learning_rate": 1.8800000000000002e-06,
      "loss": 0.1173,
      "step": 380
    },
    {
      "epoch": 18.02,
      "learning_rate": 1.93e-06,
      "loss": 0.1034,
      "step": 390
    },
    {
      "epoch": 19.0,
      "learning_rate": 1.98e-06,
      "loss": 0.1005,
      "step": 400
    },
    {
      "epoch": 19.0,
      "eval_loss": 0.34216001629829407,
      "eval_runtime": 121.3922,
      "eval_samples_per_second": 2.257,
      "eval_steps_per_second": 0.148,
      "eval_wer": 7.556462705772729,
      "step": 400
    },
    {
      "epoch": 19.01,
      "learning_rate": 2.03e-06,
      "loss": 0.0894,
      "step": 410
    },
    {
      "epoch": 19.03,
      "learning_rate": 2.08e-06,
      "loss": 0.0786,
      "step": 420
    },
    {
      "epoch": 20.01,
      "learning_rate": 2.13e-06,
      "loss": 0.0694,
      "step": 430
    },
    {
      "epoch": 20.02,
      "learning_rate": 2.1800000000000003e-06,
      "loss": 0.066,
      "step": 440
    },
    {
      "epoch": 21.01,
      "learning_rate": 2.23e-06,
      "loss": 0.0535,
      "step": 450
    },
    {
      "epoch": 21.01,
      "eval_loss": 0.35322892665863037,
      "eval_runtime": 120.2632,
      "eval_samples_per_second": 2.278,
      "eval_steps_per_second": 0.15,
      "eval_wer": 7.179305142654302,
      "step": 450
    },
    {
      "epoch": 21.02,
      "learning_rate": 2.28e-06,
      "loss": 0.0494,
      "step": 460
    },
    {
      "epoch": 22.01,
      "learning_rate": 2.3299999999999997e-06,
      "loss": 0.043,
      "step": 470
    },
    {
      "epoch": 22.02,
      "learning_rate": 2.38e-06,
      "loss": 0.0359,
      "step": 480
    },
    {
      "epoch": 23.01,
      "learning_rate": 2.43e-06,
      "loss": 0.0311,
      "step": 490
    },
    {
      "epoch": 23.02,
      "learning_rate": 2.48e-06,
      "loss": 0.0259,
      "step": 500
    },
    {
      "epoch": 23.02,
      "eval_loss": 0.34564533829689026,
      "eval_runtime": 119.3022,
      "eval_samples_per_second": 2.297,
      "eval_steps_per_second": 0.151,
      "eval_wer": 7.512091227758797,
      "step": 500
    },
    {
      "epoch": 24.01,
      "learning_rate": 2.5300000000000003e-06,
      "loss": 0.0222,
      "step": 510
    },
    {
      "epoch": 24.02,
      "learning_rate": 2.58e-06,
      "loss": 0.0186,
      "step": 520
    },
    {
      "epoch": 25.01,
      "learning_rate": 2.6300000000000002e-06,
      "loss": 0.0154,
      "step": 530
    },
    {
      "epoch": 25.02,
      "learning_rate": 2.68e-06,
      "loss": 0.0154,
      "step": 540
    },
    {
      "epoch": 26.0,
      "learning_rate": 2.73e-06,
      "loss": 0.0137,
      "step": 550
    },
    {
      "epoch": 26.0,
      "eval_loss": 0.3586655259132385,
      "eval_runtime": 121.9747,
      "eval_samples_per_second": 2.246,
      "eval_steps_per_second": 0.148,
      "eval_wer": 7.654079957403381,
      "step": 550
    },
    {
      "epoch": 26.02,
      "learning_rate": 2.78e-06,
      "loss": 0.0121,
      "step": 560
    },
    {
      "epoch": 27.0,
      "learning_rate": 2.83e-06,
      "loss": 0.0113,
      "step": 570
    },
    {
      "epoch": 27.02,
      "learning_rate": 2.88e-06,
      "loss": 0.0103,
      "step": 580
    },
    {
      "epoch": 28.0,
      "learning_rate": 2.9300000000000003e-06,
      "loss": 0.0112,
      "step": 590
    },
    {
      "epoch": 28.02,
      "learning_rate": 2.98e-06,
      "loss": 0.0078,
      "step": 600
    },
    {
      "epoch": 28.02,
      "eval_loss": 0.3590793013572693,
      "eval_runtime": 125.6972,
      "eval_samples_per_second": 2.18,
      "eval_steps_per_second": 0.143,
      "eval_wer": 7.352353906908639,
      "step": 600
    },
    {
      "epoch": 29.0,
      "learning_rate": 2.91e-06,
      "loss": 0.0074,
      "step": 610
    },
    {
      "epoch": 29.01,
      "learning_rate": 2.7600000000000003e-06,
      "loss": 0.0065,
      "step": 620
    },
    {
      "epoch": 29.03,
      "learning_rate": 2.61e-06,
      "loss": 0.0061,
      "step": 630
    },
    {
      "epoch": 30.01,
      "learning_rate": 2.4599999999999997e-06,
      "loss": 0.0053,
      "step": 640
    },
    {
      "epoch": 30.02,
      "learning_rate": 2.31e-06,
      "loss": 0.0041,
      "step": 650
    },
    {
      "epoch": 30.02,
      "eval_loss": 0.3672121465206146,
      "eval_runtime": 119.005,
      "eval_samples_per_second": 2.302,
      "eval_steps_per_second": 0.151,
      "eval_wer": 7.303545281093313,
      "step": 650
    },
    {
      "epoch": 31.01,
      "learning_rate": 2.16e-06,
      "loss": 0.0035,
      "step": 660
    },
    {
      "epoch": 31.02,
      "learning_rate": 2.0100000000000002e-06,
      "loss": 0.0032,
      "step": 670
    },
    {
      "epoch": 32.01,
      "learning_rate": 1.86e-06,
      "loss": 0.0029,
      "step": 680
    },
    {
      "epoch": 32.02,
      "learning_rate": 1.71e-06,
      "loss": 0.0028,
      "step": 690
    },
    {
      "epoch": 33.01,
      "learning_rate": 1.56e-06,
      "loss": 0.0026,
      "step": 700
    },
    {
      "epoch": 33.01,
      "eval_loss": 0.3961912989616394,
      "eval_runtime": 131.1922,
      "eval_samples_per_second": 2.089,
      "eval_steps_per_second": 0.137,
      "eval_wer": 7.321293872298887,
      "step": 700
    },
    {
      "epoch": 33.02,
      "learning_rate": 1.41e-06,
      "loss": 0.0026,
      "step": 710
    },
    {
      "epoch": 34.01,
      "learning_rate": 1.26e-06,
      "loss": 0.0024,
      "step": 720
    },
    {
      "epoch": 34.02,
      "learning_rate": 1.11e-06,
      "loss": 0.0024,
      "step": 730
    },
    {
      "epoch": 35.01,
      "learning_rate": 9.600000000000001e-07,
      "loss": 0.0026,
      "step": 740
    },
    {
      "epoch": 35.02,
      "learning_rate": 8.100000000000001e-07,
      "loss": 0.0022,
      "step": 750
    },
    {
      "epoch": 35.02,
      "eval_loss": 0.399670273065567,
      "eval_runtime": 119.4156,
      "eval_samples_per_second": 2.295,
      "eval_steps_per_second": 0.151,
      "eval_wer": 7.352353906908639,
      "step": 750
    },
    {
      "epoch": 36.01,
      "learning_rate": 6.6e-07,
      "loss": 0.0023,
      "step": 760
    },
    {
      "epoch": 36.02,
      "learning_rate": 5.100000000000001e-07,
      "loss": 0.0021,
      "step": 770
    },
    {
      "epoch": 37.0,
      "learning_rate": 3.6e-07,
      "loss": 0.0022,
      "step": 780
    },
    {
      "epoch": 37.02,
      "learning_rate": 2.1000000000000003e-07,
      "loss": 0.0021,
      "step": 790
    },
    {
      "epoch": 38.0,
      "learning_rate": 6.000000000000001e-08,
      "loss": 0.0022,
      "step": 800
    },
    {
      "epoch": 38.0,
      "eval_loss": 0.4024691879749298,
      "eval_runtime": 131.5096,
      "eval_samples_per_second": 2.083,
      "eval_steps_per_second": 0.137,
      "eval_wer": 7.330168167901673,
      "step": 800
    },
    {
      "epoch": 38.0,
      "step": 800,
      "total_flos": 2.570087278116864e+19,
      "train_loss": 0.18918936033500358,
      "train_runtime": 13916.8243,
      "train_samples_per_second": 1.84,
      "train_steps_per_second": 0.057
    }
  ],
  "max_steps": 800,
  "num_train_epochs": 9223372036854775807,
  "total_flos": 2.570087278116864e+19,
  "trial_name": null,
  "trial_params": null
}
|
|