{ "best_metric": 0.6288811564445496, "best_model_checkpoint": "/scratch/skscla001/results/w2v-bert-bem-natbed-combined-model/checkpoint-2400", "epoch": 7.509386733416771, "eval_steps": 200, "global_step": 3000, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.2503128911138924, "grad_norm": 3.927278518676758, "learning_rate": 0.00029699999999999996, "loss": 2.3529, "step": 100 }, { "epoch": 0.5006257822277848, "grad_norm": 2.2314515113830566, "learning_rate": 0.00029749789385004213, "loss": 1.0078, "step": 200 }, { "epoch": 0.5006257822277848, "eval_loss": 0.9815022945404053, "eval_runtime": 61.0946, "eval_samples_per_second": 22.228, "eval_steps_per_second": 2.783, "eval_wer": 0.8200802555073294, "step": 200 }, { "epoch": 0.7509386733416771, "grad_norm": 2.754998207092285, "learning_rate": 0.0002949705139005897, "loss": 0.9989, "step": 300 }, { "epoch": 1.0012515644555695, "grad_norm": 3.1453473567962646, "learning_rate": 0.0002924431339511373, "loss": 0.8769, "step": 400 }, { "epoch": 1.0012515644555695, "eval_loss": 0.9823216199874878, "eval_runtime": 61.3287, "eval_samples_per_second": 22.143, "eval_steps_per_second": 2.772, "eval_wer": 1.0433215952829415, "step": 400 }, { "epoch": 1.2515644555694618, "grad_norm": 11.187702178955078, "learning_rate": 0.0002899157540016849, "loss": 0.8422, "step": 500 }, { "epoch": 1.5018773466833542, "grad_norm": 2.2132036685943604, "learning_rate": 0.0002873883740522325, "loss": 0.805, "step": 600 }, { "epoch": 1.5018773466833542, "eval_loss": 0.8306216597557068, "eval_runtime": 61.3405, "eval_samples_per_second": 22.139, "eval_steps_per_second": 2.771, "eval_wer": 0.8606174760461879, "step": 600 }, { "epoch": 1.7521902377972465, "grad_norm": 8.310944557189941, "learning_rate": 0.0002848609941027801, "loss": 0.7299, "step": 700 }, { "epoch": 2.002503128911139, "grad_norm": 0.9676926136016846, "learning_rate": 0.0002823336141533277, "loss": 0.8141, 
"step": 800 }, { "epoch": 2.002503128911139, "eval_loss": 0.7547941207885742, "eval_runtime": 61.8648, "eval_samples_per_second": 21.951, "eval_steps_per_second": 2.748, "eval_wer": 0.7195970845958561, "step": 800 }, { "epoch": 2.252816020025031, "grad_norm": null, "learning_rate": 0.0002798315080033698, "loss": 0.7184, "step": 900 }, { "epoch": 2.5031289111389237, "grad_norm": 2.475212335586548, "learning_rate": 0.00027730412805391743, "loss": 0.7132, "step": 1000 }, { "epoch": 2.5031289111389237, "eval_loss": 0.7485368847846985, "eval_runtime": 65.846, "eval_samples_per_second": 20.624, "eval_steps_per_second": 2.582, "eval_wer": 0.6932274179018917, "step": 1000 }, { "epoch": 2.7534418022528158, "grad_norm": 12.120068550109863, "learning_rate": 0.000274776748104465, "loss": 0.706, "step": 1100 }, { "epoch": 3.0037546933667083, "grad_norm": 1.1044033765792847, "learning_rate": 0.0002722493681550126, "loss": 0.7058, "step": 1200 }, { "epoch": 3.0037546933667083, "eval_loss": 0.7280192375183105, "eval_runtime": 61.369, "eval_samples_per_second": 22.128, "eval_steps_per_second": 2.77, "eval_wer": 0.6916714437801982, "step": 1200 }, { "epoch": 3.254067584480601, "grad_norm": 1.5886726379394531, "learning_rate": 0.0002697219882055602, "loss": 0.6273, "step": 1300 }, { "epoch": 3.504380475594493, "grad_norm": 5.7870659828186035, "learning_rate": 0.0002671946082561078, "loss": 0.6563, "step": 1400 }, { "epoch": 3.504380475594493, "eval_loss": 0.7046008706092834, "eval_runtime": 61.6596, "eval_samples_per_second": 22.024, "eval_steps_per_second": 2.757, "eval_wer": 0.7045287036278765, "step": 1400 }, { "epoch": 3.7546933667083855, "grad_norm": 1.0691437721252441, "learning_rate": 0.0002646672283066554, "loss": 0.6538, "step": 1500 }, { "epoch": 4.005006257822278, "grad_norm": 3.255420684814453, "learning_rate": 0.000262139848357203, "loss": 0.6232, "step": 1600 }, { "epoch": 4.005006257822278, "eval_loss": 0.7186261415481567, "eval_runtime": 61.5315, 
"eval_samples_per_second": 22.07, "eval_steps_per_second": 2.763, "eval_wer": 0.7408893620506101, "step": 1600 }, { "epoch": 4.25531914893617, "grad_norm": 0.7094090580940247, "learning_rate": 0.00025961246840775057, "loss": 0.585, "step": 1700 }, { "epoch": 4.505632040050062, "grad_norm": 3.2348897457122803, "learning_rate": 0.0002570850884582982, "loss": 0.6093, "step": 1800 }, { "epoch": 4.505632040050062, "eval_loss": 0.7048168182373047, "eval_runtime": 62.5823, "eval_samples_per_second": 21.699, "eval_steps_per_second": 2.716, "eval_wer": 0.643436246007698, "step": 1800 }, { "epoch": 4.755944931163955, "grad_norm": 0.6349893808364868, "learning_rate": 0.0002545577085088458, "loss": 0.6006, "step": 1900 }, { "epoch": 5.006257822277847, "grad_norm": 1.0852922201156616, "learning_rate": 0.0002520303285593934, "loss": 0.5767, "step": 2000 }, { "epoch": 5.006257822277847, "eval_loss": 0.6520542502403259, "eval_runtime": 61.2377, "eval_samples_per_second": 22.176, "eval_steps_per_second": 2.776, "eval_wer": 0.6474490213741708, "step": 2000 }, { "epoch": 5.256570713391739, "grad_norm": 1.3806486129760742, "learning_rate": 0.000249502948609941, "loss": 0.5647, "step": 2100 }, { "epoch": 5.506883604505632, "grad_norm": 1.1985052824020386, "learning_rate": 0.0002469755686604886, "loss": 0.5628, "step": 2200 }, { "epoch": 5.506883604505632, "eval_loss": 0.6322329640388489, "eval_runtime": 61.1301, "eval_samples_per_second": 22.215, "eval_steps_per_second": 2.781, "eval_wer": 0.6017525182212758, "step": 2200 }, { "epoch": 5.7571964956195245, "grad_norm": 1.5096967220306396, "learning_rate": 0.0002444481887110362, "loss": 0.5281, "step": 2300 }, { "epoch": 6.007509386733417, "grad_norm": 1.2591967582702637, "learning_rate": 0.00024192080876158381, "loss": 0.5569, "step": 2400 }, { "epoch": 6.007509386733417, "eval_loss": 0.6288811564445496, "eval_runtime": 61.1464, "eval_samples_per_second": 22.209, "eval_steps_per_second": 2.78, "eval_wer": 0.6078126279583982, "step": 
2400 }, { "epoch": 6.257822277847309, "grad_norm": 1.0905272960662842, "learning_rate": 0.0002393934288121314, "loss": 0.538, "step": 2500 }, { "epoch": 6.508135168961202, "grad_norm": 0.9892663359642029, "learning_rate": 0.000236866048862679, "loss": 0.5156, "step": 2600 }, { "epoch": 6.508135168961202, "eval_loss": 0.6504250764846802, "eval_runtime": 60.9759, "eval_samples_per_second": 22.271, "eval_steps_per_second": 2.788, "eval_wer": 0.6373761362705757, "step": 2600 }, { "epoch": 6.758448060075094, "grad_norm": 4.6128106117248535, "learning_rate": 0.0002343386689132266, "loss": 0.5022, "step": 2700 }, { "epoch": 7.008760951188986, "grad_norm": 1.0073637962341309, "learning_rate": 0.00023181128896377422, "loss": 0.5074, "step": 2800 }, { "epoch": 7.008760951188986, "eval_loss": 0.663825273513794, "eval_runtime": 60.8911, "eval_samples_per_second": 22.302, "eval_steps_per_second": 2.792, "eval_wer": 0.6222258619277701, "step": 2800 }, { "epoch": 7.259073842302879, "grad_norm": 1.6625531911849976, "learning_rate": 0.00022928390901432182, "loss": 0.478, "step": 2900 }, { "epoch": 7.509386733416771, "grad_norm": 4.11372184753418, "learning_rate": 0.0002267565290648694, "loss": 0.4906, "step": 3000 }, { "epoch": 7.509386733416771, "eval_loss": 0.6743665933609009, "eval_runtime": 61.0318, "eval_samples_per_second": 22.251, "eval_steps_per_second": 2.785, "eval_wer": 0.5884038981246417, "step": 3000 }, { "epoch": 7.509386733416771, "step": 3000, "total_flos": 9.634997827277466e+18, "train_loss": 0.7149289563496908, "train_runtime": 5175.4065, "train_samples_per_second": 37.023, "train_steps_per_second": 2.313 } ], "logging_steps": 100, "max_steps": 11970, "num_input_tokens_seen": 0, "num_train_epochs": 30, "save_steps": 200, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 3, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 3 } }, "TrainerControl": { "args": { "should_epoch_stop": false, 
"should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 9.634997827277466e+18, "train_batch_size": 8, "trial_name": null, "trial_params": null }