{
"best_metric": 0.5780478119850159,
"best_model_checkpoint": "whisper-reg-ben/checkpoint-5394",
"epoch": 2.0,
"eval_steps": 500,
"global_step": 5394,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.19,
"grad_norm": 3.480727434158325,
"learning_rate": 4.623158292406498e-05,
"loss": 0.2145,
"step": 500
},
{
"epoch": 0.37,
"grad_norm": 5.088292121887207,
"learning_rate": 4.151870041556479e-05,
"loss": 0.2333,
"step": 1000
},
{
"epoch": 0.56,
"grad_norm": 3.8999531269073486,
"learning_rate": 3.679637325273895e-05,
"loss": 0.2302,
"step": 1500
},
{
"epoch": 0.74,
"grad_norm": 3.2788655757904053,
"learning_rate": 3.2074046089913114e-05,
"loss": 0.2293,
"step": 2000
},
{
"epoch": 0.93,
"grad_norm": 2.2892401218414307,
"learning_rate": 2.735171892708727e-05,
"loss": 0.2166,
"step": 2500
},
{
"epoch": 1.0,
"eval_cer": 0.3217996656894684,
"eval_loss": 0.2115698903799057,
"eval_runtime": 9048.1139,
"eval_samples_per_second": 0.298,
"eval_steps_per_second": 0.075,
"eval_wer": 0.6381270885467529,
"step": 2697
},
{
"epoch": 1.11,
"grad_norm": 3.279925584793091,
"learning_rate": 2.262939176426143e-05,
"loss": 0.1708,
"step": 3000
},
{
"epoch": 1.3,
"grad_norm": 2.526437997817993,
"learning_rate": 1.790706460143559e-05,
"loss": 0.1369,
"step": 3500
},
{
"epoch": 1.48,
"grad_norm": 3.6526429653167725,
"learning_rate": 1.3184737438609747e-05,
"loss": 0.1288,
"step": 4000
},
{
"epoch": 1.67,
"grad_norm": 2.9243693351745605,
"learning_rate": 8.462410275783907e-06,
"loss": 0.1239,
"step": 4500
},
{
"epoch": 1.85,
"grad_norm": 3.0473432540893555,
"learning_rate": 3.749527767283718e-06,
"loss": 0.1149,
"step": 5000
},
{
"epoch": 2.0,
"eval_cer": 0.28478333353996277,
"eval_loss": 0.18326056003570557,
"eval_runtime": 9056.1912,
"eval_samples_per_second": 0.298,
"eval_steps_per_second": 0.075,
"eval_wer": 0.5780478119850159,
"step": 5394
}
],
"logging_steps": 500,
"max_steps": 5394,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"total_flos": 6.2247870849024e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}