|
{
  "best_metric": 0.288705050945282,
  "best_model_checkpoint": "/scratch/skscla001/results/mms-1b-all-bem-genbed-m-model/checkpoint-3600",
  "epoch": 5.379310344827586,
  "eval_steps": 100,
  "global_step": 3900,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13793103448275862,
      "grad_norm": 4.020408630371094,
      "learning_rate": 0.00029099999999999997,
      "loss": 6.6677,
      "step": 100
    },
    {
      "epoch": 0.13793103448275862,
      "eval_loss": 1.1907541751861572,
      "eval_runtime": 66.3884,
      "eval_samples_per_second": 14.596,
      "eval_steps_per_second": 1.838,
      "eval_wer": 0.9661625503209662,
      "step": 100
    },
    {
      "epoch": 0.27586206896551724,
      "grad_norm": 2.7590324878692627,
      "learning_rate": 0.00029865588914549653,
      "loss": 0.7337,
      "step": 200
    },
    {
      "epoch": 0.27586206896551724,
      "eval_loss": 0.41317346692085266,
      "eval_runtime": 66.0778,
      "eval_samples_per_second": 14.665,
      "eval_steps_per_second": 1.846,
      "eval_wer": 0.5657708628005658,
      "step": 200
    },
    {
      "epoch": 0.41379310344827586,
      "grad_norm": 3.6684412956237793,
      "learning_rate": 0.000297270207852194,
      "loss": 0.5661,
      "step": 300
    },
    {
      "epoch": 0.41379310344827586,
      "eval_loss": 0.3766968846321106,
      "eval_runtime": 65.3213,
      "eval_samples_per_second": 14.834,
      "eval_steps_per_second": 1.868,
      "eval_wer": 0.5507561745185507,
      "step": 300
    },
    {
      "epoch": 0.5517241379310345,
      "grad_norm": 1.821001648902893,
      "learning_rate": 0.00029588452655889143,
      "loss": 0.523,
      "step": 400
    },
    {
      "epoch": 0.5517241379310345,
      "eval_loss": 0.3591341972351074,
      "eval_runtime": 66.145,
      "eval_samples_per_second": 14.65,
      "eval_steps_per_second": 1.844,
      "eval_wer": 0.5009248177565009,
      "step": 400
    },
    {
      "epoch": 0.6896551724137931,
      "grad_norm": 1.441114068031311,
      "learning_rate": 0.0002944988452655889,
      "loss": 0.5391,
      "step": 500
    },
    {
      "epoch": 0.6896551724137931,
      "eval_loss": 0.3529561161994934,
      "eval_runtime": 66.2528,
      "eval_samples_per_second": 14.626,
      "eval_steps_per_second": 1.841,
      "eval_wer": 0.5053857034055054,
      "step": 500
    },
    {
      "epoch": 0.8275862068965517,
      "grad_norm": 2.5801000595092773,
      "learning_rate": 0.00029311316397228633,
      "loss": 0.4978,
      "step": 600
    },
    {
      "epoch": 0.8275862068965517,
      "eval_loss": 0.3495703935623169,
      "eval_runtime": 66.263,
      "eval_samples_per_second": 14.624,
      "eval_steps_per_second": 1.841,
      "eval_wer": 0.5096289848765096,
      "step": 600
    },
    {
      "epoch": 0.9655172413793104,
      "grad_norm": 2.353003978729248,
      "learning_rate": 0.00029172748267898384,
      "loss": 0.4737,
      "step": 700
    },
    {
      "epoch": 0.9655172413793104,
      "eval_loss": 0.3478347957134247,
      "eval_runtime": 66.0122,
      "eval_samples_per_second": 14.679,
      "eval_steps_per_second": 1.848,
      "eval_wer": 0.5078881514525079,
      "step": 700
    },
    {
      "epoch": 1.103448275862069,
      "grad_norm": 1.0043631792068481,
      "learning_rate": 0.00029034180138568123,
      "loss": 0.4878,
      "step": 800
    },
    {
      "epoch": 1.103448275862069,
      "eval_loss": 0.33903008699417114,
      "eval_runtime": 65.851,
      "eval_samples_per_second": 14.715,
      "eval_steps_per_second": 1.853,
      "eval_wer": 0.4737242955064737,
      "step": 800
    },
    {
      "epoch": 1.2413793103448276,
      "grad_norm": 0.7626993656158447,
      "learning_rate": 0.00028895612009237874,
      "loss": 0.4737,
      "step": 900
    },
    {
      "epoch": 1.2413793103448276,
      "eval_loss": 0.3304341435432434,
      "eval_runtime": 65.8955,
      "eval_samples_per_second": 14.705,
      "eval_steps_per_second": 1.851,
      "eval_wer": 0.49047981721249045,
      "step": 900
    },
    {
      "epoch": 1.3793103448275863,
      "grad_norm": 0.9640819430351257,
      "learning_rate": 0.0002875704387990762,
      "loss": 0.4603,
      "step": 1000
    },
    {
      "epoch": 1.3793103448275863,
      "eval_loss": 0.33281686902046204,
      "eval_runtime": 66.3,
      "eval_samples_per_second": 14.615,
      "eval_steps_per_second": 1.84,
      "eval_wer": 0.48416929605048414,
      "step": 1000
    },
    {
      "epoch": 1.5172413793103448,
      "grad_norm": 2.8595709800720215,
      "learning_rate": 0.0002861986143187067,
      "loss": 0.4793,
      "step": 1100
    },
    {
      "epoch": 1.5172413793103448,
      "eval_loss": 0.3252318799495697,
      "eval_runtime": 66.2152,
      "eval_samples_per_second": 14.634,
      "eval_steps_per_second": 1.842,
      "eval_wer": 0.46404090958546407,
      "step": 1100
    },
    {
      "epoch": 1.6551724137931034,
      "grad_norm": 0.949995219707489,
      "learning_rate": 0.0002848129330254041,
      "loss": 0.4359,
      "step": 1200
    },
    {
      "epoch": 1.6551724137931034,
      "eval_loss": 0.32520735263824463,
      "eval_runtime": 66.7065,
      "eval_samples_per_second": 14.526,
      "eval_steps_per_second": 1.829,
      "eval_wer": 0.4658905450984659,
      "step": 1200
    },
    {
      "epoch": 1.793103448275862,
      "grad_norm": 0.6196809411048889,
      "learning_rate": 0.0002834272517321016,
      "loss": 0.457,
      "step": 1300
    },
    {
      "epoch": 1.793103448275862,
      "eval_loss": 0.32037919759750366,
      "eval_runtime": 66.2729,
      "eval_samples_per_second": 14.621,
      "eval_steps_per_second": 1.841,
      "eval_wer": 0.4716570558154717,
      "step": 1300
    },
    {
      "epoch": 1.9310344827586206,
      "grad_norm": 1.259907603263855,
      "learning_rate": 0.0002820415704387991,
      "loss": 0.469,
      "step": 1400
    },
    {
      "epoch": 1.9310344827586206,
      "eval_loss": 0.3184985816478729,
      "eval_runtime": 66.6429,
      "eval_samples_per_second": 14.54,
      "eval_steps_per_second": 1.831,
      "eval_wer": 0.4696986182134697,
      "step": 1400
    },
    {
      "epoch": 2.0689655172413794,
      "grad_norm": 2.5777244567871094,
      "learning_rate": 0.00028065588914549653,
      "loss": 0.4894,
      "step": 1500
    },
    {
      "epoch": 2.0689655172413794,
      "eval_loss": 0.31666702032089233,
      "eval_runtime": 66.147,
      "eval_samples_per_second": 14.649,
      "eval_steps_per_second": 1.844,
      "eval_wer": 0.4621912740724622,
      "step": 1500
    },
    {
      "epoch": 2.206896551724138,
      "grad_norm": 1.6249274015426636,
      "learning_rate": 0.000279270207852194,
      "loss": 0.4386,
      "step": 1600
    },
    {
      "epoch": 2.206896551724138,
      "eval_loss": 0.3184942603111267,
      "eval_runtime": 66.4241,
      "eval_samples_per_second": 14.588,
      "eval_steps_per_second": 1.837,
      "eval_wer": 0.4645849200304646,
      "step": 1600
    },
    {
      "epoch": 2.344827586206897,
      "grad_norm": 3.3305695056915283,
      "learning_rate": 0.00027788452655889143,
      "loss": 0.4441,
      "step": 1700
    },
    {
      "epoch": 2.344827586206897,
      "eval_loss": 0.30993008613586426,
      "eval_runtime": 66.4734,
      "eval_samples_per_second": 14.577,
      "eval_steps_per_second": 1.835,
      "eval_wer": 0.4606680448264607,
      "step": 1700
    },
    {
      "epoch": 2.4827586206896552,
      "grad_norm": 8.44198989868164,
      "learning_rate": 0.0002764988452655889,
      "loss": 0.444,
      "step": 1800
    },
    {
      "epoch": 2.4827586206896552,
      "eval_loss": 0.315364807844162,
      "eval_runtime": 66.1431,
      "eval_samples_per_second": 14.65,
      "eval_steps_per_second": 1.844,
      "eval_wer": 0.4551191382874551,
      "step": 1800
    },
    {
      "epoch": 2.6206896551724137,
      "grad_norm": 2.8819918632507324,
      "learning_rate": 0.00027511316397228633,
      "loss": 0.4065,
      "step": 1900
    },
    {
      "epoch": 2.6206896551724137,
      "eval_loss": 0.3137612044811249,
      "eval_runtime": 66.3301,
      "eval_samples_per_second": 14.609,
      "eval_steps_per_second": 1.839,
      "eval_wer": 0.4623000761614623,
      "step": 1900
    },
    {
      "epoch": 2.7586206896551726,
      "grad_norm": 2.527022123336792,
      "learning_rate": 0.0002737274826789838,
      "loss": 0.4163,
      "step": 2000
    },
    {
      "epoch": 2.7586206896551726,
      "eval_loss": 0.3087407350540161,
      "eval_runtime": 66.4958,
      "eval_samples_per_second": 14.572,
      "eval_steps_per_second": 1.835,
      "eval_wer": 0.43771080404743773,
      "step": 2000
    },
    {
      "epoch": 2.896551724137931,
      "grad_norm": 4.25317907333374,
      "learning_rate": 0.0002723418013856813,
      "loss": 0.4518,
      "step": 2100
    },
    {
      "epoch": 2.896551724137931,
      "eval_loss": 0.3054434657096863,
      "eval_runtime": 66.3758,
      "eval_samples_per_second": 14.599,
      "eval_steps_per_second": 1.838,
      "eval_wer": 0.44946142965944946,
      "step": 2100
    },
    {
      "epoch": 3.0344827586206895,
      "grad_norm": 1.4464918375015259,
      "learning_rate": 0.00027095612009237874,
      "loss": 0.4208,
      "step": 2200
    },
    {
      "epoch": 3.0344827586206895,
      "eval_loss": 0.30368348956108093,
      "eval_runtime": 65.8792,
      "eval_samples_per_second": 14.709,
      "eval_steps_per_second": 1.852,
      "eval_wer": 0.4512022630834512,
      "step": 2200
    },
    {
      "epoch": 3.1724137931034484,
      "grad_norm": 1.0703141689300537,
      "learning_rate": 0.0002695704387990762,
      "loss": 0.381,
      "step": 2300
    },
    {
      "epoch": 3.1724137931034484,
      "eval_loss": 0.3074105381965637,
      "eval_runtime": 66.1774,
      "eval_samples_per_second": 14.642,
      "eval_steps_per_second": 1.844,
      "eval_wer": 0.43858122075943856,
      "step": 2300
    },
    {
      "epoch": 3.310344827586207,
      "grad_norm": 0.6348339319229126,
      "learning_rate": 0.00026818475750577364,
      "loss": 0.4203,
      "step": 2400
    },
    {
      "epoch": 3.310344827586207,
      "eval_loss": 0.29894039034843445,
      "eval_runtime": 66.3138,
      "eval_samples_per_second": 14.612,
      "eval_steps_per_second": 1.84,
      "eval_wer": 0.42443694918942443,
      "step": 2400
    },
    {
      "epoch": 3.4482758620689653,
      "grad_norm": 0.7910144925117493,
      "learning_rate": 0.0002668129330254041,
      "loss": 0.4556,
      "step": 2500
    },
    {
      "epoch": 3.4482758620689653,
      "eval_loss": 0.30804964900016785,
      "eval_runtime": 66.2991,
      "eval_samples_per_second": 14.616,
      "eval_steps_per_second": 1.84,
      "eval_wer": 0.4835164835164835,
      "step": 2500
    },
    {
      "epoch": 3.586206896551724,
      "grad_norm": 0.7089941501617432,
      "learning_rate": 0.0002654411085450346,
      "loss": 0.4143,
      "step": 2600
    },
    {
      "epoch": 3.586206896551724,
      "eval_loss": 0.29555052518844604,
      "eval_runtime": 66.1318,
      "eval_samples_per_second": 14.653,
      "eval_steps_per_second": 1.845,
      "eval_wer": 0.42215210532042213,
      "step": 2600
    },
    {
      "epoch": 3.7241379310344827,
      "grad_norm": 1.1959691047668457,
      "learning_rate": 0.00026405542725173206,
      "loss": 0.4055,
      "step": 2700
    },
    {
      "epoch": 3.7241379310344827,
      "eval_loss": 0.30229437351226807,
      "eval_runtime": 66.3475,
      "eval_samples_per_second": 14.605,
      "eval_steps_per_second": 1.839,
      "eval_wer": 0.45805679469045807,
      "step": 2700
    },
    {
      "epoch": 3.862068965517241,
      "grad_norm": 0.894882321357727,
      "learning_rate": 0.00026266974595842956,
      "loss": 0.4102,
      "step": 2800
    },
    {
      "epoch": 3.862068965517241,
      "eval_loss": 0.2955451011657715,
      "eval_runtime": 66.6438,
      "eval_samples_per_second": 14.54,
      "eval_steps_per_second": 1.831,
      "eval_wer": 0.44119247089544117,
      "step": 2800
    },
    {
      "epoch": 4.0,
      "grad_norm": 1.242213249206543,
      "learning_rate": 0.000261284064665127,
      "loss": 0.4451,
      "step": 2900
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.29440462589263916,
      "eval_runtime": 66.0997,
      "eval_samples_per_second": 14.66,
      "eval_steps_per_second": 1.846,
      "eval_wer": 0.4181264280274181,
      "step": 2900
    },
    {
      "epoch": 4.137931034482759,
      "grad_norm": 4.585179328918457,
      "learning_rate": 0.00025989838337182446,
      "loss": 0.3857,
      "step": 3000
    },
    {
      "epoch": 4.137931034482759,
      "eval_loss": 0.29851463437080383,
      "eval_runtime": 66.1268,
      "eval_samples_per_second": 14.654,
      "eval_steps_per_second": 1.845,
      "eval_wer": 0.44260689805244263,
      "step": 3000
    },
    {
      "epoch": 4.275862068965517,
      "grad_norm": 2.503293991088867,
      "learning_rate": 0.0002585127020785219,
      "loss": 0.4071,
      "step": 3100
    },
    {
      "epoch": 4.275862068965517,
      "eval_loss": 0.29181334376335144,
      "eval_runtime": 66.2612,
      "eval_samples_per_second": 14.624,
      "eval_steps_per_second": 1.841,
      "eval_wer": 0.429006636927429,
      "step": 3100
    },
    {
      "epoch": 4.413793103448276,
      "grad_norm": 219.94293212890625,
      "learning_rate": 0.00025712702078521936,
      "loss": 0.4,
      "step": 3200
    },
    {
      "epoch": 4.413793103448276,
      "eval_loss": 0.2951139509677887,
      "eval_runtime": 66.077,
      "eval_samples_per_second": 14.665,
      "eval_steps_per_second": 1.846,
      "eval_wer": 0.43205309541943204,
      "step": 3200
    },
    {
      "epoch": 4.551724137931035,
      "grad_norm": 5.060625076293945,
      "learning_rate": 0.00025574133949191687,
      "loss": 0.4257,
      "step": 3300
    },
    {
      "epoch": 4.551724137931035,
      "eval_loss": 0.3035087585449219,
      "eval_runtime": 66.4299,
      "eval_samples_per_second": 14.587,
      "eval_steps_per_second": 1.837,
      "eval_wer": 0.4607768469154608,
      "step": 3300
    },
    {
      "epoch": 4.689655172413794,
      "grad_norm": 4.010824680328369,
      "learning_rate": 0.00025435565819861426,
      "loss": 0.3929,
      "step": 3400
    },
    {
      "epoch": 4.689655172413794,
      "eval_loss": 0.2965061366558075,
      "eval_runtime": 66.5423,
      "eval_samples_per_second": 14.562,
      "eval_steps_per_second": 1.833,
      "eval_wer": 0.4131215319334131,
      "step": 3400
    },
    {
      "epoch": 4.827586206896552,
      "grad_norm": 1.3017578125,
      "learning_rate": 0.00025296997690531177,
      "loss": 0.3957,
      "step": 3500
    },
    {
      "epoch": 4.827586206896552,
      "eval_loss": 0.2937914729118347,
      "eval_runtime": 66.4495,
      "eval_samples_per_second": 14.583,
      "eval_steps_per_second": 1.836,
      "eval_wer": 0.43966924164943966,
      "step": 3500
    },
    {
      "epoch": 4.9655172413793105,
      "grad_norm": 2.3724756240844727,
      "learning_rate": 0.0002515842956120092,
      "loss": 0.3974,
      "step": 3600
    },
    {
      "epoch": 4.9655172413793105,
      "eval_loss": 0.288705050945282,
      "eval_runtime": 66.1655,
      "eval_samples_per_second": 14.645,
      "eval_steps_per_second": 1.844,
      "eval_wer": 0.4119247089544119,
      "step": 3600
    },
    {
      "epoch": 5.103448275862069,
      "grad_norm": 1.0314807891845703,
      "learning_rate": 0.00025019861431870667,
      "loss": 0.3733,
      "step": 3700
    },
    {
      "epoch": 5.103448275862069,
      "eval_loss": 0.2889891266822815,
      "eval_runtime": 66.2632,
      "eval_samples_per_second": 14.624,
      "eval_steps_per_second": 1.841,
      "eval_wer": 0.404961375258405,
      "step": 3700
    },
    {
      "epoch": 5.241379310344827,
      "grad_norm": 1.2068380117416382,
      "learning_rate": 0.0002488129330254041,
      "loss": 0.382,
      "step": 3800
    },
    {
      "epoch": 5.241379310344827,
      "eval_loss": 0.29174748063087463,
      "eval_runtime": 66.0529,
      "eval_samples_per_second": 14.67,
      "eval_steps_per_second": 1.847,
      "eval_wer": 0.42334892829942333,
      "step": 3800
    },
    {
      "epoch": 5.379310344827586,
      "grad_norm": 1.330348014831543,
      "learning_rate": 0.00024742725173210157,
      "loss": 0.3669,
      "step": 3900
    },
    {
      "epoch": 5.379310344827586,
      "eval_loss": 0.2899990379810333,
      "eval_runtime": 66.5504,
      "eval_samples_per_second": 14.56,
      "eval_steps_per_second": 1.833,
      "eval_wer": 0.4312914807964313,
      "step": 3900
    },
    {
      "epoch": 5.379310344827586,
      "step": 3900,
      "total_flos": 9.379255341017438e+18,
      "train_loss": 0.6060099772917918,
      "train_runtime": 5080.4448,
      "train_samples_per_second": 17.124,
      "train_steps_per_second": 4.281
    }
  ],
  "logging_steps": 100,
  "max_steps": 21750,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 30,
  "save_steps": 400,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9.379255341017438e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
|
|