{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 100.0,
"global_step": 12600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.79,
"learning_rate": 1.47e-05,
"loss": 9.8646,
"step": 100
},
{
"epoch": 1.59,
"learning_rate": 2.97e-05,
"loss": 4.237,
"step": 200
},
{
"epoch": 2.38,
"learning_rate": 4.4699999999999996e-05,
"loss": 3.3845,
"step": 300
},
{
"epoch": 3.17,
"learning_rate": 5.97e-05,
"loss": 3.1402,
"step": 400
},
{
"epoch": 3.97,
"learning_rate": 7.47e-05,
"loss": 3.0675,
"step": 500
},
{
"epoch": 4.76,
"learning_rate": 8.969999999999998e-05,
"loss": 2.9355,
"step": 600
},
{
"epoch": 5.56,
"learning_rate": 0.00010469999999999998,
"loss": 2.4044,
"step": 700
},
{
"epoch": 6.35,
"learning_rate": 0.0001197,
"loss": 1.8799,
"step": 800
},
{
"epoch": 7.14,
"learning_rate": 0.0001347,
"loss": 1.6588,
"step": 900
},
{
"epoch": 7.94,
"learning_rate": 0.00014969999999999998,
"loss": 1.5757,
"step": 1000
},
{
"epoch": 8.73,
"learning_rate": 0.0001647,
"loss": 1.5135,
"step": 1100
},
{
"epoch": 9.52,
"learning_rate": 0.00017969999999999998,
"loss": 1.4795,
"step": 1200
},
{
"epoch": 10.32,
"learning_rate": 0.0001947,
"loss": 1.4418,
"step": 1300
},
{
"epoch": 11.11,
"learning_rate": 0.00020969999999999997,
"loss": 1.4246,
"step": 1400
},
{
"epoch": 11.9,
"learning_rate": 0.0002247,
"loss": 1.4072,
"step": 1500
},
{
"epoch": 12.7,
"learning_rate": 0.0002397,
"loss": 1.3936,
"step": 1600
},
{
"epoch": 13.49,
"learning_rate": 0.00025455,
"loss": 1.3632,
"step": 1700
},
{
"epoch": 14.29,
"learning_rate": 0.00026954999999999997,
"loss": 1.3721,
"step": 1800
},
{
"epoch": 15.08,
"learning_rate": 0.00028455,
"loss": 1.369,
"step": 1900
},
{
"epoch": 15.87,
"learning_rate": 0.00029955,
"loss": 1.3677,
"step": 2000
},
{
"epoch": 15.87,
"eval_loss": 0.6431823968887329,
"eval_runtime": 111.1097,
"eval_samples_per_second": 16.92,
"eval_steps_per_second": 16.92,
"eval_wer": 0.6198062079159139,
"step": 2000
},
{
"epoch": 16.67,
"learning_rate": 0.000297254716981132,
"loss": 1.3547,
"step": 2100
},
{
"epoch": 17.46,
"learning_rate": 0.0002944245283018868,
"loss": 1.346,
"step": 2200
},
{
"epoch": 18.25,
"learning_rate": 0.0002915943396226415,
"loss": 1.3279,
"step": 2300
},
{
"epoch": 19.05,
"learning_rate": 0.0002887641509433962,
"loss": 1.3309,
"step": 2400
},
{
"epoch": 19.84,
"learning_rate": 0.0002859339622641509,
"loss": 1.3086,
"step": 2500
},
{
"epoch": 20.63,
"learning_rate": 0.00028310377358490565,
"loss": 1.2874,
"step": 2600
},
{
"epoch": 21.43,
"learning_rate": 0.00028027358490566036,
"loss": 1.2788,
"step": 2700
},
{
"epoch": 22.22,
"learning_rate": 0.00027744339622641506,
"loss": 1.2532,
"step": 2800
},
{
"epoch": 23.02,
"learning_rate": 0.00027461320754716977,
"loss": 1.2481,
"step": 2900
},
{
"epoch": 23.81,
"learning_rate": 0.00027178301886792447,
"loss": 1.24,
"step": 3000
},
{
"epoch": 24.6,
"learning_rate": 0.00026895283018867923,
"loss": 1.2139,
"step": 3100
},
{
"epoch": 25.4,
"learning_rate": 0.00026612264150943394,
"loss": 1.2218,
"step": 3200
},
{
"epoch": 26.19,
"learning_rate": 0.00026329245283018864,
"loss": 1.2139,
"step": 3300
},
{
"epoch": 26.98,
"learning_rate": 0.00026046226415094335,
"loss": 1.2061,
"step": 3400
},
{
"epoch": 27.78,
"learning_rate": 0.0002576320754716981,
"loss": 1.1801,
"step": 3500
},
{
"epoch": 28.57,
"learning_rate": 0.0002548018867924528,
"loss": 1.1654,
"step": 3600
},
{
"epoch": 29.37,
"learning_rate": 0.0002519716981132075,
"loss": 1.1647,
"step": 3700
},
{
"epoch": 30.16,
"learning_rate": 0.0002491415094339622,
"loss": 1.1677,
"step": 3800
},
{
"epoch": 30.95,
"learning_rate": 0.00024631132075471693,
"loss": 1.1487,
"step": 3900
},
{
"epoch": 31.75,
"learning_rate": 0.00024348113207547166,
"loss": 1.1379,
"step": 4000
},
{
"epoch": 31.75,
"eval_loss": 0.6196324229240417,
"eval_runtime": 111.1854,
"eval_samples_per_second": 16.909,
"eval_steps_per_second": 16.909,
"eval_wer": 0.5592051239940877,
"step": 4000
},
{
"epoch": 32.54,
"learning_rate": 0.0002406509433962264,
"loss": 1.1574,
"step": 4100
},
{
"epoch": 33.33,
"learning_rate": 0.0002378207547169811,
"loss": 1.1431,
"step": 4200
},
{
"epoch": 34.13,
"learning_rate": 0.00023499056603773583,
"loss": 1.1192,
"step": 4300
},
{
"epoch": 34.92,
"learning_rate": 0.00023216037735849054,
"loss": 1.1371,
"step": 4400
},
{
"epoch": 35.71,
"learning_rate": 0.00022933018867924527,
"loss": 1.1085,
"step": 4500
},
{
"epoch": 36.51,
"learning_rate": 0.00022649999999999998,
"loss": 1.109,
"step": 4600
},
{
"epoch": 37.3,
"learning_rate": 0.0002236698113207547,
"loss": 1.1021,
"step": 4700
},
{
"epoch": 38.1,
"learning_rate": 0.00022083962264150941,
"loss": 1.1072,
"step": 4800
},
{
"epoch": 38.89,
"learning_rate": 0.00021800943396226415,
"loss": 1.1001,
"step": 4900
},
{
"epoch": 39.68,
"learning_rate": 0.00021517924528301885,
"loss": 1.0972,
"step": 5000
},
{
"epoch": 40.48,
"learning_rate": 0.00021234905660377356,
"loss": 1.0675,
"step": 5100
},
{
"epoch": 41.27,
"learning_rate": 0.0002095188679245283,
"loss": 1.0626,
"step": 5200
},
{
"epoch": 42.06,
"learning_rate": 0.000206688679245283,
"loss": 1.0505,
"step": 5300
},
{
"epoch": 42.86,
"learning_rate": 0.00020385849056603773,
"loss": 1.0665,
"step": 5400
},
{
"epoch": 43.65,
"learning_rate": 0.00020102830188679243,
"loss": 1.0456,
"step": 5500
},
{
"epoch": 44.44,
"learning_rate": 0.00019819811320754717,
"loss": 1.0474,
"step": 5600
},
{
"epoch": 45.24,
"learning_rate": 0.00019536792452830187,
"loss": 1.0439,
"step": 5700
},
{
"epoch": 46.03,
"learning_rate": 0.0001925377358490566,
"loss": 1.0231,
"step": 5800
},
{
"epoch": 46.83,
"learning_rate": 0.0001897075471698113,
"loss": 1.0248,
"step": 5900
},
{
"epoch": 47.62,
"learning_rate": 0.00018687735849056604,
"loss": 1.0093,
"step": 6000
},
{
"epoch": 47.62,
"eval_loss": 0.5827558040618896,
"eval_runtime": 112.3252,
"eval_samples_per_second": 16.737,
"eval_steps_per_second": 16.737,
"eval_wer": 0.5117424864509772,
"step": 6000
},
{
"epoch": 48.41,
"learning_rate": 0.00018404716981132075,
"loss": 1.0113,
"step": 6100
},
{
"epoch": 49.21,
"learning_rate": 0.00018121698113207548,
"loss": 1.0139,
"step": 6200
},
{
"epoch": 50.0,
"learning_rate": 0.00017838679245283019,
"loss": 1.0,
"step": 6300
},
{
"epoch": 50.79,
"learning_rate": 0.0001755566037735849,
"loss": 1.0081,
"step": 6400
},
{
"epoch": 51.59,
"learning_rate": 0.00017272641509433962,
"loss": 0.9896,
"step": 6500
},
{
"epoch": 52.38,
"learning_rate": 0.00016989622641509433,
"loss": 0.997,
"step": 6600
},
{
"epoch": 53.17,
"learning_rate": 0.00016709433962264152,
"loss": 0.9755,
"step": 6700
},
{
"epoch": 53.97,
"learning_rate": 0.00016426415094339622,
"loss": 0.9592,
"step": 6800
},
{
"epoch": 54.76,
"learning_rate": 0.00016143396226415093,
"loss": 0.9542,
"step": 6900
},
{
"epoch": 55.56,
"learning_rate": 0.00015860377358490566,
"loss": 0.9563,
"step": 7000
},
{
"epoch": 56.35,
"learning_rate": 0.00015577358490566036,
"loss": 0.9531,
"step": 7100
},
{
"epoch": 57.14,
"learning_rate": 0.0001529433962264151,
"loss": 0.9386,
"step": 7200
},
{
"epoch": 57.94,
"learning_rate": 0.0001501132075471698,
"loss": 0.9424,
"step": 7300
},
{
"epoch": 58.73,
"learning_rate": 0.0001472830188679245,
"loss": 0.9233,
"step": 7400
},
{
"epoch": 59.52,
"learning_rate": 0.00014445283018867924,
"loss": 0.912,
"step": 7500
},
{
"epoch": 60.32,
"learning_rate": 0.00014162264150943395,
"loss": 0.9165,
"step": 7600
},
{
"epoch": 61.11,
"learning_rate": 0.00013879245283018868,
"loss": 0.9143,
"step": 7700
},
{
"epoch": 61.9,
"learning_rate": 0.00013596226415094338,
"loss": 0.9175,
"step": 7800
},
{
"epoch": 62.7,
"learning_rate": 0.0001331320754716981,
"loss": 0.8966,
"step": 7900
},
{
"epoch": 63.49,
"learning_rate": 0.00013030188679245282,
"loss": 0.8888,
"step": 8000
},
{
"epoch": 63.49,
"eval_loss": 0.5754138827323914,
"eval_runtime": 112.1089,
"eval_samples_per_second": 16.769,
"eval_steps_per_second": 16.769,
"eval_wer": 0.4822357256254448,
"step": 8000
},
{
"epoch": 64.29,
"learning_rate": 0.00012747169811320753,
"loss": 0.8999,
"step": 8100
},
{
"epoch": 65.08,
"learning_rate": 0.00012464150943396226,
"loss": 0.8865,
"step": 8200
},
{
"epoch": 65.87,
"learning_rate": 0.00012181132075471698,
"loss": 0.8739,
"step": 8300
},
{
"epoch": 66.67,
"learning_rate": 0.00011898113207547169,
"loss": 0.8812,
"step": 8400
},
{
"epoch": 67.46,
"learning_rate": 0.0001161509433962264,
"loss": 0.8808,
"step": 8500
},
{
"epoch": 68.25,
"learning_rate": 0.00011332075471698112,
"loss": 0.8658,
"step": 8600
},
{
"epoch": 69.05,
"learning_rate": 0.00011049056603773584,
"loss": 0.8582,
"step": 8700
},
{
"epoch": 69.84,
"learning_rate": 0.00010766037735849056,
"loss": 0.8524,
"step": 8800
},
{
"epoch": 70.63,
"learning_rate": 0.00010483018867924528,
"loss": 0.8402,
"step": 8900
},
{
"epoch": 71.43,
"learning_rate": 0.000102,
"loss": 0.8417,
"step": 9000
},
{
"epoch": 72.22,
"learning_rate": 9.919811320754716e-05,
"loss": 0.8386,
"step": 9100
},
{
"epoch": 73.02,
"learning_rate": 9.636792452830188e-05,
"loss": 0.8319,
"step": 9200
},
{
"epoch": 73.81,
"learning_rate": 9.35377358490566e-05,
"loss": 0.8271,
"step": 9300
},
{
"epoch": 74.6,
"learning_rate": 9.070754716981132e-05,
"loss": 0.8182,
"step": 9400
},
{
"epoch": 75.4,
"learning_rate": 8.787735849056603e-05,
"loss": 0.8223,
"step": 9500
},
{
"epoch": 76.19,
"learning_rate": 8.504716981132075e-05,
"loss": 0.8074,
"step": 9600
},
{
"epoch": 76.98,
"learning_rate": 8.221698113207547e-05,
"loss": 0.7932,
"step": 9700
},
{
"epoch": 77.78,
"learning_rate": 7.938679245283019e-05,
"loss": 0.8135,
"step": 9800
},
{
"epoch": 78.57,
"learning_rate": 7.655660377358491e-05,
"loss": 0.7849,
"step": 9900
},
{
"epoch": 79.37,
"learning_rate": 7.372641509433962e-05,
"loss": 0.7985,
"step": 10000
},
{
"epoch": 79.37,
"eval_loss": 0.5987282395362854,
"eval_runtime": 112.2997,
"eval_samples_per_second": 16.741,
"eval_steps_per_second": 16.741,
"eval_wer": 0.4690425357201511,
"step": 10000
},
{
"epoch": 80.16,
"learning_rate": 7.089622641509434e-05,
"loss": 0.7807,
"step": 10100
},
{
"epoch": 80.95,
"learning_rate": 6.806603773584905e-05,
"loss": 0.7713,
"step": 10200
},
{
"epoch": 81.75,
"learning_rate": 6.523584905660377e-05,
"loss": 0.7628,
"step": 10300
},
{
"epoch": 82.54,
"learning_rate": 6.243396226415093e-05,
"loss": 0.7789,
"step": 10400
},
{
"epoch": 83.33,
"learning_rate": 5.960377358490565e-05,
"loss": 0.7727,
"step": 10500
},
{
"epoch": 84.13,
"learning_rate": 5.677358490566037e-05,
"loss": 0.7555,
"step": 10600
},
{
"epoch": 84.92,
"learning_rate": 5.394339622641509e-05,
"loss": 0.7549,
"step": 10700
},
{
"epoch": 85.71,
"learning_rate": 5.111320754716981e-05,
"loss": 0.7517,
"step": 10800
},
{
"epoch": 86.51,
"learning_rate": 4.828301886792453e-05,
"loss": 0.7329,
"step": 10900
},
{
"epoch": 87.3,
"learning_rate": 4.545283018867924e-05,
"loss": 0.7509,
"step": 11000
},
{
"epoch": 88.1,
"learning_rate": 4.262264150943396e-05,
"loss": 0.7255,
"step": 11100
},
{
"epoch": 88.89,
"learning_rate": 3.9820754716981125e-05,
"loss": 0.7431,
"step": 11200
},
{
"epoch": 89.68,
"learning_rate": 3.6990566037735845e-05,
"loss": 0.7341,
"step": 11300
},
{
"epoch": 90.48,
"learning_rate": 3.4160377358490564e-05,
"loss": 0.7026,
"step": 11400
},
{
"epoch": 91.27,
"learning_rate": 3.133018867924528e-05,
"loss": 0.7257,
"step": 11500
},
{
"epoch": 92.06,
"learning_rate": 2.8499999999999998e-05,
"loss": 0.7195,
"step": 11600
},
{
"epoch": 92.86,
"learning_rate": 2.5669811320754717e-05,
"loss": 0.7091,
"step": 11700
},
{
"epoch": 93.65,
"learning_rate": 2.2839622641509433e-05,
"loss": 0.7017,
"step": 11800
},
{
"epoch": 94.44,
"learning_rate": 2.000943396226415e-05,
"loss": 0.7071,
"step": 11900
},
{
"epoch": 95.24,
"learning_rate": 1.7179245283018867e-05,
"loss": 0.697,
"step": 12000
},
{
"epoch": 95.24,
"eval_loss": 0.6013949513435364,
"eval_runtime": 112.0467,
"eval_samples_per_second": 16.779,
"eval_steps_per_second": 16.779,
"eval_wer": 0.44709038156238023,
"step": 12000
},
{
"epoch": 96.03,
"learning_rate": 1.4349056603773583e-05,
"loss": 0.7027,
"step": 12100
},
{
"epoch": 96.83,
"learning_rate": 1.15188679245283e-05,
"loss": 0.7046,
"step": 12200
},
{
"epoch": 97.62,
"learning_rate": 8.688679245283018e-06,
"loss": 0.6978,
"step": 12300
},
{
"epoch": 98.41,
"learning_rate": 5.858490566037735e-06,
"loss": 0.7058,
"step": 12400
},
{
"epoch": 99.21,
"learning_rate": 3.0283018867924524e-06,
"loss": 0.6868,
"step": 12500
},
{
"epoch": 100.0,
"learning_rate": 1.9811320754716982e-07,
"loss": 0.6835,
"step": 12600
},
{
"epoch": 100.0,
"step": 12600,
"total_flos": 6.584079374045402e+19,
"train_loss": 1.186835220579117,
"train_runtime": 23714.6286,
"train_samples_per_second": 16.88,
"train_steps_per_second": 0.531
}
],
"max_steps": 12600,
"num_train_epochs": 100,
"total_flos": 6.584079374045402e+19,
"trial_name": null,
"trial_params": null
}
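
The JSON above is the `trainer_state.json` that the Hugging Face `Trainer` writes at the end of a run: `log_history` interleaves periodic training-loss records with evaluation records (the entries carrying `eval_loss`/`eval_wer`). A minimal sketch of how one might inspect it, assuming the file has been downloaded locally under the name `trainer_state.json` (the local filename/path is an assumption, not part of this record):

```python
import json

# Assumption: the log shown above has been saved locally as "trainer_state.json",
# the default filename the Trainer writes alongside each checkpoint.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training-loss entries carry a "loss" key; evaluation entries carry "eval_wer".
# The final summary record (train_runtime, total_flos, ...) matches neither filter.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_wer" in e]

print(f"global_step={state['global_step']}, epochs={state['epoch']}")
for e in eval_log:
    print(f"step {e['step']:>6}: eval_loss={e['eval_loss']:.4f}  WER={e['eval_wer']:.4f}")
```

Run against this log, the loop would print the five evaluation points (steps 2000, 4000, 6000, 8000, 10000, 12000), showing the WER dropping from roughly 0.62 to 0.45 over 100 epochs.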