Wikidepia/albert-punctuation · trainer_state.json (commit 3b9b5e8, "Update model", 12.4 kB)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"global_step": 48917,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 4.983032483594661e-05,
"loss": 0.3265,
"step": 500
},
{
"epoch": 0.02,
"learning_rate": 4.965996824553155e-05,
"loss": 0.2483,
"step": 1000
},
{
"epoch": 0.03,
"learning_rate": 4.948961165511649e-05,
"loss": 0.2357,
"step": 1500
},
{
"epoch": 0.04,
"learning_rate": 4.9319255064701436e-05,
"loss": 0.2285,
"step": 2000
},
{
"epoch": 0.05,
"learning_rate": 4.7446695422859136e-05,
"loss": 0.2259,
"step": 2500
},
{
"epoch": 0.06,
"learning_rate": 4.693562565161396e-05,
"loss": 0.2204,
"step": 3000
},
{
"epoch": 0.07,
"learning_rate": 4.642455588036879e-05,
"loss": 0.2183,
"step": 3500
},
{
"epoch": 0.08,
"learning_rate": 4.591348610912362e-05,
"loss": 0.2167,
"step": 4000
},
{
"epoch": 0.09,
"learning_rate": 4.540343847742094e-05,
"loss": 0.2131,
"step": 4500
},
{
"epoch": 0.1,
"learning_rate": 4.489236870617577e-05,
"loss": 0.2123,
"step": 5000
},
{
"epoch": 0.11,
"learning_rate": 4.4381298934930596e-05,
"loss": 0.212,
"step": 5500
},
{
"epoch": 0.12,
"learning_rate": 4.387022916368543e-05,
"loss": 0.2075,
"step": 6000
},
{
"epoch": 0.13,
"learning_rate": 4.336018153198275e-05,
"loss": 0.2077,
"step": 6500
},
{
"epoch": 0.14,
"learning_rate": 4.2849111760737576e-05,
"loss": 0.2061,
"step": 7000
},
{
"epoch": 0.15,
"learning_rate": 4.23380419894924e-05,
"loss": 0.207,
"step": 7500
},
{
"epoch": 0.16,
"learning_rate": 4.1826972218247236e-05,
"loss": 0.2039,
"step": 8000
},
{
"epoch": 0.17,
"learning_rate": 4.131590244700207e-05,
"loss": 0.2025,
"step": 8500
},
{
"epoch": 0.18,
"learning_rate": 4.08048326757569e-05,
"loss": 0.2004,
"step": 9000
},
{
"epoch": 0.19,
"learning_rate": 4.029376290451173e-05,
"loss": 0.2036,
"step": 9500
},
{
"epoch": 0.2,
"learning_rate": 3.9782693133266555e-05,
"loss": 0.1998,
"step": 10000
},
{
"epoch": 0.21,
"learning_rate": 3.927162336202139e-05,
"loss": 0.1995,
"step": 10500
},
{
"epoch": 0.22,
"learning_rate": 3.876157573031871e-05,
"loss": 0.199,
"step": 11000
},
{
"epoch": 0.24,
"learning_rate": 3.8250505959073535e-05,
"loss": 0.1969,
"step": 11500
},
{
"epoch": 0.25,
"learning_rate": 3.773943618782836e-05,
"loss": 0.1937,
"step": 12000
},
{
"epoch": 0.26,
"learning_rate": 3.7228366416583195e-05,
"loss": 0.1963,
"step": 12500
},
{
"epoch": 0.27,
"learning_rate": 3.671729664533802e-05,
"loss": 0.1961,
"step": 13000
},
{
"epoch": 0.28,
"learning_rate": 3.6206226874092855e-05,
"loss": 0.1956,
"step": 13500
},
{
"epoch": 0.29,
"learning_rate": 3.569515710284768e-05,
"loss": 0.1953,
"step": 14000
},
{
"epoch": 0.3,
"learning_rate": 3.518715375022998e-05,
"loss": 0.1936,
"step": 14500
},
{
"epoch": 0.31,
"learning_rate": 3.4676083978984816e-05,
"loss": 0.1942,
"step": 15000
},
{
"epoch": 0.32,
"learning_rate": 3.416501420773964e-05,
"loss": 0.192,
"step": 15500
},
{
"epoch": 0.33,
"learning_rate": 3.3653944436494475e-05,
"loss": 0.1925,
"step": 16000
},
{
"epoch": 0.34,
"learning_rate": 3.31428746652493e-05,
"loss": 0.1905,
"step": 16500
},
{
"epoch": 0.35,
"learning_rate": 3.263180489400413e-05,
"loss": 0.1923,
"step": 17000
},
{
"epoch": 0.36,
"learning_rate": 3.212073512275896e-05,
"loss": 0.1897,
"step": 17500
},
{
"epoch": 0.37,
"learning_rate": 3.160966535151379e-05,
"loss": 0.1908,
"step": 18000
},
{
"epoch": 0.38,
"learning_rate": 3.109859558026862e-05,
"loss": 0.1897,
"step": 18500
},
{
"epoch": 0.39,
"learning_rate": 3.058752580902345e-05,
"loss": 0.189,
"step": 19000
},
{
"epoch": 0.4,
"learning_rate": 3.0076456037778278e-05,
"loss": 0.1878,
"step": 19500
},
{
"epoch": 0.41,
"learning_rate": 2.9566408406075595e-05,
"loss": 0.1891,
"step": 20000
},
{
"epoch": 0.42,
"learning_rate": 2.9055338634830425e-05,
"loss": 0.1881,
"step": 20500
},
{
"epoch": 0.43,
"learning_rate": 2.854426886358526e-05,
"loss": 0.1871,
"step": 21000
},
{
"epoch": 0.44,
"learning_rate": 2.803319909234009e-05,
"loss": 0.1871,
"step": 21500
},
{
"epoch": 0.45,
"learning_rate": 2.7522129321094918e-05,
"loss": 0.1863,
"step": 22000
},
{
"epoch": 0.46,
"learning_rate": 2.7011059549849748e-05,
"loss": 0.1853,
"step": 22500
},
{
"epoch": 0.47,
"learning_rate": 2.6499989778604578e-05,
"loss": 0.1867,
"step": 23000
},
{
"epoch": 0.48,
"learning_rate": 2.5988920007359408e-05,
"loss": 0.1847,
"step": 23500
},
{
"epoch": 0.49,
"learning_rate": 2.5478872375656725e-05,
"loss": 0.1839,
"step": 24000
},
{
"epoch": 0.5,
"learning_rate": 2.4967802604411554e-05,
"loss": 0.1825,
"step": 24500
},
{
"epoch": 0.51,
"learning_rate": 2.4456732833166384e-05,
"loss": 0.1849,
"step": 25000
},
{
"epoch": 0.52,
"learning_rate": 2.3945663061921214e-05,
"loss": 0.1848,
"step": 25500
},
{
"epoch": 0.53,
"learning_rate": 2.3435615430218534e-05,
"loss": 0.1829,
"step": 26000
},
{
"epoch": 0.54,
"learning_rate": 2.2924545658973364e-05,
"loss": 0.1817,
"step": 26500
},
{
"epoch": 0.55,
"learning_rate": 2.2413475887728194e-05,
"loss": 0.1829,
"step": 27000
},
{
"epoch": 0.56,
"learning_rate": 2.1902406116483024e-05,
"loss": 0.1826,
"step": 27500
},
{
"epoch": 0.57,
"learning_rate": 2.1391336345237854e-05,
"loss": 0.1808,
"step": 28000
},
{
"epoch": 0.58,
"learning_rate": 2.088026657399268e-05,
"loss": 0.1814,
"step": 28500
},
{
"epoch": 0.59,
"learning_rate": 2.0370218942290004e-05,
"loss": 0.1807,
"step": 29000
},
{
"epoch": 0.6,
"learning_rate": 1.9859149171044834e-05,
"loss": 0.181,
"step": 29500
},
{
"epoch": 0.61,
"learning_rate": 1.934807939979966e-05,
"loss": 0.179,
"step": 30000
},
{
"epoch": 0.62,
"learning_rate": 1.883803176809698e-05,
"loss": 0.1805,
"step": 30500
},
{
"epoch": 0.63,
"learning_rate": 1.832696199685181e-05,
"loss": 0.1813,
"step": 31000
},
{
"epoch": 0.64,
"learning_rate": 1.781589222560664e-05,
"loss": 0.1797,
"step": 31500
},
{
"epoch": 0.65,
"learning_rate": 1.7304822454361467e-05,
"loss": 0.1777,
"step": 32000
},
{
"epoch": 0.66,
"learning_rate": 1.67937526831163e-05,
"loss": 0.1798,
"step": 32500
},
{
"epoch": 0.67,
"learning_rate": 1.628268291187113e-05,
"loss": 0.1795,
"step": 33000
},
{
"epoch": 0.68,
"learning_rate": 1.577161314062596e-05,
"loss": 0.1778,
"step": 33500
},
{
"epoch": 0.7,
"learning_rate": 1.526054336938079e-05,
"loss": 0.1807,
"step": 34000
},
{
"epoch": 0.71,
"learning_rate": 1.4749473598135619e-05,
"loss": 0.1763,
"step": 34500
},
{
"epoch": 0.72,
"learning_rate": 1.4238403826890448e-05,
"loss": 0.1799,
"step": 35000
},
{
"epoch": 0.73,
"learning_rate": 1.3727334055645277e-05,
"loss": 0.1771,
"step": 35500
},
{
"epoch": 0.74,
"learning_rate": 1.3216264284400107e-05,
"loss": 0.1772,
"step": 36000
},
{
"epoch": 0.75,
"learning_rate": 1.2706216652697429e-05,
"loss": 0.1788,
"step": 36500
},
{
"epoch": 0.76,
"learning_rate": 1.2195146881452257e-05,
"loss": 0.177,
"step": 37000
},
{
"epoch": 0.77,
"learning_rate": 1.1684077110207085e-05,
"loss": 0.1769,
"step": 37500
},
{
"epoch": 0.78,
"learning_rate": 1.1173007338961917e-05,
"loss": 0.1753,
"step": 38000
},
{
"epoch": 0.79,
"learning_rate": 1.0661937567716745e-05,
"loss": 0.1749,
"step": 38500
},
{
"epoch": 0.8,
"learning_rate": 1.0152912075556555e-05,
"loss": 0.1752,
"step": 39000
},
{
"epoch": 0.81,
"learning_rate": 9.641842304311385e-06,
"loss": 0.1736,
"step": 39500
},
{
"epoch": 0.82,
"learning_rate": 9.130772533066215e-06,
"loss": 0.1741,
"step": 40000
},
{
"epoch": 0.83,
"learning_rate": 8.619702761821043e-06,
"loss": 0.1741,
"step": 40500
},
{
"epoch": 0.84,
"learning_rate": 8.109655130118364e-06,
"loss": 0.1739,
"step": 41000
},
{
"epoch": 0.85,
"learning_rate": 7.5985853588731936e-06,
"loss": 0.1742,
"step": 41500
},
{
"epoch": 0.86,
"learning_rate": 7.087515587628023e-06,
"loss": 0.1726,
"step": 42000
},
{
"epoch": 0.87,
"learning_rate": 6.576445816382853e-06,
"loss": 0.1733,
"step": 42500
},
{
"epoch": 0.88,
"learning_rate": 6.0653760451376825e-06,
"loss": 0.1729,
"step": 43000
},
{
"epoch": 0.89,
"learning_rate": 5.555328413435002e-06,
"loss": 0.1719,
"step": 43500
},
{
"epoch": 0.9,
"learning_rate": 5.044258642189832e-06,
"loss": 0.1737,
"step": 44000
},
{
"epoch": 0.91,
"learning_rate": 4.533188870944662e-06,
"loss": 0.1731,
"step": 44500
},
{
"epoch": 0.92,
"learning_rate": 4.022119099699491e-06,
"loss": 0.1718,
"step": 45000
},
{
"epoch": 0.93,
"learning_rate": 3.511049328454321e-06,
"loss": 0.1714,
"step": 45500
},
{
"epoch": 0.94,
"learning_rate": 2.99997955720915e-06,
"loss": 0.173,
"step": 46000
},
{
"epoch": 0.95,
"learning_rate": 2.48890978596398e-06,
"loss": 0.1721,
"step": 46500
},
{
"epoch": 0.96,
"learning_rate": 1.9788621542613e-06,
"loss": 0.1713,
"step": 47000
},
{
"epoch": 0.97,
"learning_rate": 1.4677923830161294e-06,
"loss": 0.1738,
"step": 47500
},
{
"epoch": 0.98,
"learning_rate": 9.56722611770959e-07,
"loss": 0.1709,
"step": 48000
},
{
"epoch": 0.99,
"learning_rate": 4.4667498006827893e-07,
"loss": 0.1719,
"step": 48500
},
{
"epoch": 1.0,
"step": 48917,
"total_flos": 1.7063207956905984e+16,
"train_loss": 0.1791531401204783,
"train_runtime": 9599.0055,
"train_samples_per_second": 326.145,
"train_steps_per_second": 5.096
}
],
"max_steps": 48917,
"num_train_epochs": 1,
"total_flos": 1.7063207956905984e+16,
"trial_name": null,
"trial_params": null
}
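
The log_history above records the training loss and linear learning-rate decay over one epoch (48,917 steps). As a minimal sketch of how this file can be inspected, assuming Python with matplotlib installed and the file saved locally as trainer_state.json, the snippet below plots loss against step; the helper name plot_trainer_state is illustrative and not part of this repository.

```python
import json
import matplotlib.pyplot as plt

def plot_trainer_state(path: str = "trainer_state.json") -> None:
    # Load the state file written by the Hugging Face Trainer.
    with open(path) as f:
        state = json.load(f)

    # Keep only the periodic logging entries; the final summary entry
    # has no "loss" key and is skipped.
    logs = [entry for entry in state["log_history"] if "loss" in entry]
    steps = [entry["step"] for entry in logs]
    losses = [entry["loss"] for entry in logs]

    plt.plot(steps, losses)
    plt.xlabel("step")
    plt.ylabel("training loss")
    plt.title(f"{state['global_step']} steps, {state['epoch']} epoch(s)")
    plt.show()

if __name__ == "__main__":
    plot_trainer_state()
```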