roberta-base-latin-cased3 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"global_step": 77340,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06,
"learning_rate": 9.935350400827516e-05,
"loss": 8.0382,
"step": 500
},
{
"epoch": 0.13,
"learning_rate": 9.87070080165503e-05,
"loss": 7.6429,
"step": 1000
},
{
"epoch": 0.19,
"learning_rate": 9.806051202482544e-05,
"loss": 7.1341,
"step": 1500
},
{
"epoch": 0.26,
"learning_rate": 9.74140160331006e-05,
"loss": 6.9301,
"step": 2000
},
{
"epoch": 0.32,
"learning_rate": 9.676752004137574e-05,
"loss": 6.8441,
"step": 2500
},
{
"epoch": 0.39,
"learning_rate": 9.61210240496509e-05,
"loss": 6.7735,
"step": 3000
},
{
"epoch": 0.45,
"learning_rate": 9.547452805792605e-05,
"loss": 6.3512,
"step": 3500
},
{
"epoch": 0.52,
"learning_rate": 9.482803206620118e-05,
"loss": 5.6406,
"step": 4000
},
{
"epoch": 0.58,
"learning_rate": 9.418153607447634e-05,
"loss": 5.2743,
"step": 4500
},
{
"epoch": 0.65,
"learning_rate": 9.35350400827515e-05,
"loss": 5.0399,
"step": 5000
},
{
"epoch": 0.71,
"learning_rate": 9.288854409102664e-05,
"loss": 4.8407,
"step": 5500
},
{
"epoch": 0.78,
"learning_rate": 9.224204809930179e-05,
"loss": 4.6694,
"step": 6000
},
{
"epoch": 0.84,
"learning_rate": 9.159555210757695e-05,
"loss": 4.5262,
"step": 6500
},
{
"epoch": 0.91,
"learning_rate": 9.094905611585208e-05,
"loss": 4.4021,
"step": 7000
},
{
"epoch": 0.97,
"learning_rate": 9.030256012412723e-05,
"loss": 4.2888,
"step": 7500
},
{
"epoch": 1.0,
"eval_loss": 4.0826334953308105,
"eval_runtime": 475.8214,
"eval_samples_per_second": 693.498,
"eval_steps_per_second": 1.807,
"step": 7734
},
{
"epoch": 1.03,
"learning_rate": 8.965606413240239e-05,
"loss": 4.1903,
"step": 8000
},
{
"epoch": 1.1,
"learning_rate": 8.900956814067753e-05,
"loss": 4.1066,
"step": 8500
},
{
"epoch": 1.16,
"learning_rate": 8.836307214895269e-05,
"loss": 4.0331,
"step": 9000
},
{
"epoch": 1.23,
"learning_rate": 8.771657615722783e-05,
"loss": 3.9644,
"step": 9500
},
{
"epoch": 1.29,
"learning_rate": 8.707008016550297e-05,
"loss": 3.9044,
"step": 10000
},
{
"epoch": 1.36,
"learning_rate": 8.642358417377813e-05,
"loss": 3.851,
"step": 10500
},
{
"epoch": 1.42,
"learning_rate": 8.577708818205328e-05,
"loss": 3.8044,
"step": 11000
},
{
"epoch": 1.49,
"learning_rate": 8.513059219032842e-05,
"loss": 3.761,
"step": 11500
},
{
"epoch": 1.55,
"learning_rate": 8.448409619860357e-05,
"loss": 3.7197,
"step": 12000
},
{
"epoch": 1.62,
"learning_rate": 8.383760020687872e-05,
"loss": 3.6803,
"step": 12500
},
{
"epoch": 1.68,
"learning_rate": 8.319110421515386e-05,
"loss": 3.6381,
"step": 13000
},
{
"epoch": 1.75,
"learning_rate": 8.254460822342902e-05,
"loss": 3.6119,
"step": 13500
},
{
"epoch": 1.81,
"learning_rate": 8.189811223170418e-05,
"loss": 3.577,
"step": 14000
},
{
"epoch": 1.87,
"learning_rate": 8.12516162399793e-05,
"loss": 3.5515,
"step": 14500
},
{
"epoch": 1.94,
"learning_rate": 8.060512024825446e-05,
"loss": 3.5228,
"step": 15000
},
{
"epoch": 2.0,
"eval_loss": 3.361564874649048,
"eval_runtime": 476.9803,
"eval_samples_per_second": 691.813,
"eval_steps_per_second": 1.803,
"step": 15468
},
{
"epoch": 2.0,
"learning_rate": 7.995862425652962e-05,
"loss": 3.4922,
"step": 15500
},
{
"epoch": 2.07,
"learning_rate": 7.931212826480476e-05,
"loss": 3.4583,
"step": 16000
},
{
"epoch": 2.13,
"learning_rate": 7.866563227307991e-05,
"loss": 3.4363,
"step": 16500
},
{
"epoch": 2.2,
"learning_rate": 7.801913628135506e-05,
"loss": 3.4124,
"step": 17000
},
{
"epoch": 2.26,
"learning_rate": 7.73726402896302e-05,
"loss": 3.3911,
"step": 17500
},
{
"epoch": 2.33,
"learning_rate": 7.672614429790535e-05,
"loss": 3.3731,
"step": 18000
},
{
"epoch": 2.39,
"learning_rate": 7.607964830618051e-05,
"loss": 3.3487,
"step": 18500
},
{
"epoch": 2.46,
"learning_rate": 7.543315231445565e-05,
"loss": 3.333,
"step": 19000
},
{
"epoch": 2.52,
"learning_rate": 7.478665632273081e-05,
"loss": 3.3162,
"step": 19500
},
{
"epoch": 2.59,
"learning_rate": 7.414016033100595e-05,
"loss": 3.2947,
"step": 20000
},
{
"epoch": 2.65,
"learning_rate": 7.349366433928109e-05,
"loss": 3.2783,
"step": 20500
},
{
"epoch": 2.72,
"learning_rate": 7.284716834755625e-05,
"loss": 3.2641,
"step": 21000
},
{
"epoch": 2.78,
"learning_rate": 7.22006723558314e-05,
"loss": 3.2446,
"step": 21500
},
{
"epoch": 2.84,
"learning_rate": 7.155417636410655e-05,
"loss": 3.2326,
"step": 22000
},
{
"epoch": 2.91,
"learning_rate": 7.090768037238169e-05,
"loss": 3.2149,
"step": 22500
},
{
"epoch": 2.97,
"learning_rate": 7.026118438065684e-05,
"loss": 3.2034,
"step": 23000
},
{
"epoch": 3.0,
"eval_loss": 3.070152997970581,
"eval_runtime": 476.4651,
"eval_samples_per_second": 692.561,
"eval_steps_per_second": 1.805,
"step": 23202
},
{
"epoch": 3.04,
"learning_rate": 6.961468838893199e-05,
"loss": 3.1826,
"step": 23500
},
{
"epoch": 3.1,
"learning_rate": 6.896819239720714e-05,
"loss": 3.1653,
"step": 24000
},
{
"epoch": 3.17,
"learning_rate": 6.83216964054823e-05,
"loss": 3.1519,
"step": 24500
},
{
"epoch": 3.23,
"learning_rate": 6.767520041375744e-05,
"loss": 3.1387,
"step": 25000
},
{
"epoch": 3.3,
"learning_rate": 6.702870442203258e-05,
"loss": 3.1317,
"step": 25500
},
{
"epoch": 3.36,
"learning_rate": 6.638220843030774e-05,
"loss": 3.1206,
"step": 26000
},
{
"epoch": 3.43,
"learning_rate": 6.573571243858288e-05,
"loss": 3.1074,
"step": 26500
},
{
"epoch": 3.49,
"learning_rate": 6.508921644685804e-05,
"loss": 3.0959,
"step": 27000
},
{
"epoch": 3.56,
"learning_rate": 6.444272045513318e-05,
"loss": 3.0846,
"step": 27500
},
{
"epoch": 3.62,
"learning_rate": 6.379622446340833e-05,
"loss": 3.0691,
"step": 28000
},
{
"epoch": 3.69,
"learning_rate": 6.314972847168348e-05,
"loss": 3.0621,
"step": 28500
},
{
"epoch": 3.75,
"learning_rate": 6.250323247995863e-05,
"loss": 3.0496,
"step": 29000
},
{
"epoch": 3.81,
"learning_rate": 6.185673648823377e-05,
"loss": 3.0406,
"step": 29500
},
{
"epoch": 3.88,
"learning_rate": 6.121024049650892e-05,
"loss": 3.0332,
"step": 30000
},
{
"epoch": 3.94,
"learning_rate": 6.056374450478407e-05,
"loss": 3.0235,
"step": 30500
},
{
"epoch": 4.0,
"eval_loss": 2.890948534011841,
"eval_runtime": 475.9375,
"eval_samples_per_second": 693.328,
"eval_steps_per_second": 1.807,
"step": 30936
},
{
"epoch": 4.01,
"learning_rate": 5.991724851305922e-05,
"loss": 3.0144,
"step": 31000
},
{
"epoch": 4.07,
"learning_rate": 5.927075252133437e-05,
"loss": 2.9923,
"step": 31500
},
{
"epoch": 4.14,
"learning_rate": 5.8624256529609526e-05,
"loss": 2.9902,
"step": 32000
},
{
"epoch": 4.2,
"learning_rate": 5.7977760537884675e-05,
"loss": 2.9746,
"step": 32500
},
{
"epoch": 4.27,
"learning_rate": 5.733126454615981e-05,
"loss": 2.9708,
"step": 33000
},
{
"epoch": 4.33,
"learning_rate": 5.6684768554434966e-05,
"loss": 2.9586,
"step": 33500
},
{
"epoch": 4.4,
"learning_rate": 5.6038272562710115e-05,
"loss": 2.9556,
"step": 34000
},
{
"epoch": 4.46,
"learning_rate": 5.5391776570985264e-05,
"loss": 2.9471,
"step": 34500
},
{
"epoch": 4.53,
"learning_rate": 5.474528057926042e-05,
"loss": 2.941,
"step": 35000
},
{
"epoch": 4.59,
"learning_rate": 5.4098784587535555e-05,
"loss": 2.9353,
"step": 35500
},
{
"epoch": 4.65,
"learning_rate": 5.3452288595810704e-05,
"loss": 2.9263,
"step": 36000
},
{
"epoch": 4.72,
"learning_rate": 5.280579260408586e-05,
"loss": 2.9193,
"step": 36500
},
{
"epoch": 4.78,
"learning_rate": 5.215929661236101e-05,
"loss": 2.9101,
"step": 37000
},
{
"epoch": 4.85,
"learning_rate": 5.151280062063616e-05,
"loss": 2.9071,
"step": 37500
},
{
"epoch": 4.91,
"learning_rate": 5.08663046289113e-05,
"loss": 2.9019,
"step": 38000
},
{
"epoch": 4.98,
"learning_rate": 5.021980863718645e-05,
"loss": 2.89,
"step": 38500
},
{
"epoch": 5.0,
"eval_loss": 2.768686294555664,
"eval_runtime": 485.6271,
"eval_samples_per_second": 679.495,
"eval_steps_per_second": 1.771,
"step": 38670
},
{
"epoch": 5.04,
"learning_rate": 4.95733126454616e-05,
"loss": 2.8791,
"step": 39000
},
{
"epoch": 5.11,
"learning_rate": 4.892681665373675e-05,
"loss": 2.87,
"step": 39500
},
{
"epoch": 5.17,
"learning_rate": 4.8280320662011896e-05,
"loss": 2.8664,
"step": 40000
},
{
"epoch": 5.24,
"learning_rate": 4.7633824670287045e-05,
"loss": 2.859,
"step": 40500
},
{
"epoch": 5.3,
"learning_rate": 4.6987328678562193e-05,
"loss": 2.8562,
"step": 41000
},
{
"epoch": 5.37,
"learning_rate": 4.634083268683734e-05,
"loss": 2.8489,
"step": 41500
},
{
"epoch": 5.43,
"learning_rate": 4.569433669511249e-05,
"loss": 2.846,
"step": 42000
},
{
"epoch": 5.5,
"learning_rate": 4.504784070338764e-05,
"loss": 2.8342,
"step": 42500
},
{
"epoch": 5.56,
"learning_rate": 4.440134471166279e-05,
"loss": 2.8313,
"step": 43000
},
{
"epoch": 5.62,
"learning_rate": 4.375484871993794e-05,
"loss": 2.8217,
"step": 43500
},
{
"epoch": 5.69,
"learning_rate": 4.310835272821309e-05,
"loss": 2.8197,
"step": 44000
},
{
"epoch": 5.75,
"learning_rate": 4.2461856736488236e-05,
"loss": 2.8158,
"step": 44500
},
{
"epoch": 5.82,
"learning_rate": 4.1815360744763385e-05,
"loss": 2.8094,
"step": 45000
},
{
"epoch": 5.88,
"learning_rate": 4.1168864753038534e-05,
"loss": 2.8061,
"step": 45500
},
{
"epoch": 5.95,
"learning_rate": 4.052236876131368e-05,
"loss": 2.8032,
"step": 46000
},
{
"epoch": 6.0,
"eval_loss": 2.681367874145508,
"eval_runtime": 476.3627,
"eval_samples_per_second": 692.71,
"eval_steps_per_second": 1.805,
"step": 46404
},
{
"epoch": 6.01,
"learning_rate": 3.987587276958883e-05,
"loss": 2.794,
"step": 46500
},
{
"epoch": 6.08,
"learning_rate": 3.922937677786398e-05,
"loss": 2.7858,
"step": 47000
},
{
"epoch": 6.14,
"learning_rate": 3.858288078613912e-05,
"loss": 2.7765,
"step": 47500
},
{
"epoch": 6.21,
"learning_rate": 3.793638479441428e-05,
"loss": 2.7752,
"step": 48000
},
{
"epoch": 6.27,
"learning_rate": 3.728988880268943e-05,
"loss": 2.7725,
"step": 48500
},
{
"epoch": 6.34,
"learning_rate": 3.664339281096457e-05,
"loss": 2.7696,
"step": 49000
},
{
"epoch": 6.4,
"learning_rate": 3.5996896819239726e-05,
"loss": 2.7665,
"step": 49500
},
{
"epoch": 6.46,
"learning_rate": 3.5350400827514875e-05,
"loss": 2.7622,
"step": 50000
},
{
"epoch": 6.53,
"learning_rate": 3.470390483579002e-05,
"loss": 2.7566,
"step": 50500
},
{
"epoch": 6.59,
"learning_rate": 3.4057408844065166e-05,
"loss": 2.7543,
"step": 51000
},
{
"epoch": 6.66,
"learning_rate": 3.3410912852340315e-05,
"loss": 2.7451,
"step": 51500
},
{
"epoch": 6.72,
"learning_rate": 3.2764416860615464e-05,
"loss": 2.7392,
"step": 52000
},
{
"epoch": 6.79,
"learning_rate": 3.211792086889061e-05,
"loss": 2.7384,
"step": 52500
},
{
"epoch": 6.85,
"learning_rate": 3.147142487716576e-05,
"loss": 2.7341,
"step": 53000
},
{
"epoch": 6.92,
"learning_rate": 3.082492888544091e-05,
"loss": 2.7317,
"step": 53500
},
{
"epoch": 6.98,
"learning_rate": 3.0178432893716057e-05,
"loss": 2.7308,
"step": 54000
},
{
"epoch": 7.0,
"eval_loss": 2.6155083179473877,
"eval_runtime": 476.4568,
"eval_samples_per_second": 692.573,
"eval_steps_per_second": 1.805,
"step": 54138
},
{
"epoch": 7.05,
"learning_rate": 2.953193690199121e-05,
"loss": 2.7178,
"step": 54500
},
{
"epoch": 7.11,
"learning_rate": 2.8885440910266358e-05,
"loss": 2.712,
"step": 55000
},
{
"epoch": 7.18,
"learning_rate": 2.8238944918541504e-05,
"loss": 2.7152,
"step": 55500
},
{
"epoch": 7.24,
"learning_rate": 2.7592448926816656e-05,
"loss": 2.7076,
"step": 56000
},
{
"epoch": 7.31,
"learning_rate": 2.6945952935091805e-05,
"loss": 2.7048,
"step": 56500
},
{
"epoch": 7.37,
"learning_rate": 2.629945694336695e-05,
"loss": 2.7038,
"step": 57000
},
{
"epoch": 7.43,
"learning_rate": 2.5652960951642103e-05,
"loss": 2.6975,
"step": 57500
},
{
"epoch": 7.5,
"learning_rate": 2.500646495991725e-05,
"loss": 2.696,
"step": 58000
},
{
"epoch": 7.56,
"learning_rate": 2.4359968968192397e-05,
"loss": 2.6941,
"step": 58500
},
{
"epoch": 7.63,
"learning_rate": 2.3713472976467546e-05,
"loss": 2.6884,
"step": 59000
},
{
"epoch": 7.69,
"learning_rate": 2.3066976984742695e-05,
"loss": 2.6844,
"step": 59500
},
{
"epoch": 7.76,
"learning_rate": 2.2420480993017844e-05,
"loss": 2.6791,
"step": 60000
},
{
"epoch": 7.82,
"learning_rate": 2.1773985001292993e-05,
"loss": 2.6759,
"step": 60500
},
{
"epoch": 7.89,
"learning_rate": 2.1127489009568142e-05,
"loss": 2.6779,
"step": 61000
},
{
"epoch": 7.95,
"learning_rate": 2.0480993017843288e-05,
"loss": 2.6749,
"step": 61500
},
{
"epoch": 8.0,
"eval_loss": 2.561079502105713,
"eval_runtime": 476.3937,
"eval_samples_per_second": 692.665,
"eval_steps_per_second": 1.805,
"step": 61872
},
{
"epoch": 8.02,
"learning_rate": 1.983449702611844e-05,
"loss": 2.6705,
"step": 62000
},
{
"epoch": 8.08,
"learning_rate": 1.918800103439359e-05,
"loss": 2.6613,
"step": 62500
},
{
"epoch": 8.15,
"learning_rate": 1.8541505042668735e-05,
"loss": 2.6649,
"step": 63000
},
{
"epoch": 8.21,
"learning_rate": 1.7895009050943884e-05,
"loss": 2.6597,
"step": 63500
},
{
"epoch": 8.28,
"learning_rate": 1.7248513059219036e-05,
"loss": 2.6568,
"step": 64000
},
{
"epoch": 8.34,
"learning_rate": 1.660201706749418e-05,
"loss": 2.6551,
"step": 64500
},
{
"epoch": 8.4,
"learning_rate": 1.595552107576933e-05,
"loss": 2.6518,
"step": 65000
},
{
"epoch": 8.47,
"learning_rate": 1.530902508404448e-05,
"loss": 2.6501,
"step": 65500
},
{
"epoch": 8.53,
"learning_rate": 1.466252909231963e-05,
"loss": 2.6469,
"step": 66000
},
{
"epoch": 8.6,
"learning_rate": 1.4016033100594778e-05,
"loss": 2.6465,
"step": 66500
},
{
"epoch": 8.66,
"learning_rate": 1.3369537108869925e-05,
"loss": 2.6455,
"step": 67000
},
{
"epoch": 8.73,
"learning_rate": 1.2723041117145074e-05,
"loss": 2.6398,
"step": 67500
},
{
"epoch": 8.79,
"learning_rate": 1.2076545125420223e-05,
"loss": 2.6436,
"step": 68000
},
{
"epoch": 8.86,
"learning_rate": 1.1430049133695372e-05,
"loss": 2.6395,
"step": 68500
},
{
"epoch": 8.92,
"learning_rate": 1.078355314197052e-05,
"loss": 2.634,
"step": 69000
},
{
"epoch": 8.99,
"learning_rate": 1.0137057150245668e-05,
"loss": 2.6359,
"step": 69500
},
{
"epoch": 9.0,
"eval_loss": 2.5289456844329834,
"eval_runtime": 476.5662,
"eval_samples_per_second": 692.414,
"eval_steps_per_second": 1.805,
"step": 69606
},
{
"epoch": 9.05,
"learning_rate": 9.490561158520819e-06,
"loss": 2.6261,
"step": 70000
},
{
"epoch": 9.12,
"learning_rate": 8.844065166795966e-06,
"loss": 2.6228,
"step": 70500
},
{
"epoch": 9.18,
"learning_rate": 8.197569175071115e-06,
"loss": 2.6303,
"step": 71000
},
{
"epoch": 9.24,
"learning_rate": 7.551073183346263e-06,
"loss": 2.622,
"step": 71500
},
{
"epoch": 9.31,
"learning_rate": 6.904577191621413e-06,
"loss": 2.6257,
"step": 72000
},
{
"epoch": 9.37,
"learning_rate": 6.258081199896561e-06,
"loss": 2.6244,
"step": 72500
},
{
"epoch": 9.44,
"learning_rate": 5.611585208171709e-06,
"loss": 2.6238,
"step": 73000
},
{
"epoch": 9.5,
"learning_rate": 4.965089216446858e-06,
"loss": 2.6207,
"step": 73500
},
{
"epoch": 9.57,
"learning_rate": 4.318593224722007e-06,
"loss": 2.6168,
"step": 74000
},
{
"epoch": 9.63,
"learning_rate": 3.6720972329971556e-06,
"loss": 2.619,
"step": 74500
},
{
"epoch": 9.7,
"learning_rate": 3.025601241272304e-06,
"loss": 2.6185,
"step": 75000
},
{
"epoch": 9.76,
"learning_rate": 2.379105249547453e-06,
"loss": 2.6173,
"step": 75500
},
{
"epoch": 9.83,
"learning_rate": 1.7326092578226015e-06,
"loss": 2.6114,
"step": 76000
},
{
"epoch": 9.89,
"learning_rate": 1.0861132660977503e-06,
"loss": 2.6091,
"step": 76500
},
{
"epoch": 9.96,
"learning_rate": 4.396172743728989e-07,
"loss": 2.6133,
"step": 77000
},
{
"epoch": 10.0,
"eval_loss": 2.5088417530059814,
"eval_runtime": 484.8466,
"eval_samples_per_second": 680.588,
"eval_steps_per_second": 1.774,
"step": 77340
}
],
"max_steps": 77340,
"num_train_epochs": 10,
"total_flos": 1.9542406291981978e+18,
"trial_name": null,
"trial_params": null
}
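
For reference, a minimal sketch of how the log_history entries above can be inspected: training entries carry "loss" and "learning_rate" at each logging step, while the per-epoch evaluation entries carry "eval_loss". This assumes the file is saved locally as trainer_state.json and that matplotlib is installed; neither is part of the file itself.

import json

import matplotlib.pyplot as plt

# Load the state dump written by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training log entries have a "loss" key; per-epoch evaluation entries
# have "eval_loss" instead.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

# Plot training loss per logging step and eval loss at each epoch boundary.
plt.plot([e["step"] for e in train_log],
         [e["loss"] for e in train_log], label="train loss")
plt.plot([e["step"] for e in eval_log],
         [e["eval_loss"] for e in eval_log], "o-", label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curve.png")  # hypothetical output path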