Hinglish-finetuned / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 9.983079526226735,
"eval_steps": 500,
"global_step": 59000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08,
"learning_rate": 1.9830795262267346e-05,
"loss": 2.6117,
"step": 500
},
{
"epoch": 0.17,
"learning_rate": 1.9661590524534688e-05,
"loss": 1.7548,
"step": 1000
},
{
"epoch": 0.25,
"learning_rate": 1.9492385786802032e-05,
"loss": 1.4794,
"step": 1500
},
{
"epoch": 0.34,
"learning_rate": 1.9323181049069374e-05,
"loss": 1.3088,
"step": 2000
},
{
"epoch": 0.42,
"learning_rate": 1.915397631133672e-05,
"loss": 1.1959,
"step": 2500
},
{
"epoch": 0.51,
"learning_rate": 1.8984771573604063e-05,
"loss": 1.1127,
"step": 3000
},
{
"epoch": 0.59,
"learning_rate": 1.8815566835871405e-05,
"loss": 1.0536,
"step": 3500
},
{
"epoch": 0.68,
"learning_rate": 1.864636209813875e-05,
"loss": 0.9879,
"step": 4000
},
{
"epoch": 0.76,
"learning_rate": 1.8477157360406094e-05,
"loss": 0.9587,
"step": 4500
},
{
"epoch": 0.85,
"learning_rate": 1.830795262267344e-05,
"loss": 0.9123,
"step": 5000
},
{
"epoch": 0.93,
"learning_rate": 1.813874788494078e-05,
"loss": 0.8893,
"step": 5500
},
{
"epoch": 1.0,
"eval_bleu": 4.0777,
"eval_gen_len": 18.1211,
"eval_loss": 2.386338472366333,
"eval_runtime": 52.6636,
"eval_samples_per_second": 39.211,
"eval_steps_per_second": 1.234,
"step": 5910
},
{
"epoch": 1.02,
"learning_rate": 1.7969543147208125e-05,
"loss": 0.8853,
"step": 6000
},
{
"epoch": 1.1,
"learning_rate": 1.7800338409475466e-05,
"loss": 0.8313,
"step": 6500
},
{
"epoch": 1.18,
"learning_rate": 1.763113367174281e-05,
"loss": 0.8256,
"step": 7000
},
{
"epoch": 1.27,
"learning_rate": 1.7461928934010152e-05,
"loss": 0.8104,
"step": 7500
},
{
"epoch": 1.35,
"learning_rate": 1.7292724196277497e-05,
"loss": 0.8096,
"step": 8000
},
{
"epoch": 1.44,
"learning_rate": 1.7123519458544842e-05,
"loss": 0.7792,
"step": 8500
},
{
"epoch": 1.52,
"learning_rate": 1.6954314720812183e-05,
"loss": 0.7643,
"step": 9000
},
{
"epoch": 1.61,
"learning_rate": 1.6785109983079528e-05,
"loss": 0.7603,
"step": 9500
},
{
"epoch": 1.69,
"learning_rate": 1.661590524534687e-05,
"loss": 0.7385,
"step": 10000
},
{
"epoch": 1.78,
"learning_rate": 1.6446700507614214e-05,
"loss": 0.7424,
"step": 10500
},
{
"epoch": 1.86,
"learning_rate": 1.627749576988156e-05,
"loss": 0.7244,
"step": 11000
},
{
"epoch": 1.95,
"learning_rate": 1.61082910321489e-05,
"loss": 0.7182,
"step": 11500
},
{
"epoch": 2.0,
"eval_bleu": 4.0885,
"eval_gen_len": 18.231,
"eval_loss": 2.2319083213806152,
"eval_runtime": 49.7931,
"eval_samples_per_second": 41.472,
"eval_steps_per_second": 1.305,
"step": 11820
},
{
"epoch": 2.03,
"learning_rate": 1.5939086294416245e-05,
"loss": 0.7002,
"step": 12000
},
{
"epoch": 2.12,
"learning_rate": 1.576988155668359e-05,
"loss": 0.6985,
"step": 12500
},
{
"epoch": 2.2,
"learning_rate": 1.5600676818950934e-05,
"loss": 0.6908,
"step": 13000
},
{
"epoch": 2.28,
"learning_rate": 1.5431472081218276e-05,
"loss": 0.6684,
"step": 13500
},
{
"epoch": 2.37,
"learning_rate": 1.526226734348562e-05,
"loss": 0.6749,
"step": 14000
},
{
"epoch": 2.45,
"learning_rate": 1.5093062605752962e-05,
"loss": 0.6763,
"step": 14500
},
{
"epoch": 2.54,
"learning_rate": 1.4923857868020306e-05,
"loss": 0.6526,
"step": 15000
},
{
"epoch": 2.62,
"learning_rate": 1.4754653130287648e-05,
"loss": 0.6536,
"step": 15500
},
{
"epoch": 2.71,
"learning_rate": 1.4585448392554992e-05,
"loss": 0.6345,
"step": 16000
},
{
"epoch": 2.79,
"learning_rate": 1.4416243654822337e-05,
"loss": 0.6522,
"step": 16500
},
{
"epoch": 2.88,
"learning_rate": 1.424703891708968e-05,
"loss": 0.6379,
"step": 17000
},
{
"epoch": 2.96,
"learning_rate": 1.4077834179357023e-05,
"loss": 0.6277,
"step": 17500
},
{
"epoch": 3.0,
"eval_bleu": 4.2683,
"eval_gen_len": 18.2155,
"eval_loss": 2.1325912475585938,
"eval_runtime": 49.4798,
"eval_samples_per_second": 41.734,
"eval_steps_per_second": 1.314,
"step": 17730
},
{
"epoch": 3.05,
"learning_rate": 1.3908629441624366e-05,
"loss": 0.6309,
"step": 18000
},
{
"epoch": 3.13,
"learning_rate": 1.3739424703891711e-05,
"loss": 0.6427,
"step": 18500
},
{
"epoch": 3.21,
"learning_rate": 1.3570219966159052e-05,
"loss": 0.614,
"step": 19000
},
{
"epoch": 3.3,
"learning_rate": 1.3401015228426397e-05,
"loss": 0.6126,
"step": 19500
},
{
"epoch": 3.38,
"learning_rate": 1.323181049069374e-05,
"loss": 0.6118,
"step": 20000
},
{
"epoch": 3.47,
"learning_rate": 1.3062605752961083e-05,
"loss": 0.5909,
"step": 20500
},
{
"epoch": 3.55,
"learning_rate": 1.2893401015228428e-05,
"loss": 0.5972,
"step": 21000
},
{
"epoch": 3.64,
"learning_rate": 1.2724196277495771e-05,
"loss": 0.6019,
"step": 21500
},
{
"epoch": 3.72,
"learning_rate": 1.2554991539763116e-05,
"loss": 0.5879,
"step": 22000
},
{
"epoch": 3.81,
"learning_rate": 1.2385786802030457e-05,
"loss": 0.5931,
"step": 22500
},
{
"epoch": 3.89,
"learning_rate": 1.2216582064297802e-05,
"loss": 0.5979,
"step": 23000
},
{
"epoch": 3.98,
"learning_rate": 1.2047377326565143e-05,
"loss": 0.5981,
"step": 23500
},
{
"epoch": 4.0,
"eval_bleu": 4.3741,
"eval_gen_len": 18.3075,
"eval_loss": 2.0639102458953857,
"eval_runtime": 49.7175,
"eval_samples_per_second": 41.535,
"eval_steps_per_second": 1.307,
"step": 23640
},
{
"epoch": 4.06,
"learning_rate": 1.1878172588832488e-05,
"loss": 0.5758,
"step": 24000
},
{
"epoch": 4.15,
"learning_rate": 1.1708967851099833e-05,
"loss": 0.5724,
"step": 24500
},
{
"epoch": 4.23,
"learning_rate": 1.1539763113367176e-05,
"loss": 0.5708,
"step": 25000
},
{
"epoch": 4.31,
"learning_rate": 1.1370558375634519e-05,
"loss": 0.5764,
"step": 25500
},
{
"epoch": 4.4,
"learning_rate": 1.1201353637901862e-05,
"loss": 0.5743,
"step": 26000
},
{
"epoch": 4.48,
"learning_rate": 1.1032148900169206e-05,
"loss": 0.5715,
"step": 26500
},
{
"epoch": 4.57,
"learning_rate": 1.0862944162436548e-05,
"loss": 0.5688,
"step": 27000
},
{
"epoch": 4.65,
"learning_rate": 1.0693739424703892e-05,
"loss": 0.5776,
"step": 27500
},
{
"epoch": 4.74,
"learning_rate": 1.0524534686971236e-05,
"loss": 0.5712,
"step": 28000
},
{
"epoch": 4.82,
"learning_rate": 1.035532994923858e-05,
"loss": 0.5544,
"step": 28500
},
{
"epoch": 4.91,
"learning_rate": 1.0186125211505923e-05,
"loss": 0.5657,
"step": 29000
},
{
"epoch": 4.99,
"learning_rate": 1.0016920473773266e-05,
"loss": 0.5585,
"step": 29500
},
{
"epoch": 5.0,
"eval_bleu": 4.5943,
"eval_gen_len": 18.309,
"eval_loss": 2.012233018875122,
"eval_runtime": 49.6368,
"eval_samples_per_second": 41.602,
"eval_steps_per_second": 1.31,
"step": 29550
},
{
"epoch": 5.08,
"learning_rate": 9.84771573604061e-06,
"loss": 0.5618,
"step": 30000
},
{
"epoch": 5.16,
"learning_rate": 9.678510998307954e-06,
"loss": 0.5427,
"step": 30500
},
{
"epoch": 5.25,
"learning_rate": 9.509306260575297e-06,
"loss": 0.5469,
"step": 31000
},
{
"epoch": 5.33,
"learning_rate": 9.34010152284264e-06,
"loss": 0.5387,
"step": 31500
},
{
"epoch": 5.41,
"learning_rate": 9.170896785109983e-06,
"loss": 0.5454,
"step": 32000
},
{
"epoch": 5.5,
"learning_rate": 9.001692047377328e-06,
"loss": 0.5474,
"step": 32500
},
{
"epoch": 5.58,
"learning_rate": 8.832487309644671e-06,
"loss": 0.5566,
"step": 33000
},
{
"epoch": 5.67,
"learning_rate": 8.663282571912014e-06,
"loss": 0.5339,
"step": 33500
},
{
"epoch": 5.75,
"learning_rate": 8.494077834179357e-06,
"loss": 0.5437,
"step": 34000
},
{
"epoch": 5.84,
"learning_rate": 8.324873096446702e-06,
"loss": 0.5408,
"step": 34500
},
{
"epoch": 5.92,
"learning_rate": 8.155668358714045e-06,
"loss": 0.537,
"step": 35000
},
{
"epoch": 6.0,
"eval_bleu": 4.6946,
"eval_gen_len": 18.3172,
"eval_loss": 1.9772592782974243,
"eval_runtime": 49.5084,
"eval_samples_per_second": 41.71,
"eval_steps_per_second": 1.313,
"step": 35460
},
{
"epoch": 6.01,
"learning_rate": 7.986463620981388e-06,
"loss": 0.5411,
"step": 35500
},
{
"epoch": 6.09,
"learning_rate": 7.817258883248731e-06,
"loss": 0.5292,
"step": 36000
},
{
"epoch": 6.18,
"learning_rate": 7.648054145516076e-06,
"loss": 0.5436,
"step": 36500
},
{
"epoch": 6.26,
"learning_rate": 7.478849407783419e-06,
"loss": 0.5317,
"step": 37000
},
{
"epoch": 6.35,
"learning_rate": 7.309644670050762e-06,
"loss": 0.516,
"step": 37500
},
{
"epoch": 6.43,
"learning_rate": 7.140439932318105e-06,
"loss": 0.522,
"step": 38000
},
{
"epoch": 6.51,
"learning_rate": 6.9712351945854495e-06,
"loss": 0.528,
"step": 38500
},
{
"epoch": 6.6,
"learning_rate": 6.8020304568527926e-06,
"loss": 0.53,
"step": 39000
},
{
"epoch": 6.68,
"learning_rate": 6.6328257191201364e-06,
"loss": 0.5282,
"step": 39500
},
{
"epoch": 6.77,
"learning_rate": 6.4636209813874795e-06,
"loss": 0.5386,
"step": 40000
},
{
"epoch": 6.85,
"learning_rate": 6.2944162436548225e-06,
"loss": 0.5243,
"step": 40500
},
{
"epoch": 6.94,
"learning_rate": 6.125211505922166e-06,
"loss": 0.513,
"step": 41000
},
{
"epoch": 7.0,
"eval_bleu": 4.9177,
"eval_gen_len": 18.2939,
"eval_loss": 1.94707190990448,
"eval_runtime": 49.7911,
"eval_samples_per_second": 41.473,
"eval_steps_per_second": 1.305,
"step": 41370
},
{
"epoch": 7.02,
"learning_rate": 5.9560067681895094e-06,
"loss": 0.5096,
"step": 41500
},
{
"epoch": 7.11,
"learning_rate": 5.7868020304568525e-06,
"loss": 0.5175,
"step": 42000
},
{
"epoch": 7.19,
"learning_rate": 5.617597292724196e-06,
"loss": 0.5205,
"step": 42500
},
{
"epoch": 7.28,
"learning_rate": 5.44839255499154e-06,
"loss": 0.5079,
"step": 43000
},
{
"epoch": 7.36,
"learning_rate": 5.279187817258884e-06,
"loss": 0.5067,
"step": 43500
},
{
"epoch": 7.45,
"learning_rate": 5.109983079526227e-06,
"loss": 0.5174,
"step": 44000
},
{
"epoch": 7.53,
"learning_rate": 4.94077834179357e-06,
"loss": 0.5153,
"step": 44500
},
{
"epoch": 7.61,
"learning_rate": 4.771573604060914e-06,
"loss": 0.5227,
"step": 45000
},
{
"epoch": 7.7,
"learning_rate": 4.602368866328257e-06,
"loss": 0.5146,
"step": 45500
},
{
"epoch": 7.78,
"learning_rate": 4.433164128595601e-06,
"loss": 0.5065,
"step": 46000
},
{
"epoch": 7.87,
"learning_rate": 4.263959390862945e-06,
"loss": 0.5143,
"step": 46500
},
{
"epoch": 7.95,
"learning_rate": 4.094754653130288e-06,
"loss": 0.5234,
"step": 47000
},
{
"epoch": 8.0,
"eval_bleu": 5.0134,
"eval_gen_len": 18.2939,
"eval_loss": 1.928272008895874,
"eval_runtime": 49.7105,
"eval_samples_per_second": 41.54,
"eval_steps_per_second": 1.308,
"step": 47280
},
{
"epoch": 8.04,
"learning_rate": 3.925549915397631e-06,
"loss": 0.5043,
"step": 47500
},
{
"epoch": 8.12,
"learning_rate": 3.756345177664975e-06,
"loss": 0.5272,
"step": 48000
},
{
"epoch": 8.21,
"learning_rate": 3.5871404399323183e-06,
"loss": 0.5068,
"step": 48500
},
{
"epoch": 8.29,
"learning_rate": 3.417935702199662e-06,
"loss": 0.5071,
"step": 49000
},
{
"epoch": 8.38,
"learning_rate": 3.2487309644670053e-06,
"loss": 0.5061,
"step": 49500
},
{
"epoch": 8.46,
"learning_rate": 3.079526226734349e-06,
"loss": 0.4997,
"step": 50000
},
{
"epoch": 8.54,
"learning_rate": 2.910321489001692e-06,
"loss": 0.4953,
"step": 50500
},
{
"epoch": 8.63,
"learning_rate": 2.7411167512690357e-06,
"loss": 0.5072,
"step": 51000
},
{
"epoch": 8.71,
"learning_rate": 2.571912013536379e-06,
"loss": 0.512,
"step": 51500
},
{
"epoch": 8.8,
"learning_rate": 2.4027072758037226e-06,
"loss": 0.5043,
"step": 52000
},
{
"epoch": 8.88,
"learning_rate": 2.233502538071066e-06,
"loss": 0.499,
"step": 52500
},
{
"epoch": 8.97,
"learning_rate": 2.0642978003384095e-06,
"loss": 0.5133,
"step": 53000
},
{
"epoch": 9.0,
"eval_bleu": 5.0308,
"eval_gen_len": 18.3027,
"eval_loss": 1.91579270362854,
"eval_runtime": 49.6621,
"eval_samples_per_second": 41.581,
"eval_steps_per_second": 1.309,
"step": 53190
},
{
"epoch": 9.05,
"learning_rate": 1.8950930626057532e-06,
"loss": 0.5146,
"step": 53500
},
{
"epoch": 9.14,
"learning_rate": 1.7258883248730964e-06,
"loss": 0.4968,
"step": 54000
},
{
"epoch": 9.22,
"learning_rate": 1.55668358714044e-06,
"loss": 0.4806,
"step": 54500
},
{
"epoch": 9.31,
"learning_rate": 1.3874788494077834e-06,
"loss": 0.4979,
"step": 55000
},
{
"epoch": 9.39,
"learning_rate": 1.218274111675127e-06,
"loss": 0.4971,
"step": 55500
},
{
"epoch": 9.48,
"learning_rate": 1.0490693739424705e-06,
"loss": 0.5013,
"step": 56000
},
{
"epoch": 9.56,
"learning_rate": 8.79864636209814e-07,
"loss": 0.5119,
"step": 56500
},
{
"epoch": 9.64,
"learning_rate": 7.106598984771574e-07,
"loss": 0.5076,
"step": 57000
},
{
"epoch": 9.73,
"learning_rate": 5.414551607445009e-07,
"loss": 0.508,
"step": 57500
},
{
"epoch": 9.81,
"learning_rate": 3.7225042301184434e-07,
"loss": 0.4978,
"step": 58000
},
{
"epoch": 9.9,
"learning_rate": 2.0304568527918783e-07,
"loss": 0.5046,
"step": 58500
},
{
"epoch": 9.98,
"learning_rate": 3.3840947546531305e-08,
"loss": 0.5074,
"step": 59000
}
],
"logging_steps": 500,
"max_steps": 59100,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 7.765950061873152e+16,
"trial_name": null,
"trial_params": null
}