{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 33750,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.044444444444444446,
"grad_norm": 3.210815906524658,
"learning_rate": 4.925925925925926e-05,
"loss": 0.4315,
"step": 500
},
{
"epoch": 0.08888888888888889,
"grad_norm": 8.126620292663574,
"learning_rate": 4.851851851851852e-05,
"loss": 0.2862,
"step": 1000
},
{
"epoch": 0.13333333333333333,
"grad_norm": 4.665143013000488,
"learning_rate": 4.7777777777777784e-05,
"loss": 0.237,
"step": 1500
},
{
"epoch": 0.17777777777777778,
"grad_norm": 2.0566887855529785,
"learning_rate": 4.703703703703704e-05,
"loss": 0.2251,
"step": 2000
},
{
"epoch": 0.2222222222222222,
"grad_norm": 7.968118667602539,
"learning_rate": 4.62962962962963e-05,
"loss": 0.2239,
"step": 2500
},
{
"epoch": 0.26666666666666666,
"grad_norm": 1.2121827602386475,
"learning_rate": 4.555555555555556e-05,
"loss": 0.2203,
"step": 3000
},
{
"epoch": 0.3111111111111111,
"grad_norm": 3.395099401473999,
"learning_rate": 4.481481481481482e-05,
"loss": 0.2111,
"step": 3500
},
{
"epoch": 0.35555555555555557,
"grad_norm": 1.9526039361953735,
"learning_rate": 4.4074074074074076e-05,
"loss": 0.193,
"step": 4000
},
{
"epoch": 0.4,
"grad_norm": 3.2926955223083496,
"learning_rate": 4.3333333333333334e-05,
"loss": 0.1853,
"step": 4500
},
{
"epoch": 0.4444444444444444,
"grad_norm": 2.8749849796295166,
"learning_rate": 4.259259259259259e-05,
"loss": 0.1675,
"step": 5000
},
{
"epoch": 0.4888888888888889,
"grad_norm": 6.5106706619262695,
"learning_rate": 4.185185185185185e-05,
"loss": 0.1889,
"step": 5500
},
{
"epoch": 0.5333333333333333,
"grad_norm": 1.893872618675232,
"learning_rate": 4.111111111111111e-05,
"loss": 0.2002,
"step": 6000
},
{
"epoch": 0.5777777777777777,
"grad_norm": 4.822785377502441,
"learning_rate": 4.0370370370370374e-05,
"loss": 0.1802,
"step": 6500
},
{
"epoch": 0.6222222222222222,
"grad_norm": 2.410318374633789,
"learning_rate": 3.962962962962963e-05,
"loss": 0.1578,
"step": 7000
},
{
"epoch": 0.6666666666666666,
"grad_norm": 7.345259189605713,
"learning_rate": 3.888888888888889e-05,
"loss": 0.1668,
"step": 7500
},
{
"epoch": 0.7111111111111111,
"grad_norm": 2.6905734539031982,
"learning_rate": 3.814814814814815e-05,
"loss": 0.1726,
"step": 8000
},
{
"epoch": 0.7555555555555555,
"grad_norm": 7.399478912353516,
"learning_rate": 3.740740740740741e-05,
"loss": 0.1586,
"step": 8500
},
{
"epoch": 0.8,
"grad_norm": 1.8845775127410889,
"learning_rate": 3.6666666666666666e-05,
"loss": 0.1629,
"step": 9000
},
{
"epoch": 0.8444444444444444,
"grad_norm": 3.4357192516326904,
"learning_rate": 3.592592592592593e-05,
"loss": 0.1674,
"step": 9500
},
{
"epoch": 0.8888888888888888,
"grad_norm": 1.8075140714645386,
"learning_rate": 3.518518518518519e-05,
"loss": 0.1671,
"step": 10000
},
{
"epoch": 0.9333333333333333,
"grad_norm": 2.545607089996338,
"learning_rate": 3.444444444444445e-05,
"loss": 0.1595,
"step": 10500
},
{
"epoch": 0.9777777777777777,
"grad_norm": 2.33050799369812,
"learning_rate": 3.3703703703703706e-05,
"loss": 0.1655,
"step": 11000
},
{
"epoch": 1.0222222222222221,
"grad_norm": 1.9340065717697144,
"learning_rate": 3.2962962962962964e-05,
"loss": 0.1349,
"step": 11500
},
{
"epoch": 1.0666666666666667,
"grad_norm": 1.3230476379394531,
"learning_rate": 3.222222222222223e-05,
"loss": 0.1069,
"step": 12000
},
{
"epoch": 1.1111111111111112,
"grad_norm": 4.054622650146484,
"learning_rate": 3.148148148148148e-05,
"loss": 0.1098,
"step": 12500
},
{
"epoch": 1.1555555555555554,
"grad_norm": 4.885370254516602,
"learning_rate": 3.074074074074074e-05,
"loss": 0.1162,
"step": 13000
},
{
"epoch": 1.2,
"grad_norm": 3.5849854946136475,
"learning_rate": 3e-05,
"loss": 0.1077,
"step": 13500
},
{
"epoch": 1.2444444444444445,
"grad_norm": 9.765403747558594,
"learning_rate": 2.925925925925926e-05,
"loss": 0.1181,
"step": 14000
},
{
"epoch": 1.2888888888888888,
"grad_norm": 3.879992961883545,
"learning_rate": 2.851851851851852e-05,
"loss": 0.1011,
"step": 14500
},
{
"epoch": 1.3333333333333333,
"grad_norm": 2.2347288131713867,
"learning_rate": 2.777777777777778e-05,
"loss": 0.1175,
"step": 15000
},
{
"epoch": 1.3777777777777778,
"grad_norm": 7.474192142486572,
"learning_rate": 2.7037037037037037e-05,
"loss": 0.1073,
"step": 15500
},
{
"epoch": 1.4222222222222223,
"grad_norm": 4.1693291664123535,
"learning_rate": 2.6296296296296296e-05,
"loss": 0.0995,
"step": 16000
},
{
"epoch": 1.4666666666666668,
"grad_norm": 1.8383170366287231,
"learning_rate": 2.5555555555555554e-05,
"loss": 0.1145,
"step": 16500
},
{
"epoch": 1.511111111111111,
"grad_norm": 0.694262683391571,
"learning_rate": 2.4814814814814816e-05,
"loss": 0.115,
"step": 17000
},
{
"epoch": 1.5555555555555556,
"grad_norm": 0.9633584022521973,
"learning_rate": 2.4074074074074074e-05,
"loss": 0.1024,
"step": 17500
},
{
"epoch": 1.6,
"grad_norm": 2.0199620723724365,
"learning_rate": 2.3333333333333336e-05,
"loss": 0.1011,
"step": 18000
},
{
"epoch": 1.6444444444444444,
"grad_norm": 1.8132920265197754,
"learning_rate": 2.2592592592592594e-05,
"loss": 0.1041,
"step": 18500
},
{
"epoch": 1.6888888888888889,
"grad_norm": 8.3416748046875,
"learning_rate": 2.1851851851851852e-05,
"loss": 0.1144,
"step": 19000
},
{
"epoch": 1.7333333333333334,
"grad_norm": 1.4581434726715088,
"learning_rate": 2.111111111111111e-05,
"loss": 0.0955,
"step": 19500
},
{
"epoch": 1.7777777777777777,
"grad_norm": 3.9014599323272705,
"learning_rate": 2.037037037037037e-05,
"loss": 0.104,
"step": 20000
},
{
"epoch": 1.8222222222222222,
"grad_norm": 2.070230007171631,
"learning_rate": 1.962962962962963e-05,
"loss": 0.0995,
"step": 20500
},
{
"epoch": 1.8666666666666667,
"grad_norm": 0.8812291026115417,
"learning_rate": 1.888888888888889e-05,
"loss": 0.0982,
"step": 21000
},
{
"epoch": 1.911111111111111,
"grad_norm": 1.1036711931228638,
"learning_rate": 1.814814814814815e-05,
"loss": 0.0987,
"step": 21500
},
{
"epoch": 1.9555555555555557,
"grad_norm": 0.6822977066040039,
"learning_rate": 1.740740740740741e-05,
"loss": 0.101,
"step": 22000
},
{
"epoch": 2.0,
"grad_norm": 4.314443111419678,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.0979,
"step": 22500
},
{
"epoch": 2.0444444444444443,
"grad_norm": 3.5247573852539062,
"learning_rate": 1.5925925925925926e-05,
"loss": 0.0733,
"step": 23000
},
{
"epoch": 2.088888888888889,
"grad_norm": 0.6801110506057739,
"learning_rate": 1.5185185185185186e-05,
"loss": 0.0651,
"step": 23500
},
{
"epoch": 2.1333333333333333,
"grad_norm": 4.516416072845459,
"learning_rate": 1.4444444444444444e-05,
"loss": 0.0765,
"step": 24000
},
{
"epoch": 2.1777777777777776,
"grad_norm": 1.9308606386184692,
"learning_rate": 1.3703703703703704e-05,
"loss": 0.0707,
"step": 24500
},
{
"epoch": 2.2222222222222223,
"grad_norm": 1.1442885398864746,
"learning_rate": 1.2962962962962962e-05,
"loss": 0.0617,
"step": 25000
},
{
"epoch": 2.2666666666666666,
"grad_norm": 5.30832576751709,
"learning_rate": 1.2222222222222222e-05,
"loss": 0.0693,
"step": 25500
},
{
"epoch": 2.311111111111111,
"grad_norm": 0.5708422660827637,
"learning_rate": 1.1481481481481482e-05,
"loss": 0.0697,
"step": 26000
},
{
"epoch": 2.3555555555555556,
"grad_norm": 0.680268406867981,
"learning_rate": 1.074074074074074e-05,
"loss": 0.066,
"step": 26500
},
{
"epoch": 2.4,
"grad_norm": 0.72934889793396,
"learning_rate": 1e-05,
"loss": 0.0644,
"step": 27000
},
{
"epoch": 2.4444444444444446,
"grad_norm": 4.663362503051758,
"learning_rate": 9.259259259259259e-06,
"loss": 0.0667,
"step": 27500
},
{
"epoch": 2.488888888888889,
"grad_norm": 1.3758256435394287,
"learning_rate": 8.518518518518519e-06,
"loss": 0.0575,
"step": 28000
},
{
"epoch": 2.533333333333333,
"grad_norm": 0.8022651672363281,
"learning_rate": 7.777777777777777e-06,
"loss": 0.0621,
"step": 28500
},
{
"epoch": 2.5777777777777775,
"grad_norm": 1.576392412185669,
"learning_rate": 7.0370370370370375e-06,
"loss": 0.0586,
"step": 29000
},
{
"epoch": 2.6222222222222222,
"grad_norm": 1.166567087173462,
"learning_rate": 6.296296296296296e-06,
"loss": 0.0662,
"step": 29500
},
{
"epoch": 2.6666666666666665,
"grad_norm": 2.4423012733459473,
"learning_rate": 5.555555555555556e-06,
"loss": 0.0645,
"step": 30000
},
{
"epoch": 2.7111111111111112,
"grad_norm": 3.1122899055480957,
"learning_rate": 4.814814814814815e-06,
"loss": 0.0642,
"step": 30500
},
{
"epoch": 2.7555555555555555,
"grad_norm": 3.2840280532836914,
"learning_rate": 4.074074074074075e-06,
"loss": 0.0617,
"step": 31000
},
{
"epoch": 2.8,
"grad_norm": 0.6285051703453064,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.0566,
"step": 31500
},
{
"epoch": 2.8444444444444446,
"grad_norm": 3.462080240249634,
"learning_rate": 2.5925925925925925e-06,
"loss": 0.0614,
"step": 32000
},
{
"epoch": 2.888888888888889,
"grad_norm": 2.2216219902038574,
"learning_rate": 1.8518518518518519e-06,
"loss": 0.0647,
"step": 32500
},
{
"epoch": 2.9333333333333336,
"grad_norm": 1.6926839351654053,
"learning_rate": 1.1111111111111112e-06,
"loss": 0.0611,
"step": 33000
},
{
"epoch": 2.977777777777778,
"grad_norm": 4.723779678344727,
"learning_rate": 3.703703703703704e-07,
"loss": 0.0597,
"step": 33500
},
{
"epoch": 3.0,
"step": 33750,
"total_flos": 4446488701440000.0,
"train_loss": 0.12363309427897136,
"train_runtime": 1375.441,
"train_samples_per_second": 196.301,
"train_steps_per_second": 24.538
}
],
"logging_steps": 500,
"max_steps": 33750,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4446488701440000.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}