deberta-v3-xsmall-quality / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 33750,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.044444444444444446,
"grad_norm": 4.640678405761719,
"learning_rate": 4.925925925925926e-05,
"loss": 0.2464,
"num_input_tokens_seen": 512000,
"step": 500
},
{
"epoch": 0.08888888888888889,
"grad_norm": 2.8545427322387695,
"learning_rate": 4.851851851851852e-05,
"loss": 0.1755,
"num_input_tokens_seen": 1024000,
"step": 1000
},
{
"epoch": 0.13333333333333333,
"grad_norm": 1.1847012042999268,
"learning_rate": 4.7777777777777784e-05,
"loss": 0.1628,
"num_input_tokens_seen": 1536000,
"step": 1500
},
{
"epoch": 0.17777777777777778,
"grad_norm": 3.767167568206787,
"learning_rate": 4.703703703703704e-05,
"loss": 0.1475,
"num_input_tokens_seen": 2048000,
"step": 2000
},
{
"epoch": 0.2222222222222222,
"grad_norm": 3.5827574729919434,
"learning_rate": 4.62962962962963e-05,
"loss": 0.1448,
"num_input_tokens_seen": 2560000,
"step": 2500
},
{
"epoch": 0.26666666666666666,
"grad_norm": 2.042477607727051,
"learning_rate": 4.555555555555556e-05,
"loss": 0.1412,
"num_input_tokens_seen": 3072000,
"step": 3000
},
{
"epoch": 0.3111111111111111,
"grad_norm": 1.130966305732727,
"learning_rate": 4.481481481481482e-05,
"loss": 0.1296,
"num_input_tokens_seen": 3584000,
"step": 3500
},
{
"epoch": 0.35555555555555557,
"grad_norm": 3.1181435585021973,
"learning_rate": 4.4074074074074076e-05,
"loss": 0.131,
"num_input_tokens_seen": 4096000,
"step": 4000
},
{
"epoch": 0.4,
"grad_norm": 2.284423351287842,
"learning_rate": 4.3333333333333334e-05,
"loss": 0.135,
"num_input_tokens_seen": 4608000,
"step": 4500
},
{
"epoch": 0.4444444444444444,
"grad_norm": 2.6171817779541016,
"learning_rate": 4.259259259259259e-05,
"loss": 0.1251,
"num_input_tokens_seen": 5120000,
"step": 5000
},
{
"epoch": 0.4888888888888889,
"grad_norm": 2.712770462036133,
"learning_rate": 4.185185185185185e-05,
"loss": 0.1214,
"num_input_tokens_seen": 5632000,
"step": 5500
},
{
"epoch": 0.5333333333333333,
"grad_norm": 1.8071026802062988,
"learning_rate": 4.111111111111111e-05,
"loss": 0.1178,
"num_input_tokens_seen": 6144000,
"step": 6000
},
{
"epoch": 0.5777777777777777,
"grad_norm": 2.124100685119629,
"learning_rate": 4.0370370370370374e-05,
"loss": 0.1187,
"num_input_tokens_seen": 6656000,
"step": 6500
},
{
"epoch": 0.6222222222222222,
"grad_norm": 2.7214131355285645,
"learning_rate": 3.962962962962963e-05,
"loss": 0.118,
"num_input_tokens_seen": 7168000,
"step": 7000
},
{
"epoch": 0.6666666666666666,
"grad_norm": 2.097531318664551,
"learning_rate": 3.888888888888889e-05,
"loss": 0.1156,
"num_input_tokens_seen": 7680000,
"step": 7500
},
{
"epoch": 0.7111111111111111,
"grad_norm": 2.9476003646850586,
"learning_rate": 3.814814814814815e-05,
"loss": 0.1168,
"num_input_tokens_seen": 8192000,
"step": 8000
},
{
"epoch": 0.7555555555555555,
"grad_norm": 2.068228006362915,
"learning_rate": 3.740740740740741e-05,
"loss": 0.1122,
"num_input_tokens_seen": 8704000,
"step": 8500
},
{
"epoch": 0.8,
"grad_norm": 1.8230115175247192,
"learning_rate": 3.6666666666666666e-05,
"loss": 0.1129,
"num_input_tokens_seen": 9216000,
"step": 9000
},
{
"epoch": 0.8444444444444444,
"grad_norm": 2.0530920028686523,
"learning_rate": 3.592592592592593e-05,
"loss": 0.1074,
"num_input_tokens_seen": 9728000,
"step": 9500
},
{
"epoch": 0.8888888888888888,
"grad_norm": 1.2028056383132935,
"learning_rate": 3.518518518518519e-05,
"loss": 0.1022,
"num_input_tokens_seen": 10240000,
"step": 10000
},
{
"epoch": 0.9333333333333333,
"grad_norm": 1.908441185951233,
"learning_rate": 3.444444444444445e-05,
"loss": 0.102,
"num_input_tokens_seen": 10752000,
"step": 10500
},
{
"epoch": 0.9777777777777777,
"grad_norm": 1.4811742305755615,
"learning_rate": 3.3703703703703706e-05,
"loss": 0.0967,
"num_input_tokens_seen": 11264000,
"step": 11000
},
{
"epoch": 1.0222222222222221,
"grad_norm": 2.421898126602173,
"learning_rate": 3.2962962962962964e-05,
"loss": 0.0908,
"num_input_tokens_seen": 11776000,
"step": 11500
},
{
"epoch": 1.0666666666666667,
"grad_norm": 1.024445652961731,
"learning_rate": 3.222222222222223e-05,
"loss": 0.0729,
"num_input_tokens_seen": 12288000,
"step": 12000
},
{
"epoch": 1.1111111111111112,
"grad_norm": 2.516057014465332,
"learning_rate": 3.148148148148148e-05,
"loss": 0.0739,
"num_input_tokens_seen": 12800000,
"step": 12500
},
{
"epoch": 1.1555555555555554,
"grad_norm": 3.101442813873291,
"learning_rate": 3.074074074074074e-05,
"loss": 0.0728,
"num_input_tokens_seen": 13312000,
"step": 13000
},
{
"epoch": 1.2,
"grad_norm": 1.0181483030319214,
"learning_rate": 3e-05,
"loss": 0.0713,
"num_input_tokens_seen": 13824000,
"step": 13500
},
{
"epoch": 1.2444444444444445,
"grad_norm": 2.7126169204711914,
"learning_rate": 2.925925925925926e-05,
"loss": 0.0739,
"num_input_tokens_seen": 14336000,
"step": 14000
},
{
"epoch": 1.2888888888888888,
"grad_norm": 2.1057209968566895,
"learning_rate": 2.851851851851852e-05,
"loss": 0.0718,
"num_input_tokens_seen": 14848000,
"step": 14500
},
{
"epoch": 1.3333333333333333,
"grad_norm": 2.226621150970459,
"learning_rate": 2.777777777777778e-05,
"loss": 0.081,
"num_input_tokens_seen": 15360000,
"step": 15000
},
{
"epoch": 1.3777777777777778,
"grad_norm": 1.1672347784042358,
"learning_rate": 2.7037037037037037e-05,
"loss": 0.0744,
"num_input_tokens_seen": 15872000,
"step": 15500
},
{
"epoch": 1.4222222222222223,
"grad_norm": 1.556462287902832,
"learning_rate": 2.6296296296296296e-05,
"loss": 0.0727,
"num_input_tokens_seen": 16384000,
"step": 16000
},
{
"epoch": 1.4666666666666668,
"grad_norm": 1.4660066366195679,
"learning_rate": 2.5555555555555554e-05,
"loss": 0.0688,
"num_input_tokens_seen": 16896000,
"step": 16500
},
{
"epoch": 1.511111111111111,
"grad_norm": 1.3551133871078491,
"learning_rate": 2.4814814814814816e-05,
"loss": 0.0664,
"num_input_tokens_seen": 17408000,
"step": 17000
},
{
"epoch": 1.5555555555555556,
"grad_norm": 1.1325716972351074,
"learning_rate": 2.4074074074074074e-05,
"loss": 0.0712,
"num_input_tokens_seen": 17920000,
"step": 17500
},
{
"epoch": 1.6,
"grad_norm": 1.0320031642913818,
"learning_rate": 2.3333333333333336e-05,
"loss": 0.0696,
"num_input_tokens_seen": 18432000,
"step": 18000
},
{
"epoch": 1.6444444444444444,
"grad_norm": 2.1350128650665283,
"learning_rate": 2.2592592592592594e-05,
"loss": 0.0712,
"num_input_tokens_seen": 18944000,
"step": 18500
},
{
"epoch": 1.6888888888888889,
"grad_norm": 1.1131097078323364,
"learning_rate": 2.1851851851851852e-05,
"loss": 0.0728,
"num_input_tokens_seen": 19456000,
"step": 19000
},
{
"epoch": 1.7333333333333334,
"grad_norm": 1.8592647314071655,
"learning_rate": 2.111111111111111e-05,
"loss": 0.07,
"num_input_tokens_seen": 19968000,
"step": 19500
},
{
"epoch": 1.7777777777777777,
"grad_norm": 1.7992678880691528,
"learning_rate": 2.037037037037037e-05,
"loss": 0.0682,
"num_input_tokens_seen": 20480000,
"step": 20000
},
{
"epoch": 1.8222222222222222,
"grad_norm": 1.4641705751419067,
"learning_rate": 1.962962962962963e-05,
"loss": 0.0627,
"num_input_tokens_seen": 20992000,
"step": 20500
},
{
"epoch": 1.8666666666666667,
"grad_norm": 1.758195161819458,
"learning_rate": 1.888888888888889e-05,
"loss": 0.0686,
"num_input_tokens_seen": 21504000,
"step": 21000
},
{
"epoch": 1.911111111111111,
"grad_norm": 1.9842095375061035,
"learning_rate": 1.814814814814815e-05,
"loss": 0.068,
"num_input_tokens_seen": 22016000,
"step": 21500
},
{
"epoch": 1.9555555555555557,
"grad_norm": 1.4546846151351929,
"learning_rate": 1.740740740740741e-05,
"loss": 0.0646,
"num_input_tokens_seen": 22528000,
"step": 22000
},
{
"epoch": 2.0,
"grad_norm": 3.412598133087158,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.068,
"num_input_tokens_seen": 23040000,
"step": 22500
},
{
"epoch": 2.0444444444444443,
"grad_norm": 2.1435558795928955,
"learning_rate": 1.5925925925925926e-05,
"loss": 0.0492,
"num_input_tokens_seen": 23552000,
"step": 23000
},
{
"epoch": 2.088888888888889,
"grad_norm": 1.800618290901184,
"learning_rate": 1.5185185185185186e-05,
"loss": 0.0488,
"num_input_tokens_seen": 24064000,
"step": 23500
},
{
"epoch": 2.1333333333333333,
"grad_norm": 1.1772105693817139,
"learning_rate": 1.4444444444444444e-05,
"loss": 0.0465,
"num_input_tokens_seen": 24576000,
"step": 24000
},
{
"epoch": 2.1777777777777776,
"grad_norm": 0.9198475480079651,
"learning_rate": 1.3703703703703704e-05,
"loss": 0.0458,
"num_input_tokens_seen": 25088000,
"step": 24500
},
{
"epoch": 2.2222222222222223,
"grad_norm": 1.3264068365097046,
"learning_rate": 1.2962962962962962e-05,
"loss": 0.0476,
"num_input_tokens_seen": 25600000,
"step": 25000
},
{
"epoch": 2.2666666666666666,
"grad_norm": 2.010927438735962,
"learning_rate": 1.2222222222222222e-05,
"loss": 0.0485,
"num_input_tokens_seen": 26112000,
"step": 25500
},
{
"epoch": 2.311111111111111,
"grad_norm": 1.620568871498108,
"learning_rate": 1.1481481481481482e-05,
"loss": 0.0443,
"num_input_tokens_seen": 26624000,
"step": 26000
},
{
"epoch": 2.3555555555555556,
"grad_norm": 1.1650017499923706,
"learning_rate": 1.074074074074074e-05,
"loss": 0.0456,
"num_input_tokens_seen": 27136000,
"step": 26500
},
{
"epoch": 2.4,
"grad_norm": 1.404801607131958,
"learning_rate": 1e-05,
"loss": 0.0453,
"num_input_tokens_seen": 27648000,
"step": 27000
},
{
"epoch": 2.4444444444444446,
"grad_norm": 1.6808654069900513,
"learning_rate": 9.259259259259259e-06,
"loss": 0.0447,
"num_input_tokens_seen": 28160000,
"step": 27500
},
{
"epoch": 2.488888888888889,
"grad_norm": 1.0057835578918457,
"learning_rate": 8.518518518518519e-06,
"loss": 0.0494,
"num_input_tokens_seen": 28672000,
"step": 28000
},
{
"epoch": 2.533333333333333,
"grad_norm": 1.2674041986465454,
"learning_rate": 7.777777777777777e-06,
"loss": 0.044,
"num_input_tokens_seen": 29184000,
"step": 28500
},
{
"epoch": 2.5777777777777775,
"grad_norm": 1.236249327659607,
"learning_rate": 7.0370370370370375e-06,
"loss": 0.0478,
"num_input_tokens_seen": 29696000,
"step": 29000
},
{
"epoch": 2.6222222222222222,
"grad_norm": 1.3907105922698975,
"learning_rate": 6.296296296296296e-06,
"loss": 0.0428,
"num_input_tokens_seen": 30208000,
"step": 29500
},
{
"epoch": 2.6666666666666665,
"grad_norm": 1.8549304008483887,
"learning_rate": 5.555555555555556e-06,
"loss": 0.0451,
"num_input_tokens_seen": 30720000,
"step": 30000
},
{
"epoch": 2.7111111111111112,
"grad_norm": 1.3427213430404663,
"learning_rate": 4.814814814814815e-06,
"loss": 0.0438,
"num_input_tokens_seen": 31232000,
"step": 30500
},
{
"epoch": 2.7555555555555555,
"grad_norm": 1.9282052516937256,
"learning_rate": 4.074074074074075e-06,
"loss": 0.046,
"num_input_tokens_seen": 31744000,
"step": 31000
},
{
"epoch": 2.8,
"grad_norm": 1.3202067613601685,
"learning_rate": 3.3333333333333333e-06,
"loss": 0.0394,
"num_input_tokens_seen": 32256000,
"step": 31500
},
{
"epoch": 2.8444444444444446,
"grad_norm": 0.9947272539138794,
"learning_rate": 2.5925925925925925e-06,
"loss": 0.0428,
"num_input_tokens_seen": 32768000,
"step": 32000
},
{
"epoch": 2.888888888888889,
"grad_norm": 1.1175397634506226,
"learning_rate": 1.8518518518518519e-06,
"loss": 0.0411,
"num_input_tokens_seen": 33280000,
"step": 32500
},
{
"epoch": 2.9333333333333336,
"grad_norm": 1.4308379888534546,
"learning_rate": 1.1111111111111112e-06,
"loss": 0.0422,
"num_input_tokens_seen": 33792000,
"step": 33000
},
{
"epoch": 2.977777777777778,
"grad_norm": 1.3511176109313965,
"learning_rate": 3.703703703703704e-07,
"loss": 0.0428,
"num_input_tokens_seen": 34304000,
"step": 33500
},
{
"epoch": 3.0,
"num_input_tokens_seen": 34560000,
"step": 33750,
"total_flos": 4446488701440000.0,
"train_loss": 0.08211795973601164,
"train_runtime": 1433.1905,
"train_samples_per_second": 188.391,
"train_steps_per_second": 23.549,
"train_tokens_per_second": 24114.032
}
],
"logging_steps": 500,
"max_steps": 33750,
"num_input_tokens_seen": 34560000,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4446488701440000.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
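
For reference, a minimal sketch of how the log_history entries above could be loaded and turned into a training-loss curve. The local file path and the matplotlib dependency are assumptions for illustration; any plotting library would do.

import json

import matplotlib.pyplot as plt  # assumed available; any plotting library works

# Load the trainer state written by the Hugging Face Trainer
# (path is an assumption; adjust to wherever the file is saved).
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the per-step logging entries that carry a training loss;
# the final summary entry reports train_loss/train_runtime instead.
logs = [e for e in state["log_history"] if "loss" in e and "learning_rate" in e]

steps = [e["step"] for e in logs]
losses = [e["loss"] for e in logs]

plt.plot(steps, losses)
plt.xlabel("global step")
plt.ylabel("training loss")
plt.title("deberta-v3-xsmall-quality training loss")
plt.savefig("loss_curve.png")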