debugging / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 16464,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09110787172011661,
"grad_norm": 0.5542775988578796,
"learning_rate": 4.848153547133139e-05,
"loss": 7.6369,
"step": 500
},
{
"epoch": 0.18221574344023322,
"grad_norm": 0.6718146800994873,
"learning_rate": 4.696307094266278e-05,
"loss": 6.0229,
"step": 1000
},
{
"epoch": 0.27332361516034986,
"grad_norm": 0.6730484366416931,
"learning_rate": 4.5444606413994175e-05,
"loss": 5.5062,
"step": 1500
},
{
"epoch": 0.36443148688046645,
"grad_norm": 0.7009643912315369,
"learning_rate": 4.3926141885325564e-05,
"loss": 5.2064,
"step": 2000
},
{
"epoch": 0.4555393586005831,
"grad_norm": 0.7892385721206665,
"learning_rate": 4.240767735665695e-05,
"loss": 4.9923,
"step": 2500
},
{
"epoch": 0.5466472303206997,
"grad_norm": 0.840638279914856,
"learning_rate": 4.088921282798834e-05,
"loss": 4.8437,
"step": 3000
},
{
"epoch": 0.6377551020408163,
"grad_norm": 0.8709360957145691,
"learning_rate": 3.937074829931973e-05,
"loss": 4.7268,
"step": 3500
},
{
"epoch": 0.7288629737609329,
"grad_norm": 0.9013066291809082,
"learning_rate": 3.785228377065112e-05,
"loss": 4.6324,
"step": 4000
},
{
"epoch": 0.8199708454810496,
"grad_norm": 0.9396289587020874,
"learning_rate": 3.6333819241982507e-05,
"loss": 4.556,
"step": 4500
},
{
"epoch": 0.9110787172011662,
"grad_norm": 1.0571647882461548,
"learning_rate": 3.4815354713313895e-05,
"loss": 4.4846,
"step": 5000
},
{
"epoch": 1.0,
"eval_accuracy": 0.23895378723078822,
"eval_loss": 4.422997951507568,
"eval_runtime": 213.5442,
"eval_samples_per_second": 89.185,
"eval_steps_per_second": 2.791,
"step": 5488
},
{
"epoch": 1.0021865889212829,
"grad_norm": 0.9928910732269287,
"learning_rate": 3.329689018464529e-05,
"loss": 4.4267,
"step": 5500
},
{
"epoch": 1.0932944606413995,
"grad_norm": 1.0094648599624634,
"learning_rate": 3.177842565597668e-05,
"loss": 4.3598,
"step": 6000
},
{
"epoch": 1.184402332361516,
"grad_norm": 1.1073040962219238,
"learning_rate": 3.0259961127308068e-05,
"loss": 4.3227,
"step": 6500
},
{
"epoch": 1.2755102040816326,
"grad_norm": 1.0472743511199951,
"learning_rate": 2.8741496598639456e-05,
"loss": 4.2846,
"step": 7000
},
{
"epoch": 1.3666180758017492,
"grad_norm": 1.0852700471878052,
"learning_rate": 2.7223032069970845e-05,
"loss": 4.2512,
"step": 7500
},
{
"epoch": 1.4577259475218658,
"grad_norm": 1.0805084705352783,
"learning_rate": 2.5704567541302237e-05,
"loss": 4.2266,
"step": 8000
},
{
"epoch": 1.5488338192419824,
"grad_norm": 1.1083316802978516,
"learning_rate": 2.4186103012633625e-05,
"loss": 4.1916,
"step": 8500
},
{
"epoch": 1.639941690962099,
"grad_norm": 1.1683896780014038,
"learning_rate": 2.2667638483965014e-05,
"loss": 4.1699,
"step": 9000
},
{
"epoch": 1.7310495626822158,
"grad_norm": 1.2223966121673584,
"learning_rate": 2.1149173955296406e-05,
"loss": 4.1517,
"step": 9500
},
{
"epoch": 1.8221574344023324,
"grad_norm": 1.1384276151657104,
"learning_rate": 1.9630709426627795e-05,
"loss": 4.1228,
"step": 10000
},
{
"epoch": 1.913265306122449,
"grad_norm": 1.2079554796218872,
"learning_rate": 1.8112244897959187e-05,
"loss": 4.1079,
"step": 10500
},
{
"epoch": 2.0,
"eval_accuracy": 0.27172573472254197,
"eval_loss": 4.11556339263916,
"eval_runtime": 213.5538,
"eval_samples_per_second": 89.181,
"eval_steps_per_second": 2.791,
"step": 10976
},
{
"epoch": 2.0043731778425657,
"grad_norm": 1.2157140970230103,
"learning_rate": 1.6593780369290575e-05,
"loss": 4.0902,
"step": 11000
},
{
"epoch": 2.0954810495626823,
"grad_norm": 1.2627109289169312,
"learning_rate": 1.5075315840621965e-05,
"loss": 4.0556,
"step": 11500
},
{
"epoch": 2.186588921282799,
"grad_norm": 1.2118178606033325,
"learning_rate": 1.3556851311953352e-05,
"loss": 4.0461,
"step": 12000
},
{
"epoch": 2.2776967930029155,
"grad_norm": 1.3033051490783691,
"learning_rate": 1.2038386783284743e-05,
"loss": 4.0326,
"step": 12500
},
{
"epoch": 2.368804664723032,
"grad_norm": 1.2471331357955933,
"learning_rate": 1.0519922254616133e-05,
"loss": 4.0282,
"step": 13000
},
{
"epoch": 2.4599125364431487,
"grad_norm": 1.3048267364501953,
"learning_rate": 9.001457725947522e-06,
"loss": 4.0197,
"step": 13500
},
{
"epoch": 2.5510204081632653,
"grad_norm": 1.1969304084777832,
"learning_rate": 7.482993197278912e-06,
"loss": 4.0076,
"step": 14000
},
{
"epoch": 2.642128279883382,
"grad_norm": 1.264858603477478,
"learning_rate": 5.964528668610301e-06,
"loss": 3.9948,
"step": 14500
},
{
"epoch": 2.7332361516034984,
"grad_norm": 1.244125485420227,
"learning_rate": 4.4460641399416915e-06,
"loss": 3.9953,
"step": 15000
},
{
"epoch": 2.824344023323615,
"grad_norm": 1.2413588762283325,
"learning_rate": 2.9275996112730806e-06,
"loss": 3.9837,
"step": 15500
},
{
"epoch": 2.9154518950437316,
"grad_norm": 1.233183741569519,
"learning_rate": 1.4091350826044704e-06,
"loss": 3.9841,
"step": 16000
},
{
"epoch": 3.0,
"eval_accuracy": 0.2825701437173418,
"eval_loss": 4.0273261070251465,
"eval_runtime": 215.4599,
"eval_samples_per_second": 88.392,
"eval_steps_per_second": 2.766,
"step": 16464
},
{
"epoch": 3.0,
"step": 16464,
"total_flos": 1.44589665695957e+17,
"train_loss": 4.469071887788319,
"train_runtime": 13360.3551,
"train_samples_per_second": 39.431,
"train_steps_per_second": 1.232
}
],
"logging_steps": 500,
"max_steps": 16464,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.44589665695957e+17,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}
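
The state above follows the layout transformers writes to trainer_state.json: log_history interleaves periodic training logs (loss, grad_norm, learning_rate every logging_steps=500) with one eval entry per epoch, and the final entry summarizes the whole run. The sketch below is a minimal, illustrative way to inspect it; the file path, the use of matplotlib, and the inferred initial learning rate of 5e-5 (the logged values match a linear decay to 0 over max_steps=16464) are assumptions, not part of the original repo.

import json

import matplotlib.pyplot as plt  # assumption: used only for a quick loss plot

with open("trainer_state.json") as f:  # assumed to sit next to this script
    state = json.load(f)

# log_history mixes periodic training logs (keyed by "loss") with per-epoch
# eval entries (keyed by "eval_loss") and a final run summary ("train_loss").
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# Sanity check: the logged learning rates match a linear decay from an
# assumed initial 5e-5 down to 0 at max_steps.
init_lr = 5e-5
for e in train_logs:
    expected = init_lr * (state["max_steps"] - e["step"]) / state["max_steps"]
    assert abs(e["learning_rate"] - expected) < 1e-9

# Plot the training curve and overlay the per-epoch eval losses.
plt.plot([e["step"] for e in train_logs], [e["loss"] for e in train_logs],
         label="train loss")
plt.scatter([e["step"] for e in eval_logs], [e["eval_loss"] for e in eval_logs],
            color="red", zorder=3, label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curve.png")

for e in eval_logs:
    print(f"epoch {e['epoch']:.0f}: eval_loss={e['eval_loss']:.3f}, "
          f"eval_accuracy={e['eval_accuracy']:.3f}")

Run against the file above, this should report eval_loss falling from roughly 4.423 to 4.027 and eval_accuracy rising from about 0.239 to 0.283 over the three epochs.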