{
"best_metric": 0.26666666666666666,
"best_model_checkpoint": "videomae-base-finetuned-caer-subset-5-classes-base-model\\checkpoint-139",
"epoch": 1.444,
"eval_steps": 500,
"global_step": 250,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04,
"grad_norm": 11.07775592803955,
"learning_rate": 2e-05,
"loss": 1.6854,
"step": 10
},
{
"epoch": 0.08,
"grad_norm": 16.159523010253906,
"learning_rate": 4e-05,
"loss": 1.6475,
"step": 20
},
{
"epoch": 0.12,
"grad_norm": 15.219178199768066,
"learning_rate": 4.888888888888889e-05,
"loss": 1.6296,
"step": 30
},
{
"epoch": 0.16,
"grad_norm": 12.341416358947754,
"learning_rate": 4.666666666666667e-05,
"loss": 1.7157,
"step": 40
},
{
"epoch": 0.2,
"grad_norm": 14.330547332763672,
"learning_rate": 4.4444444444444447e-05,
"loss": 1.652,
"step": 50
},
{
"epoch": 0.24,
"grad_norm": 12.243963241577148,
"learning_rate": 4.222222222222222e-05,
"loss": 1.6353,
"step": 60
},
{
"epoch": 0.28,
"grad_norm": 12.711626052856445,
"learning_rate": 4e-05,
"loss": 1.6923,
"step": 70
},
{
"epoch": 0.32,
"grad_norm": 16.082073211669922,
"learning_rate": 3.777777777777778e-05,
"loss": 1.5922,
"step": 80
},
{
"epoch": 0.36,
"grad_norm": 16.63982582092285,
"learning_rate": 3.555555555555556e-05,
"loss": 1.7151,
"step": 90
},
{
"epoch": 0.4,
"grad_norm": 8.993603706359863,
"learning_rate": 3.3333333333333335e-05,
"loss": 1.718,
"step": 100
},
{
"epoch": 0.44,
"grad_norm": 11.654336929321289,
"learning_rate": 3.111111111111111e-05,
"loss": 1.6497,
"step": 110
},
{
"epoch": 0.48,
"grad_norm": 10.800435066223145,
"learning_rate": 2.8888888888888888e-05,
"loss": 1.6542,
"step": 120
},
{
"epoch": 0.52,
"grad_norm": 9.841632843017578,
"learning_rate": 2.6666666666666667e-05,
"loss": 1.5763,
"step": 130
},
{
"epoch": 0.56,
"eval_accuracy": 0.26666666666666666,
"eval_loss": 1.641332983970642,
"eval_runtime": 9.3839,
"eval_samples_per_second": 3.197,
"eval_steps_per_second": 1.598,
"step": 139
},
{
"epoch": 1.0,
"grad_norm": 13.519031524658203,
"learning_rate": 2.4444444444444445e-05,
"loss": 1.5659,
"step": 140
},
{
"epoch": 1.04,
"grad_norm": 9.212859153747559,
"learning_rate": 2.2222222222222223e-05,
"loss": 1.6481,
"step": 150
},
{
"epoch": 1.08,
"grad_norm": 10.454906463623047,
"learning_rate": 2e-05,
"loss": 1.7381,
"step": 160
},
{
"epoch": 1.12,
"grad_norm": 11.107510566711426,
"learning_rate": 1.777777777777778e-05,
"loss": 1.6243,
"step": 170
},
{
"epoch": 1.16,
"grad_norm": 9.847452163696289,
"learning_rate": 1.5555555555555555e-05,
"loss": 1.5956,
"step": 180
},
{
"epoch": 1.2,
"grad_norm": 11.35783576965332,
"learning_rate": 1.3333333333333333e-05,
"loss": 1.6769,
"step": 190
},
{
"epoch": 1.24,
"grad_norm": 8.935040473937988,
"learning_rate": 1.1111111111111112e-05,
"loss": 1.5996,
"step": 200
},
{
"epoch": 1.28,
"grad_norm": 10.564764976501465,
"learning_rate": 8.88888888888889e-06,
"loss": 1.7105,
"step": 210
},
{
"epoch": 1.32,
"grad_norm": 9.259597778320312,
"learning_rate": 6.666666666666667e-06,
"loss": 1.6253,
"step": 220
},
{
"epoch": 1.36,
"grad_norm": 15.238162994384766,
"learning_rate": 4.444444444444445e-06,
"loss": 1.5752,
"step": 230
},
{
"epoch": 1.4,
"grad_norm": 9.198563575744629,
"learning_rate": 2.2222222222222225e-06,
"loss": 1.5624,
"step": 240
},
{
"epoch": 1.44,
"grad_norm": 9.007689476013184,
"learning_rate": 0.0,
"loss": 1.5825,
"step": 250
},
{
"epoch": 1.44,
"eval_accuracy": 0.2,
"eval_loss": 1.615497350692749,
"eval_runtime": 9.657,
"eval_samples_per_second": 3.107,
"eval_steps_per_second": 1.553,
"step": 250
},
{
"epoch": 1.44,
"step": 250,
"total_flos": 6.2304896360448e+17,
"train_loss": 1.6427093963623047,
"train_runtime": 248.4449,
"train_samples_per_second": 2.013,
"train_steps_per_second": 1.006
},
{
"epoch": 1.44,
"eval_accuracy": 0.2328767123287671,
"eval_loss": 1.6937663555145264,
"eval_runtime": 21.9862,
"eval_samples_per_second": 3.32,
"eval_steps_per_second": 1.683,
"step": 250
},
{
"epoch": 1.44,
"eval_accuracy": 0.2328767123287671,
"eval_loss": 1.693766474723816,
"eval_runtime": 22.6545,
"eval_samples_per_second": 3.222,
"eval_steps_per_second": 1.633,
"step": 250
}
],
"logging_steps": 10,
"max_steps": 250,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"total_flos": 6.2304896360448e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
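
The JSON above is the Trainer state written at the end of the run: "log_history" holds one entry per logging step (keys "loss", "learning_rate", "step") plus evaluation entries (keys "eval_loss", "eval_accuracy") and a final training summary. As a minimal sketch of how this data could be read back, the snippet below assumes the file is saved locally as "trainer_state.json" (the filename and path are assumptions, not part of the original state file) and only uses the standard-library json module.

import json

# Load the trainer state written by the Hugging Face Trainer
# (path is an assumption for this example).
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry a "loss" key; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best metric (eval_accuracy): {state['best_metric']}")
print(f"best checkpoint: {state['best_model_checkpoint']}")

# Per-step training loss and learning-rate schedule.
for e in train_logs:
    print(f"step {e['step']:>3}  lr {e['learning_rate']:.2e}  loss {e['loss']:.4f}")

# Evaluation results logged during and after training.
for e in eval_logs:
    print(f"step {e['step']:>3}  eval_loss {e['eval_loss']:.4f}  "
          f"eval_accuracy {e['eval_accuracy']:.4f}")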