{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 430,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.29,
"grad_norm": 0.3876499831676483,
"learning_rate": 0.00011627906976744187,
"loss": 1.3841,
"step": 25
},
{
"epoch": 0.58,
"grad_norm": 0.35026389360427856,
"learning_rate": 0.0001963824289405685,
"loss": 0.8968,
"step": 50
},
{
"epoch": 0.87,
"grad_norm": 0.317654550075531,
"learning_rate": 0.00018346253229974162,
"loss": 0.6679,
"step": 75
},
{
"epoch": 1.16,
"grad_norm": 0.4258534610271454,
"learning_rate": 0.00017054263565891473,
"loss": 0.5714,
"step": 100
},
{
"epoch": 1.45,
"grad_norm": 0.4060456156730652,
"learning_rate": 0.00015762273901808786,
"loss": 0.5099,
"step": 125
},
{
"epoch": 1.74,
"grad_norm": 0.37451767921447754,
"learning_rate": 0.000144702842377261,
"loss": 0.5017,
"step": 150
},
{
"epoch": 2.03,
"grad_norm": 0.36458975076675415,
"learning_rate": 0.0001317829457364341,
"loss": 0.4647,
"step": 175
},
{
"epoch": 2.33,
"grad_norm": 0.3686697781085968,
"learning_rate": 0.00011886304909560724,
"loss": 0.4359,
"step": 200
},
{
"epoch": 2.62,
"grad_norm": 0.37151384353637695,
"learning_rate": 0.00010594315245478037,
"loss": 0.4268,
"step": 225
},
{
"epoch": 2.91,
"grad_norm": 0.45622408390045166,
"learning_rate": 9.30232558139535e-05,
"loss": 0.4423,
"step": 250
},
{
"epoch": 3.2,
"grad_norm": 0.44779106974601746,
"learning_rate": 8.010335917312663e-05,
"loss": 0.3945,
"step": 275
},
{
"epoch": 3.49,
"grad_norm": 0.4048546552658081,
"learning_rate": 6.718346253229974e-05,
"loss": 0.3765,
"step": 300
},
{
"epoch": 3.78,
"grad_norm": 0.5087065696716309,
"learning_rate": 5.426356589147287e-05,
"loss": 0.3912,
"step": 325
},
{
"epoch": 4.07,
"grad_norm": 0.5312517285346985,
"learning_rate": 4.1343669250646e-05,
"loss": 0.3972,
"step": 350
},
{
"epoch": 4.36,
"grad_norm": 0.47890299558639526,
"learning_rate": 2.842377260981912e-05,
"loss": 0.3485,
"step": 375
},
{
"epoch": 4.65,
"grad_norm": 0.5938854217529297,
"learning_rate": 1.5503875968992248e-05,
"loss": 0.3707,
"step": 400
},
{
"epoch": 4.94,
"grad_norm": 0.5553755760192871,
"learning_rate": 2.583979328165375e-06,
"loss": 0.3336,
"step": 425
}
],
"logging_steps": 25,
"max_steps": 430,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 6.991294110892032e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}