byt5-base-es_maq / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "global_step": 3980,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.26,
      "learning_rate": 4.3718592964824124e-05,
      "loss": 2.2285,
      "step": 500
    },
    {
      "epoch": 2.51,
      "learning_rate": 3.7437185929648245e-05,
      "loss": 0.9853,
      "step": 1000
    },
    {
      "epoch": 3.77,
      "learning_rate": 3.1155778894472366e-05,
      "loss": 0.854,
      "step": 1500
    },
    {
      "epoch": 5.03,
      "learning_rate": 2.4874371859296484e-05,
      "loss": 0.8085,
      "step": 2000
    },
    {
      "epoch": 6.28,
      "learning_rate": 1.8592964824120602e-05,
      "loss": 0.768,
      "step": 2500
    },
    {
      "epoch": 7.54,
      "learning_rate": 1.2311557788944725e-05,
      "loss": 0.7459,
      "step": 3000
    },
    {
      "epoch": 8.79,
      "learning_rate": 6.030150753768844e-06,
      "loss": 0.7298,
      "step": 3500
    },
    {
      "epoch": 10.0,
      "step": 3980,
      "total_flos": 2215243269734400.0,
      "train_loss": 0.9812813993674426,
      "train_runtime": 514.4973,
      "train_samples_per_second": 123.499,
      "train_steps_per_second": 7.736
    }
  ],
  "max_steps": 3980,
  "num_train_epochs": 10,
  "total_flos": 2215243269734400.0,
  "trial_name": null,
  "trial_params": null
}
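
The `log_history` array above holds the intermediate logging entries (each with `step`, `epoch`, `learning_rate`, `loss`) followed by a final run-summary entry (`train_loss`, `train_runtime`, throughput). Below is a minimal sketch of how one might inspect this file with plain `json`; the local filename `trainer_state.json` is an assumption based on the file shown here, and the script is not part of the checkpoint itself.

```python
import json

# Assumption: the file above has been downloaded locally as trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

# Intermediate entries carry "loss" and "learning_rate"; the last entry is the
# run summary with aggregate statistics instead.
for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']:>5}  epoch {entry['epoch']:>5}  "
              f"lr {entry['learning_rate']:.3e}  loss {entry['loss']:.4f}")
    else:
        print(f"summary: train_loss={entry['train_loss']:.4f}, "
              f"runtime={entry['train_runtime']:.1f}s, "
              f"samples/s={entry['train_samples_per_second']:.1f}")
```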