deberta-large_lemon_10k_3_p3 / trainer_state.json
{
  "best_metric": 0.9412672324153892,
  "best_model_checkpoint": "model_saves/deberta-large_lemon_10k_3_p3/checkpoint-268",
  "epoch": 4.0,
  "global_step": 1072,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9412672324153892,
      "eval_loss": 0.42676153779029846,
      "eval_runtime": 4.6735,
      "eval_samples_per_second": 937.632,
      "eval_steps_per_second": 7.489,
      "step": 268
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.941121881953063,
      "eval_loss": 0.4438949525356293,
      "eval_runtime": 4.677,
      "eval_samples_per_second": 936.927,
      "eval_steps_per_second": 7.483,
      "step": 536
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9400820671071903,
      "eval_loss": 0.49137458205223083,
      "eval_runtime": 4.6845,
      "eval_samples_per_second": 935.428,
      "eval_steps_per_second": 7.471,
      "step": 804
    },
    {
      "epoch": 3.73,
      "learning_rate": 1e-05,
      "loss": 0.2514,
      "step": 1000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9398416798041123,
      "eval_loss": 0.5405893921852112,
      "eval_runtime": 4.6762,
      "eval_samples_per_second": 937.079,
      "eval_steps_per_second": 7.485,
      "step": 1072
    },
    {
      "epoch": 4.0,
      "step": 1072,
      "total_flos": 1.6308703116394496e+16,
      "train_loss": 0.24491678689842794,
      "train_runtime": 475.934,
      "train_samples_per_second": 1081.158,
      "train_steps_per_second": 8.447
    }
  ],
  "max_steps": 4020,
  "num_train_epochs": 15,
  "total_flos": 1.6308703116394496e+16,
  "trial_name": null,
  "trial_params": null
}
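
This is the `trainer_state.json` that the Hugging Face `transformers` Trainer writes alongside its checkpoints: `best_metric` and `best_model_checkpoint` record that the highest `eval_accuracy` (0.9413) was reached at epoch 1 (step 268), with the metric drifting slightly lower in later epochs. A minimal sketch of how the file can be inspected with the standard library, assuming a local copy saved as `trainer_state.json` (the path is a placeholder, not part of the repository):

```python
import json

# Load the trainer state; "trainer_state.json" is a placeholder for the local copy of this file.
with open("trainer_state.json") as f:
    state = json.load(f)

print("best metric (eval_accuracy):", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

# log_history mixes evaluation entries and training-loss entries;
# keep only the ones that carry eval metrics.
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(
            f"epoch {entry['epoch']:>4}  step {entry['step']:>4}  "
            f"eval_loss {entry['eval_loss']:.4f}  eval_accuracy {entry['eval_accuracy']:.4f}"
        )
```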