deberta-large_lemon_5k_6_p3 / trainer_state.json
{
"best_metric": 0.9421561063965385,
"best_model_checkpoint": "model_saves/deberta-large_lemon_5k_6_p3/checkpoint-268",
"epoch": 4.0,
"global_step": 1072,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.0,
"eval_accuracy": 0.9421561063965385,
"eval_loss": 0.4133068919181824,
"eval_runtime": 4.659,
"eval_samples_per_second": 940.549,
"eval_steps_per_second": 7.512,
"step": 268
},
{
"epoch": 2.0,
"eval_accuracy": 0.9417088742047652,
"eval_loss": 0.4411185383796692,
"eval_runtime": 4.6385,
"eval_samples_per_second": 944.711,
"eval_steps_per_second": 7.546,
"step": 536
},
{
"epoch": 3.0,
"eval_accuracy": 0.9407920482116303,
"eval_loss": 0.47731733322143555,
"eval_runtime": 4.6411,
"eval_samples_per_second": 944.18,
"eval_steps_per_second": 7.541,
"step": 804
},
{
"epoch": 3.73,
"learning_rate": 1e-05,
"loss": 0.2535,
"step": 1000
},
{
"epoch": 4.0,
"eval_accuracy": 0.9397745949753463,
"eval_loss": 0.5340213179588318,
"eval_runtime": 4.6436,
"eval_samples_per_second": 943.673,
"eval_steps_per_second": 7.537,
"step": 1072
},
{
"epoch": 4.0,
"step": 1072,
"total_flos": 1.6074262796304384e+16,
"train_loss": 0.246939716944054,
"train_runtime": 456.1068,
"train_samples_per_second": 1128.157,
"train_steps_per_second": 8.814
}
],
"max_steps": 4020,
"num_train_epochs": 15,
"total_flos": 1.6074262796304384e+16,
"trial_name": null,
"trial_params": null
}
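
A minimal sketch of how one might inspect this trainer state programmatically, assuming the JSON above is saved at the path used below (the path is an assumption; adjust it to your local copy). It uses only the Python standard library and the field names visible in the file: best_metric, best_model_checkpoint, and log_history.

import json

# Load the trainer state shown above (path is an assumption; adjust as needed).
with open("model_saves/deberta-large_lemon_5k_6_p3/trainer_state.json") as f:
    state = json.load(f)

print(f"Best eval_accuracy: {state['best_metric']:.4f}")
print(f"Best checkpoint:    {state['best_model_checkpoint']}")

# Walk log_history and report the evaluation entries per epoch.
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f"epoch {entry['epoch']:.1f}  step {entry['step']:>4}  "
              f"eval_accuracy {entry['eval_accuracy']:.4f}  "
              f"eval_loss {entry['eval_loss']:.4f}")

Running this against the state above would show eval_accuracy peaking at epoch 1 (checkpoint-268) and declining through epoch 4, which is why best_model_checkpoint points at checkpoint-268.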