{
"best_metric": 0.7070627062706271,
"best_model_checkpoint": "food-image-classification/checkpoint-7000",
"epoch": 8.447729672650475,
"eval_steps": 1000,
"global_step": 8000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.06,
"learning_rate": 1.0559662090813095e-06,
"loss": 4.6112,
"step": 1000
},
{
"epoch": 1.06,
"eval_accuracy": 0.034851485148514855,
"eval_loss": 4.575930118560791,
"eval_runtime": 157.053,
"eval_samples_per_second": 96.464,
"eval_steps_per_second": 6.03,
"step": 1000
},
{
"epoch": 2.11,
"learning_rate": 2.111932418162619e-06,
"loss": 4.4899,
"step": 2000
},
{
"epoch": 2.11,
"eval_accuracy": 0.3103630363036304,
"eval_loss": 4.3788862228393555,
"eval_runtime": 154.3948,
"eval_samples_per_second": 98.125,
"eval_steps_per_second": 6.134,
"step": 2000
},
{
"epoch": 3.17,
"learning_rate": 3.167898627243928e-06,
"loss": 4.2111,
"step": 3000
},
{
"epoch": 3.17,
"eval_accuracy": 0.5498349834983498,
"eval_loss": 4.030922889709473,
"eval_runtime": 155.2257,
"eval_samples_per_second": 97.6,
"eval_steps_per_second": 6.101,
"step": 3000
},
{
"epoch": 4.22,
"learning_rate": 4.223864836325238e-06,
"loss": 3.8257,
"step": 4000
},
{
"epoch": 4.22,
"eval_accuracy": 0.6111551155115511,
"eval_loss": 3.634243965148926,
"eval_runtime": 156.2293,
"eval_samples_per_second": 96.973,
"eval_steps_per_second": 6.062,
"step": 4000
},
{
"epoch": 5.28,
"learning_rate": 5.279831045406547e-06,
"loss": 3.4182,
"step": 5000
},
{
"epoch": 5.28,
"eval_accuracy": 0.6514851485148515,
"eval_loss": 3.225186586380005,
"eval_runtime": 154.6911,
"eval_samples_per_second": 97.937,
"eval_steps_per_second": 6.122,
"step": 5000
},
{
"epoch": 6.34,
"learning_rate": 6.335797254487856e-06,
"loss": 2.9962,
"step": 6000
},
{
"epoch": 6.34,
"eval_accuracy": 0.687062706270627,
"eval_loss": 2.805878162384033,
"eval_runtime": 154.6826,
"eval_samples_per_second": 97.943,
"eval_steps_per_second": 6.122,
"step": 6000
},
{
"epoch": 7.39,
"learning_rate": 7.3917634635691666e-06,
"loss": 2.5605,
"step": 7000
},
{
"epoch": 7.39,
"eval_accuracy": 0.7070627062706271,
"eval_loss": 2.382246494293213,
"eval_runtime": 155.5967,
"eval_samples_per_second": 97.367,
"eval_steps_per_second": 6.086,
"step": 7000
},
{
"epoch": 8.45,
"learning_rate": 8.447729672650476e-06,
"loss": 2.1397,
"step": 8000
},
{
"epoch": 8.45,
"eval_accuracy": 0.7335973597359736,
"eval_loss": 1.975380539894104,
"eval_runtime": 156.8793,
"eval_samples_per_second": 96.571,
"eval_steps_per_second": 6.036,
"step": 8000
}
],
"logging_steps": 1000,
"max_steps": 473500,
"num_input_tokens_seen": 0,
"num_train_epochs": 500,
"save_steps": 1000,
"total_flos": 3.970613944525455e+19,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}