{
"best_metric": 0.3103630363036304,
"best_model_checkpoint": "food-image-classification/checkpoint-2000",
"epoch": 3.167898627243928,
"eval_steps": 1000,
"global_step": 3000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.06,
"learning_rate": 1.0559662090813095e-06,
"loss": 4.6112,
"step": 1000
},
{
"epoch": 1.06,
"eval_accuracy": 0.034851485148514855,
"eval_loss": 4.575930118560791,
"eval_runtime": 157.053,
"eval_samples_per_second": 96.464,
"eval_steps_per_second": 6.03,
"step": 1000
},
{
"epoch": 2.11,
"learning_rate": 2.111932418162619e-06,
"loss": 4.4899,
"step": 2000
},
{
"epoch": 2.11,
"eval_accuracy": 0.3103630363036304,
"eval_loss": 4.3788862228393555,
"eval_runtime": 154.3948,
"eval_samples_per_second": 98.125,
"eval_steps_per_second": 6.134,
"step": 2000
},
{
"epoch": 3.17,
"learning_rate": 3.167898627243928e-06,
"loss": 4.2111,
"step": 3000
},
{
"epoch": 3.17,
"eval_accuracy": 0.5498349834983498,
"eval_loss": 4.030922889709473,
"eval_runtime": 155.2257,
"eval_samples_per_second": 97.6,
"eval_steps_per_second": 6.101,
"step": 3000
}
],
"logging_steps": 1000,
"max_steps": 473500,
"num_input_tokens_seen": 0,
"num_train_epochs": 500,
"save_steps": 1000,
"total_flos": 1.4889802291970458e+19,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}