{
  "best_metric": 0.872196261682243,
  "best_model_checkpoint": "vit-base-patch16-224-in21k-finetuned-cassava/checkpoint-399",
  "epoch": 2.994392523364486,
  "global_step": 399,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 1.25e-05,
      "loss": 1.6059,
      "step": 10
    },
    {
      "epoch": 0.15,
      "learning_rate": 2.5e-05,
      "loss": 1.381,
      "step": 20
    },
    {
      "epoch": 0.22,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 1.1472,
      "step": 30
    },
    {
      "epoch": 0.3,
      "learning_rate": 5e-05,
      "loss": 1.0363,
      "step": 40
    },
    {
      "epoch": 0.37,
      "learning_rate": 4.860724233983287e-05,
      "loss": 0.9067,
      "step": 50
    },
    {
      "epoch": 0.45,
      "learning_rate": 4.721448467966574e-05,
      "loss": 0.7733,
      "step": 60
    },
    {
      "epoch": 0.52,
      "learning_rate": 4.582172701949861e-05,
      "loss": 0.7452,
      "step": 70
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.442896935933148e-05,
      "loss": 0.6585,
      "step": 80
    },
    {
      "epoch": 0.67,
      "learning_rate": 4.303621169916435e-05,
      "loss": 0.6018,
      "step": 90
    },
    {
      "epoch": 0.75,
      "learning_rate": 4.164345403899722e-05,
      "loss": 0.5398,
      "step": 100
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.0250696378830085e-05,
      "loss": 0.5506,
      "step": 110
    },
    {
      "epoch": 0.9,
      "learning_rate": 3.885793871866296e-05,
      "loss": 0.5615,
      "step": 120
    },
    {
      "epoch": 0.97,
      "learning_rate": 3.746518105849583e-05,
      "loss": 0.5242,
      "step": 130
    },
    {
      "epoch": 0.99,
      "eval_accuracy": 0.8502336448598131,
      "eval_loss": 0.4905190169811249,
      "eval_runtime": 89.5244,
      "eval_samples_per_second": 47.808,
      "eval_steps_per_second": 1.497,
      "step": 133
    },
    {
      "epoch": 1.05,
      "learning_rate": 3.607242339832869e-05,
      "loss": 0.5078,
      "step": 140
    },
    {
      "epoch": 1.13,
      "learning_rate": 3.4679665738161556e-05,
      "loss": 0.5059,
      "step": 150
    },
    {
      "epoch": 1.2,
      "learning_rate": 3.328690807799443e-05,
      "loss": 0.4914,
      "step": 160
    },
    {
      "epoch": 1.28,
      "learning_rate": 3.18941504178273e-05,
      "loss": 0.4694,
      "step": 170
    },
    {
      "epoch": 1.35,
      "learning_rate": 3.050139275766017e-05,
      "loss": 0.4538,
      "step": 180
    },
    {
      "epoch": 1.43,
      "learning_rate": 2.9108635097493035e-05,
      "loss": 0.4467,
      "step": 190
    },
    {
      "epoch": 1.5,
      "learning_rate": 2.7715877437325903e-05,
      "loss": 0.4958,
      "step": 200
    },
    {
      "epoch": 1.58,
      "learning_rate": 2.6323119777158778e-05,
      "loss": 0.4647,
      "step": 210
    },
    {
      "epoch": 1.65,
      "learning_rate": 2.4930362116991646e-05,
      "loss": 0.413,
      "step": 220
    },
    {
      "epoch": 1.73,
      "learning_rate": 2.3537604456824514e-05,
      "loss": 0.4005,
      "step": 230
    },
    {
      "epoch": 1.8,
      "learning_rate": 2.214484679665738e-05,
      "loss": 0.4374,
      "step": 240
    },
    {
      "epoch": 1.87,
      "learning_rate": 2.0752089136490253e-05,
      "loss": 0.4366,
      "step": 250
    },
    {
      "epoch": 1.95,
      "learning_rate": 1.935933147632312e-05,
      "loss": 0.4405,
      "step": 260
    },
    {
      "epoch": 1.99,
      "eval_accuracy": 0.8696261682242991,
      "eval_loss": 0.41539567708969116,
      "eval_runtime": 92.6526,
      "eval_samples_per_second": 46.194,
      "eval_steps_per_second": 1.446,
      "step": 266
    },
    {
      "epoch": 2.03,
      "learning_rate": 1.7966573816155992e-05,
      "loss": 0.4296,
      "step": 270
    },
    {
      "epoch": 2.1,
      "learning_rate": 1.6573816155988857e-05,
      "loss": 0.4433,
      "step": 280
    },
    {
      "epoch": 2.18,
      "learning_rate": 1.518105849582173e-05,
      "loss": 0.4323,
      "step": 290
    },
    {
      "epoch": 2.25,
      "learning_rate": 1.3788300835654596e-05,
      "loss": 0.4169,
      "step": 300
    },
    {
      "epoch": 2.33,
      "learning_rate": 1.2395543175487466e-05,
      "loss": 0.395,
      "step": 310
    },
    {
      "epoch": 2.4,
      "learning_rate": 1.1002785515320335e-05,
      "loss": 0.4045,
      "step": 320
    },
    {
      "epoch": 2.48,
      "learning_rate": 9.610027855153205e-06,
      "loss": 0.3718,
      "step": 330
    },
    {
      "epoch": 2.55,
      "learning_rate": 8.217270194986073e-06,
      "loss": 0.4601,
      "step": 340
    },
    {
      "epoch": 2.63,
      "learning_rate": 6.8245125348189415e-06,
      "loss": 0.3969,
      "step": 350
    },
    {
      "epoch": 2.7,
      "learning_rate": 5.43175487465181e-06,
      "loss": 0.397,
      "step": 360
    },
    {
      "epoch": 2.78,
      "learning_rate": 4.03899721448468e-06,
      "loss": 0.3909,
      "step": 370
    },
    {
      "epoch": 2.85,
      "learning_rate": 2.6462395543175487e-06,
      "loss": 0.4032,
      "step": 380
    },
    {
      "epoch": 2.93,
      "learning_rate": 1.253481894150418e-06,
      "loss": 0.3938,
      "step": 390
    },
    {
      "epoch": 2.99,
      "eval_accuracy": 0.872196261682243,
      "eval_loss": 0.4010835587978363,
      "eval_runtime": 89.5202,
      "eval_samples_per_second": 47.81,
      "eval_steps_per_second": 1.497,
      "step": 399
    },
    {
      "epoch": 2.99,
      "step": 399,
      "total_flos": 3.9721912052731453e+18,
      "train_loss": 0.5681018877148927,
      "train_runtime": 3778.1283,
      "train_samples_per_second": 13.592,
      "train_steps_per_second": 0.106
    }
  ],
  "max_steps": 399,
  "num_train_epochs": 3,
  "total_flos": 3.9721912052731453e+18,
  "trial_name": null,
  "trial_params": null
}