{
  "best_metric": 0.996,
  "best_model_checkpoint": "AI_ImageClassification_SDXL/checkpoint-586",
  "epoch": 0.9995735607675906,
  "eval_steps": 500,
  "global_step": 586,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 2.840909090909091e-06,
      "loss": 1.2947,
      "step": 10
    },
    {
      "epoch": 0.03,
      "learning_rate": 5.681818181818182e-06,
      "loss": 0.689,
      "step": 20
    },
    {
      "epoch": 0.05,
      "learning_rate": 8.522727272727273e-06,
      "loss": 0.2576,
      "step": 30
    },
    {
      "epoch": 0.07,
      "learning_rate": 1.1363636363636365e-05,
      "loss": 0.23,
      "step": 40
    },
    {
      "epoch": 0.09,
      "learning_rate": 1.4204545454545456e-05,
      "loss": 0.1137,
      "step": 50
    },
    {
      "epoch": 0.1,
      "learning_rate": 1.7045454545454546e-05,
      "loss": 0.0554,
      "step": 60
    },
    {
      "epoch": 0.12,
      "learning_rate": 1.9886363636363638e-05,
      "loss": 0.059,
      "step": 70
    },
    {
      "epoch": 0.14,
      "learning_rate": 2.272727272727273e-05,
      "loss": 0.0583,
      "step": 80
    },
    {
      "epoch": 0.15,
      "learning_rate": 2.5568181818181817e-05,
      "loss": 0.0349,
      "step": 90
    },
    {
      "epoch": 0.17,
      "learning_rate": 2.8409090909090912e-05,
      "loss": 0.0864,
      "step": 100
    },
    {
      "epoch": 0.19,
      "learning_rate": 3.125e-05,
      "loss": 0.0145,
      "step": 110
    },
    {
      "epoch": 0.2,
      "learning_rate": 3.409090909090909e-05,
      "loss": 0.0358,
      "step": 120
    },
    {
      "epoch": 0.22,
      "learning_rate": 3.6931818181818184e-05,
      "loss": 0.0473,
      "step": 130
    },
    {
      "epoch": 0.24,
      "learning_rate": 3.9772727272727275e-05,
      "loss": 0.0253,
      "step": 140
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.261363636363637e-05,
      "loss": 0.0574,
      "step": 150
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.545454545454546e-05,
      "loss": 0.0403,
      "step": 160
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.829545454545455e-05,
      "loss": 0.0481,
      "step": 170
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.9873577749683945e-05,
      "loss": 0.0246,
      "step": 180
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.955752212389381e-05,
      "loss": 0.0094,
      "step": 190
    },
    {
      "epoch": 0.34,
      "learning_rate": 4.924146649810367e-05,
      "loss": 0.0413,
      "step": 200
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.892541087231353e-05,
      "loss": 0.0339,
      "step": 210
    },
    {
      "epoch": 0.38,
      "learning_rate": 4.860935524652339e-05,
      "loss": 0.0523,
      "step": 220
    },
    {
      "epoch": 0.39,
      "learning_rate": 4.829329962073325e-05,
      "loss": 0.0104,
      "step": 230
    },
    {
      "epoch": 0.41,
      "learning_rate": 4.797724399494311e-05,
      "loss": 0.0309,
      "step": 240
    },
    {
      "epoch": 0.43,
      "learning_rate": 4.7661188369152975e-05,
      "loss": 0.0248,
      "step": 250
    },
    {
      "epoch": 0.44,
      "learning_rate": 4.734513274336283e-05,
      "loss": 0.0306,
      "step": 260
    },
    {
      "epoch": 0.46,
      "learning_rate": 4.70290771175727e-05,
      "loss": 0.0162,
      "step": 270
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.6713021491782554e-05,
      "loss": 0.016,
      "step": 280
    },
    {
      "epoch": 0.49,
      "learning_rate": 4.639696586599241e-05,
      "loss": 0.0103,
      "step": 290
    },
    {
      "epoch": 0.51,
      "learning_rate": 4.608091024020228e-05,
      "loss": 0.0196,
      "step": 300
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.576485461441214e-05,
      "loss": 0.0264,
      "step": 310
    },
    {
      "epoch": 0.55,
      "learning_rate": 4.5448798988622e-05,
      "loss": 0.0344,
      "step": 320
    },
    {
      "epoch": 0.56,
      "learning_rate": 4.5132743362831855e-05,
      "loss": 0.036,
      "step": 330
    },
    {
      "epoch": 0.58,
      "learning_rate": 4.4816687737041726e-05,
      "loss": 0.0082,
      "step": 340
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.4500632111251584e-05,
      "loss": 0.0149,
      "step": 350
    },
    {
      "epoch": 0.61,
      "learning_rate": 4.418457648546144e-05,
      "loss": 0.0082,
      "step": 360
    },
    {
      "epoch": 0.63,
      "learning_rate": 4.3868520859671306e-05,
      "loss": 0.0451,
      "step": 370
    },
    {
      "epoch": 0.65,
      "learning_rate": 4.355246523388117e-05,
      "loss": 0.0055,
      "step": 380
    },
    {
      "epoch": 0.67,
      "learning_rate": 4.323640960809103e-05,
      "loss": 0.0087,
      "step": 390
    },
    {
      "epoch": 0.68,
      "learning_rate": 4.2920353982300885e-05,
      "loss": 0.0052,
      "step": 400
    },
    {
      "epoch": 0.7,
      "learning_rate": 4.260429835651075e-05,
      "loss": 0.0033,
      "step": 410
    },
    {
      "epoch": 0.72,
      "learning_rate": 4.2288242730720607e-05,
      "loss": 0.0148,
      "step": 420
    },
    {
      "epoch": 0.73,
      "learning_rate": 4.197218710493047e-05,
      "loss": 0.0134,
      "step": 430
    },
    {
      "epoch": 0.75,
      "learning_rate": 4.165613147914033e-05,
      "loss": 0.0125,
      "step": 440
    },
    {
      "epoch": 0.77,
      "learning_rate": 4.134007585335019e-05,
      "loss": 0.0531,
      "step": 450
    },
    {
      "epoch": 0.78,
      "learning_rate": 4.102402022756005e-05,
      "loss": 0.0059,
      "step": 460
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.0707964601769914e-05,
      "loss": 0.0059,
      "step": 470
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.039190897597978e-05,
      "loss": 0.0194,
      "step": 480
    },
    {
      "epoch": 0.84,
      "learning_rate": 4.0075853350189636e-05,
      "loss": 0.0111,
      "step": 490
    },
    {
      "epoch": 0.85,
      "learning_rate": 3.9759797724399494e-05,
      "loss": 0.0321,
      "step": 500
    },
    {
      "epoch": 0.87,
      "learning_rate": 3.944374209860936e-05,
      "loss": 0.0261,
      "step": 510
    },
    {
      "epoch": 0.89,
      "learning_rate": 3.912768647281922e-05,
      "loss": 0.0196,
      "step": 520
    },
    {
      "epoch": 0.9,
      "learning_rate": 3.881163084702908e-05,
      "loss": 0.0038,
      "step": 530
    },
    {
      "epoch": 0.92,
      "learning_rate": 3.849557522123894e-05,
      "loss": 0.0216,
      "step": 540
    },
    {
      "epoch": 0.94,
      "learning_rate": 3.81795195954488e-05,
      "loss": 0.0047,
      "step": 550
    },
    {
      "epoch": 0.96,
      "learning_rate": 3.7863463969658666e-05,
      "loss": 0.0095,
      "step": 560
    },
    {
      "epoch": 0.97,
      "learning_rate": 3.754740834386852e-05,
      "loss": 0.0036,
      "step": 570
    },
    {
      "epoch": 0.99,
      "learning_rate": 3.723135271807838e-05,
      "loss": 0.0025,
      "step": 580
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.996,
      "eval_loss": 0.015431854873895645,
      "eval_runtime": 44.7221,
      "eval_samples_per_second": 44.721,
      "eval_steps_per_second": 2.795,
      "step": 586
    }
  ],
  "logging_steps": 10,
  "max_steps": 1758,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 2.9390551738299924e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}