{
  "best_metric": 3.302131175994873,
  "best_model_checkpoint": "car_brands_image_detection/checkpoint-13060",
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 13060,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.15,
      "grad_norm": 7.455317497253418,
      "learning_rate": 1.9308224442736356e-06,
      "loss": 3.4846,
      "step": 500
    },
    {
      "epoch": 0.31,
      "grad_norm": 4.010385036468506,
      "learning_rate": 1.8539584934665642e-06,
      "loss": 3.4587,
      "step": 1000
    },
    {
      "epoch": 0.46,
      "grad_norm": 4.901058673858643,
      "learning_rate": 1.7770945426594927e-06,
      "loss": 3.45,
      "step": 1500
    },
    {
      "epoch": 0.61,
      "grad_norm": 5.844404220581055,
      "learning_rate": 1.700230591852421e-06,
      "loss": 3.4339,
      "step": 2000
    },
    {
      "epoch": 0.77,
      "grad_norm": 5.998371124267578,
      "learning_rate": 1.6233666410453497e-06,
      "loss": 3.414,
      "step": 2500
    },
    {
      "epoch": 0.92,
      "grad_norm": 5.008224010467529,
      "learning_rate": 1.5465026902382784e-06,
      "loss": 3.3956,
      "step": 3000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.541320237223395,
      "eval_loss": 3.4782135486602783,
      "eval_runtime": 711.0751,
      "eval_samples_per_second": 97.935,
      "eval_steps_per_second": 12.242,
      "step": 3265
    },
    {
      "epoch": 1.07,
      "grad_norm": 4.298598766326904,
      "learning_rate": 1.4696387394312066e-06,
      "loss": 3.3699,
      "step": 3500
    },
    {
      "epoch": 1.23,
      "grad_norm": 4.588273525238037,
      "learning_rate": 1.3927747886241352e-06,
      "loss": 3.3439,
      "step": 4000
    },
    {
      "epoch": 1.38,
      "grad_norm": 5.975885391235352,
      "learning_rate": 1.3159108378170639e-06,
      "loss": 3.3162,
      "step": 4500
    },
    {
      "epoch": 1.53,
      "grad_norm": 4.5131096839904785,
      "learning_rate": 1.239046887009992e-06,
      "loss": 3.306,
      "step": 5000
    },
    {
      "epoch": 1.68,
      "grad_norm": 4.919468879699707,
      "learning_rate": 1.1621829362029207e-06,
      "loss": 3.2967,
      "step": 5500
    },
    {
      "epoch": 1.84,
      "grad_norm": 5.679530143737793,
      "learning_rate": 1.0853189853958494e-06,
      "loss": 3.273,
      "step": 6000
    },
    {
      "epoch": 1.99,
      "grad_norm": 5.218754768371582,
      "learning_rate": 1.0084550345887778e-06,
      "loss": 3.2877,
      "step": 6500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.552879851807177,
      "eval_loss": 3.3825206756591797,
      "eval_runtime": 1330.9633,
      "eval_samples_per_second": 52.322,
      "eval_steps_per_second": 6.54,
      "step": 6530
    },
    {
      "epoch": 2.14,
      "grad_norm": 5.154468059539795,
      "learning_rate": 9.315910837817063e-07,
      "loss": 3.222,
      "step": 7000
    },
    {
      "epoch": 2.3,
      "grad_norm": 4.286391735076904,
      "learning_rate": 8.547271329746349e-07,
      "loss": 3.2313,
      "step": 7500
    },
    {
      "epoch": 2.45,
      "grad_norm": 5.875144004821777,
      "learning_rate": 7.778631821675634e-07,
      "loss": 3.2207,
      "step": 8000
    },
    {
      "epoch": 2.6,
      "grad_norm": 5.433847904205322,
      "learning_rate": 7.009992313604919e-07,
      "loss": 3.2325,
      "step": 8500
    },
    {
      "epoch": 2.76,
      "grad_norm": 4.9686102867126465,
      "learning_rate": 6.241352805534205e-07,
      "loss": 3.2162,
      "step": 9000
    },
    {
      "epoch": 2.91,
      "grad_norm": 4.758098125457764,
      "learning_rate": 5.472713297463489e-07,
      "loss": 3.2061,
      "step": 9500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.5644394663909591,
      "eval_loss": 3.3224754333496094,
      "eval_runtime": 709.8901,
      "eval_samples_per_second": 98.098,
      "eval_steps_per_second": 12.262,
      "step": 9795
    },
    {
      "epoch": 3.06,
      "grad_norm": 5.5253729820251465,
      "learning_rate": 4.7040737893927744e-07,
      "loss": 3.1879,
      "step": 10000
    },
    {
      "epoch": 3.22,
      "grad_norm": 5.2010908126831055,
      "learning_rate": 3.9354342813220597e-07,
      "loss": 3.1831,
      "step": 10500
    },
    {
      "epoch": 3.37,
      "grad_norm": 5.191558837890625,
      "learning_rate": 3.166794773251345e-07,
      "loss": 3.1715,
      "step": 11000
    },
    {
      "epoch": 3.52,
      "grad_norm": 5.484861850738525,
      "learning_rate": 2.3981552651806304e-07,
      "loss": 3.1752,
      "step": 11500
    },
    {
      "epoch": 3.68,
      "grad_norm": 4.993890762329102,
      "learning_rate": 1.6295157571099155e-07,
      "loss": 3.1738,
      "step": 12000
    },
    {
      "epoch": 3.83,
      "grad_norm": 4.897230625152588,
      "learning_rate": 8.608762490392006e-08,
      "loss": 3.171,
      "step": 12500
    },
    {
      "epoch": 3.98,
      "grad_norm": 5.773993968963623,
      "learning_rate": 9.223674096848577e-09,
      "loss": 3.1716,
      "step": 13000
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.5659328824365657,
      "eval_loss": 3.302131175994873,
      "eval_runtime": 705.9576,
      "eval_samples_per_second": 98.645,
      "eval_steps_per_second": 12.331,
      "step": 13060
    }
  ],
  "logging_steps": 500,
  "max_steps": 13060,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "total_flos": 3.247178702686662e+19,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}