{
  "best_metric": 1.0954023599624634,
  "best_model_checkpoint": "./output/clip-finetuned-csu-p14-336-e4l58-l/checkpoint-4000",
  "epoch": 0.7367839381101492,
  "eval_steps": 500,
  "global_step": 4000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09209799226376865,
      "grad_norm": 0.0014697719598188996,
      "learning_rate": 4.884877509670289e-08,
      "loss": 0.4588,
      "step": 500
    },
    {
      "epoch": 0.09209799226376865,
      "eval_loss": 1.4194883108139038,
      "eval_runtime": 78.7703,
      "eval_samples_per_second": 15.31,
      "eval_steps_per_second": 1.917,
      "step": 500
    },
    {
      "epoch": 0.1841959845275373,
      "grad_norm": 301.76043701171875,
      "learning_rate": 4.769755019340578e-08,
      "loss": 0.4255,
      "step": 1000
    },
    {
      "epoch": 0.1841959845275373,
      "eval_loss": 1.3416553735733032,
      "eval_runtime": 77.0051,
      "eval_samples_per_second": 15.661,
      "eval_steps_per_second": 1.961,
      "step": 1000
    },
    {
      "epoch": 0.27629397679130596,
      "grad_norm": 35.28853225708008,
      "learning_rate": 4.654632529010867e-08,
      "loss": 0.3724,
      "step": 1500
    },
    {
      "epoch": 0.27629397679130596,
      "eval_loss": 1.2872973680496216,
      "eval_runtime": 77.5439,
      "eval_samples_per_second": 15.552,
      "eval_steps_per_second": 1.947,
      "step": 1500
    },
    {
      "epoch": 0.3683919690550746,
      "grad_norm": 338.9349060058594,
      "learning_rate": 4.539510038681156e-08,
      "loss": 0.3251,
      "step": 2000
    },
    {
      "epoch": 0.3683919690550746,
      "eval_loss": 1.234910249710083,
      "eval_runtime": 78.7715,
      "eval_samples_per_second": 15.31,
      "eval_steps_per_second": 1.917,
      "step": 2000
    },
    {
      "epoch": 0.46048996131884323,
      "grad_norm": 325.85101318359375,
      "learning_rate": 4.4243875483514457e-08,
      "loss": 0.3308,
      "step": 2500
    },
    {
      "epoch": 0.46048996131884323,
      "eval_loss": 1.1944907903671265,
      "eval_runtime": 79.0397,
      "eval_samples_per_second": 15.258,
      "eval_steps_per_second": 1.91,
      "step": 2500
    },
    {
      "epoch": 0.5525879535826119,
      "grad_norm": 100.85855102539062,
      "learning_rate": 4.309265058021735e-08,
      "loss": 0.3017,
      "step": 3000
    },
    {
      "epoch": 0.5525879535826119,
      "eval_loss": 1.1593009233474731,
      "eval_runtime": 77.6354,
      "eval_samples_per_second": 15.534,
      "eval_steps_per_second": 1.945,
      "step": 3000
    },
    {
      "epoch": 0.6446859458463805,
      "grad_norm": 8.865598678588867,
      "learning_rate": 4.194142567692024e-08,
      "loss": 0.2962,
      "step": 3500
    },
    {
      "epoch": 0.6446859458463805,
      "eval_loss": 1.1259427070617676,
      "eval_runtime": 78.2715,
      "eval_samples_per_second": 15.408,
      "eval_steps_per_second": 1.929,
      "step": 3500
    },
    {
      "epoch": 0.7367839381101492,
      "grad_norm": 4.393447399139404,
      "learning_rate": 4.079020077362314e-08,
      "loss": 0.2919,
      "step": 4000
    },
    {
      "epoch": 0.7367839381101492,
      "eval_loss": 1.0954023599624634,
      "eval_runtime": 78.5366,
      "eval_samples_per_second": 15.356,
      "eval_steps_per_second": 1.923,
      "step": 4000
    }
  ],
  "logging_steps": 500,
  "max_steps": 21716,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1439752921742520.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}