|
{
  "best_metric": 0.6960715055465698,
  "best_model_checkpoint": "./output/clip-finetuned-csu-p14-336-e3l37-l/checkpoint-6500",
  "epoch": 1.1974944731024317,
  "eval_steps": 500,
  "global_step": 6500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09211495946941783,
      "grad_norm": 8.437943458557129,
      "learning_rate": 2.907885040530582e-07,
      "loss": 0.3017,
      "step": 500
    },
    {
      "epoch": 0.09211495946941783,
      "eval_loss": 1.164457082748413,
      "eval_runtime": 75.2697,
      "eval_samples_per_second": 16.036,
      "eval_steps_per_second": 2.006,
      "step": 500
    },
    {
      "epoch": 0.18422991893883567,
      "grad_norm": 14.740598678588867,
      "learning_rate": 2.815770081061164e-07,
      "loss": 0.2899,
      "step": 1000
    },
    {
      "epoch": 0.18422991893883567,
      "eval_loss": 1.01627779006958,
      "eval_runtime": 76.4934,
      "eval_samples_per_second": 15.779,
      "eval_steps_per_second": 1.974,
      "step": 1000
    },
    {
      "epoch": 0.2763448784082535,
      "grad_norm": 0.17031528055667877,
      "learning_rate": 2.723655121591746e-07,
      "loss": 0.273,
      "step": 1500
    },
    {
      "epoch": 0.2763448784082535,
      "eval_loss": 0.9395213723182678,
      "eval_runtime": 76.6256,
      "eval_samples_per_second": 15.752,
      "eval_steps_per_second": 1.971,
      "step": 1500
    },
    {
      "epoch": 0.36845983787767134,
      "grad_norm": 0.0030106802005320787,
      "learning_rate": 2.6315401621223287e-07,
      "loss": 0.1818,
      "step": 2000
    },
    {
      "epoch": 0.36845983787767134,
      "eval_loss": 0.8942967057228088,
      "eval_runtime": 76.8089,
      "eval_samples_per_second": 15.714,
      "eval_steps_per_second": 1.966,
      "step": 2000
    },
    {
      "epoch": 0.46057479734708917,
      "grad_norm": 474.4410705566406,
      "learning_rate": 2.539425202652911e-07,
      "loss": 0.2094,
      "step": 2500
    },
    {
      "epoch": 0.46057479734708917,
      "eval_loss": 0.8330363631248474,
      "eval_runtime": 76.9939,
      "eval_samples_per_second": 15.677,
      "eval_steps_per_second": 1.961,
      "step": 2500
    },
    {
      "epoch": 0.552689756816507,
      "grad_norm": 314.3533935546875,
      "learning_rate": 2.447310243183493e-07,
      "loss": 0.1949,
      "step": 3000
    },
    {
      "epoch": 0.552689756816507,
      "eval_loss": 0.7885717749595642,
      "eval_runtime": 76.7534,
      "eval_samples_per_second": 15.726,
      "eval_steps_per_second": 1.967,
      "step": 3000
    },
    {
      "epoch": 0.6448047162859248,
      "grad_norm": 7.678028106689453,
      "learning_rate": 2.3551952837140753e-07,
      "loss": 0.1335,
      "step": 3500
    },
    {
      "epoch": 0.6448047162859248,
      "eval_loss": 0.7657251358032227,
      "eval_runtime": 76.791,
      "eval_samples_per_second": 15.718,
      "eval_steps_per_second": 1.966,
      "step": 3500
    },
    {
      "epoch": 0.7369196757553427,
      "grad_norm": 30.121461868286133,
      "learning_rate": 2.263080324244657e-07,
      "loss": 0.1541,
      "step": 4000
    },
    {
      "epoch": 0.7369196757553427,
      "eval_loss": 0.7473344802856445,
      "eval_runtime": 76.929,
      "eval_samples_per_second": 15.69,
      "eval_steps_per_second": 1.963,
      "step": 4000
    },
    {
      "epoch": 0.8290346352247605,
      "grad_norm": 654.2782592773438,
      "learning_rate": 2.1709653647752394e-07,
      "loss": 0.1267,
      "step": 4500
    },
    {
      "epoch": 0.8290346352247605,
      "eval_loss": 0.7376419305801392,
      "eval_runtime": 75.2838,
      "eval_samples_per_second": 16.033,
      "eval_steps_per_second": 2.006,
      "step": 4500
    },
    {
      "epoch": 0.9211495946941783,
      "grad_norm": 0.00021513362298719585,
      "learning_rate": 2.0788504053058218e-07,
      "loss": 0.1404,
      "step": 5000
    },
    {
      "epoch": 0.9211495946941783,
      "eval_loss": 0.724165678024292,
      "eval_runtime": 76.8393,
      "eval_samples_per_second": 15.708,
      "eval_steps_per_second": 1.965,
      "step": 5000
    },
    {
      "epoch": 1.013264554163596,
      "grad_norm": 0.0016925306990742683,
      "learning_rate": 1.9867354458364036e-07,
      "loss": 0.1197,
      "step": 5500
    },
    {
      "epoch": 1.013264554163596,
      "eval_loss": 0.7046866416931152,
      "eval_runtime": 76.6635,
      "eval_samples_per_second": 15.744,
      "eval_steps_per_second": 1.97,
      "step": 5500
    },
    {
      "epoch": 1.105379513633014,
      "grad_norm": 0.38834691047668457,
      "learning_rate": 1.894620486366986e-07,
      "loss": 0.0834,
      "step": 6000
    },
    {
      "epoch": 1.105379513633014,
      "eval_loss": 0.6995787620544434,
      "eval_runtime": 77.9523,
      "eval_samples_per_second": 15.484,
      "eval_steps_per_second": 1.937,
      "step": 6000
    },
    {
      "epoch": 1.1974944731024317,
      "grad_norm": 0.3254232704639435,
      "learning_rate": 1.8025055268975683e-07,
      "loss": 0.084,
      "step": 6500
    },
    {
      "epoch": 1.1974944731024317,
      "eval_loss": 0.6960715055465698,
      "eval_runtime": 76.0942,
      "eval_samples_per_second": 15.862,
      "eval_steps_per_second": 1.984,
      "step": 6500
    }
  ],
  "logging_steps": 500,
  "max_steps": 16284,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2339148799892520.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|