{
  "best_metric": 0.6983720660209656,
  "best_model_checkpoint": "./output/clip-finetuned-csu-p14-336-e3l37-l/checkpoint-8500",
  "epoch": 0.9568839356073399,
  "eval_steps": 500,
  "global_step": 8500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.056287290329843524,
      "grad_norm": 96.92656707763672,
      "learning_rate": 2.9437127096701565e-07,
      "loss": 0.3748,
      "step": 500
    },
    {
      "epoch": 0.056287290329843524,
      "eval_loss": 1.2226382493972778,
      "eval_runtime": 131.2779,
      "eval_samples_per_second": 15.037,
      "eval_steps_per_second": 1.882,
      "step": 500
    },
    {
      "epoch": 0.11257458065968705,
      "grad_norm": 402.31158447265625,
      "learning_rate": 2.887425419340313e-07,
      "loss": 0.3057,
      "step": 1000
    },
    {
      "epoch": 0.11257458065968705,
      "eval_loss": 1.0702259540557861,
      "eval_runtime": 131.0109,
      "eval_samples_per_second": 15.067,
      "eval_steps_per_second": 1.885,
      "step": 1000
    },
    {
      "epoch": 0.16886187098953057,
      "grad_norm": 515.0911865234375,
      "learning_rate": 2.831138129010469e-07,
      "loss": 0.2239,
      "step": 1500
    },
    {
      "epoch": 0.16886187098953057,
      "eval_loss": 0.9957238435745239,
      "eval_runtime": 134.8377,
      "eval_samples_per_second": 14.64,
      "eval_steps_per_second": 1.832,
      "step": 1500
    },
    {
      "epoch": 0.2251491613193741,
      "grad_norm": 3.1967105865478516,
      "learning_rate": 2.774850838680626e-07,
      "loss": 0.2229,
      "step": 2000
    },
    {
      "epoch": 0.2251491613193741,
      "eval_loss": 0.9504629373550415,
      "eval_runtime": 129.0169,
      "eval_samples_per_second": 15.3,
      "eval_steps_per_second": 1.914,
      "step": 2000
    },
    {
      "epoch": 0.2814364516492176,
      "grad_norm": 276.22998046875,
      "learning_rate": 2.718563548350782e-07,
      "loss": 0.2098,
      "step": 2500
    },
    {
      "epoch": 0.2814364516492176,
      "eval_loss": 0.9006705284118652,
      "eval_runtime": 129.1816,
      "eval_samples_per_second": 15.281,
      "eval_steps_per_second": 1.912,
      "step": 2500
    },
    {
      "epoch": 0.33772374197906113,
      "grad_norm": 1105.317138671875,
      "learning_rate": 2.6622762580209386e-07,
      "loss": 0.1938,
      "step": 3000
    },
    {
      "epoch": 0.33772374197906113,
      "eval_loss": 0.8782906532287598,
      "eval_runtime": 128.6387,
      "eval_samples_per_second": 15.345,
      "eval_steps_per_second": 1.92,
      "step": 3000
    },
    {
      "epoch": 0.39401103230890466,
      "grad_norm": 0.00043465400813147426,
      "learning_rate": 2.605988967691095e-07,
      "loss": 0.1688,
      "step": 3500
    },
    {
      "epoch": 0.39401103230890466,
      "eval_loss": 0.8405746221542358,
      "eval_runtime": 128.6989,
      "eval_samples_per_second": 15.338,
      "eval_steps_per_second": 1.919,
      "step": 3500
    },
    {
      "epoch": 0.4502983226387482,
      "grad_norm": 0.027426382526755333,
      "learning_rate": 2.549701677361252e-07,
      "loss": 0.1457,
      "step": 4000
    },
    {
      "epoch": 0.4502983226387482,
      "eval_loss": 0.813768208026886,
      "eval_runtime": 128.7209,
      "eval_samples_per_second": 15.336,
      "eval_steps_per_second": 1.919,
      "step": 4000
    },
    {
      "epoch": 0.5065856129685917,
      "grad_norm": 0.0012935090344399214,
      "learning_rate": 2.4934143870314085e-07,
      "loss": 0.179,
      "step": 4500
    },
    {
      "epoch": 0.5065856129685917,
      "eval_loss": 0.7965527176856995,
      "eval_runtime": 128.3969,
      "eval_samples_per_second": 15.374,
      "eval_steps_per_second": 1.924,
      "step": 4500
    },
    {
      "epoch": 0.5628729032984352,
      "grad_norm": 0.0017458726651966572,
      "learning_rate": 2.4371270967015646e-07,
      "loss": 0.1224,
      "step": 5000
    },
    {
      "epoch": 0.5628729032984352,
      "eval_loss": 0.7788484692573547,
      "eval_runtime": 128.4939,
      "eval_samples_per_second": 15.363,
      "eval_steps_per_second": 1.922,
      "step": 5000
    },
    {
      "epoch": 0.6191601936282788,
      "grad_norm": 65.76844787597656,
      "learning_rate": 2.380839806371721e-07,
      "loss": 0.1551,
      "step": 5500
    },
    {
      "epoch": 0.6191601936282788,
      "eval_loss": 0.7626588344573975,
      "eval_runtime": 128.7302,
      "eval_samples_per_second": 15.334,
      "eval_steps_per_second": 1.919,
      "step": 5500
    },
    {
      "epoch": 0.6754474839581223,
      "grad_norm": 5.014094829559326,
      "learning_rate": 2.3245525160418776e-07,
      "loss": 0.1721,
      "step": 6000
    },
    {
      "epoch": 0.6754474839581223,
      "eval_loss": 0.7464810609817505,
      "eval_runtime": 128.749,
      "eval_samples_per_second": 15.332,
      "eval_steps_per_second": 1.918,
      "step": 6000
    },
    {
      "epoch": 0.7317347742879657,
      "grad_norm": 0.0022696161177009344,
      "learning_rate": 2.268265225712034e-07,
      "loss": 0.1532,
      "step": 6500
    },
    {
      "epoch": 0.7317347742879657,
      "eval_loss": 0.7335842251777649,
      "eval_runtime": 128.8165,
      "eval_samples_per_second": 15.324,
      "eval_steps_per_second": 1.917,
      "step": 6500
    },
    {
      "epoch": 0.7880220646178093,
      "grad_norm": 5.508715730684344e-06,
      "learning_rate": 2.2119779353821906e-07,
      "loss": 0.1991,
      "step": 7000
    },
    {
      "epoch": 0.7880220646178093,
      "eval_loss": 0.7244272232055664,
      "eval_runtime": 128.9211,
      "eval_samples_per_second": 15.312,
      "eval_steps_per_second": 1.916,
      "step": 7000
    },
    {
      "epoch": 0.8443093549476528,
      "grad_norm": 0.30320531129837036,
      "learning_rate": 2.155690645052347e-07,
      "loss": 0.1551,
      "step": 7500
    },
    {
      "epoch": 0.8443093549476528,
      "eval_loss": 0.718368411064148,
      "eval_runtime": 128.8149,
      "eval_samples_per_second": 15.324,
      "eval_steps_per_second": 1.917,
      "step": 7500
    },
    {
      "epoch": 0.9005966452774964,
      "grad_norm": 1.9788849385804497e-05,
      "learning_rate": 2.0994033547225037e-07,
      "loss": 0.1439,
      "step": 8000
    },
    {
      "epoch": 0.9005966452774964,
      "eval_loss": 0.7040167450904846,
      "eval_runtime": 128.4921,
      "eval_samples_per_second": 15.363,
      "eval_steps_per_second": 1.922,
      "step": 8000
    },
    {
      "epoch": 0.9568839356073399,
      "grad_norm": 1.0750063665909693e-06,
      "learning_rate": 2.04311606439266e-07,
      "loss": 0.1361,
      "step": 8500
    },
    {
      "epoch": 0.9568839356073399,
      "eval_loss": 0.6983720660209656,
      "eval_runtime": 128.5929,
      "eval_samples_per_second": 15.351,
      "eval_steps_per_second": 1.921,
      "step": 8500
    }
  ],
  "logging_steps": 500,
  "max_steps": 26649,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3058665502412520.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}