{
  "best_metric": 0.9886976480484009,
  "best_model_checkpoint": "./output/clip-finetuned-csu-p14-336-e3l58-l/checkpoint-8000",
  "epoch": 1.4738393515106853,
  "eval_steps": 500,
  "global_step": 8000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09211495946941783,
      "grad_norm": 19.808820724487305,
      "learning_rate": 4.84647506755097e-08,
      "loss": 0.3758,
      "step": 500
    },
    {
      "epoch": 0.09211495946941783,
      "eval_loss": 1.418845295906067,
      "eval_runtime": 75.2377,
      "eval_samples_per_second": 16.042,
      "eval_steps_per_second": 2.007,
      "step": 500
    },
    {
      "epoch": 0.18422991893883567,
      "grad_norm": 76.37991333007812,
      "learning_rate": 4.69295013510194e-08,
      "loss": 0.4108,
      "step": 1000
    },
    {
      "epoch": 0.18422991893883567,
      "eval_loss": 1.3513683080673218,
      "eval_runtime": 74.9686,
      "eval_samples_per_second": 16.1,
      "eval_steps_per_second": 2.014,
      "step": 1000
    },
    {
      "epoch": 0.2763448784082535,
      "grad_norm": 0.5795987844467163,
      "learning_rate": 4.539425202652911e-08,
      "loss": 0.4335,
      "step": 1500
    },
    {
      "epoch": 0.2763448784082535,
      "eval_loss": 1.2895005941390991,
      "eval_runtime": 76.713,
      "eval_samples_per_second": 15.734,
      "eval_steps_per_second": 1.968,
      "step": 1500
    },
    {
      "epoch": 0.36845983787767134,
      "grad_norm": 0.1463615745306015,
      "learning_rate": 4.385900270203881e-08,
      "loss": 0.3436,
      "step": 2000
    },
    {
      "epoch": 0.36845983787767134,
      "eval_loss": 1.2414860725402832,
      "eval_runtime": 76.9749,
      "eval_samples_per_second": 15.68,
      "eval_steps_per_second": 1.962,
      "step": 2000
    },
    {
      "epoch": 0.46057479734708917,
      "grad_norm": 323.8576354980469,
      "learning_rate": 4.232375337754851e-08,
      "loss": 0.366,
      "step": 2500
    },
    {
      "epoch": 0.46057479734708917,
      "eval_loss": 1.1949334144592285,
      "eval_runtime": 77.0381,
      "eval_samples_per_second": 15.668,
      "eval_steps_per_second": 1.96,
      "step": 2500
    },
    {
      "epoch": 0.552689756816507,
      "grad_norm": 339.28533935546875,
      "learning_rate": 4.078850405305821e-08,
      "loss": 0.3191,
      "step": 3000
    },
    {
      "epoch": 0.552689756816507,
      "eval_loss": 1.1560280323028564,
      "eval_runtime": 77.0026,
      "eval_samples_per_second": 15.675,
      "eval_steps_per_second": 1.961,
      "step": 3000
    },
    {
      "epoch": 0.6448047162859248,
      "grad_norm": 60.27859878540039,
      "learning_rate": 3.925325472856792e-08,
      "loss": 0.2779,
      "step": 3500
    },
    {
      "epoch": 0.6448047162859248,
      "eval_loss": 1.1233855485916138,
      "eval_runtime": 76.9006,
      "eval_samples_per_second": 15.696,
      "eval_steps_per_second": 1.964,
      "step": 3500
    },
    {
      "epoch": 0.7369196757553427,
      "grad_norm": 18.268455505371094,
      "learning_rate": 3.7718005404077616e-08,
      "loss": 0.2942,
      "step": 4000
    },
    {
      "epoch": 0.7369196757553427,
      "eval_loss": 1.0993207693099976,
      "eval_runtime": 77.2834,
      "eval_samples_per_second": 15.618,
      "eval_steps_per_second": 1.954,
      "step": 4000
    },
    {
      "epoch": 0.8290346352247605,
      "grad_norm": 505.22723388671875,
      "learning_rate": 3.6182756079587324e-08,
      "loss": 0.2615,
      "step": 4500
    },
    {
      "epoch": 0.8290346352247605,
      "eval_loss": 1.080470085144043,
      "eval_runtime": 77.1214,
      "eval_samples_per_second": 15.651,
      "eval_steps_per_second": 1.958,
      "step": 4500
    },
    {
      "epoch": 0.9211495946941783,
      "grad_norm": 1.2971173524856567,
      "learning_rate": 3.4647506755097025e-08,
      "loss": 0.2715,
      "step": 5000
    },
    {
      "epoch": 0.9211495946941783,
      "eval_loss": 1.0614728927612305,
      "eval_runtime": 77.2982,
      "eval_samples_per_second": 15.615,
      "eval_steps_per_second": 1.953,
      "step": 5000
    },
    {
      "epoch": 1.013264554163596,
      "grad_norm": 0.17611829936504364,
      "learning_rate": 3.3112257430606727e-08,
      "loss": 0.2509,
      "step": 5500
    },
    {
      "epoch": 1.013264554163596,
      "eval_loss": 1.0461839437484741,
      "eval_runtime": 75.3494,
      "eval_samples_per_second": 16.019,
      "eval_steps_per_second": 2.004,
      "step": 5500
    },
    {
      "epoch": 1.105379513633014,
      "grad_norm": 228.2562713623047,
      "learning_rate": 3.1577008106116435e-08,
      "loss": 0.2653,
      "step": 6000
    },
    {
      "epoch": 1.105379513633014,
      "eval_loss": 1.0305675268173218,
      "eval_runtime": 77.0542,
      "eval_samples_per_second": 15.664,
      "eval_steps_per_second": 1.96,
      "step": 6000
    },
    {
      "epoch": 1.1974944731024317,
      "grad_norm": 432.4784240722656,
      "learning_rate": 3.0041758781626136e-08,
      "loss": 0.2199,
      "step": 6500
    },
    {
      "epoch": 1.1974944731024317,
      "eval_loss": 1.0182703733444214,
      "eval_runtime": 76.296,
      "eval_samples_per_second": 15.82,
      "eval_steps_per_second": 1.979,
      "step": 6500
    },
    {
      "epoch": 1.2896094325718497,
      "grad_norm": 0.6385967135429382,
      "learning_rate": 2.8506509457135834e-08,
      "loss": 0.1844,
      "step": 7000
    },
    {
      "epoch": 1.2896094325718497,
      "eval_loss": 1.0078601837158203,
      "eval_runtime": 76.5441,
      "eval_samples_per_second": 15.769,
      "eval_steps_per_second": 1.973,
      "step": 7000
    },
    {
      "epoch": 1.3817243920412676,
      "grad_norm": 57.47743606567383,
      "learning_rate": 2.697126013264554e-08,
      "loss": 0.2143,
      "step": 7500
    },
    {
      "epoch": 1.3817243920412676,
      "eval_loss": 0.9969784617424011,
      "eval_runtime": 75.6186,
      "eval_samples_per_second": 15.962,
      "eval_steps_per_second": 1.997,
      "step": 7500
    },
    {
      "epoch": 1.4738393515106853,
      "grad_norm": 0.11704438179731369,
      "learning_rate": 2.5436010808155244e-08,
      "loss": 0.2011,
      "step": 8000
    },
    {
      "epoch": 1.4738393515106853,
      "eval_loss": 0.9886976480484009,
      "eval_runtime": 75.238,
      "eval_samples_per_second": 16.042,
      "eval_steps_per_second": 2.007,
      "step": 8000
    }
  ],
  "logging_steps": 500,
  "max_steps": 16284,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2878786326782520.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|