|
{
  "best_metric": 0.9021016359329224,
  "best_model_checkpoint": "./output/clip-finetuned-csu-p14-336-e4l58-l/checkpoint-13000",
  "epoch": 2.3949889462048635,
  "eval_steps": 500,
  "global_step": 13000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09211495946941783,
      "grad_norm": 20.04043960571289,
      "learning_rate": 4.8848563006632274e-08,
      "loss": 0.3758,
      "step": 500
    },
    {
      "epoch": 0.09211495946941783,
      "eval_loss": 1.4184927940368652,
      "eval_runtime": 74.3562,
      "eval_samples_per_second": 16.233,
      "eval_steps_per_second": 2.031,
      "step": 500
    },
    {
      "epoch": 0.18422991893883567,
      "grad_norm": 74.25025939941406,
      "learning_rate": 4.7697126013264556e-08,
      "loss": 0.4103,
      "step": 1000
    },
    {
      "epoch": 0.18422991893883567,
      "eval_loss": 1.3500770330429077,
      "eval_runtime": 75.7013,
      "eval_samples_per_second": 15.944,
      "eval_steps_per_second": 1.995,
      "step": 1000
    },
    {
      "epoch": 0.2763448784082535,
      "grad_norm": 0.5102410912513733,
      "learning_rate": 4.6545689019896826e-08,
      "loss": 0.433,
      "step": 1500
    },
    {
      "epoch": 0.2763448784082535,
      "eval_loss": 1.2885024547576904,
      "eval_runtime": 75.197,
      "eval_samples_per_second": 16.051,
      "eval_steps_per_second": 2.008,
      "step": 1500
    },
    {
      "epoch": 0.36845983787767134,
      "grad_norm": 0.1667678952217102,
      "learning_rate": 4.539425202652911e-08,
      "loss": 0.3424,
      "step": 2000
    },
    {
      "epoch": 0.36845983787767134,
      "eval_loss": 1.239119052886963,
      "eval_runtime": 75.387,
      "eval_samples_per_second": 16.011,
      "eval_steps_per_second": 2.003,
      "step": 2000
    },
    {
      "epoch": 0.46057479734708917,
      "grad_norm": 343.8175048828125,
      "learning_rate": 4.4242815033161385e-08,
      "loss": 0.3645,
      "step": 2500
    },
    {
      "epoch": 0.46057479734708917,
      "eval_loss": 1.1902339458465576,
      "eval_runtime": 75.9899,
      "eval_samples_per_second": 15.884,
      "eval_steps_per_second": 1.987,
      "step": 2500
    },
    {
      "epoch": 0.552689756816507,
      "grad_norm": 305.5626525878906,
      "learning_rate": 4.309137803979366e-08,
      "loss": 0.3172,
      "step": 3000
    },
    {
      "epoch": 0.552689756816507,
      "eval_loss": 1.1506118774414062,
      "eval_runtime": 74.8655,
      "eval_samples_per_second": 16.122,
      "eval_steps_per_second": 2.017,
      "step": 3000
    },
    {
      "epoch": 0.6448047162859248,
      "grad_norm": 65.59048461914062,
      "learning_rate": 4.193994104642594e-08,
      "loss": 0.2751,
      "step": 3500
    },
    {
      "epoch": 0.6448047162859248,
      "eval_loss": 1.1169357299804688,
      "eval_runtime": 76.2936,
      "eval_samples_per_second": 15.82,
      "eval_steps_per_second": 1.979,
      "step": 3500
    },
    {
      "epoch": 0.7369196757553427,
      "grad_norm": 18.106201171875,
      "learning_rate": 4.078850405305821e-08,
      "loss": 0.2919,
      "step": 4000
    },
    {
      "epoch": 0.7369196757553427,
      "eval_loss": 1.0920671224594116,
      "eval_runtime": 74.9101,
      "eval_samples_per_second": 16.113,
      "eval_steps_per_second": 2.016,
      "step": 4000
    },
    {
      "epoch": 0.8290346352247605,
      "grad_norm": 506.40570068359375,
      "learning_rate": 3.9637067059690496e-08,
      "loss": 0.2583,
      "step": 4500
    },
    {
      "epoch": 0.8290346352247605,
      "eval_loss": 1.0721209049224854,
      "eval_runtime": 75.8053,
      "eval_samples_per_second": 15.922,
      "eval_steps_per_second": 1.992,
      "step": 4500
    },
    {
      "epoch": 0.9211495946941783,
      "grad_norm": 1.1508910655975342,
      "learning_rate": 3.848563006632277e-08,
      "loss": 0.2679,
      "step": 5000
    },
    {
      "epoch": 0.9211495946941783,
      "eval_loss": 1.0519349575042725,
      "eval_runtime": 75.226,
      "eval_samples_per_second": 16.045,
      "eval_steps_per_second": 2.007,
      "step": 5000
    },
    {
      "epoch": 1.013264554163596,
      "grad_norm": 0.1393290013074875,
      "learning_rate": 3.733419307295505e-08,
      "loss": 0.2472,
      "step": 5500
    },
    {
      "epoch": 1.013264554163596,
      "eval_loss": 1.0355629920959473,
      "eval_runtime": 74.6535,
      "eval_samples_per_second": 16.168,
      "eval_steps_per_second": 2.023,
      "step": 5500
    },
    {
      "epoch": 1.105379513633014,
      "grad_norm": 200.34664916992188,
      "learning_rate": 3.6182756079587324e-08,
      "loss": 0.26,
      "step": 6000
    },
    {
      "epoch": 1.105379513633014,
      "eval_loss": 1.0176944732666016,
      "eval_runtime": 74.4374,
      "eval_samples_per_second": 16.215,
      "eval_steps_per_second": 2.029,
      "step": 6000
    },
    {
      "epoch": 1.1974944731024317,
      "grad_norm": 431.1804504394531,
      "learning_rate": 3.50313190862196e-08,
      "loss": 0.2153,
      "step": 6500
    },
    {
      "epoch": 1.1974944731024317,
      "eval_loss": 1.0044586658477783,
      "eval_runtime": 74.3247,
      "eval_samples_per_second": 16.24,
      "eval_steps_per_second": 2.032,
      "step": 6500
    },
    {
      "epoch": 1.2896094325718497,
      "grad_norm": 0.6204941272735596,
      "learning_rate": 3.387988209285188e-08,
      "loss": 0.1791,
      "step": 7000
    },
    {
      "epoch": 1.2896094325718497,
      "eval_loss": 0.9926707148551941,
      "eval_runtime": 75.7317,
      "eval_samples_per_second": 15.938,
      "eval_steps_per_second": 1.994,
      "step": 7000
    },
    {
      "epoch": 1.3817243920412676,
      "grad_norm": 52.27273178100586,
      "learning_rate": 3.272844509948415e-08,
      "loss": 0.2082,
      "step": 7500
    },
    {
      "epoch": 1.3817243920412676,
      "eval_loss": 0.9803809523582458,
      "eval_runtime": 74.9936,
      "eval_samples_per_second": 16.095,
      "eval_steps_per_second": 2.014,
      "step": 7500
    },
    {
      "epoch": 1.4738393515106853,
      "grad_norm": 0.0976635068655014,
      "learning_rate": 3.1577008106116435e-08,
      "loss": 0.196,
      "step": 8000
    },
    {
      "epoch": 1.4738393515106853,
      "eval_loss": 0.9711655378341675,
      "eval_runtime": 74.3537,
      "eval_samples_per_second": 16.233,
      "eval_steps_per_second": 2.031,
      "step": 8000
    },
    {
      "epoch": 1.565954310980103,
      "grad_norm": 46.64162826538086,
      "learning_rate": 3.042557111274871e-08,
      "loss": 0.1946,
      "step": 8500
    },
    {
      "epoch": 1.565954310980103,
      "eval_loss": 0.9621181488037109,
      "eval_runtime": 75.7205,
      "eval_samples_per_second": 15.94,
      "eval_steps_per_second": 1.994,
      "step": 8500
    },
    {
      "epoch": 1.658069270449521,
      "grad_norm": 0.05690698325634003,
      "learning_rate": 2.927413411938099e-08,
      "loss": 0.2422,
      "step": 9000
    },
    {
      "epoch": 1.658069270449521,
      "eval_loss": 0.9536784291267395,
      "eval_runtime": 75.1745,
      "eval_samples_per_second": 16.056,
      "eval_steps_per_second": 2.009,
      "step": 9000
    },
    {
      "epoch": 1.750184229918939,
      "grad_norm": 0.08470064401626587,
      "learning_rate": 2.8122697126013263e-08,
      "loss": 0.2106,
      "step": 9500
    },
    {
      "epoch": 1.750184229918939,
      "eval_loss": 0.9457550644874573,
      "eval_runtime": 74.5062,
      "eval_samples_per_second": 16.2,
      "eval_steps_per_second": 2.027,
      "step": 9500
    },
    {
      "epoch": 1.8422991893883567,
      "grad_norm": 1.5745967626571655,
      "learning_rate": 2.697126013264554e-08,
      "loss": 0.1801,
      "step": 10000
    },
    {
      "epoch": 1.8422991893883567,
      "eval_loss": 0.9393072724342346,
      "eval_runtime": 75.9432,
      "eval_samples_per_second": 15.893,
      "eval_steps_per_second": 1.988,
      "step": 10000
    },
    {
      "epoch": 1.9344141488577744,
      "grad_norm": 0.06661002337932587,
      "learning_rate": 2.5819823139277818e-08,
      "loss": 0.2117,
      "step": 10500
    },
    {
      "epoch": 1.9344141488577744,
      "eval_loss": 0.9308408498764038,
      "eval_runtime": 75.0811,
      "eval_samples_per_second": 16.076,
      "eval_steps_per_second": 2.011,
      "step": 10500
    },
    {
      "epoch": 2.026529108327192,
      "grad_norm": 0.236691415309906,
      "learning_rate": 2.4668386145910094e-08,
      "loss": 0.2061,
      "step": 11000
    },
    {
      "epoch": 2.026529108327192,
      "eval_loss": 0.9237484335899353,
      "eval_runtime": 74.6134,
      "eval_samples_per_second": 16.177,
      "eval_steps_per_second": 2.024,
      "step": 11000
    },
    {
      "epoch": 2.1186440677966103,
      "grad_norm": 8.831416130065918,
      "learning_rate": 2.3516949152542374e-08,
      "loss": 0.1878,
      "step": 11500
    },
    {
      "epoch": 2.1186440677966103,
      "eval_loss": 0.9166750311851501,
      "eval_runtime": 75.6886,
      "eval_samples_per_second": 15.947,
      "eval_steps_per_second": 1.995,
      "step": 11500
    },
    {
      "epoch": 2.210759027266028,
      "grad_norm": 0.787421464920044,
      "learning_rate": 2.236551215917465e-08,
      "loss": 0.1655,
      "step": 12000
    },
    {
      "epoch": 2.210759027266028,
      "eval_loss": 0.9109458327293396,
      "eval_runtime": 74.9866,
      "eval_samples_per_second": 16.096,
      "eval_steps_per_second": 2.014,
      "step": 12000
    },
    {
      "epoch": 2.3028739867354457,
      "grad_norm": 535.4310913085938,
      "learning_rate": 2.1214075165806926e-08,
      "loss": 0.1946,
      "step": 12500
    },
    {
      "epoch": 2.3028739867354457,
      "eval_loss": 0.9070548415184021,
      "eval_runtime": 74.3796,
      "eval_samples_per_second": 16.228,
      "eval_steps_per_second": 2.03,
      "step": 12500
    },
    {
      "epoch": 2.3949889462048635,
      "grad_norm": 0.8420014381408691,
      "learning_rate": 2.0062638172439202e-08,
      "loss": 0.1882,
      "step": 13000
    },
    {
      "epoch": 2.3949889462048635,
      "eval_loss": 0.9021016359329224,
      "eval_runtime": 75.8812,
      "eval_samples_per_second": 15.906,
      "eval_steps_per_second": 1.99,
      "step": 13000
    }
  ],
  "logging_steps": 500,
  "max_steps": 21712,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4677578083082520.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|