{
  "best_metric": 0.9536784291267395,
  "best_model_checkpoint": "./output/clip-finetuned-csu-p14-336-e4l58-l/checkpoint-9000",
  "epoch": 1.658069270449521,
  "eval_steps": 500,
  "global_step": 9000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09211495946941783,
      "grad_norm": 20.04043960571289,
      "learning_rate": 4.8848563006632274e-08,
      "loss": 0.3758,
      "step": 500
    },
    {
      "epoch": 0.09211495946941783,
      "eval_loss": 1.4184927940368652,
      "eval_runtime": 74.3562,
      "eval_samples_per_second": 16.233,
      "eval_steps_per_second": 2.031,
      "step": 500
    },
    {
      "epoch": 0.18422991893883567,
      "grad_norm": 74.25025939941406,
      "learning_rate": 4.7697126013264556e-08,
      "loss": 0.4103,
      "step": 1000
    },
    {
      "epoch": 0.18422991893883567,
      "eval_loss": 1.3500770330429077,
      "eval_runtime": 75.7013,
      "eval_samples_per_second": 15.944,
      "eval_steps_per_second": 1.995,
      "step": 1000
    },
    {
      "epoch": 0.2763448784082535,
      "grad_norm": 0.5102410912513733,
      "learning_rate": 4.6545689019896826e-08,
      "loss": 0.433,
      "step": 1500
    },
    {
      "epoch": 0.2763448784082535,
      "eval_loss": 1.2885024547576904,
      "eval_runtime": 75.197,
      "eval_samples_per_second": 16.051,
      "eval_steps_per_second": 2.008,
      "step": 1500
    },
    {
      "epoch": 0.36845983787767134,
      "grad_norm": 0.1667678952217102,
      "learning_rate": 4.539425202652911e-08,
      "loss": 0.3424,
      "step": 2000
    },
    {
      "epoch": 0.36845983787767134,
      "eval_loss": 1.239119052886963,
      "eval_runtime": 75.387,
      "eval_samples_per_second": 16.011,
      "eval_steps_per_second": 2.003,
      "step": 2000
    },
    {
      "epoch": 0.46057479734708917,
      "grad_norm": 343.8175048828125,
      "learning_rate": 4.4242815033161385e-08,
      "loss": 0.3645,
      "step": 2500
    },
    {
      "epoch": 0.46057479734708917,
      "eval_loss": 1.1902339458465576,
      "eval_runtime": 75.9899,
      "eval_samples_per_second": 15.884,
      "eval_steps_per_second": 1.987,
      "step": 2500
    },
    {
      "epoch": 0.552689756816507,
      "grad_norm": 305.5626525878906,
      "learning_rate": 4.309137803979366e-08,
      "loss": 0.3172,
      "step": 3000
    },
    {
      "epoch": 0.552689756816507,
      "eval_loss": 1.1506118774414062,
      "eval_runtime": 74.8655,
      "eval_samples_per_second": 16.122,
      "eval_steps_per_second": 2.017,
      "step": 3000
    },
    {
      "epoch": 0.6448047162859248,
      "grad_norm": 65.59048461914062,
      "learning_rate": 4.193994104642594e-08,
      "loss": 0.2751,
      "step": 3500
    },
    {
      "epoch": 0.6448047162859248,
      "eval_loss": 1.1169357299804688,
      "eval_runtime": 76.2936,
      "eval_samples_per_second": 15.82,
      "eval_steps_per_second": 1.979,
      "step": 3500
    },
    {
      "epoch": 0.7369196757553427,
      "grad_norm": 18.106201171875,
      "learning_rate": 4.078850405305821e-08,
      "loss": 0.2919,
      "step": 4000
    },
    {
      "epoch": 0.7369196757553427,
      "eval_loss": 1.0920671224594116,
      "eval_runtime": 74.9101,
      "eval_samples_per_second": 16.113,
      "eval_steps_per_second": 2.016,
      "step": 4000
    },
    {
      "epoch": 0.8290346352247605,
      "grad_norm": 506.40570068359375,
      "learning_rate": 3.9637067059690496e-08,
      "loss": 0.2583,
      "step": 4500
    },
    {
      "epoch": 0.8290346352247605,
      "eval_loss": 1.0721209049224854,
      "eval_runtime": 75.8053,
      "eval_samples_per_second": 15.922,
      "eval_steps_per_second": 1.992,
      "step": 4500
    },
    {
      "epoch": 0.9211495946941783,
      "grad_norm": 1.1508910655975342,
      "learning_rate": 3.848563006632277e-08,
      "loss": 0.2679,
      "step": 5000
    },
    {
      "epoch": 0.9211495946941783,
      "eval_loss": 1.0519349575042725,
      "eval_runtime": 75.226,
      "eval_samples_per_second": 16.045,
      "eval_steps_per_second": 2.007,
      "step": 5000
    },
    {
      "epoch": 1.013264554163596,
      "grad_norm": 0.1393290013074875,
      "learning_rate": 3.733419307295505e-08,
      "loss": 0.2472,
      "step": 5500
    },
    {
      "epoch": 1.013264554163596,
      "eval_loss": 1.0355629920959473,
      "eval_runtime": 74.6535,
      "eval_samples_per_second": 16.168,
      "eval_steps_per_second": 2.023,
      "step": 5500
    },
    {
      "epoch": 1.105379513633014,
      "grad_norm": 200.34664916992188,
      "learning_rate": 3.6182756079587324e-08,
      "loss": 0.26,
      "step": 6000
    },
    {
      "epoch": 1.105379513633014,
      "eval_loss": 1.0176944732666016,
      "eval_runtime": 74.4374,
      "eval_samples_per_second": 16.215,
      "eval_steps_per_second": 2.029,
      "step": 6000
    },
    {
      "epoch": 1.1974944731024317,
      "grad_norm": 431.1804504394531,
      "learning_rate": 3.50313190862196e-08,
      "loss": 0.2153,
      "step": 6500
    },
    {
      "epoch": 1.1974944731024317,
      "eval_loss": 1.0044586658477783,
      "eval_runtime": 74.3247,
      "eval_samples_per_second": 16.24,
      "eval_steps_per_second": 2.032,
      "step": 6500
    },
    {
      "epoch": 1.2896094325718497,
      "grad_norm": 0.6204941272735596,
      "learning_rate": 3.387988209285188e-08,
      "loss": 0.1791,
      "step": 7000
    },
    {
      "epoch": 1.2896094325718497,
      "eval_loss": 0.9926707148551941,
      "eval_runtime": 75.7317,
      "eval_samples_per_second": 15.938,
      "eval_steps_per_second": 1.994,
      "step": 7000
    },
    {
      "epoch": 1.3817243920412676,
      "grad_norm": 52.27273178100586,
      "learning_rate": 3.272844509948415e-08,
      "loss": 0.2082,
      "step": 7500
    },
    {
      "epoch": 1.3817243920412676,
      "eval_loss": 0.9803809523582458,
      "eval_runtime": 74.9936,
      "eval_samples_per_second": 16.095,
      "eval_steps_per_second": 2.014,
      "step": 7500
    },
    {
      "epoch": 1.4738393515106853,
      "grad_norm": 0.0976635068655014,
      "learning_rate": 3.1577008106116435e-08,
      "loss": 0.196,
      "step": 8000
    },
    {
      "epoch": 1.4738393515106853,
      "eval_loss": 0.9711655378341675,
      "eval_runtime": 74.3537,
      "eval_samples_per_second": 16.233,
      "eval_steps_per_second": 2.031,
      "step": 8000
    },
    {
      "epoch": 1.565954310980103,
      "grad_norm": 46.64162826538086,
      "learning_rate": 3.042557111274871e-08,
      "loss": 0.1946,
      "step": 8500
    },
    {
      "epoch": 1.565954310980103,
      "eval_loss": 0.9621181488037109,
      "eval_runtime": 75.7205,
      "eval_samples_per_second": 15.94,
      "eval_steps_per_second": 1.994,
      "step": 8500
    },
    {
      "epoch": 1.658069270449521,
      "grad_norm": 0.05690698325634003,
      "learning_rate": 2.927413411938099e-08,
      "loss": 0.2422,
      "step": 9000
    },
    {
      "epoch": 1.658069270449521,
      "eval_loss": 0.9536784291267395,
      "eval_runtime": 75.1745,
      "eval_samples_per_second": 16.056,
      "eval_steps_per_second": 2.009,
      "step": 9000
    }
  ],
  "logging_steps": 500,
  "max_steps": 21712,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3238544678042520.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|