Training in progress, step 16000, checkpoint
0eee316 verified
{
"best_metric": 0.9258912801742554,
"best_model_checkpoint": "./output/clip-finetuned-csu-p14-336-e3l58-l/checkpoint-16000",
"epoch": 2.9476787030213707,
"eval_steps": 500,
"global_step": 16000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09211495946941783,
"grad_norm": 19.808820724487305,
"learning_rate": 4.84647506755097e-08,
"loss": 0.3758,
"step": 500
},
{
"epoch": 0.09211495946941783,
"eval_loss": 1.418845295906067,
"eval_runtime": 75.2377,
"eval_samples_per_second": 16.042,
"eval_steps_per_second": 2.007,
"step": 500
},
{
"epoch": 0.18422991893883567,
"grad_norm": 76.37991333007812,
"learning_rate": 4.69295013510194e-08,
"loss": 0.4108,
"step": 1000
},
{
"epoch": 0.18422991893883567,
"eval_loss": 1.3513683080673218,
"eval_runtime": 74.9686,
"eval_samples_per_second": 16.1,
"eval_steps_per_second": 2.014,
"step": 1000
},
{
"epoch": 0.2763448784082535,
"grad_norm": 0.5795987844467163,
"learning_rate": 4.539425202652911e-08,
"loss": 0.4335,
"step": 1500
},
{
"epoch": 0.2763448784082535,
"eval_loss": 1.2895005941390991,
"eval_runtime": 76.713,
"eval_samples_per_second": 15.734,
"eval_steps_per_second": 1.968,
"step": 1500
},
{
"epoch": 0.36845983787767134,
"grad_norm": 0.1463615745306015,
"learning_rate": 4.385900270203881e-08,
"loss": 0.3436,
"step": 2000
},
{
"epoch": 0.36845983787767134,
"eval_loss": 1.2414860725402832,
"eval_runtime": 76.9749,
"eval_samples_per_second": 15.68,
"eval_steps_per_second": 1.962,
"step": 2000
},
{
"epoch": 0.46057479734708917,
"grad_norm": 323.8576354980469,
"learning_rate": 4.232375337754851e-08,
"loss": 0.366,
"step": 2500
},
{
"epoch": 0.46057479734708917,
"eval_loss": 1.1949334144592285,
"eval_runtime": 77.0381,
"eval_samples_per_second": 15.668,
"eval_steps_per_second": 1.96,
"step": 2500
},
{
"epoch": 0.552689756816507,
"grad_norm": 339.28533935546875,
"learning_rate": 4.078850405305821e-08,
"loss": 0.3191,
"step": 3000
},
{
"epoch": 0.552689756816507,
"eval_loss": 1.1560280323028564,
"eval_runtime": 77.0026,
"eval_samples_per_second": 15.675,
"eval_steps_per_second": 1.961,
"step": 3000
},
{
"epoch": 0.6448047162859248,
"grad_norm": 60.27859878540039,
"learning_rate": 3.925325472856792e-08,
"loss": 0.2779,
"step": 3500
},
{
"epoch": 0.6448047162859248,
"eval_loss": 1.1233855485916138,
"eval_runtime": 76.9006,
"eval_samples_per_second": 15.696,
"eval_steps_per_second": 1.964,
"step": 3500
},
{
"epoch": 0.7369196757553427,
"grad_norm": 18.268455505371094,
"learning_rate": 3.7718005404077616e-08,
"loss": 0.2942,
"step": 4000
},
{
"epoch": 0.7369196757553427,
"eval_loss": 1.0993207693099976,
"eval_runtime": 77.2834,
"eval_samples_per_second": 15.618,
"eval_steps_per_second": 1.954,
"step": 4000
},
{
"epoch": 0.8290346352247605,
"grad_norm": 505.22723388671875,
"learning_rate": 3.6182756079587324e-08,
"loss": 0.2615,
"step": 4500
},
{
"epoch": 0.8290346352247605,
"eval_loss": 1.080470085144043,
"eval_runtime": 77.1214,
"eval_samples_per_second": 15.651,
"eval_steps_per_second": 1.958,
"step": 4500
},
{
"epoch": 0.9211495946941783,
"grad_norm": 1.2971173524856567,
"learning_rate": 3.4647506755097025e-08,
"loss": 0.2715,
"step": 5000
},
{
"epoch": 0.9211495946941783,
"eval_loss": 1.0614728927612305,
"eval_runtime": 77.2982,
"eval_samples_per_second": 15.615,
"eval_steps_per_second": 1.953,
"step": 5000
},
{
"epoch": 1.013264554163596,
"grad_norm": 0.17611829936504364,
"learning_rate": 3.3112257430606727e-08,
"loss": 0.2509,
"step": 5500
},
{
"epoch": 1.013264554163596,
"eval_loss": 1.0461839437484741,
"eval_runtime": 75.3494,
"eval_samples_per_second": 16.019,
"eval_steps_per_second": 2.004,
"step": 5500
},
{
"epoch": 1.105379513633014,
"grad_norm": 228.2562713623047,
"learning_rate": 3.1577008106116435e-08,
"loss": 0.2653,
"step": 6000
},
{
"epoch": 1.105379513633014,
"eval_loss": 1.0305675268173218,
"eval_runtime": 77.0542,
"eval_samples_per_second": 15.664,
"eval_steps_per_second": 1.96,
"step": 6000
},
{
"epoch": 1.1974944731024317,
"grad_norm": 432.4784240722656,
"learning_rate": 3.0041758781626136e-08,
"loss": 0.2199,
"step": 6500
},
{
"epoch": 1.1974944731024317,
"eval_loss": 1.0182703733444214,
"eval_runtime": 76.296,
"eval_samples_per_second": 15.82,
"eval_steps_per_second": 1.979,
"step": 6500
},
{
"epoch": 1.2896094325718497,
"grad_norm": 0.6385967135429382,
"learning_rate": 2.8506509457135834e-08,
"loss": 0.1844,
"step": 7000
},
{
"epoch": 1.2896094325718497,
"eval_loss": 1.0078601837158203,
"eval_runtime": 76.5441,
"eval_samples_per_second": 15.769,
"eval_steps_per_second": 1.973,
"step": 7000
},
{
"epoch": 1.3817243920412676,
"grad_norm": 57.47743606567383,
"learning_rate": 2.697126013264554e-08,
"loss": 0.2143,
"step": 7500
},
{
"epoch": 1.3817243920412676,
"eval_loss": 0.9969784617424011,
"eval_runtime": 75.6186,
"eval_samples_per_second": 15.962,
"eval_steps_per_second": 1.997,
"step": 7500
},
{
"epoch": 1.4738393515106853,
"grad_norm": 0.11704438179731369,
"learning_rate": 2.5436010808155244e-08,
"loss": 0.2011,
"step": 8000
},
{
"epoch": 1.4738393515106853,
"eval_loss": 0.9886976480484009,
"eval_runtime": 75.238,
"eval_samples_per_second": 16.042,
"eval_steps_per_second": 2.007,
"step": 8000
},
{
"epoch": 1.565954310980103,
"grad_norm": 39.71356964111328,
"learning_rate": 2.3900761483664948e-08,
"loss": 0.1995,
"step": 8500
},
{
"epoch": 1.565954310980103,
"eval_loss": 0.980699360370636,
"eval_runtime": 75.7533,
"eval_samples_per_second": 15.933,
"eval_steps_per_second": 1.993,
"step": 8500
},
{
"epoch": 1.658069270449521,
"grad_norm": 0.09355564415454865,
"learning_rate": 2.236551215917465e-08,
"loss": 0.2493,
"step": 9000
},
{
"epoch": 1.658069270449521,
"eval_loss": 0.9733120203018188,
"eval_runtime": 75.1219,
"eval_samples_per_second": 16.067,
"eval_steps_per_second": 2.01,
"step": 9000
},
{
"epoch": 1.750184229918939,
"grad_norm": 0.12566468119621277,
"learning_rate": 2.083026283468435e-08,
"loss": 0.2192,
"step": 9500
},
{
"epoch": 1.750184229918939,
"eval_loss": 0.966866672039032,
"eval_runtime": 76.5812,
"eval_samples_per_second": 15.761,
"eval_steps_per_second": 1.972,
"step": 9500
},
{
"epoch": 1.8422991893883567,
"grad_norm": 1.9423623085021973,
"learning_rate": 1.9295013510194056e-08,
"loss": 0.1882,
"step": 10000
},
{
"epoch": 1.8422991893883567,
"eval_loss": 0.9616385698318481,
"eval_runtime": 75.5454,
"eval_samples_per_second": 15.977,
"eval_steps_per_second": 1.999,
"step": 10000
},
{
"epoch": 1.9344141488577744,
"grad_norm": 0.12048076093196869,
"learning_rate": 1.7759764185703757e-08,
"loss": 0.2193,
"step": 10500
},
{
"epoch": 1.9344141488577744,
"eval_loss": 0.9550592303276062,
"eval_runtime": 75.1134,
"eval_samples_per_second": 16.069,
"eval_steps_per_second": 2.01,
"step": 10500
},
{
"epoch": 2.026529108327192,
"grad_norm": 0.19933176040649414,
"learning_rate": 1.622451486121346e-08,
"loss": 0.2148,
"step": 11000
},
{
"epoch": 2.026529108327192,
"eval_loss": 0.9494845271110535,
"eval_runtime": 76.2803,
"eval_samples_per_second": 15.823,
"eval_steps_per_second": 1.98,
"step": 11000
},
{
"epoch": 2.1186440677966103,
"grad_norm": 13.599303245544434,
"learning_rate": 1.4689265536723163e-08,
"loss": 0.1975,
"step": 11500
},
{
"epoch": 2.1186440677966103,
"eval_loss": 0.9449298977851868,
"eval_runtime": 75.6422,
"eval_samples_per_second": 15.957,
"eval_steps_per_second": 1.996,
"step": 11500
},
{
"epoch": 2.210759027266028,
"grad_norm": 0.8174373507499695,
"learning_rate": 1.3154016212232866e-08,
"loss": 0.1791,
"step": 12000
},
{
"epoch": 2.210759027266028,
"eval_loss": 0.940927267074585,
"eval_runtime": 75.2379,
"eval_samples_per_second": 16.042,
"eval_steps_per_second": 2.007,
"step": 12000
},
{
"epoch": 2.3028739867354457,
"grad_norm": 705.062744140625,
"learning_rate": 1.161876688774257e-08,
"loss": 0.2057,
"step": 12500
},
{
"epoch": 2.3028739867354457,
"eval_loss": 0.9382032155990601,
"eval_runtime": 76.1075,
"eval_samples_per_second": 15.859,
"eval_steps_per_second": 1.984,
"step": 12500
},
{
"epoch": 2.3949889462048635,
"grad_norm": 0.7270774841308594,
"learning_rate": 1.0083517563252273e-08,
"loss": 0.2037,
"step": 13000
},
{
"epoch": 2.3949889462048635,
"eval_loss": 0.9352426528930664,
"eval_runtime": 75.5821,
"eval_samples_per_second": 15.969,
"eval_steps_per_second": 1.998,
"step": 13000
},
{
"epoch": 2.4871039056742816,
"grad_norm": 2.034397602081299,
"learning_rate": 8.548268238761974e-09,
"loss": 0.2011,
"step": 13500
},
{
"epoch": 2.4871039056742816,
"eval_loss": 0.9317955374717712,
"eval_runtime": 75.3697,
"eval_samples_per_second": 16.014,
"eval_steps_per_second": 2.003,
"step": 13500
},
{
"epoch": 2.5792188651436994,
"grad_norm": 299.313232421875,
"learning_rate": 7.013018914271678e-09,
"loss": 0.1551,
"step": 14000
},
{
"epoch": 2.5792188651436994,
"eval_loss": 0.9295068979263306,
"eval_runtime": 76.109,
"eval_samples_per_second": 15.859,
"eval_steps_per_second": 1.984,
"step": 14000
},
{
"epoch": 2.671333824613117,
"grad_norm": 0.04151364788413048,
"learning_rate": 5.47776958978138e-09,
"loss": 0.1565,
"step": 14500
},
{
"epoch": 2.671333824613117,
"eval_loss": 0.9278931617736816,
"eval_runtime": 75.8114,
"eval_samples_per_second": 15.921,
"eval_steps_per_second": 1.992,
"step": 14500
},
{
"epoch": 2.7634487840825352,
"grad_norm": 4.239564895629883,
"learning_rate": 3.942520265291083e-09,
"loss": 0.1963,
"step": 15000
},
{
"epoch": 2.7634487840825352,
"eval_loss": 0.9268263578414917,
"eval_runtime": 75.5159,
"eval_samples_per_second": 15.983,
"eval_steps_per_second": 2.0,
"step": 15000
},
{
"epoch": 2.855563743551953,
"grad_norm": 0.001815026975236833,
"learning_rate": 2.407270940800786e-09,
"loss": 0.1823,
"step": 15500
},
{
"epoch": 2.855563743551953,
"eval_loss": 0.9261995553970337,
"eval_runtime": 76.2027,
"eval_samples_per_second": 15.839,
"eval_steps_per_second": 1.982,
"step": 15500
},
{
"epoch": 2.9476787030213707,
"grad_norm": 221.31707763671875,
"learning_rate": 8.720216163104888e-10,
"loss": 0.1854,
"step": 16000
},
{
"epoch": 2.9476787030213707,
"eval_loss": 0.9258912801742554,
"eval_runtime": 75.783,
"eval_samples_per_second": 15.927,
"eval_steps_per_second": 1.993,
"step": 16000
}
],
"logging_steps": 500,
"max_steps": 16284,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5756853136862520.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
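
A minimal sketch of how this trainer state could be inspected programmatically, assuming the JSON above is saved locally as trainer_state.json inside the checkpoint directory (the path below is an assumption, not taken from the file itself). It splits log_history into training and evaluation records and prints the loss curve alongside the best metric.

import json

# Path is an assumption; point it at wherever checkpoint-16000 lives locally.
with open("checkpoint-16000/trainer_state.json") as f:
    state = json.load(f)

# log_history interleaves training records (keyed by "loss") and
# evaluation records (keyed by "eval_loss"); separate them by key.
train_logs = [entry for entry in state["log_history"] if "loss" in entry]
eval_logs = [entry for entry in state["log_history"] if "eval_loss" in entry]

print(f"best checkpoint: {state['best_model_checkpoint']}")
print(f"best eval_loss:  {state['best_metric']:.4f}")

# Training and eval entries are logged at the same 500-step cadence,
# so they pair up one-to-one.
for train, eval_ in zip(train_logs, eval_logs):
    print(f"step {train['step']:>6}  "
          f"train loss {train['loss']:.4f}  "
          f"eval loss {eval_['eval_loss']:.4f}")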