{
"best_metric": 0.8908902406692505,
"best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-300",
"epoch": 0.20602626834921453,
"eval_steps": 50,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0025753283543651817,
"grad_norm": 21.336819681898895,
"learning_rate": 2.9411764705882355e-06,
"loss": 3.0444,
"num_input_tokens_seen": 58496,
"step": 5
},
{
"epoch": 0.0051506567087303634,
"grad_norm": 20.576623155848594,
"learning_rate": 5.882352941176471e-06,
"loss": 2.9824,
"num_input_tokens_seen": 116960,
"step": 10
},
{
"epoch": 0.007725985063095545,
"grad_norm": 22.989873871108518,
"learning_rate": 8.823529411764707e-06,
"loss": 2.8371,
"num_input_tokens_seen": 175448,
"step": 15
},
{
"epoch": 0.010301313417460727,
"grad_norm": 19.533434089690918,
"learning_rate": 1.1764705882352942e-05,
"loss": 2.5198,
"num_input_tokens_seen": 233944,
"step": 20
},
{
"epoch": 0.012876641771825908,
"grad_norm": 12.509494197145006,
"learning_rate": 1.4705882352941177e-05,
"loss": 1.772,
"num_input_tokens_seen": 292416,
"step": 25
},
{
"epoch": 0.01545197012619109,
"grad_norm": 3.6901887027066667,
"learning_rate": 1.7647058823529414e-05,
"loss": 1.2263,
"num_input_tokens_seen": 350904,
"step": 30
},
{
"epoch": 0.018027298480556272,
"grad_norm": 2.3996076770849744,
"learning_rate": 2.058823529411765e-05,
"loss": 1.0102,
"num_input_tokens_seen": 409384,
"step": 35
},
{
"epoch": 0.020602626834921454,
"grad_norm": 0.9253415848864577,
"learning_rate": 2.3529411764705884e-05,
"loss": 0.9378,
"num_input_tokens_seen": 467864,
"step": 40
},
{
"epoch": 0.023177955189286635,
"grad_norm": 1.1966244115097795,
"learning_rate": 2.647058823529412e-05,
"loss": 0.9265,
"num_input_tokens_seen": 526384,
"step": 45
},
{
"epoch": 0.025753283543651816,
"grad_norm": 1.853648349752417,
"learning_rate": 2.9411764705882354e-05,
"loss": 0.9157,
"num_input_tokens_seen": 584856,
"step": 50
},
{
"epoch": 0.025753283543651816,
"eval_loss": 0.9191630482673645,
"eval_runtime": 36.6123,
"eval_samples_per_second": 1.639,
"eval_steps_per_second": 0.41,
"num_input_tokens_seen": 584856,
"step": 50
},
{
"epoch": 0.028328611898016998,
"grad_norm": 0.8294990584587586,
"learning_rate": 3.235294117647059e-05,
"loss": 0.9009,
"num_input_tokens_seen": 643344,
"step": 55
},
{
"epoch": 0.03090394025238218,
"grad_norm": 0.8278765532866457,
"learning_rate": 3.529411764705883e-05,
"loss": 0.9063,
"num_input_tokens_seen": 701808,
"step": 60
},
{
"epoch": 0.03347926860674736,
"grad_norm": 0.7285901101792476,
"learning_rate": 3.8235294117647055e-05,
"loss": 0.9031,
"num_input_tokens_seen": 760304,
"step": 65
},
{
"epoch": 0.036054596961112545,
"grad_norm": 0.5341783688819233,
"learning_rate": 4.11764705882353e-05,
"loss": 0.8991,
"num_input_tokens_seen": 818760,
"step": 70
},
{
"epoch": 0.03862992531547772,
"grad_norm": 0.46059313680988906,
"learning_rate": 4.411764705882353e-05,
"loss": 0.9055,
"num_input_tokens_seen": 877256,
"step": 75
},
{
"epoch": 0.04120525366984291,
"grad_norm": 0.8194379237293679,
"learning_rate": 4.705882352941177e-05,
"loss": 0.9092,
"num_input_tokens_seen": 935752,
"step": 80
},
{
"epoch": 0.043780582024208085,
"grad_norm": 0.6745093544830881,
"learning_rate": 5e-05,
"loss": 0.9069,
"num_input_tokens_seen": 994216,
"step": 85
},
{
"epoch": 0.04635591037857327,
"grad_norm": 0.2894672897884604,
"learning_rate": 5.294117647058824e-05,
"loss": 0.8924,
"num_input_tokens_seen": 1052704,
"step": 90
},
{
"epoch": 0.04893123873293845,
"grad_norm": 0.5108489024576455,
"learning_rate": 5.588235294117647e-05,
"loss": 0.9059,
"num_input_tokens_seen": 1111176,
"step": 95
},
{
"epoch": 0.05150656708730363,
"grad_norm": 0.40317180386305224,
"learning_rate": 5.882352941176471e-05,
"loss": 0.901,
"num_input_tokens_seen": 1169664,
"step": 100
},
{
"epoch": 0.05150656708730363,
"eval_loss": 0.9077914953231812,
"eval_runtime": 16.8879,
"eval_samples_per_second": 3.553,
"eval_steps_per_second": 0.888,
"num_input_tokens_seen": 1169664,
"step": 100
},
{
"epoch": 0.05408189544166881,
"grad_norm": 0.412918917979438,
"learning_rate": 6.176470588235295e-05,
"loss": 0.9159,
"num_input_tokens_seen": 1228112,
"step": 105
},
{
"epoch": 0.056657223796033995,
"grad_norm": 0.34797408069968117,
"learning_rate": 6.470588235294118e-05,
"loss": 0.91,
"num_input_tokens_seen": 1286608,
"step": 110
},
{
"epoch": 0.05923255215039917,
"grad_norm": 0.27558494796967653,
"learning_rate": 6.764705882352942e-05,
"loss": 0.9047,
"num_input_tokens_seen": 1345072,
"step": 115
},
{
"epoch": 0.06180788050476436,
"grad_norm": 0.5422134023513459,
"learning_rate": 7.058823529411765e-05,
"loss": 0.9022,
"num_input_tokens_seen": 1403544,
"step": 120
},
{
"epoch": 0.06438320885912954,
"grad_norm": 0.4452796218739235,
"learning_rate": 7.352941176470589e-05,
"loss": 0.9081,
"num_input_tokens_seen": 1462024,
"step": 125
},
{
"epoch": 0.06695853721349472,
"grad_norm": 0.5632558160730559,
"learning_rate": 7.647058823529411e-05,
"loss": 0.8939,
"num_input_tokens_seen": 1520528,
"step": 130
},
{
"epoch": 0.0695338655678599,
"grad_norm": 0.3383115884436812,
"learning_rate": 7.941176470588235e-05,
"loss": 0.9029,
"num_input_tokens_seen": 1579024,
"step": 135
},
{
"epoch": 0.07210919392222509,
"grad_norm": 0.3506611095466577,
"learning_rate": 8.23529411764706e-05,
"loss": 0.9014,
"num_input_tokens_seen": 1637504,
"step": 140
},
{
"epoch": 0.07468452227659027,
"grad_norm": 0.6328034405712752,
"learning_rate": 8.529411764705883e-05,
"loss": 0.9053,
"num_input_tokens_seen": 1696024,
"step": 145
},
{
"epoch": 0.07725985063095545,
"grad_norm": 0.3511657661506363,
"learning_rate": 8.823529411764706e-05,
"loss": 0.9032,
"num_input_tokens_seen": 1754512,
"step": 150
},
{
"epoch": 0.07725985063095545,
"eval_loss": 0.8962129950523376,
"eval_runtime": 17.0673,
"eval_samples_per_second": 3.515,
"eval_steps_per_second": 0.879,
"num_input_tokens_seen": 1754512,
"step": 150
},
{
"epoch": 0.07983517898532062,
"grad_norm": 0.4047681172482029,
"learning_rate": 9.11764705882353e-05,
"loss": 0.8985,
"num_input_tokens_seen": 1812976,
"step": 155
},
{
"epoch": 0.08241050733968582,
"grad_norm": 0.37729033726569733,
"learning_rate": 9.411764705882353e-05,
"loss": 0.8949,
"num_input_tokens_seen": 1871464,
"step": 160
},
{
"epoch": 0.08498583569405099,
"grad_norm": 0.4655744785034158,
"learning_rate": 9.705882352941177e-05,
"loss": 0.9069,
"num_input_tokens_seen": 1929928,
"step": 165
},
{
"epoch": 0.08756116404841617,
"grad_norm": 0.30643056878817176,
"learning_rate": 0.0001,
"loss": 0.9049,
"num_input_tokens_seen": 1988432,
"step": 170
},
{
"epoch": 0.09013649240278135,
"grad_norm": 0.39944696269496754,
"learning_rate": 9.999940874631277e-05,
"loss": 0.9026,
"num_input_tokens_seen": 2046920,
"step": 175
},
{
"epoch": 0.09271182075714654,
"grad_norm": 0.31301259106593154,
"learning_rate": 9.999763499923432e-05,
"loss": 0.8984,
"num_input_tokens_seen": 2105392,
"step": 180
},
{
"epoch": 0.09528714911151172,
"grad_norm": 0.4309753054454554,
"learning_rate": 9.999467880071402e-05,
"loss": 0.9057,
"num_input_tokens_seen": 2163872,
"step": 185
},
{
"epoch": 0.0978624774658769,
"grad_norm": 0.262930252305763,
"learning_rate": 9.999054022066641e-05,
"loss": 0.9078,
"num_input_tokens_seen": 2222352,
"step": 190
},
{
"epoch": 0.10043780582024209,
"grad_norm": 0.22073598270887426,
"learning_rate": 9.998521935696953e-05,
"loss": 0.9028,
"num_input_tokens_seen": 2280800,
"step": 195
},
{
"epoch": 0.10301313417460727,
"grad_norm": 0.23764668792524696,
"learning_rate": 9.997871633546257e-05,
"loss": 0.9053,
"num_input_tokens_seen": 2339304,
"step": 200
},
{
"epoch": 0.10301313417460727,
"eval_loss": 0.8982028961181641,
"eval_runtime": 16.9118,
"eval_samples_per_second": 3.548,
"eval_steps_per_second": 0.887,
"num_input_tokens_seen": 2339304,
"step": 200
},
{
"epoch": 0.10558846252897244,
"grad_norm": 0.6222576114383499,
"learning_rate": 9.997103130994296e-05,
"loss": 0.9003,
"num_input_tokens_seen": 2397808,
"step": 205
},
{
"epoch": 0.10816379088333762,
"grad_norm": 0.2983149992592585,
"learning_rate": 9.996216446216267e-05,
"loss": 0.8969,
"num_input_tokens_seen": 2456288,
"step": 210
},
{
"epoch": 0.11073911923770281,
"grad_norm": 0.3505370510576513,
"learning_rate": 9.995211600182397e-05,
"loss": 0.9114,
"num_input_tokens_seen": 2514784,
"step": 215
},
{
"epoch": 0.11331444759206799,
"grad_norm": 0.3683806652106065,
"learning_rate": 9.994088616657444e-05,
"loss": 0.899,
"num_input_tokens_seen": 2573240,
"step": 220
},
{
"epoch": 0.11588977594643317,
"grad_norm": 0.21111769827155855,
"learning_rate": 9.992847522200133e-05,
"loss": 0.898,
"num_input_tokens_seen": 2631672,
"step": 225
},
{
"epoch": 0.11846510430079835,
"grad_norm": 0.3426987181783304,
"learning_rate": 9.99148834616253e-05,
"loss": 0.9006,
"num_input_tokens_seen": 2690112,
"step": 230
},
{
"epoch": 0.12104043265516354,
"grad_norm": 0.236983209071443,
"learning_rate": 9.990011120689351e-05,
"loss": 0.8973,
"num_input_tokens_seen": 2748608,
"step": 235
},
{
"epoch": 0.12361576100952872,
"grad_norm": 0.4575208248826409,
"learning_rate": 9.988415880717194e-05,
"loss": 0.8885,
"num_input_tokens_seen": 2807080,
"step": 240
},
{
"epoch": 0.1261910893638939,
"grad_norm": 0.5470317919414993,
"learning_rate": 9.986702663973722e-05,
"loss": 0.9066,
"num_input_tokens_seen": 2865520,
"step": 245
},
{
"epoch": 0.12876641771825909,
"grad_norm": 0.4992479706331095,
"learning_rate": 9.98487151097676e-05,
"loss": 0.9098,
"num_input_tokens_seen": 2924016,
"step": 250
},
{
"epoch": 0.12876641771825909,
"eval_loss": 0.8956434726715088,
"eval_runtime": 17.4804,
"eval_samples_per_second": 3.432,
"eval_steps_per_second": 0.858,
"num_input_tokens_seen": 2924016,
"step": 250
},
{
"epoch": 0.13134174607262425,
"grad_norm": 0.3762164361984238,
"learning_rate": 9.98292246503335e-05,
"loss": 0.8987,
"num_input_tokens_seen": 2982520,
"step": 255
},
{
"epoch": 0.13391707442698944,
"grad_norm": 0.6447043002410199,
"learning_rate": 9.980855572238714e-05,
"loss": 0.9036,
"num_input_tokens_seen": 3041008,
"step": 260
},
{
"epoch": 0.13649240278135463,
"grad_norm": 0.5308092769971742,
"learning_rate": 9.978670881475172e-05,
"loss": 0.8961,
"num_input_tokens_seen": 3099464,
"step": 265
},
{
"epoch": 0.1390677311357198,
"grad_norm": 0.508333330469703,
"learning_rate": 9.976368444410985e-05,
"loss": 0.9012,
"num_input_tokens_seen": 3157944,
"step": 270
},
{
"epoch": 0.141643059490085,
"grad_norm": 0.6801788563719119,
"learning_rate": 9.973948315499126e-05,
"loss": 0.8985,
"num_input_tokens_seen": 3216448,
"step": 275
},
{
"epoch": 0.14421838784445018,
"grad_norm": 0.6933074703933572,
"learning_rate": 9.971410551976002e-05,
"loss": 0.9114,
"num_input_tokens_seen": 3274928,
"step": 280
},
{
"epoch": 0.14679371619881534,
"grad_norm": 0.21208820897494882,
"learning_rate": 9.968755213860094e-05,
"loss": 0.8886,
"num_input_tokens_seen": 3333408,
"step": 285
},
{
"epoch": 0.14936904455318054,
"grad_norm": 0.5791422669000065,
"learning_rate": 9.96598236395054e-05,
"loss": 0.8929,
"num_input_tokens_seen": 3391896,
"step": 290
},
{
"epoch": 0.1519443729075457,
"grad_norm": 0.3460368893191152,
"learning_rate": 9.96309206782565e-05,
"loss": 0.9091,
"num_input_tokens_seen": 3450392,
"step": 295
},
{
"epoch": 0.1545197012619109,
"grad_norm": 0.22425222135997747,
"learning_rate": 9.960084393841355e-05,
"loss": 0.8893,
"num_input_tokens_seen": 3508888,
"step": 300
},
{
"epoch": 0.1545197012619109,
"eval_loss": 0.8908902406692505,
"eval_runtime": 16.9521,
"eval_samples_per_second": 3.539,
"eval_steps_per_second": 0.885,
"num_input_tokens_seen": 3508888,
"step": 300
},
{
"epoch": 0.15709502961627608,
"grad_norm": 0.23111596622064604,
"learning_rate": 9.956959413129585e-05,
"loss": 0.9056,
"num_input_tokens_seen": 3567368,
"step": 305
},
{
"epoch": 0.15967035797064125,
"grad_norm": 0.3918406894807393,
"learning_rate": 9.953717199596598e-05,
"loss": 0.8982,
"num_input_tokens_seen": 3625848,
"step": 310
},
{
"epoch": 0.16224568632500644,
"grad_norm": 0.22081666860189372,
"learning_rate": 9.95035782992122e-05,
"loss": 0.8968,
"num_input_tokens_seen": 3684336,
"step": 315
},
{
"epoch": 0.16482101467937163,
"grad_norm": 0.18024383676398176,
"learning_rate": 9.94688138355304e-05,
"loss": 0.8975,
"num_input_tokens_seen": 3742800,
"step": 320
},
{
"epoch": 0.1673963430337368,
"grad_norm": 0.3866897344302321,
"learning_rate": 9.943287942710527e-05,
"loss": 0.9061,
"num_input_tokens_seen": 3801280,
"step": 325
},
{
"epoch": 0.16997167138810199,
"grad_norm": 0.4804151381712559,
"learning_rate": 9.939577592379088e-05,
"loss": 0.8948,
"num_input_tokens_seen": 3859792,
"step": 330
},
{
"epoch": 0.17254699974246718,
"grad_norm": 0.35878231707669056,
"learning_rate": 9.935750420309055e-05,
"loss": 0.9063,
"num_input_tokens_seen": 3918272,
"step": 335
},
{
"epoch": 0.17512232809683234,
"grad_norm": 0.8713957774909928,
"learning_rate": 9.931806517013612e-05,
"loss": 0.8952,
"num_input_tokens_seen": 3976760,
"step": 340
},
{
"epoch": 0.17769765645119753,
"grad_norm": 0.6671526212854116,
"learning_rate": 9.927745975766654e-05,
"loss": 0.9136,
"num_input_tokens_seen": 4035240,
"step": 345
},
{
"epoch": 0.1802729848055627,
"grad_norm": 0.28702679234521244,
"learning_rate": 9.923568892600578e-05,
"loss": 0.9075,
"num_input_tokens_seen": 4093688,
"step": 350
},
{
"epoch": 0.1802729848055627,
"eval_loss": 0.89204341173172,
"eval_runtime": 16.5819,
"eval_samples_per_second": 3.618,
"eval_steps_per_second": 0.905,
"num_input_tokens_seen": 4093688,
"step": 350
},
{
"epoch": 0.1828483131599279,
"grad_norm": 0.32233149132200706,
"learning_rate": 9.91927536630402e-05,
"loss": 0.8812,
"num_input_tokens_seen": 4152160,
"step": 355
},
{
"epoch": 0.18542364151429308,
"grad_norm": 0.5071871697326992,
"learning_rate": 9.91486549841951e-05,
"loss": 0.9109,
"num_input_tokens_seen": 4210648,
"step": 360
},
{
"epoch": 0.18799896986865824,
"grad_norm": 0.4532792519849944,
"learning_rate": 9.91033939324107e-05,
"loss": 0.9176,
"num_input_tokens_seen": 4269136,
"step": 365
},
{
"epoch": 0.19057429822302344,
"grad_norm": 0.5409761562534501,
"learning_rate": 9.905697157811761e-05,
"loss": 0.9077,
"num_input_tokens_seen": 4327664,
"step": 370
},
{
"epoch": 0.19314962657738863,
"grad_norm": 0.3432361562809093,
"learning_rate": 9.900938901921131e-05,
"loss": 0.893,
"num_input_tokens_seen": 4386120,
"step": 375
},
{
"epoch": 0.1957249549317538,
"grad_norm": 0.4756530294720616,
"learning_rate": 9.896064738102635e-05,
"loss": 0.9094,
"num_input_tokens_seen": 4444560,
"step": 380
},
{
"epoch": 0.19830028328611898,
"grad_norm": 0.424836974193983,
"learning_rate": 9.891074781630966e-05,
"loss": 0.9091,
"num_input_tokens_seen": 4503016,
"step": 385
},
{
"epoch": 0.20087561164048418,
"grad_norm": 0.31316926977469683,
"learning_rate": 9.885969150519331e-05,
"loss": 0.9033,
"num_input_tokens_seen": 4561496,
"step": 390
},
{
"epoch": 0.20345093999484934,
"grad_norm": 0.6108378682480797,
"learning_rate": 9.88074796551666e-05,
"loss": 0.8851,
"num_input_tokens_seen": 4619944,
"step": 395
},
{
"epoch": 0.20602626834921453,
"grad_norm": 0.38294566619219206,
"learning_rate": 9.875411350104744e-05,
"loss": 0.9004,
"num_input_tokens_seen": 4678384,
"step": 400
},
{
"epoch": 0.20602626834921453,
"eval_loss": 0.9086406826972961,
"eval_runtime": 16.7827,
"eval_samples_per_second": 3.575,
"eval_steps_per_second": 0.894,
"num_input_tokens_seen": 4678384,
"step": 400
}
],
"logging_steps": 5,
"max_steps": 3400,
"num_input_tokens_seen": 4678384,
"num_train_epochs": 2,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 262667545346048.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}