{
"best_metric": 0.7743102312088013,
"best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-1100",
"epoch": 0.56657223796034,
"eval_steps": 50,
"global_step": 1100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0025753283543651817,
"grad_norm": 21.336819681898895,
"learning_rate": 2.9411764705882355e-06,
"loss": 3.0444,
"num_input_tokens_seen": 58496,
"step": 5
},
{
"epoch": 0.0051506567087303634,
"grad_norm": 20.576623155848594,
"learning_rate": 5.882352941176471e-06,
"loss": 2.9824,
"num_input_tokens_seen": 116960,
"step": 10
},
{
"epoch": 0.007725985063095545,
"grad_norm": 22.989873871108518,
"learning_rate": 8.823529411764707e-06,
"loss": 2.8371,
"num_input_tokens_seen": 175448,
"step": 15
},
{
"epoch": 0.010301313417460727,
"grad_norm": 19.533434089690918,
"learning_rate": 1.1764705882352942e-05,
"loss": 2.5198,
"num_input_tokens_seen": 233944,
"step": 20
},
{
"epoch": 0.012876641771825908,
"grad_norm": 12.509494197145006,
"learning_rate": 1.4705882352941177e-05,
"loss": 1.772,
"num_input_tokens_seen": 292416,
"step": 25
},
{
"epoch": 0.01545197012619109,
"grad_norm": 3.6901887027066667,
"learning_rate": 1.7647058823529414e-05,
"loss": 1.2263,
"num_input_tokens_seen": 350904,
"step": 30
},
{
"epoch": 0.018027298480556272,
"grad_norm": 2.3996076770849744,
"learning_rate": 2.058823529411765e-05,
"loss": 1.0102,
"num_input_tokens_seen": 409384,
"step": 35
},
{
"epoch": 0.020602626834921454,
"grad_norm": 0.9253415848864577,
"learning_rate": 2.3529411764705884e-05,
"loss": 0.9378,
"num_input_tokens_seen": 467864,
"step": 40
},
{
"epoch": 0.023177955189286635,
"grad_norm": 1.1966244115097795,
"learning_rate": 2.647058823529412e-05,
"loss": 0.9265,
"num_input_tokens_seen": 526384,
"step": 45
},
{
"epoch": 0.025753283543651816,
"grad_norm": 1.853648349752417,
"learning_rate": 2.9411764705882354e-05,
"loss": 0.9157,
"num_input_tokens_seen": 584856,
"step": 50
},
{
"epoch": 0.025753283543651816,
"eval_loss": 0.9191630482673645,
"eval_runtime": 36.6123,
"eval_samples_per_second": 1.639,
"eval_steps_per_second": 0.41,
"num_input_tokens_seen": 584856,
"step": 50
},
{
"epoch": 0.028328611898016998,
"grad_norm": 0.8294990584587586,
"learning_rate": 3.235294117647059e-05,
"loss": 0.9009,
"num_input_tokens_seen": 643344,
"step": 55
},
{
"epoch": 0.03090394025238218,
"grad_norm": 0.8278765532866457,
"learning_rate": 3.529411764705883e-05,
"loss": 0.9063,
"num_input_tokens_seen": 701808,
"step": 60
},
{
"epoch": 0.03347926860674736,
"grad_norm": 0.7285901101792476,
"learning_rate": 3.8235294117647055e-05,
"loss": 0.9031,
"num_input_tokens_seen": 760304,
"step": 65
},
{
"epoch": 0.036054596961112545,
"grad_norm": 0.5341783688819233,
"learning_rate": 4.11764705882353e-05,
"loss": 0.8991,
"num_input_tokens_seen": 818760,
"step": 70
},
{
"epoch": 0.03862992531547772,
"grad_norm": 0.46059313680988906,
"learning_rate": 4.411764705882353e-05,
"loss": 0.9055,
"num_input_tokens_seen": 877256,
"step": 75
},
{
"epoch": 0.04120525366984291,
"grad_norm": 0.8194379237293679,
"learning_rate": 4.705882352941177e-05,
"loss": 0.9092,
"num_input_tokens_seen": 935752,
"step": 80
},
{
"epoch": 0.043780582024208085,
"grad_norm": 0.6745093544830881,
"learning_rate": 5e-05,
"loss": 0.9069,
"num_input_tokens_seen": 994216,
"step": 85
},
{
"epoch": 0.04635591037857327,
"grad_norm": 0.2894672897884604,
"learning_rate": 5.294117647058824e-05,
"loss": 0.8924,
"num_input_tokens_seen": 1052704,
"step": 90
},
{
"epoch": 0.04893123873293845,
"grad_norm": 0.5108489024576455,
"learning_rate": 5.588235294117647e-05,
"loss": 0.9059,
"num_input_tokens_seen": 1111176,
"step": 95
},
{
"epoch": 0.05150656708730363,
"grad_norm": 0.40317180386305224,
"learning_rate": 5.882352941176471e-05,
"loss": 0.901,
"num_input_tokens_seen": 1169664,
"step": 100
},
{
"epoch": 0.05150656708730363,
"eval_loss": 0.9077914953231812,
"eval_runtime": 16.8879,
"eval_samples_per_second": 3.553,
"eval_steps_per_second": 0.888,
"num_input_tokens_seen": 1169664,
"step": 100
},
{
"epoch": 0.05408189544166881,
"grad_norm": 0.412918917979438,
"learning_rate": 6.176470588235295e-05,
"loss": 0.9159,
"num_input_tokens_seen": 1228112,
"step": 105
},
{
"epoch": 0.056657223796033995,
"grad_norm": 0.34797408069968117,
"learning_rate": 6.470588235294118e-05,
"loss": 0.91,
"num_input_tokens_seen": 1286608,
"step": 110
},
{
"epoch": 0.05923255215039917,
"grad_norm": 0.27558494796967653,
"learning_rate": 6.764705882352942e-05,
"loss": 0.9047,
"num_input_tokens_seen": 1345072,
"step": 115
},
{
"epoch": 0.06180788050476436,
"grad_norm": 0.5422134023513459,
"learning_rate": 7.058823529411765e-05,
"loss": 0.9022,
"num_input_tokens_seen": 1403544,
"step": 120
},
{
"epoch": 0.06438320885912954,
"grad_norm": 0.4452796218739235,
"learning_rate": 7.352941176470589e-05,
"loss": 0.9081,
"num_input_tokens_seen": 1462024,
"step": 125
},
{
"epoch": 0.06695853721349472,
"grad_norm": 0.5632558160730559,
"learning_rate": 7.647058823529411e-05,
"loss": 0.8939,
"num_input_tokens_seen": 1520528,
"step": 130
},
{
"epoch": 0.0695338655678599,
"grad_norm": 0.3383115884436812,
"learning_rate": 7.941176470588235e-05,
"loss": 0.9029,
"num_input_tokens_seen": 1579024,
"step": 135
},
{
"epoch": 0.07210919392222509,
"grad_norm": 0.3506611095466577,
"learning_rate": 8.23529411764706e-05,
"loss": 0.9014,
"num_input_tokens_seen": 1637504,
"step": 140
},
{
"epoch": 0.07468452227659027,
"grad_norm": 0.6328034405712752,
"learning_rate": 8.529411764705883e-05,
"loss": 0.9053,
"num_input_tokens_seen": 1696024,
"step": 145
},
{
"epoch": 0.07725985063095545,
"grad_norm": 0.3511657661506363,
"learning_rate": 8.823529411764706e-05,
"loss": 0.9032,
"num_input_tokens_seen": 1754512,
"step": 150
},
{
"epoch": 0.07725985063095545,
"eval_loss": 0.8962129950523376,
"eval_runtime": 17.0673,
"eval_samples_per_second": 3.515,
"eval_steps_per_second": 0.879,
"num_input_tokens_seen": 1754512,
"step": 150
},
{
"epoch": 0.07983517898532062,
"grad_norm": 0.4047681172482029,
"learning_rate": 9.11764705882353e-05,
"loss": 0.8985,
"num_input_tokens_seen": 1812976,
"step": 155
},
{
"epoch": 0.08241050733968582,
"grad_norm": 0.37729033726569733,
"learning_rate": 9.411764705882353e-05,
"loss": 0.8949,
"num_input_tokens_seen": 1871464,
"step": 160
},
{
"epoch": 0.08498583569405099,
"grad_norm": 0.4655744785034158,
"learning_rate": 9.705882352941177e-05,
"loss": 0.9069,
"num_input_tokens_seen": 1929928,
"step": 165
},
{
"epoch": 0.08756116404841617,
"grad_norm": 0.30643056878817176,
"learning_rate": 0.0001,
"loss": 0.9049,
"num_input_tokens_seen": 1988432,
"step": 170
},
{
"epoch": 0.09013649240278135,
"grad_norm": 0.39944696269496754,
"learning_rate": 9.999940874631277e-05,
"loss": 0.9026,
"num_input_tokens_seen": 2046920,
"step": 175
},
{
"epoch": 0.09271182075714654,
"grad_norm": 0.31301259106593154,
"learning_rate": 9.999763499923432e-05,
"loss": 0.8984,
"num_input_tokens_seen": 2105392,
"step": 180
},
{
"epoch": 0.09528714911151172,
"grad_norm": 0.4309753054454554,
"learning_rate": 9.999467880071402e-05,
"loss": 0.9057,
"num_input_tokens_seen": 2163872,
"step": 185
},
{
"epoch": 0.0978624774658769,
"grad_norm": 0.262930252305763,
"learning_rate": 9.999054022066641e-05,
"loss": 0.9078,
"num_input_tokens_seen": 2222352,
"step": 190
},
{
"epoch": 0.10043780582024209,
"grad_norm": 0.22073598270887426,
"learning_rate": 9.998521935696953e-05,
"loss": 0.9028,
"num_input_tokens_seen": 2280800,
"step": 195
},
{
"epoch": 0.10301313417460727,
"grad_norm": 0.23764668792524696,
"learning_rate": 9.997871633546257e-05,
"loss": 0.9053,
"num_input_tokens_seen": 2339304,
"step": 200
},
{
"epoch": 0.10301313417460727,
"eval_loss": 0.8982028961181641,
"eval_runtime": 16.9118,
"eval_samples_per_second": 3.548,
"eval_steps_per_second": 0.887,
"num_input_tokens_seen": 2339304,
"step": 200
},
{
"epoch": 0.10558846252897244,
"grad_norm": 0.6222576114383499,
"learning_rate": 9.997103130994296e-05,
"loss": 0.9003,
"num_input_tokens_seen": 2397808,
"step": 205
},
{
"epoch": 0.10816379088333762,
"grad_norm": 0.2983149992592585,
"learning_rate": 9.996216446216267e-05,
"loss": 0.8969,
"num_input_tokens_seen": 2456288,
"step": 210
},
{
"epoch": 0.11073911923770281,
"grad_norm": 0.3505370510576513,
"learning_rate": 9.995211600182397e-05,
"loss": 0.9114,
"num_input_tokens_seen": 2514784,
"step": 215
},
{
"epoch": 0.11331444759206799,
"grad_norm": 0.3683806652106065,
"learning_rate": 9.994088616657444e-05,
"loss": 0.899,
"num_input_tokens_seen": 2573240,
"step": 220
},
{
"epoch": 0.11588977594643317,
"grad_norm": 0.21111769827155855,
"learning_rate": 9.992847522200133e-05,
"loss": 0.898,
"num_input_tokens_seen": 2631672,
"step": 225
},
{
"epoch": 0.11846510430079835,
"grad_norm": 0.3426987181783304,
"learning_rate": 9.99148834616253e-05,
"loss": 0.9006,
"num_input_tokens_seen": 2690112,
"step": 230
},
{
"epoch": 0.12104043265516354,
"grad_norm": 0.236983209071443,
"learning_rate": 9.990011120689351e-05,
"loss": 0.8973,
"num_input_tokens_seen": 2748608,
"step": 235
},
{
"epoch": 0.12361576100952872,
"grad_norm": 0.4575208248826409,
"learning_rate": 9.988415880717194e-05,
"loss": 0.8885,
"num_input_tokens_seen": 2807080,
"step": 240
},
{
"epoch": 0.1261910893638939,
"grad_norm": 0.5470317919414993,
"learning_rate": 9.986702663973722e-05,
"loss": 0.9066,
"num_input_tokens_seen": 2865520,
"step": 245
},
{
"epoch": 0.12876641771825909,
"grad_norm": 0.4992479706331095,
"learning_rate": 9.98487151097676e-05,
"loss": 0.9098,
"num_input_tokens_seen": 2924016,
"step": 250
},
{
"epoch": 0.12876641771825909,
"eval_loss": 0.8956434726715088,
"eval_runtime": 17.4804,
"eval_samples_per_second": 3.432,
"eval_steps_per_second": 0.858,
"num_input_tokens_seen": 2924016,
"step": 250
},
{
"epoch": 0.13134174607262425,
"grad_norm": 0.3762164361984238,
"learning_rate": 9.98292246503335e-05,
"loss": 0.8987,
"num_input_tokens_seen": 2982520,
"step": 255
},
{
"epoch": 0.13391707442698944,
"grad_norm": 0.6447043002410199,
"learning_rate": 9.980855572238714e-05,
"loss": 0.9036,
"num_input_tokens_seen": 3041008,
"step": 260
},
{
"epoch": 0.13649240278135463,
"grad_norm": 0.5308092769971742,
"learning_rate": 9.978670881475172e-05,
"loss": 0.8961,
"num_input_tokens_seen": 3099464,
"step": 265
},
{
"epoch": 0.1390677311357198,
"grad_norm": 0.508333330469703,
"learning_rate": 9.976368444410985e-05,
"loss": 0.9012,
"num_input_tokens_seen": 3157944,
"step": 270
},
{
"epoch": 0.141643059490085,
"grad_norm": 0.6801788563719119,
"learning_rate": 9.973948315499126e-05,
"loss": 0.8985,
"num_input_tokens_seen": 3216448,
"step": 275
},
{
"epoch": 0.14421838784445018,
"grad_norm": 0.6933074703933572,
"learning_rate": 9.971410551976002e-05,
"loss": 0.9114,
"num_input_tokens_seen": 3274928,
"step": 280
},
{
"epoch": 0.14679371619881534,
"grad_norm": 0.21208820897494882,
"learning_rate": 9.968755213860094e-05,
"loss": 0.8886,
"num_input_tokens_seen": 3333408,
"step": 285
},
{
"epoch": 0.14936904455318054,
"grad_norm": 0.5791422669000065,
"learning_rate": 9.96598236395054e-05,
"loss": 0.8929,
"num_input_tokens_seen": 3391896,
"step": 290
},
{
"epoch": 0.1519443729075457,
"grad_norm": 0.3460368893191152,
"learning_rate": 9.96309206782565e-05,
"loss": 0.9091,
"num_input_tokens_seen": 3450392,
"step": 295
},
{
"epoch": 0.1545197012619109,
"grad_norm": 0.22425222135997747,
"learning_rate": 9.960084393841355e-05,
"loss": 0.8893,
"num_input_tokens_seen": 3508888,
"step": 300
},
{
"epoch": 0.1545197012619109,
"eval_loss": 0.8908902406692505,
"eval_runtime": 16.9521,
"eval_samples_per_second": 3.539,
"eval_steps_per_second": 0.885,
"num_input_tokens_seen": 3508888,
"step": 300
},
{
"epoch": 0.15709502961627608,
"grad_norm": 0.23111596622064604,
"learning_rate": 9.956959413129585e-05,
"loss": 0.9056,
"num_input_tokens_seen": 3567368,
"step": 305
},
{
"epoch": 0.15967035797064125,
"grad_norm": 0.3918406894807393,
"learning_rate": 9.953717199596598e-05,
"loss": 0.8982,
"num_input_tokens_seen": 3625848,
"step": 310
},
{
"epoch": 0.16224568632500644,
"grad_norm": 0.22081666860189372,
"learning_rate": 9.95035782992122e-05,
"loss": 0.8968,
"num_input_tokens_seen": 3684336,
"step": 315
},
{
"epoch": 0.16482101467937163,
"grad_norm": 0.18024383676398176,
"learning_rate": 9.94688138355304e-05,
"loss": 0.8975,
"num_input_tokens_seen": 3742800,
"step": 320
},
{
"epoch": 0.1673963430337368,
"grad_norm": 0.3866897344302321,
"learning_rate": 9.943287942710527e-05,
"loss": 0.9061,
"num_input_tokens_seen": 3801280,
"step": 325
},
{
"epoch": 0.16997167138810199,
"grad_norm": 0.4804151381712559,
"learning_rate": 9.939577592379088e-05,
"loss": 0.8948,
"num_input_tokens_seen": 3859792,
"step": 330
},
{
"epoch": 0.17254699974246718,
"grad_norm": 0.35878231707669056,
"learning_rate": 9.935750420309055e-05,
"loss": 0.9063,
"num_input_tokens_seen": 3918272,
"step": 335
},
{
"epoch": 0.17512232809683234,
"grad_norm": 0.8713957774909928,
"learning_rate": 9.931806517013612e-05,
"loss": 0.8952,
"num_input_tokens_seen": 3976760,
"step": 340
},
{
"epoch": 0.17769765645119753,
"grad_norm": 0.6671526212854116,
"learning_rate": 9.927745975766654e-05,
"loss": 0.9136,
"num_input_tokens_seen": 4035240,
"step": 345
},
{
"epoch": 0.1802729848055627,
"grad_norm": 0.28702679234521244,
"learning_rate": 9.923568892600578e-05,
"loss": 0.9075,
"num_input_tokens_seen": 4093688,
"step": 350
},
{
"epoch": 0.1802729848055627,
"eval_loss": 0.89204341173172,
"eval_runtime": 16.5819,
"eval_samples_per_second": 3.618,
"eval_steps_per_second": 0.905,
"num_input_tokens_seen": 4093688,
"step": 350
},
{
"epoch": 0.1828483131599279,
"grad_norm": 0.32233149132200706,
"learning_rate": 9.91927536630402e-05,
"loss": 0.8812,
"num_input_tokens_seen": 4152160,
"step": 355
},
{
"epoch": 0.18542364151429308,
"grad_norm": 0.5071871697326992,
"learning_rate": 9.91486549841951e-05,
"loss": 0.9109,
"num_input_tokens_seen": 4210648,
"step": 360
},
{
"epoch": 0.18799896986865824,
"grad_norm": 0.4532792519849944,
"learning_rate": 9.91033939324107e-05,
"loss": 0.9176,
"num_input_tokens_seen": 4269136,
"step": 365
},
{
"epoch": 0.19057429822302344,
"grad_norm": 0.5409761562534501,
"learning_rate": 9.905697157811761e-05,
"loss": 0.9077,
"num_input_tokens_seen": 4327664,
"step": 370
},
{
"epoch": 0.19314962657738863,
"grad_norm": 0.3432361562809093,
"learning_rate": 9.900938901921131e-05,
"loss": 0.893,
"num_input_tokens_seen": 4386120,
"step": 375
},
{
"epoch": 0.1957249549317538,
"grad_norm": 0.4756530294720616,
"learning_rate": 9.896064738102635e-05,
"loss": 0.9094,
"num_input_tokens_seen": 4444560,
"step": 380
},
{
"epoch": 0.19830028328611898,
"grad_norm": 0.424836974193983,
"learning_rate": 9.891074781630966e-05,
"loss": 0.9091,
"num_input_tokens_seen": 4503016,
"step": 385
},
{
"epoch": 0.20087561164048418,
"grad_norm": 0.31316926977469683,
"learning_rate": 9.885969150519331e-05,
"loss": 0.9033,
"num_input_tokens_seen": 4561496,
"step": 390
},
{
"epoch": 0.20345093999484934,
"grad_norm": 0.6108378682480797,
"learning_rate": 9.88074796551666e-05,
"loss": 0.8851,
"num_input_tokens_seen": 4619944,
"step": 395
},
{
"epoch": 0.20602626834921453,
"grad_norm": 0.38294566619219206,
"learning_rate": 9.875411350104744e-05,
"loss": 0.9004,
"num_input_tokens_seen": 4678384,
"step": 400
},
{
"epoch": 0.20602626834921453,
"eval_loss": 0.9086406826972961,
"eval_runtime": 16.7827,
"eval_samples_per_second": 3.575,
"eval_steps_per_second": 0.894,
"num_input_tokens_seen": 4678384,
"step": 400
},
{
"epoch": 0.2086015967035797,
"grad_norm": 0.4283475401297436,
"learning_rate": 9.86995943049533e-05,
"loss": 0.8976,
"num_input_tokens_seen": 4736904,
"step": 405
},
{
"epoch": 0.2111769250579449,
"grad_norm": 0.40329738287583206,
"learning_rate": 9.864392335627117e-05,
"loss": 0.9134,
"num_input_tokens_seen": 4795376,
"step": 410
},
{
"epoch": 0.21375225341231008,
"grad_norm": 0.37890634863656475,
"learning_rate": 9.858710197162721e-05,
"loss": 0.8955,
"num_input_tokens_seen": 4853880,
"step": 415
},
{
"epoch": 0.21632758176667524,
"grad_norm": 0.32402245835420784,
"learning_rate": 9.852913149485556e-05,
"loss": 0.9014,
"num_input_tokens_seen": 4912360,
"step": 420
},
{
"epoch": 0.21890291012104043,
"grad_norm": 0.49572499508345125,
"learning_rate": 9.847001329696653e-05,
"loss": 0.9065,
"num_input_tokens_seen": 4970872,
"step": 425
},
{
"epoch": 0.22147823847540563,
"grad_norm": 0.11883567118448765,
"learning_rate": 9.840974877611422e-05,
"loss": 0.8952,
"num_input_tokens_seen": 5029304,
"step": 430
},
{
"epoch": 0.2240535668297708,
"grad_norm": 0.7105724703149633,
"learning_rate": 9.834833935756344e-05,
"loss": 0.9106,
"num_input_tokens_seen": 5087800,
"step": 435
},
{
"epoch": 0.22662889518413598,
"grad_norm": 0.708953365388227,
"learning_rate": 9.828578649365601e-05,
"loss": 0.8996,
"num_input_tokens_seen": 5146312,
"step": 440
},
{
"epoch": 0.22920422353850115,
"grad_norm": 0.4503080730364326,
"learning_rate": 9.822209166377635e-05,
"loss": 0.8999,
"num_input_tokens_seen": 5204800,
"step": 445
},
{
"epoch": 0.23177955189286634,
"grad_norm": 0.20754132336834788,
"learning_rate": 9.815725637431662e-05,
"loss": 0.9076,
"num_input_tokens_seen": 5263304,
"step": 450
},
{
"epoch": 0.23177955189286634,
"eval_loss": 0.8962157368659973,
"eval_runtime": 17.2029,
"eval_samples_per_second": 3.488,
"eval_steps_per_second": 0.872,
"num_input_tokens_seen": 5263304,
"step": 450
},
{
"epoch": 0.23435488024723153,
"grad_norm": 0.5906403377099594,
"learning_rate": 9.809128215864097e-05,
"loss": 0.8942,
"num_input_tokens_seen": 5321760,
"step": 455
},
{
"epoch": 0.2369302086015967,
"grad_norm": 0.5706805631290568,
"learning_rate": 9.802417057704931e-05,
"loss": 0.9099,
"num_input_tokens_seen": 5380224,
"step": 460
},
{
"epoch": 0.23950553695596188,
"grad_norm": 0.164631948732384,
"learning_rate": 9.795592321674045e-05,
"loss": 0.8981,
"num_input_tokens_seen": 5438704,
"step": 465
},
{
"epoch": 0.24208086531032708,
"grad_norm": 0.32986780285522194,
"learning_rate": 9.788654169177453e-05,
"loss": 0.8952,
"num_input_tokens_seen": 5497208,
"step": 470
},
{
"epoch": 0.24465619366469224,
"grad_norm": 0.40551569446674784,
"learning_rate": 9.781602764303487e-05,
"loss": 0.8959,
"num_input_tokens_seen": 5555704,
"step": 475
},
{
"epoch": 0.24723152201905743,
"grad_norm": 0.20928586231326682,
"learning_rate": 9.774438273818911e-05,
"loss": 0.901,
"num_input_tokens_seen": 5614160,
"step": 480
},
{
"epoch": 0.24980685037342262,
"grad_norm": 0.34365307116824517,
"learning_rate": 9.767160867164979e-05,
"loss": 0.9008,
"num_input_tokens_seen": 5672640,
"step": 485
},
{
"epoch": 0.2523821787277878,
"grad_norm": 0.4212274243028996,
"learning_rate": 9.759770716453436e-05,
"loss": 0.9016,
"num_input_tokens_seen": 5731072,
"step": 490
},
{
"epoch": 0.254957507082153,
"grad_norm": 0.39823625576558597,
"learning_rate": 9.752267996462434e-05,
"loss": 0.9132,
"num_input_tokens_seen": 5789544,
"step": 495
},
{
"epoch": 0.25753283543651817,
"grad_norm": 0.24856324117583653,
"learning_rate": 9.744652884632406e-05,
"loss": 0.8962,
"num_input_tokens_seen": 5848048,
"step": 500
},
{
"epoch": 0.25753283543651817,
"eval_loss": 0.8987945914268494,
"eval_runtime": 17.1622,
"eval_samples_per_second": 3.496,
"eval_steps_per_second": 0.874,
"num_input_tokens_seen": 5848048,
"step": 500
},
{
"epoch": 0.26010816379088336,
"grad_norm": 0.25461397268106634,
"learning_rate": 9.736925561061871e-05,
"loss": 0.8954,
"num_input_tokens_seen": 5906512,
"step": 505
},
{
"epoch": 0.2626834921452485,
"grad_norm": 0.38602603275675745,
"learning_rate": 9.729086208503174e-05,
"loss": 0.8927,
"num_input_tokens_seen": 5965024,
"step": 510
},
{
"epoch": 0.2652588204996137,
"grad_norm": 0.150082825225123,
"learning_rate": 9.721135012358156e-05,
"loss": 0.898,
"num_input_tokens_seen": 6023496,
"step": 515
},
{
"epoch": 0.2678341488539789,
"grad_norm": 0.26881662025899655,
"learning_rate": 9.713072160673777e-05,
"loss": 0.9016,
"num_input_tokens_seen": 6082000,
"step": 520
},
{
"epoch": 0.2704094772083441,
"grad_norm": 0.5039123575147229,
"learning_rate": 9.704897844137673e-05,
"loss": 0.8842,
"num_input_tokens_seen": 6140480,
"step": 525
},
{
"epoch": 0.27298480556270927,
"grad_norm": 0.27836945453098666,
"learning_rate": 9.696612256073633e-05,
"loss": 0.8921,
"num_input_tokens_seen": 6198968,
"step": 530
},
{
"epoch": 0.2755601339170744,
"grad_norm": 0.22936338891946384,
"learning_rate": 9.688215592437039e-05,
"loss": 0.8979,
"num_input_tokens_seen": 6257464,
"step": 535
},
{
"epoch": 0.2781354622714396,
"grad_norm": 0.396486857609105,
"learning_rate": 9.679708051810221e-05,
"loss": 0.8951,
"num_input_tokens_seen": 6315944,
"step": 540
},
{
"epoch": 0.2807107906258048,
"grad_norm": 0.4751226662261396,
"learning_rate": 9.67108983539777e-05,
"loss": 0.9149,
"num_input_tokens_seen": 6374408,
"step": 545
},
{
"epoch": 0.28328611898017,
"grad_norm": 0.26829103885131056,
"learning_rate": 9.662361147021779e-05,
"loss": 0.9013,
"num_input_tokens_seen": 6432936,
"step": 550
},
{
"epoch": 0.28328611898017,
"eval_loss": 0.9001271724700928,
"eval_runtime": 16.9878,
"eval_samples_per_second": 3.532,
"eval_steps_per_second": 0.883,
"num_input_tokens_seen": 6432936,
"step": 550
},
{
"epoch": 0.28586144733453517,
"grad_norm": 0.5334970266367584,
"learning_rate": 9.653522193117013e-05,
"loss": 0.8981,
"num_input_tokens_seen": 6491400,
"step": 555
},
{
"epoch": 0.28843677568890036,
"grad_norm": 0.33261202813259866,
"learning_rate": 9.644573182726035e-05,
"loss": 0.9041,
"num_input_tokens_seen": 6549872,
"step": 560
},
{
"epoch": 0.2910121040432655,
"grad_norm": 0.19122862132727417,
"learning_rate": 9.63551432749426e-05,
"loss": 0.9024,
"num_input_tokens_seen": 6608296,
"step": 565
},
{
"epoch": 0.2935874323976307,
"grad_norm": 0.27778009425329764,
"learning_rate": 9.626345841664953e-05,
"loss": 0.9002,
"num_input_tokens_seen": 6666768,
"step": 570
},
{
"epoch": 0.2961627607519959,
"grad_norm": 0.3065314332046026,
"learning_rate": 9.617067942074153e-05,
"loss": 0.9035,
"num_input_tokens_seen": 6725248,
"step": 575
},
{
"epoch": 0.29873808910636107,
"grad_norm": 0.24431496415058412,
"learning_rate": 9.607680848145558e-05,
"loss": 0.9019,
"num_input_tokens_seen": 6783680,
"step": 580
},
{
"epoch": 0.30131341746072626,
"grad_norm": 0.27088193021301504,
"learning_rate": 9.598184781885318e-05,
"loss": 0.9001,
"num_input_tokens_seen": 6842144,
"step": 585
},
{
"epoch": 0.3038887458150914,
"grad_norm": 0.33893098113605125,
"learning_rate": 9.588579967876806e-05,
"loss": 0.8961,
"num_input_tokens_seen": 6900656,
"step": 590
},
{
"epoch": 0.3064640741694566,
"grad_norm": 0.3038921833221806,
"learning_rate": 9.578866633275288e-05,
"loss": 0.9,
"num_input_tokens_seen": 6959128,
"step": 595
},
{
"epoch": 0.3090394025238218,
"grad_norm": 0.48929637235055645,
"learning_rate": 9.569045007802559e-05,
"loss": 0.9046,
"num_input_tokens_seen": 7017576,
"step": 600
},
{
"epoch": 0.3090394025238218,
"eval_loss": 0.9053278565406799,
"eval_runtime": 17.1218,
"eval_samples_per_second": 3.504,
"eval_steps_per_second": 0.876,
"num_input_tokens_seen": 7017576,
"step": 600
},
{
"epoch": 0.311614730878187,
"grad_norm": 0.3545950949033049,
"learning_rate": 9.55911532374151e-05,
"loss": 0.9019,
"num_input_tokens_seen": 7076032,
"step": 605
},
{
"epoch": 0.31419005923255217,
"grad_norm": 0.2355627006333952,
"learning_rate": 9.549077815930636e-05,
"loss": 0.8956,
"num_input_tokens_seen": 7134536,
"step": 610
},
{
"epoch": 0.31676538758691736,
"grad_norm": 0.17552483625655946,
"learning_rate": 9.538932721758474e-05,
"loss": 0.898,
"num_input_tokens_seen": 7193032,
"step": 615
},
{
"epoch": 0.3193407159412825,
"grad_norm": 0.1749010635522076,
"learning_rate": 9.528680281157999e-05,
"loss": 0.8991,
"num_input_tokens_seen": 7251568,
"step": 620
},
{
"epoch": 0.3219160442956477,
"grad_norm": 0.19885182954224315,
"learning_rate": 9.518320736600943e-05,
"loss": 0.8961,
"num_input_tokens_seen": 7310072,
"step": 625
},
{
"epoch": 0.3244913726500129,
"grad_norm": 0.4778756508206831,
"learning_rate": 9.507854333092063e-05,
"loss": 0.8994,
"num_input_tokens_seen": 7368560,
"step": 630
},
{
"epoch": 0.32706670100437807,
"grad_norm": 0.4123272743887767,
"learning_rate": 9.497281318163346e-05,
"loss": 0.8925,
"num_input_tokens_seen": 7427040,
"step": 635
},
{
"epoch": 0.32964202935874326,
"grad_norm": 0.34409942667705734,
"learning_rate": 9.486601941868154e-05,
"loss": 0.9087,
"num_input_tokens_seen": 7485552,
"step": 640
},
{
"epoch": 0.3322173577131084,
"grad_norm": 0.43327107411223276,
"learning_rate": 9.475816456775313e-05,
"loss": 0.8924,
"num_input_tokens_seen": 7544040,
"step": 645
},
{
"epoch": 0.3347926860674736,
"grad_norm": 0.6643023904352003,
"learning_rate": 9.464925117963133e-05,
"loss": 0.904,
"num_input_tokens_seen": 7602512,
"step": 650
},
{
"epoch": 0.3347926860674736,
"eval_loss": 0.90328449010849,
"eval_runtime": 16.1444,
"eval_samples_per_second": 3.716,
"eval_steps_per_second": 0.929,
"num_input_tokens_seen": 7602512,
"step": 650
},
{
"epoch": 0.3373680144218388,
"grad_norm": 0.620349194493935,
"learning_rate": 9.453928183013385e-05,
"loss": 0.8929,
"num_input_tokens_seen": 7660968,
"step": 655
},
{
"epoch": 0.33994334277620397,
"grad_norm": 0.18611846349930314,
"learning_rate": 9.442825912005202e-05,
"loss": 0.9078,
"num_input_tokens_seen": 7719448,
"step": 660
},
{
"epoch": 0.34251867113056916,
"grad_norm": 0.4448289413172567,
"learning_rate": 9.431618567508933e-05,
"loss": 0.8963,
"num_input_tokens_seen": 7777928,
"step": 665
},
{
"epoch": 0.34509399948493436,
"grad_norm": 0.6187189362250411,
"learning_rate": 9.420306414579925e-05,
"loss": 0.9134,
"num_input_tokens_seen": 7836424,
"step": 670
},
{
"epoch": 0.3476693278392995,
"grad_norm": 0.35247743418537675,
"learning_rate": 9.408889720752266e-05,
"loss": 0.8984,
"num_input_tokens_seen": 7894904,
"step": 675
},
{
"epoch": 0.3502446561936647,
"grad_norm": 0.20652916455346712,
"learning_rate": 9.397368756032445e-05,
"loss": 0.8997,
"num_input_tokens_seen": 7953432,
"step": 680
},
{
"epoch": 0.3528199845480299,
"grad_norm": 0.4289996063998063,
"learning_rate": 9.385743792892982e-05,
"loss": 0.8926,
"num_input_tokens_seen": 8011888,
"step": 685
},
{
"epoch": 0.35539531290239507,
"grad_norm": 0.13764054506536547,
"learning_rate": 9.374015106265968e-05,
"loss": 0.9008,
"num_input_tokens_seen": 8070344,
"step": 690
},
{
"epoch": 0.35797064125676026,
"grad_norm": 0.22142459689499855,
"learning_rate": 9.362182973536569e-05,
"loss": 0.8986,
"num_input_tokens_seen": 8128816,
"step": 695
},
{
"epoch": 0.3605459696111254,
"grad_norm": 0.3234539650829873,
"learning_rate": 9.35024767453647e-05,
"loss": 0.8972,
"num_input_tokens_seen": 8187320,
"step": 700
},
{
"epoch": 0.3605459696111254,
"eval_loss": 0.9028835892677307,
"eval_runtime": 16.1635,
"eval_samples_per_second": 3.712,
"eval_steps_per_second": 0.928,
"num_input_tokens_seen": 8187320,
"step": 700
},
{
"epoch": 0.3631212979654906,
"grad_norm": 0.3215674690491891,
"learning_rate": 9.338209491537257e-05,
"loss": 0.8998,
"num_input_tokens_seen": 8245776,
"step": 705
},
{
"epoch": 0.3656966263198558,
"grad_norm": 0.36428692362396536,
"learning_rate": 9.326068709243727e-05,
"loss": 0.8999,
"num_input_tokens_seen": 8304280,
"step": 710
},
{
"epoch": 0.36827195467422097,
"grad_norm": 0.280459809393624,
"learning_rate": 9.313825614787177e-05,
"loss": 0.8983,
"num_input_tokens_seen": 8362728,
"step": 715
},
{
"epoch": 0.37084728302858616,
"grad_norm": 0.1819339731162554,
"learning_rate": 9.301480497718593e-05,
"loss": 0.892,
"num_input_tokens_seen": 8421224,
"step": 720
},
{
"epoch": 0.37342261138295135,
"grad_norm": 0.23784840563699303,
"learning_rate": 9.289033650001817e-05,
"loss": 0.9034,
"num_input_tokens_seen": 8479720,
"step": 725
},
{
"epoch": 0.3759979397373165,
"grad_norm": 0.24070744588741375,
"learning_rate": 9.276485366006634e-05,
"loss": 0.895,
"num_input_tokens_seen": 8538192,
"step": 730
},
{
"epoch": 0.3785732680916817,
"grad_norm": 0.24846723619231478,
"learning_rate": 9.263835942501807e-05,
"loss": 0.8973,
"num_input_tokens_seen": 8596664,
"step": 735
},
{
"epoch": 0.3811485964460469,
"grad_norm": 0.2601614440419362,
"learning_rate": 9.251085678648072e-05,
"loss": 0.8972,
"num_input_tokens_seen": 8655128,
"step": 740
},
{
"epoch": 0.38372392480041206,
"grad_norm": 0.30194733839751087,
"learning_rate": 9.238234875991046e-05,
"loss": 0.8987,
"num_input_tokens_seen": 8713624,
"step": 745
},
{
"epoch": 0.38629925315477726,
"grad_norm": 0.3015609177439829,
"learning_rate": 9.225283838454111e-05,
"loss": 0.9005,
"num_input_tokens_seen": 8772104,
"step": 750
},
{
"epoch": 0.38629925315477726,
"eval_loss": 0.8981761336326599,
"eval_runtime": 16.0177,
"eval_samples_per_second": 3.746,
"eval_steps_per_second": 0.936,
"num_input_tokens_seen": 8772104,
"step": 750
},
{
"epoch": 0.3888745815091424,
"grad_norm": 0.44991480631292463,
"learning_rate": 9.21223287233121e-05,
"loss": 0.8973,
"num_input_tokens_seen": 8830568,
"step": 755
},
{
"epoch": 0.3914499098635076,
"grad_norm": 0.22570310903133853,
"learning_rate": 9.199082286279622e-05,
"loss": 0.8974,
"num_input_tokens_seen": 8889072,
"step": 760
},
{
"epoch": 0.3940252382178728,
"grad_norm": 0.22090133233732026,
"learning_rate": 9.185832391312644e-05,
"loss": 0.8985,
"num_input_tokens_seen": 8947568,
"step": 765
},
{
"epoch": 0.39660056657223797,
"grad_norm": 0.23738058530347297,
"learning_rate": 9.172483500792244e-05,
"loss": 0.8935,
"num_input_tokens_seen": 9006056,
"step": 770
},
{
"epoch": 0.39917589492660316,
"grad_norm": 0.41232659301572594,
"learning_rate": 9.159035930421658e-05,
"loss": 0.8985,
"num_input_tokens_seen": 9064592,
"step": 775
},
{
"epoch": 0.40175122328096835,
"grad_norm": 0.2004855543001356,
"learning_rate": 9.145489998237902e-05,
"loss": 0.9105,
"num_input_tokens_seen": 9123096,
"step": 780
},
{
"epoch": 0.4043265516353335,
"grad_norm": 0.16209487510237375,
"learning_rate": 9.131846024604274e-05,
"loss": 0.8925,
"num_input_tokens_seen": 9181576,
"step": 785
},
{
"epoch": 0.4069018799896987,
"grad_norm": 0.24319930530142153,
"learning_rate": 9.11810433220276e-05,
"loss": 0.8955,
"num_input_tokens_seen": 9240048,
"step": 790
},
{
"epoch": 0.40947720834406387,
"grad_norm": 0.24311562892750557,
"learning_rate": 9.104265246026415e-05,
"loss": 0.8986,
"num_input_tokens_seen": 9298528,
"step": 795
},
{
"epoch": 0.41205253669842906,
"grad_norm": 0.2891177185942039,
"learning_rate": 9.090329093371666e-05,
"loss": 0.8881,
"num_input_tokens_seen": 9357016,
"step": 800
},
{
"epoch": 0.41205253669842906,
"eval_loss": 0.8973079919815063,
"eval_runtime": 16.1396,
"eval_samples_per_second": 3.718,
"eval_steps_per_second": 0.929,
"num_input_tokens_seen": 9357016,
"step": 800
},
{
"epoch": 0.41462786505279425,
"grad_norm": 0.4728970278357675,
"learning_rate": 9.076296203830579e-05,
"loss": 0.8798,
"num_input_tokens_seen": 9415480,
"step": 805
},
{
"epoch": 0.4172031934071594,
"grad_norm": 0.2420351489416807,
"learning_rate": 9.062166909283062e-05,
"loss": 0.9104,
"num_input_tokens_seen": 9473928,
"step": 810
},
{
"epoch": 0.4197785217615246,
"grad_norm": 0.2262623911682871,
"learning_rate": 9.047941543889014e-05,
"loss": 0.9007,
"num_input_tokens_seen": 9532408,
"step": 815
},
{
"epoch": 0.4223538501158898,
"grad_norm": 0.18258980329217392,
"learning_rate": 9.033620444080428e-05,
"loss": 0.8974,
"num_input_tokens_seen": 9590920,
"step": 820
},
{
"epoch": 0.42492917847025496,
"grad_norm": 0.2898762949979446,
"learning_rate": 9.019203948553422e-05,
"loss": 0.8992,
"num_input_tokens_seen": 9649400,
"step": 825
},
{
"epoch": 0.42750450682462016,
"grad_norm": 0.3884592601874919,
"learning_rate": 9.004692398260244e-05,
"loss": 0.8991,
"num_input_tokens_seen": 9707888,
"step": 830
},
{
"epoch": 0.43007983517898535,
"grad_norm": 0.24055719869667014,
"learning_rate": 8.9900861364012e-05,
"loss": 0.8964,
"num_input_tokens_seen": 9766384,
"step": 835
},
{
"epoch": 0.4326551635333505,
"grad_norm": 0.4482774361285702,
"learning_rate": 8.975385508416532e-05,
"loss": 0.8723,
"num_input_tokens_seen": 9824896,
"step": 840
},
{
"epoch": 0.4352304918877157,
"grad_norm": 0.4612030185875055,
"learning_rate": 8.960590861978265e-05,
"loss": 0.874,
"num_input_tokens_seen": 9883408,
"step": 845
},
{
"epoch": 0.43780582024208087,
"grad_norm": 0.44197834194509644,
"learning_rate": 8.945702546981969e-05,
"loss": 0.9035,
"num_input_tokens_seen": 9941896,
"step": 850
},
{
"epoch": 0.43780582024208087,
"eval_loss": 0.8779178261756897,
"eval_runtime": 16.159,
"eval_samples_per_second": 3.713,
"eval_steps_per_second": 0.928,
"num_input_tokens_seen": 9941896,
"step": 850
},
{
"epoch": 0.44038114859644606,
"grad_norm": 0.8207188524660312,
"learning_rate": 8.930720915538487e-05,
"loss": 0.8516,
"num_input_tokens_seen": 10000336,
"step": 855
},
{
"epoch": 0.44295647695081125,
"grad_norm": 1.5881804699369033,
"learning_rate": 8.915646321965614e-05,
"loss": 0.9206,
"num_input_tokens_seen": 10058816,
"step": 860
},
{
"epoch": 0.4455318053051764,
"grad_norm": 0.3364043503653687,
"learning_rate": 8.900479122779712e-05,
"loss": 0.9028,
"num_input_tokens_seen": 10117320,
"step": 865
},
{
"epoch": 0.4481071336595416,
"grad_norm": 0.2888069815557639,
"learning_rate": 8.885219676687277e-05,
"loss": 0.8991,
"num_input_tokens_seen": 10175824,
"step": 870
},
{
"epoch": 0.45068246201390677,
"grad_norm": 0.26081919755231314,
"learning_rate": 8.869868344576459e-05,
"loss": 0.8934,
"num_input_tokens_seen": 10234288,
"step": 875
},
{
"epoch": 0.45325779036827196,
"grad_norm": 0.1672074260476841,
"learning_rate": 8.854425489508532e-05,
"loss": 0.8908,
"num_input_tokens_seen": 10292736,
"step": 880
},
{
"epoch": 0.45583311872263715,
"grad_norm": 0.3141498425127344,
"learning_rate": 8.838891476709288e-05,
"loss": 0.8988,
"num_input_tokens_seen": 10351224,
"step": 885
},
{
"epoch": 0.4584084470770023,
"grad_norm": 0.28442383194638554,
"learning_rate": 8.823266673560426e-05,
"loss": 0.8965,
"num_input_tokens_seen": 10409736,
"step": 890
},
{
"epoch": 0.4609837754313675,
"grad_norm": 0.24793143025843287,
"learning_rate": 8.807551449590846e-05,
"loss": 0.8989,
"num_input_tokens_seen": 10468240,
"step": 895
},
{
"epoch": 0.4635591037857327,
"grad_norm": 0.18173090045802157,
"learning_rate": 8.791746176467907e-05,
"loss": 0.8961,
"num_input_tokens_seen": 10526712,
"step": 900
},
{
"epoch": 0.4635591037857327,
"eval_loss": 0.891426146030426,
"eval_runtime": 16.0357,
"eval_samples_per_second": 3.742,
"eval_steps_per_second": 0.935,
"num_input_tokens_seen": 10526712,
"step": 900
},
{
"epoch": 0.46613443214009787,
"grad_norm": 0.18755280770432675,
"learning_rate": 8.775851227988656e-05,
"loss": 0.8955,
"num_input_tokens_seen": 10585232,
"step": 905
},
{
"epoch": 0.46870976049446306,
"grad_norm": 0.16684040416821233,
"learning_rate": 8.759866980070963e-05,
"loss": 0.8951,
"num_input_tokens_seen": 10643728,
"step": 910
},
{
"epoch": 0.47128508884882825,
"grad_norm": 0.33346521793095785,
"learning_rate": 8.743793810744654e-05,
"loss": 0.8951,
"num_input_tokens_seen": 10702240,
"step": 915
},
{
"epoch": 0.4738604172031934,
"grad_norm": 0.23650054707790025,
"learning_rate": 8.727632100142551e-05,
"loss": 0.9066,
"num_input_tokens_seen": 10760656,
"step": 920
},
{
"epoch": 0.4764357455575586,
"grad_norm": 0.20217442955339224,
"learning_rate": 8.711382230491493e-05,
"loss": 0.8953,
"num_input_tokens_seen": 10819128,
"step": 925
},
{
"epoch": 0.47901107391192377,
"grad_norm": 0.1648307621403396,
"learning_rate": 8.695044586103296e-05,
"loss": 0.8961,
"num_input_tokens_seen": 10877600,
"step": 930
},
{
"epoch": 0.48158640226628896,
"grad_norm": 0.25983065938238986,
"learning_rate": 8.678619553365659e-05,
"loss": 0.8965,
"num_input_tokens_seen": 10936088,
"step": 935
},
{
"epoch": 0.48416173062065415,
"grad_norm": 0.17882463002474594,
"learning_rate": 8.662107520733027e-05,
"loss": 0.9018,
"num_input_tokens_seen": 10994560,
"step": 940
},
{
"epoch": 0.4867370589750193,
"grad_norm": 0.14644012846994445,
"learning_rate": 8.64550887871741e-05,
"loss": 0.8944,
"num_input_tokens_seen": 11053016,
"step": 945
},
{
"epoch": 0.4893123873293845,
"grad_norm": 0.23751630760966444,
"learning_rate": 8.628824019879137e-05,
"loss": 0.8852,
"num_input_tokens_seen": 11111520,
"step": 950
},
{
"epoch": 0.4893123873293845,
"eval_loss": 0.8915690183639526,
"eval_runtime": 16.2589,
"eval_samples_per_second": 3.69,
"eval_steps_per_second": 0.923,
"num_input_tokens_seen": 11111520,
"step": 950
},
{
"epoch": 0.49188771568374967,
"grad_norm": 0.3904846319143667,
"learning_rate": 8.612053338817581e-05,
"loss": 0.9087,
"num_input_tokens_seen": 11170016,
"step": 955
},
{
"epoch": 0.49446304403811486,
"grad_norm": 0.44920450892911645,
"learning_rate": 8.595197232161824e-05,
"loss": 0.8915,
"num_input_tokens_seen": 11228496,
"step": 960
},
{
"epoch": 0.49703837239248005,
"grad_norm": 0.6093857047738649,
"learning_rate": 8.578256098561275e-05,
"loss": 0.8836,
"num_input_tokens_seen": 11286928,
"step": 965
},
{
"epoch": 0.49961370074684525,
"grad_norm": 0.6282945106836194,
"learning_rate": 8.561230338676239e-05,
"loss": 0.9116,
"num_input_tokens_seen": 11345400,
"step": 970
},
{
"epoch": 0.5021890291012104,
"grad_norm": 0.3187294296147391,
"learning_rate": 8.544120355168451e-05,
"loss": 0.8809,
"num_input_tokens_seen": 11403912,
"step": 975
},
{
"epoch": 0.5047643574555756,
"grad_norm": 0.4019889420836467,
"learning_rate": 8.526926552691544e-05,
"loss": 0.8895,
"num_input_tokens_seen": 11462344,
"step": 980
},
{
"epoch": 0.5073396858099408,
"grad_norm": 0.4762279449607594,
"learning_rate": 8.509649337881483e-05,
"loss": 0.8674,
"num_input_tokens_seen": 11520808,
"step": 985
},
{
"epoch": 0.509915014164306,
"grad_norm": 1.7062273050040726,
"learning_rate": 8.492289119346943e-05,
"loss": 0.8832,
"num_input_tokens_seen": 11579248,
"step": 990
},
{
"epoch": 0.5124903425186711,
"grad_norm": 0.7896696939552226,
"learning_rate": 8.474846307659658e-05,
"loss": 0.8581,
"num_input_tokens_seen": 11637712,
"step": 995
},
{
"epoch": 0.5150656708730363,
"grad_norm": 0.9287129351980297,
"learning_rate": 8.457321315344694e-05,
"loss": 0.8635,
"num_input_tokens_seen": 11696200,
"step": 1000
},
{
"epoch": 0.5150656708730363,
"eval_loss": 0.860200047492981,
"eval_runtime": 16.1196,
"eval_samples_per_second": 3.722,
"eval_steps_per_second": 0.931,
"num_input_tokens_seen": 11696200,
"step": 1000
},
{
"epoch": 0.5176409992274015,
"grad_norm": 0.9492829276877938,
"learning_rate": 8.439714556870704e-05,
"loss": 0.8499,
"num_input_tokens_seen": 11754720,
"step": 1005
},
{
"epoch": 0.5202163275817667,
"grad_norm": 1.57473364910246,
"learning_rate": 8.422026448640124e-05,
"loss": 0.8556,
"num_input_tokens_seen": 11813216,
"step": 1010
},
{
"epoch": 0.5227916559361319,
"grad_norm": 0.6562994819534732,
"learning_rate": 8.40425740897932e-05,
"loss": 0.8533,
"num_input_tokens_seen": 11871712,
"step": 1015
},
{
"epoch": 0.525366984290497,
"grad_norm": 0.5420643724864006,
"learning_rate": 8.386407858128706e-05,
"loss": 0.8921,
"num_input_tokens_seen": 11930200,
"step": 1020
},
{
"epoch": 0.5279423126448622,
"grad_norm": 0.4900953324933905,
"learning_rate": 8.368478218232787e-05,
"loss": 0.8815,
"num_input_tokens_seen": 11988704,
"step": 1025
},
{
"epoch": 0.5305176409992274,
"grad_norm": 0.46534021808416004,
"learning_rate": 8.350468913330192e-05,
"loss": 0.854,
"num_input_tokens_seen": 12047176,
"step": 1030
},
{
"epoch": 0.5330929693535926,
"grad_norm": 0.6739669998528043,
"learning_rate": 8.33238036934364e-05,
"loss": 0.8642,
"num_input_tokens_seen": 12105680,
"step": 1035
},
{
"epoch": 0.5356682977079578,
"grad_norm": 1.100337259258234,
"learning_rate": 8.31421301406986e-05,
"loss": 0.8072,
"num_input_tokens_seen": 12164208,
"step": 1040
},
{
"epoch": 0.5382436260623229,
"grad_norm": 1.2731858488127639,
"learning_rate": 8.29596727716949e-05,
"loss": 0.8532,
"num_input_tokens_seen": 12222672,
"step": 1045
},
{
"epoch": 0.5408189544166881,
"grad_norm": 0.8686963016555517,
"learning_rate": 8.277643590156894e-05,
"loss": 0.8844,
"num_input_tokens_seen": 12281072,
"step": 1050
},
{
"epoch": 0.5408189544166881,
"eval_loss": 0.8446129560470581,
"eval_runtime": 16.0508,
"eval_samples_per_second": 3.738,
"eval_steps_per_second": 0.935,
"num_input_tokens_seen": 12281072,
"step": 1050
},
{
"epoch": 0.5433942827710533,
"grad_norm": 0.5518554447099218,
"learning_rate": 8.259242386389973e-05,
"loss": 0.8602,
"num_input_tokens_seen": 12339544,
"step": 1055
},
{
"epoch": 0.5459696111254185,
"grad_norm": 0.7300911438509382,
"learning_rate": 8.240764101059912e-05,
"loss": 0.8615,
"num_input_tokens_seen": 12397992,
"step": 1060
},
{
"epoch": 0.5485449394797837,
"grad_norm": 0.7364983085887583,
"learning_rate": 8.222209171180883e-05,
"loss": 0.8732,
"num_input_tokens_seen": 12456480,
"step": 1065
},
{
"epoch": 0.5511202678341488,
"grad_norm": 0.4840408774949972,
"learning_rate": 8.203578035579715e-05,
"loss": 0.8691,
"num_input_tokens_seen": 12515000,
"step": 1070
},
{
"epoch": 0.553695596188514,
"grad_norm": 0.516278691776577,
"learning_rate": 8.184871134885513e-05,
"loss": 0.8544,
"num_input_tokens_seen": 12573504,
"step": 1075
},
{
"epoch": 0.5562709245428792,
"grad_norm": 0.8626943002609527,
"learning_rate": 8.166088911519235e-05,
"loss": 0.8501,
"num_input_tokens_seen": 12632008,
"step": 1080
},
{
"epoch": 0.5588462528972444,
"grad_norm": 0.7409465187036862,
"learning_rate": 8.147231809683236e-05,
"loss": 0.8646,
"num_input_tokens_seen": 12690520,
"step": 1085
},
{
"epoch": 0.5614215812516096,
"grad_norm": 0.5736639247313171,
"learning_rate": 8.128300275350756e-05,
"loss": 0.8327,
"num_input_tokens_seen": 12749032,
"step": 1090
},
{
"epoch": 0.5639969096059748,
"grad_norm": 0.7720514157947642,
"learning_rate": 8.109294756255375e-05,
"loss": 0.8218,
"num_input_tokens_seen": 12807504,
"step": 1095
},
{
"epoch": 0.56657223796034,
"grad_norm": 0.9129011996506371,
"learning_rate": 8.090215701880419e-05,
"loss": 0.8427,
"num_input_tokens_seen": 12865992,
"step": 1100
},
{
"epoch": 0.56657223796034,
"eval_loss": 0.7743102312088013,
"eval_runtime": 16.1034,
"eval_samples_per_second": 3.726,
"eval_steps_per_second": 0.931,
"num_input_tokens_seen": 12865992,
"step": 1100
}
],
"logging_steps": 5,
"max_steps": 3400,
"num_input_tokens_seen": 12865992,
"num_train_epochs": 2,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 722443216093184.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}