{
"best_metric": 2.02008056640625,
"best_model_checkpoint": "/home/datta0/models/lora_final/Qwen2-7B_pct_default/checkpoint-8",
"epoch": 0.9893719806763285,
"eval_steps": 8,
"global_step": 384,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0025764895330112722,
"grad_norm": 5.605008125305176,
"learning_rate": 3.75e-05,
"loss": 2.0287,
"step": 1
},
{
"epoch": 0.010305958132045089,
"grad_norm": 3.37660551071167,
"learning_rate": 0.00015,
"loss": 2.1017,
"step": 4
},
{
"epoch": 0.020611916264090178,
"grad_norm": 3.2211694717407227,
"learning_rate": 0.0003,
"loss": 2.1169,
"step": 8
},
{
"epoch": 0.020611916264090178,
"eval_loss": 2.02008056640625,
"eval_runtime": 10.2397,
"eval_samples_per_second": 23.927,
"eval_steps_per_second": 3.027,
"step": 8
},
{
"epoch": 0.030917874396135265,
"grad_norm": 4.083240509033203,
"learning_rate": 0.0002999179886011389,
"loss": 2.0548,
"step": 12
},
{
"epoch": 0.041223832528180356,
"grad_norm": 4.696908473968506,
"learning_rate": 0.00029967204408281613,
"loss": 2.1264,
"step": 16
},
{
"epoch": 0.041223832528180356,
"eval_loss": 2.0886311531066895,
"eval_runtime": 10.2373,
"eval_samples_per_second": 23.932,
"eval_steps_per_second": 3.028,
"step": 16
},
{
"epoch": 0.05152979066022544,
"grad_norm": 6.009027481079102,
"learning_rate": 0.0002992624353817517,
"loss": 2.0976,
"step": 20
},
{
"epoch": 0.06183574879227053,
"grad_norm": 4.280951976776123,
"learning_rate": 0.00029868961039904624,
"loss": 2.1468,
"step": 24
},
{
"epoch": 0.06183574879227053,
"eval_loss": 2.074465036392212,
"eval_runtime": 10.269,
"eval_samples_per_second": 23.858,
"eval_steps_per_second": 3.019,
"step": 24
},
{
"epoch": 0.07214170692431562,
"grad_norm": 4.368206024169922,
"learning_rate": 0.00029795419551040833,
"loss": 2.1692,
"step": 28
},
{
"epoch": 0.08244766505636071,
"grad_norm": 4.116436958312988,
"learning_rate": 0.0002970569948812214,
"loss": 2.0954,
"step": 32
},
{
"epoch": 0.08244766505636071,
"eval_loss": 2.072464942932129,
"eval_runtime": 10.2251,
"eval_samples_per_second": 23.961,
"eval_steps_per_second": 3.032,
"step": 32
},
{
"epoch": 0.0927536231884058,
"grad_norm": 4.114132881164551,
"learning_rate": 0.0002959989895872009,
"loss": 2.1358,
"step": 36
},
{
"epoch": 0.10305958132045089,
"grad_norm": 3.317972183227539,
"learning_rate": 0.0002947813365416023,
"loss": 2.136,
"step": 40
},
{
"epoch": 0.10305958132045089,
"eval_loss": 2.0759308338165283,
"eval_runtime": 10.1843,
"eval_samples_per_second": 24.057,
"eval_steps_per_second": 3.044,
"step": 40
},
{
"epoch": 0.11336553945249597,
"grad_norm": 3.8599319458007812,
"learning_rate": 0.0002934053672301536,
"loss": 2.0745,
"step": 44
},
{
"epoch": 0.12367149758454106,
"grad_norm": 14.128602027893066,
"learning_rate": 0.00029187258625509513,
"loss": 2.1289,
"step": 48
},
{
"epoch": 0.12367149758454106,
"eval_loss": 2.1111183166503906,
"eval_runtime": 10.1965,
"eval_samples_per_second": 24.028,
"eval_steps_per_second": 3.04,
"step": 48
},
{
"epoch": 0.13397745571658615,
"grad_norm": 3.305791139602661,
"learning_rate": 0.0002901846696899191,
"loss": 2.1029,
"step": 52
},
{
"epoch": 0.14428341384863125,
"grad_norm": 3.663986921310425,
"learning_rate": 0.0002883434632466077,
"loss": 2.131,
"step": 56
},
{
"epoch": 0.14428341384863125,
"eval_loss": 2.07441782951355,
"eval_runtime": 10.1465,
"eval_samples_per_second": 24.146,
"eval_steps_per_second": 3.055,
"step": 56
},
{
"epoch": 0.15458937198067632,
"grad_norm": 3.1207165718078613,
"learning_rate": 0.00028635098025737434,
"loss": 2.1223,
"step": 60
},
{
"epoch": 0.16489533011272142,
"grad_norm": 2.8845832347869873,
"learning_rate": 0.0002842093994731145,
"loss": 2.1168,
"step": 64
},
{
"epoch": 0.16489533011272142,
"eval_loss": 2.076624870300293,
"eval_runtime": 10.2094,
"eval_samples_per_second": 23.997,
"eval_steps_per_second": 3.036,
"step": 64
},
{
"epoch": 0.1752012882447665,
"grad_norm": 3.5518147945404053,
"learning_rate": 0.00028192106268097334,
"loss": 2.1806,
"step": 68
},
{
"epoch": 0.1855072463768116,
"grad_norm": 4.108978748321533,
"learning_rate": 0.0002794884721436361,
"loss": 2.149,
"step": 72
},
{
"epoch": 0.1855072463768116,
"eval_loss": 2.102821111679077,
"eval_runtime": 10.1389,
"eval_samples_per_second": 24.164,
"eval_steps_per_second": 3.058,
"step": 72
},
{
"epoch": 0.19581320450885667,
"grad_norm": 4.722578048706055,
"learning_rate": 0.0002769142878631403,
"loss": 2.1607,
"step": 76
},
{
"epoch": 0.20611916264090177,
"grad_norm": 3.503679037094116,
"learning_rate": 0.000274201324672203,
"loss": 2.1947,
"step": 80
},
{
"epoch": 0.20611916264090177,
"eval_loss": 2.0999114513397217,
"eval_runtime": 10.125,
"eval_samples_per_second": 24.197,
"eval_steps_per_second": 3.062,
"step": 80
},
{
"epoch": 0.21642512077294687,
"grad_norm": 14.818785667419434,
"learning_rate": 0.0002713525491562421,
"loss": 2.2451,
"step": 84
},
{
"epoch": 0.22673107890499195,
"grad_norm": 3.2971980571746826,
"learning_rate": 0.00026837107640945905,
"loss": 2.1727,
"step": 88
},
{
"epoch": 0.22673107890499195,
"eval_loss": 2.099900007247925,
"eval_runtime": 10.0913,
"eval_samples_per_second": 24.278,
"eval_steps_per_second": 3.072,
"step": 88
},
{
"epoch": 0.23703703703703705,
"grad_norm": 3.2176926136016846,
"learning_rate": 0.00026526016662852886,
"loss": 2.1158,
"step": 92
},
{
"epoch": 0.24734299516908212,
"grad_norm": 4.07711124420166,
"learning_rate": 0.0002620232215476231,
"loss": 2.1438,
"step": 96
},
{
"epoch": 0.24734299516908212,
"eval_loss": 2.097912073135376,
"eval_runtime": 10.0912,
"eval_samples_per_second": 24.279,
"eval_steps_per_second": 3.072,
"step": 96
},
{
"epoch": 0.2576489533011272,
"grad_norm": 2.714487314224243,
"learning_rate": 0.00025866378071866334,
"loss": 2.19,
"step": 100
},
{
"epoch": 0.2679549114331723,
"grad_norm": 3.775660276412964,
"learning_rate": 0.00025518551764087326,
"loss": 2.1639,
"step": 104
},
{
"epoch": 0.2679549114331723,
"eval_loss": 2.0984129905700684,
"eval_runtime": 10.0362,
"eval_samples_per_second": 24.412,
"eval_steps_per_second": 3.089,
"step": 104
},
{
"epoch": 0.2782608695652174,
"grad_norm": 3.7832205295562744,
"learning_rate": 0.00025159223574386114,
"loss": 2.1516,
"step": 108
},
{
"epoch": 0.2885668276972625,
"grad_norm": 3.784287929534912,
"learning_rate": 0.00024788786422862526,
"loss": 2.0768,
"step": 112
},
{
"epoch": 0.2885668276972625,
"eval_loss": 2.096742630004883,
"eval_runtime": 89.7867,
"eval_samples_per_second": 2.729,
"eval_steps_per_second": 0.345,
"step": 112
},
{
"epoch": 0.29887278582930754,
"grad_norm": 2.5659940242767334,
"learning_rate": 0.00024407645377103054,
"loss": 2.1725,
"step": 116
},
{
"epoch": 0.30917874396135264,
"grad_norm": 2.8653833866119385,
"learning_rate": 0.00024016217209245374,
"loss": 2.1262,
"step": 120
},
{
"epoch": 0.30917874396135264,
"eval_loss": 2.0943338871002197,
"eval_runtime": 86.1943,
"eval_samples_per_second": 2.842,
"eval_steps_per_second": 0.36,
"step": 120
},
{
"epoch": 0.31948470209339774,
"grad_norm": 2.8778867721557617,
"learning_rate": 0.0002361492994024415,
"loss": 2.1553,
"step": 124
},
{
"epoch": 0.32979066022544284,
"grad_norm": 3.133491277694702,
"learning_rate": 0.00023204222371836405,
"loss": 2.1261,
"step": 128
},
{
"epoch": 0.32979066022544284,
"eval_loss": 2.099522113800049,
"eval_runtime": 84.4047,
"eval_samples_per_second": 2.903,
"eval_steps_per_second": 0.367,
"step": 128
},
{
"epoch": 0.34009661835748795,
"grad_norm": 3.3253138065338135,
"learning_rate": 0.00022784543606718227,
"loss": 2.1485,
"step": 132
},
{
"epoch": 0.350402576489533,
"grad_norm": 3.879983901977539,
"learning_rate": 0.0002235635255745762,
"loss": 2.1411,
"step": 136
},
{
"epoch": 0.350402576489533,
"eval_loss": 2.102804660797119,
"eval_runtime": 84.9029,
"eval_samples_per_second": 2.886,
"eval_steps_per_second": 0.365,
"step": 136
},
{
"epoch": 0.3607085346215781,
"grad_norm": 2.706500768661499,
"learning_rate": 0.00021920117444680317,
"loss": 2.1075,
"step": 140
},
{
"epoch": 0.3710144927536232,
"grad_norm": 4.237916946411133,
"learning_rate": 0.0002147631528507739,
"loss": 2.1369,
"step": 144
},
{
"epoch": 0.3710144927536232,
"eval_loss": 2.1029863357543945,
"eval_runtime": 89.0082,
"eval_samples_per_second": 2.753,
"eval_steps_per_second": 0.348,
"step": 144
},
{
"epoch": 0.3813204508856683,
"grad_norm": 2.939685344696045,
"learning_rate": 0.0002102543136979454,
"loss": 2.144,
"step": 148
},
{
"epoch": 0.39162640901771334,
"grad_norm": 2.547884941101074,
"learning_rate": 0.0002056795873377331,
"loss": 2.1419,
"step": 152
},
{
"epoch": 0.39162640901771334,
"eval_loss": 2.098917007446289,
"eval_runtime": 84.8587,
"eval_samples_per_second": 2.887,
"eval_steps_per_second": 0.365,
"step": 152
},
{
"epoch": 0.40193236714975844,
"grad_norm": 3.1116697788238525,
"learning_rate": 0.00020104397616624645,
"loss": 2.1543,
"step": 156
},
{
"epoch": 0.41223832528180354,
"grad_norm": 2.5041441917419434,
"learning_rate": 0.0001963525491562421,
"loss": 2.165,
"step": 160
},
{
"epoch": 0.41223832528180354,
"eval_loss": 2.0971949100494385,
"eval_runtime": 83.1216,
"eval_samples_per_second": 2.947,
"eval_steps_per_second": 0.373,
"step": 160
},
{
"epoch": 0.42254428341384864,
"grad_norm": 2.993987798690796,
"learning_rate": 0.00019161043631427666,
"loss": 2.1837,
"step": 164
},
{
"epoch": 0.43285024154589374,
"grad_norm": 2.5344765186309814,
"learning_rate": 0.00018682282307111987,
"loss": 2.1948,
"step": 168
},
{
"epoch": 0.43285024154589374,
"eval_loss": 2.0925116539001465,
"eval_runtime": 10.2576,
"eval_samples_per_second": 23.885,
"eval_steps_per_second": 3.022,
"step": 168
},
{
"epoch": 0.4431561996779388,
"grad_norm": 2.8610734939575195,
"learning_rate": 0.00018199494461156203,
"loss": 2.1322,
"step": 172
},
{
"epoch": 0.4534621578099839,
"grad_norm": 3.8807930946350098,
"learning_rate": 0.00017713208014981648,
"loss": 2.1076,
"step": 176
},
{
"epoch": 0.4534621578099839,
"eval_loss": 2.0968382358551025,
"eval_runtime": 10.2298,
"eval_samples_per_second": 23.95,
"eval_steps_per_second": 3.03,
"step": 176
},
{
"epoch": 0.463768115942029,
"grad_norm": 4.0053606033325195,
"learning_rate": 0.00017223954715677627,
"loss": 2.1992,
"step": 180
},
{
"epoch": 0.4740740740740741,
"grad_norm": 3.4194562435150146,
"learning_rate": 0.00016732269554543794,
"loss": 2.1183,
"step": 184
},
{
"epoch": 0.4740740740740741,
"eval_loss": 2.0915744304656982,
"eval_runtime": 10.2256,
"eval_samples_per_second": 23.959,
"eval_steps_per_second": 3.032,
"step": 184
},
{
"epoch": 0.48438003220611914,
"grad_norm": 2.9371349811553955,
"learning_rate": 0.00016238690182084986,
"loss": 2.1272,
"step": 188
},
{
"epoch": 0.49468599033816424,
"grad_norm": 3.1413886547088623,
"learning_rate": 0.00015743756320098332,
"loss": 2.16,
"step": 192
},
{
"epoch": 0.49468599033816424,
"eval_loss": 2.088491678237915,
"eval_runtime": 10.2399,
"eval_samples_per_second": 23.926,
"eval_steps_per_second": 3.027,
"step": 192
},
{
"epoch": 0.5049919484702093,
"grad_norm": 2.9778144359588623,
"learning_rate": 0.00015248009171495378,
"loss": 2.0854,
"step": 196
},
{
"epoch": 0.5152979066022544,
"grad_norm": 2.8931424617767334,
"learning_rate": 0.00014751990828504622,
"loss": 2.0938,
"step": 200
},
{
"epoch": 0.5152979066022544,
"eval_loss": 2.0884335041046143,
"eval_runtime": 10.226,
"eval_samples_per_second": 23.959,
"eval_steps_per_second": 3.031,
"step": 200
},
{
"epoch": 0.5256038647342995,
"grad_norm": 3.7302730083465576,
"learning_rate": 0.00014256243679901663,
"loss": 2.1851,
"step": 204
},
{
"epoch": 0.5359098228663446,
"grad_norm": 2.5322744846343994,
"learning_rate": 0.00013761309817915014,
"loss": 2.1387,
"step": 208
},
{
"epoch": 0.5359098228663446,
"eval_loss": 2.086639404296875,
"eval_runtime": 10.1991,
"eval_samples_per_second": 24.022,
"eval_steps_per_second": 3.039,
"step": 208
},
{
"epoch": 0.5462157809983897,
"grad_norm": 2.5629405975341797,
"learning_rate": 0.00013267730445456208,
"loss": 2.1301,
"step": 212
},
{
"epoch": 0.5565217391304348,
"grad_norm": 3.7714431285858154,
"learning_rate": 0.00012776045284322368,
"loss": 2.1735,
"step": 216
},
{
"epoch": 0.5565217391304348,
"eval_loss": 2.0854458808898926,
"eval_runtime": 10.176,
"eval_samples_per_second": 24.076,
"eval_steps_per_second": 3.046,
"step": 216
},
{
"epoch": 0.5668276972624798,
"grad_norm": 2.724277973175049,
"learning_rate": 0.00012286791985018355,
"loss": 2.1503,
"step": 220
},
{
"epoch": 0.577133655394525,
"grad_norm": 2.5124669075012207,
"learning_rate": 0.00011800505538843798,
"loss": 2.0786,
"step": 224
},
{
"epoch": 0.577133655394525,
"eval_loss": 2.0755257606506348,
"eval_runtime": 10.1831,
"eval_samples_per_second": 24.059,
"eval_steps_per_second": 3.044,
"step": 224
},
{
"epoch": 0.58743961352657,
"grad_norm": 2.6235222816467285,
"learning_rate": 0.00011317717692888012,
"loss": 2.1536,
"step": 228
},
{
"epoch": 0.5977455716586151,
"grad_norm": 3.0219547748565674,
"learning_rate": 0.00010838956368572334,
"loss": 2.0929,
"step": 232
},
{
"epoch": 0.5977455716586151,
"eval_loss": 2.079296350479126,
"eval_runtime": 10.1385,
"eval_samples_per_second": 24.165,
"eval_steps_per_second": 3.058,
"step": 232
},
{
"epoch": 0.6080515297906602,
"grad_norm": 2.6528589725494385,
"learning_rate": 0.0001036474508437579,
"loss": 2.123,
"step": 236
},
{
"epoch": 0.6183574879227053,
"grad_norm": 2.549923896789551,
"learning_rate": 9.895602383375353e-05,
"loss": 2.0871,
"step": 240
},
{
"epoch": 0.6183574879227053,
"eval_loss": 2.0634686946868896,
"eval_runtime": 10.1669,
"eval_samples_per_second": 24.098,
"eval_steps_per_second": 3.049,
"step": 240
},
{
"epoch": 0.6286634460547504,
"grad_norm": 2.451648235321045,
"learning_rate": 9.432041266226686e-05,
"loss": 2.0951,
"step": 244
},
{
"epoch": 0.6389694041867955,
"grad_norm": 2.5378222465515137,
"learning_rate": 8.97456863020546e-05,
"loss": 2.0744,
"step": 248
},
{
"epoch": 0.6389694041867955,
"eval_loss": 2.0636842250823975,
"eval_runtime": 10.1186,
"eval_samples_per_second": 24.213,
"eval_steps_per_second": 3.064,
"step": 248
},
{
"epoch": 0.6492753623188405,
"grad_norm": 2.2349236011505127,
"learning_rate": 8.523684714922608e-05,
"loss": 2.0909,
"step": 252
},
{
"epoch": 0.6595813204508857,
"grad_norm": 2.813871145248413,
"learning_rate": 8.079882555319684e-05,
"loss": 2.1142,
"step": 256
},
{
"epoch": 0.6595813204508857,
"eval_loss": 2.061554431915283,
"eval_runtime": 10.0831,
"eval_samples_per_second": 24.298,
"eval_steps_per_second": 3.074,
"step": 256
},
{
"epoch": 0.6698872785829307,
"grad_norm": 2.8577089309692383,
"learning_rate": 7.643647442542382e-05,
"loss": 2.1188,
"step": 260
},
{
"epoch": 0.6801932367149759,
"grad_norm": 3.5591704845428467,
"learning_rate": 7.215456393281776e-05,
"loss": 2.0861,
"step": 264
},
{
"epoch": 0.6801932367149759,
"eval_loss": 2.0569865703582764,
"eval_runtime": 10.0856,
"eval_samples_per_second": 24.292,
"eval_steps_per_second": 3.074,
"step": 264
},
{
"epoch": 0.6904991948470209,
"grad_norm": 2.480534791946411,
"learning_rate": 6.795777628163599e-05,
"loss": 2.1246,
"step": 268
},
{
"epoch": 0.700805152979066,
"grad_norm": 1.919311761856079,
"learning_rate": 6.385070059755846e-05,
"loss": 2.1428,
"step": 272
},
{
"epoch": 0.700805152979066,
"eval_loss": 2.0533571243286133,
"eval_runtime": 85.9415,
"eval_samples_per_second": 2.851,
"eval_steps_per_second": 0.361,
"step": 272
},
{
"epoch": 0.7111111111111111,
"grad_norm": 2.5054383277893066,
"learning_rate": 5.983782790754623e-05,
"loss": 2.0624,
"step": 276
},
{
"epoch": 0.7214170692431562,
"grad_norm": 2.642378807067871,
"learning_rate": 5.592354622896944e-05,
"loss": 2.0474,
"step": 280
},
{
"epoch": 0.7214170692431562,
"eval_loss": 2.048586130142212,
"eval_runtime": 65.6601,
"eval_samples_per_second": 3.731,
"eval_steps_per_second": 0.472,
"step": 280
},
{
"epoch": 0.7317230273752013,
"grad_norm": 2.2351460456848145,
"learning_rate": 5.211213577137469e-05,
"loss": 2.1221,
"step": 284
},
{
"epoch": 0.7420289855072464,
"grad_norm": 2.515111207962036,
"learning_rate": 4.840776425613886e-05,
"loss": 2.1296,
"step": 288
},
{
"epoch": 0.7420289855072464,
"eval_loss": 2.043865442276001,
"eval_runtime": 70.6526,
"eval_samples_per_second": 3.468,
"eval_steps_per_second": 0.439,
"step": 288
},
{
"epoch": 0.7523349436392914,
"grad_norm": 1.9993336200714111,
"learning_rate": 4.481448235912671e-05,
"loss": 2.1072,
"step": 292
},
{
"epoch": 0.7626409017713366,
"grad_norm": 2.0789833068847656,
"learning_rate": 4.133621928133665e-05,
"loss": 2.062,
"step": 296
},
{
"epoch": 0.7626409017713366,
"eval_loss": 2.0424935817718506,
"eval_runtime": 90.2963,
"eval_samples_per_second": 2.713,
"eval_steps_per_second": 0.343,
"step": 296
},
{
"epoch": 0.7729468599033816,
"grad_norm": 2.315333604812622,
"learning_rate": 3.797677845237696e-05,
"loss": 2.1235,
"step": 300
},
{
"epoch": 0.7832528180354267,
"grad_norm": 2.269334077835083,
"learning_rate": 3.473983337147118e-05,
"loss": 2.0758,
"step": 304
},
{
"epoch": 0.7832528180354267,
"eval_loss": 2.0405173301696777,
"eval_runtime": 77.3189,
"eval_samples_per_second": 3.169,
"eval_steps_per_second": 0.401,
"step": 304
},
{
"epoch": 0.7935587761674718,
"grad_norm": 2.193899393081665,
"learning_rate": 3.162892359054098e-05,
"loss": 2.0314,
"step": 308
},
{
"epoch": 0.8038647342995169,
"grad_norm": 2.2756619453430176,
"learning_rate": 2.8647450843757897e-05,
"loss": 2.0604,
"step": 312
},
{
"epoch": 0.8038647342995169,
"eval_loss": 2.037036180496216,
"eval_runtime": 85.5713,
"eval_samples_per_second": 2.863,
"eval_steps_per_second": 0.362,
"step": 312
},
{
"epoch": 0.814170692431562,
"grad_norm": 3.295262575149536,
"learning_rate": 2.5798675327796993e-05,
"loss": 2.0895,
"step": 316
},
{
"epoch": 0.8244766505636071,
"grad_norm": 2.0809378623962402,
"learning_rate": 2.3085712136859668e-05,
"loss": 2.0963,
"step": 320
},
{
"epoch": 0.8244766505636071,
"eval_loss": 2.036062717437744,
"eval_runtime": 87.0694,
"eval_samples_per_second": 2.814,
"eval_steps_per_second": 0.356,
"step": 320
},
{
"epoch": 0.8347826086956521,
"grad_norm": 1.9167258739471436,
"learning_rate": 2.0511527856363912e-05,
"loss": 2.1139,
"step": 324
},
{
"epoch": 0.8450885668276973,
"grad_norm": 1.832511305809021,
"learning_rate": 1.8078937319026654e-05,
"loss": 2.0926,
"step": 328
},
{
"epoch": 0.8450885668276973,
"eval_loss": 2.0341949462890625,
"eval_runtime": 10.2561,
"eval_samples_per_second": 23.888,
"eval_steps_per_second": 3.023,
"step": 328
},
{
"epoch": 0.8553945249597423,
"grad_norm": 2.3450729846954346,
"learning_rate": 1.579060052688548e-05,
"loss": 2.0517,
"step": 332
},
{
"epoch": 0.8657004830917875,
"grad_norm": 2.012261152267456,
"learning_rate": 1.3649019742625623e-05,
"loss": 2.0571,
"step": 336
},
{
"epoch": 0.8657004830917875,
"eval_loss": 2.0307068824768066,
"eval_runtime": 10.2233,
"eval_samples_per_second": 23.965,
"eval_steps_per_second": 3.032,
"step": 336
},
{
"epoch": 0.8760064412238325,
"grad_norm": 2.20833158493042,
"learning_rate": 1.1656536753392287e-05,
"loss": 2.0968,
"step": 340
},
{
"epoch": 0.8863123993558776,
"grad_norm": 2.631930112838745,
"learning_rate": 9.815330310080887e-06,
"loss": 2.0858,
"step": 344
},
{
"epoch": 0.8863123993558776,
"eval_loss": 2.0296614170074463,
"eval_runtime": 10.2685,
"eval_samples_per_second": 23.859,
"eval_steps_per_second": 3.019,
"step": 344
},
{
"epoch": 0.8966183574879227,
"grad_norm": 1.9802478551864624,
"learning_rate": 8.127413744904804e-06,
"loss": 2.0328,
"step": 348
},
{
"epoch": 0.9069243156199678,
"grad_norm": 2.16888689994812,
"learning_rate": 6.594632769846353e-06,
"loss": 2.066,
"step": 352
},
{
"epoch": 0.9069243156199678,
"eval_loss": 2.026951313018799,
"eval_runtime": 10.2296,
"eval_samples_per_second": 23.95,
"eval_steps_per_second": 3.03,
"step": 352
},
{
"epoch": 0.9172302737520129,
"grad_norm": 2.294292449951172,
"learning_rate": 5.218663458397715e-06,
"loss": 2.133,
"step": 356
},
{
"epoch": 0.927536231884058,
"grad_norm": 2.047563076019287,
"learning_rate": 4.001010412799138e-06,
"loss": 2.1284,
"step": 360
},
{
"epoch": 0.927536231884058,
"eval_loss": 2.026015520095825,
"eval_runtime": 10.2018,
"eval_samples_per_second": 24.015,
"eval_steps_per_second": 3.039,
"step": 360
},
{
"epoch": 0.937842190016103,
"grad_norm": 2.0471081733703613,
"learning_rate": 2.9430051187785962e-06,
"loss": 2.0978,
"step": 364
},
{
"epoch": 0.9481481481481482,
"grad_norm": 2.1072356700897217,
"learning_rate": 2.0458044895916513e-06,
"loss": 2.0618,
"step": 368
},
{
"epoch": 0.9481481481481482,
"eval_loss": 2.0257070064544678,
"eval_runtime": 10.1953,
"eval_samples_per_second": 24.031,
"eval_steps_per_second": 3.041,
"step": 368
},
{
"epoch": 0.9584541062801932,
"grad_norm": 2.2480766773223877,
"learning_rate": 1.3103896009537207e-06,
"loss": 2.0197,
"step": 372
},
{
"epoch": 0.9687600644122383,
"grad_norm": 2.2486612796783447,
"learning_rate": 7.375646182482875e-07,
"loss": 2.1074,
"step": 376
},
{
"epoch": 0.9687600644122383,
"eval_loss": 2.025618076324463,
"eval_runtime": 10.1895,
"eval_samples_per_second": 24.044,
"eval_steps_per_second": 3.042,
"step": 376
},
{
"epoch": 0.9790660225442834,
"grad_norm": 2.1844937801361084,
"learning_rate": 3.2795591718381975e-07,
"loss": 2.0191,
"step": 380
},
{
"epoch": 0.9893719806763285,
"grad_norm": 2.3360085487365723,
"learning_rate": 8.201139886109264e-08,
"loss": 2.0625,
"step": 384
},
{
"epoch": 0.9893719806763285,
"eval_loss": 2.025890350341797,
"eval_runtime": 10.1675,
"eval_samples_per_second": 24.096,
"eval_steps_per_second": 3.049,
"step": 384
}
],
"logging_steps": 4,
"max_steps": 388,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 8,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 8.523723017815327e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}