{
"best_metric": 1.9893906116485596,
"best_model_checkpoint": "/home/datta0/models/lora_final/Qwen2-7B_pct_ortho/checkpoint-16",
"epoch": 0.9996779388083736,
"eval_steps": 8,
"global_step": 388,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0025764895330112722,
"grad_norm": 3.2372000217437744,
"learning_rate": 3.75e-05,
"loss": 2.0292,
"step": 1
},
{
"epoch": 0.010305958132045089,
"grad_norm": 2.4374635219573975,
"learning_rate": 0.00015,
"loss": 2.1277,
"step": 4
},
{
"epoch": 0.020611916264090178,
"grad_norm": 2.3065264225006104,
"learning_rate": 0.0003,
"loss": 2.0836,
"step": 8
},
{
"epoch": 0.020611916264090178,
"eval_loss": 1.999721884727478,
"eval_runtime": 10.3706,
"eval_samples_per_second": 23.624,
"eval_steps_per_second": 2.989,
"step": 8
},
{
"epoch": 0.030917874396135265,
"grad_norm": 2.9390103816986084,
"learning_rate": 0.0002999179886011389,
"loss": 2.0317,
"step": 12
},
{
"epoch": 0.041223832528180356,
"grad_norm": 1.5938222408294678,
"learning_rate": 0.00029967204408281613,
"loss": 2.0451,
"step": 16
},
{
"epoch": 0.041223832528180356,
"eval_loss": 1.9893906116485596,
"eval_runtime": 10.3682,
"eval_samples_per_second": 23.63,
"eval_steps_per_second": 2.99,
"step": 16
},
{
"epoch": 0.05152979066022544,
"grad_norm": 1.8015908002853394,
"learning_rate": 0.0002992624353817517,
"loss": 2.0237,
"step": 20
},
{
"epoch": 0.06183574879227053,
"grad_norm": 2.2030797004699707,
"learning_rate": 0.00029868961039904624,
"loss": 2.0868,
"step": 24
},
{
"epoch": 0.06183574879227053,
"eval_loss": 2.0094170570373535,
"eval_runtime": 10.3475,
"eval_samples_per_second": 23.677,
"eval_steps_per_second": 2.996,
"step": 24
},
{
"epoch": 0.07214170692431562,
"grad_norm": 1.716269612312317,
"learning_rate": 0.00029795419551040833,
"loss": 2.0891,
"step": 28
},
{
"epoch": 0.08244766505636071,
"grad_norm": 1.7382190227508545,
"learning_rate": 0.0002970569948812214,
"loss": 2.0404,
"step": 32
},
{
"epoch": 0.08244766505636071,
"eval_loss": 2.023028612136841,
"eval_runtime": 10.3577,
"eval_samples_per_second": 23.654,
"eval_steps_per_second": 2.993,
"step": 32
},
{
"epoch": 0.0927536231884058,
"grad_norm": 1.8182299137115479,
"learning_rate": 0.0002959989895872009,
"loss": 2.0905,
"step": 36
},
{
"epoch": 0.10305958132045089,
"grad_norm": 2.180691719055176,
"learning_rate": 0.0002947813365416023,
"loss": 2.0951,
"step": 40
},
{
"epoch": 0.10305958132045089,
"eval_loss": 2.0405566692352295,
"eval_runtime": 10.3579,
"eval_samples_per_second": 23.653,
"eval_steps_per_second": 2.993,
"step": 40
},
{
"epoch": 0.11336553945249597,
"grad_norm": 1.717347264289856,
"learning_rate": 0.0002934053672301536,
"loss": 2.0454,
"step": 44
},
{
"epoch": 0.12367149758454106,
"grad_norm": 1.971640944480896,
"learning_rate": 0.00029187258625509513,
"loss": 2.1037,
"step": 48
},
{
"epoch": 0.12367149758454106,
"eval_loss": 2.0564253330230713,
"eval_runtime": 10.3215,
"eval_samples_per_second": 23.737,
"eval_steps_per_second": 3.003,
"step": 48
},
{
"epoch": 0.13397745571658615,
"grad_norm": 1.4907288551330566,
"learning_rate": 0.0002901846696899191,
"loss": 2.0669,
"step": 52
},
{
"epoch": 0.14428341384863125,
"grad_norm": 2.1952505111694336,
"learning_rate": 0.0002883434632466077,
"loss": 2.1105,
"step": 56
},
{
"epoch": 0.14428341384863125,
"eval_loss": 2.057234525680542,
"eval_runtime": 10.2889,
"eval_samples_per_second": 23.812,
"eval_steps_per_second": 3.013,
"step": 56
},
{
"epoch": 0.15458937198067632,
"grad_norm": 1.6156632900238037,
"learning_rate": 0.00028635098025737434,
"loss": 2.1114,
"step": 60
},
{
"epoch": 0.16489533011272142,
"grad_norm": 2.114741325378418,
"learning_rate": 0.0002842093994731145,
"loss": 2.098,
"step": 64
},
{
"epoch": 0.16489533011272142,
"eval_loss": 2.066581964492798,
"eval_runtime": 10.2423,
"eval_samples_per_second": 23.92,
"eval_steps_per_second": 3.027,
"step": 64
},
{
"epoch": 0.1752012882447665,
"grad_norm": 1.9021594524383545,
"learning_rate": 0.00028192106268097334,
"loss": 2.1778,
"step": 68
},
{
"epoch": 0.1855072463768116,
"grad_norm": 2.0035440921783447,
"learning_rate": 0.0002794884721436361,
"loss": 2.1234,
"step": 72
},
{
"epoch": 0.1855072463768116,
"eval_loss": 2.0809762477874756,
"eval_runtime": 10.2456,
"eval_samples_per_second": 23.913,
"eval_steps_per_second": 3.026,
"step": 72
},
{
"epoch": 0.19581320450885667,
"grad_norm": 2.106894016265869,
"learning_rate": 0.0002769142878631403,
"loss": 2.1337,
"step": 76
},
{
"epoch": 0.20611916264090177,
"grad_norm": 1.7780187129974365,
"learning_rate": 0.000274201324672203,
"loss": 2.1848,
"step": 80
},
{
"epoch": 0.20611916264090177,
"eval_loss": 2.076953887939453,
"eval_runtime": 10.3339,
"eval_samples_per_second": 23.708,
"eval_steps_per_second": 3.0,
"step": 80
},
{
"epoch": 0.21642512077294687,
"grad_norm": 1.5657672882080078,
"learning_rate": 0.0002713525491562421,
"loss": 2.2282,
"step": 84
},
{
"epoch": 0.22673107890499195,
"grad_norm": 1.7085858583450317,
"learning_rate": 0.00026837107640945905,
"loss": 2.1566,
"step": 88
},
{
"epoch": 0.22673107890499195,
"eval_loss": 2.0833017826080322,
"eval_runtime": 10.3482,
"eval_samples_per_second": 23.676,
"eval_steps_per_second": 2.996,
"step": 88
},
{
"epoch": 0.23703703703703705,
"grad_norm": 1.7911115884780884,
"learning_rate": 0.00026526016662852886,
"loss": 2.0901,
"step": 92
},
{
"epoch": 0.24734299516908212,
"grad_norm": 2.0994713306427,
"learning_rate": 0.0002620232215476231,
"loss": 2.1434,
"step": 96
},
{
"epoch": 0.24734299516908212,
"eval_loss": 2.077416181564331,
"eval_runtime": 10.3508,
"eval_samples_per_second": 23.67,
"eval_steps_per_second": 2.995,
"step": 96
},
{
"epoch": 0.2576489533011272,
"grad_norm": 2.3806815147399902,
"learning_rate": 0.00025866378071866334,
"loss": 2.1926,
"step": 100
},
{
"epoch": 0.2679549114331723,
"grad_norm": 1.6448813676834106,
"learning_rate": 0.00025518551764087326,
"loss": 2.1722,
"step": 104
},
{
"epoch": 0.2679549114331723,
"eval_loss": 2.089759588241577,
"eval_runtime": 10.358,
"eval_samples_per_second": 23.653,
"eval_steps_per_second": 2.993,
"step": 104
},
{
"epoch": 0.2782608695652174,
"grad_norm": 1.5598400831222534,
"learning_rate": 0.00025159223574386114,
"loss": 2.1602,
"step": 108
},
{
"epoch": 0.2885668276972625,
"grad_norm": 1.6742339134216309,
"learning_rate": 0.00024788786422862526,
"loss": 2.0835,
"step": 112
},
{
"epoch": 0.2885668276972625,
"eval_loss": 2.1009163856506348,
"eval_runtime": 10.3505,
"eval_samples_per_second": 23.67,
"eval_steps_per_second": 2.995,
"step": 112
},
{
"epoch": 0.29887278582930754,
"grad_norm": 1.6221950054168701,
"learning_rate": 0.00024407645377103054,
"loss": 2.1954,
"step": 116
},
{
"epoch": 0.30917874396135264,
"grad_norm": 1.9361300468444824,
"learning_rate": 0.00024016217209245374,
"loss": 2.1355,
"step": 120
},
{
"epoch": 0.30917874396135264,
"eval_loss": 2.1047351360321045,
"eval_runtime": 10.3462,
"eval_samples_per_second": 23.68,
"eval_steps_per_second": 2.996,
"step": 120
},
{
"epoch": 0.31948470209339774,
"grad_norm": 2.017512798309326,
"learning_rate": 0.0002361492994024415,
"loss": 2.173,
"step": 124
},
{
"epoch": 0.32979066022544284,
"grad_norm": 1.7186895608901978,
"learning_rate": 0.00023204222371836405,
"loss": 2.1492,
"step": 128
},
{
"epoch": 0.32979066022544284,
"eval_loss": 2.0960302352905273,
"eval_runtime": 10.3407,
"eval_samples_per_second": 23.693,
"eval_steps_per_second": 2.998,
"step": 128
},
{
"epoch": 0.34009661835748795,
"grad_norm": 1.9205563068389893,
"learning_rate": 0.00022784543606718227,
"loss": 2.1602,
"step": 132
},
{
"epoch": 0.350402576489533,
"grad_norm": 1.667883276939392,
"learning_rate": 0.0002235635255745762,
"loss": 2.1524,
"step": 136
},
{
"epoch": 0.350402576489533,
"eval_loss": 2.106950283050537,
"eval_runtime": 10.3412,
"eval_samples_per_second": 23.692,
"eval_steps_per_second": 2.998,
"step": 136
},
{
"epoch": 0.3607085346215781,
"grad_norm": 1.8527525663375854,
"learning_rate": 0.00021920117444680317,
"loss": 2.1214,
"step": 140
},
{
"epoch": 0.3710144927536232,
"grad_norm": 1.5822211503982544,
"learning_rate": 0.0002147631528507739,
"loss": 2.1429,
"step": 144
},
{
"epoch": 0.3710144927536232,
"eval_loss": 2.11195707321167,
"eval_runtime": 10.3335,
"eval_samples_per_second": 23.709,
"eval_steps_per_second": 3.0,
"step": 144
},
{
"epoch": 0.3813204508856683,
"grad_norm": 1.7781555652618408,
"learning_rate": 0.0002102543136979454,
"loss": 2.1603,
"step": 148
},
{
"epoch": 0.39162640901771334,
"grad_norm": 1.9275873899459839,
"learning_rate": 0.0002056795873377331,
"loss": 2.1611,
"step": 152
},
{
"epoch": 0.39162640901771334,
"eval_loss": 2.1226720809936523,
"eval_runtime": 10.3248,
"eval_samples_per_second": 23.729,
"eval_steps_per_second": 3.002,
"step": 152
},
{
"epoch": 0.40193236714975844,
"grad_norm": 1.5050312280654907,
"learning_rate": 0.00020104397616624645,
"loss": 2.1729,
"step": 156
},
{
"epoch": 0.41223832528180354,
"grad_norm": 1.7592459917068481,
"learning_rate": 0.0001963525491562421,
"loss": 2.1943,
"step": 160
},
{
"epoch": 0.41223832528180354,
"eval_loss": 2.1149258613586426,
"eval_runtime": 10.2904,
"eval_samples_per_second": 23.809,
"eval_steps_per_second": 3.013,
"step": 160
},
{
"epoch": 0.42254428341384864,
"grad_norm": 2.097492218017578,
"learning_rate": 0.00019161043631427666,
"loss": 2.2041,
"step": 164
},
{
"epoch": 0.43285024154589374,
"grad_norm": 2.399651288986206,
"learning_rate": 0.00018682282307111987,
"loss": 2.2268,
"step": 168
},
{
"epoch": 0.43285024154589374,
"eval_loss": 2.110522508621216,
"eval_runtime": 10.2285,
"eval_samples_per_second": 23.953,
"eval_steps_per_second": 3.031,
"step": 168
},
{
"epoch": 0.4431561996779388,
"grad_norm": 1.730817198753357,
"learning_rate": 0.00018199494461156203,
"loss": 2.1502,
"step": 172
},
{
"epoch": 0.4534621578099839,
"grad_norm": 1.9517581462860107,
"learning_rate": 0.00017713208014981648,
"loss": 2.135,
"step": 176
},
{
"epoch": 0.4534621578099839,
"eval_loss": 2.108701229095459,
"eval_runtime": 10.2002,
"eval_samples_per_second": 24.019,
"eval_steps_per_second": 3.039,
"step": 176
},
{
"epoch": 0.463768115942029,
"grad_norm": 1.800856590270996,
"learning_rate": 0.00017223954715677627,
"loss": 2.2322,
"step": 180
},
{
"epoch": 0.4740740740740741,
"grad_norm": 1.8001936674118042,
"learning_rate": 0.00016732269554543794,
"loss": 2.1443,
"step": 184
},
{
"epoch": 0.4740740740740741,
"eval_loss": 2.107579469680786,
"eval_runtime": 44.2515,
"eval_samples_per_second": 5.537,
"eval_steps_per_second": 0.701,
"step": 184
},
{
"epoch": 0.48438003220611914,
"grad_norm": 2.1538124084472656,
"learning_rate": 0.00016238690182084986,
"loss": 2.1626,
"step": 188
},
{
"epoch": 0.49468599033816424,
"grad_norm": 1.6191723346710205,
"learning_rate": 0.00015743756320098332,
"loss": 2.1925,
"step": 192
},
{
"epoch": 0.49468599033816424,
"eval_loss": 2.106790542602539,
"eval_runtime": 46.736,
"eval_samples_per_second": 5.242,
"eval_steps_per_second": 0.663,
"step": 192
},
{
"epoch": 0.5049919484702093,
"grad_norm": 1.7128194570541382,
"learning_rate": 0.00015248009171495378,
"loss": 2.1204,
"step": 196
},
{
"epoch": 0.5152979066022544,
"grad_norm": 1.831337809562683,
"learning_rate": 0.00014751990828504622,
"loss": 2.1225,
"step": 200
},
{
"epoch": 0.5152979066022544,
"eval_loss": 2.10336971282959,
"eval_runtime": 45.5466,
"eval_samples_per_second": 5.379,
"eval_steps_per_second": 0.681,
"step": 200
},
{
"epoch": 0.5256038647342995,
"grad_norm": 2.0683786869049072,
"learning_rate": 0.00014256243679901663,
"loss": 2.2088,
"step": 204
},
{
"epoch": 0.5359098228663446,
"grad_norm": 2.0262675285339355,
"learning_rate": 0.00013761309817915014,
"loss": 2.1679,
"step": 208
},
{
"epoch": 0.5359098228663446,
"eval_loss": 2.1078262329101562,
"eval_runtime": 48.3703,
"eval_samples_per_second": 5.065,
"eval_steps_per_second": 0.641,
"step": 208
},
{
"epoch": 0.5462157809983897,
"grad_norm": 1.6107491254806519,
"learning_rate": 0.00013267730445456208,
"loss": 2.1568,
"step": 212
},
{
"epoch": 0.5565217391304348,
"grad_norm": 1.8629176616668701,
"learning_rate": 0.00012776045284322368,
"loss": 2.2091,
"step": 216
},
{
"epoch": 0.5565217391304348,
"eval_loss": 2.1099538803100586,
"eval_runtime": 44.7406,
"eval_samples_per_second": 5.476,
"eval_steps_per_second": 0.693,
"step": 216
},
{
"epoch": 0.5668276972624798,
"grad_norm": 1.5823767185211182,
"learning_rate": 0.00012286791985018355,
"loss": 2.196,
"step": 220
},
{
"epoch": 0.577133655394525,
"grad_norm": 1.6616393327713013,
"learning_rate": 0.00011800505538843798,
"loss": 2.1175,
"step": 224
},
{
"epoch": 0.577133655394525,
"eval_loss": 2.097626209259033,
"eval_runtime": 44.1426,
"eval_samples_per_second": 5.55,
"eval_steps_per_second": 0.702,
"step": 224
},
{
"epoch": 0.58743961352657,
"grad_norm": 1.9238176345825195,
"learning_rate": 0.00011317717692888012,
"loss": 2.195,
"step": 228
},
{
"epoch": 0.5977455716586151,
"grad_norm": 1.7493959665298462,
"learning_rate": 0.00010838956368572334,
"loss": 2.1288,
"step": 232
},
{
"epoch": 0.5977455716586151,
"eval_loss": 2.1059985160827637,
"eval_runtime": 43.5475,
"eval_samples_per_second": 5.626,
"eval_steps_per_second": 0.712,
"step": 232
},
{
"epoch": 0.6080515297906602,
"grad_norm": 1.8193024396896362,
"learning_rate": 0.0001036474508437579,
"loss": 2.1627,
"step": 236
},
{
"epoch": 0.6183574879227053,
"grad_norm": 1.8019583225250244,
"learning_rate": 9.895602383375353e-05,
"loss": 2.1234,
"step": 240
},
{
"epoch": 0.6183574879227053,
"eval_loss": 2.091606616973877,
"eval_runtime": 10.3524,
"eval_samples_per_second": 23.666,
"eval_steps_per_second": 2.994,
"step": 240
},
{
"epoch": 0.6286634460547504,
"grad_norm": 1.7312543392181396,
"learning_rate": 9.432041266226686e-05,
"loss": 2.1407,
"step": 244
},
{
"epoch": 0.6389694041867955,
"grad_norm": 1.6879090070724487,
"learning_rate": 8.97456863020546e-05,
"loss": 2.1084,
"step": 248
},
{
"epoch": 0.6389694041867955,
"eval_loss": 2.09163498878479,
"eval_runtime": 10.3537,
"eval_samples_per_second": 23.663,
"eval_steps_per_second": 2.994,
"step": 248
},
{
"epoch": 0.6492753623188405,
"grad_norm": 1.837243914604187,
"learning_rate": 8.523684714922608e-05,
"loss": 2.1417,
"step": 252
},
{
"epoch": 0.6595813204508857,
"grad_norm": 1.6868021488189697,
"learning_rate": 8.079882555319684e-05,
"loss": 2.1631,
"step": 256
},
{
"epoch": 0.6595813204508857,
"eval_loss": 2.0922515392303467,
"eval_runtime": 10.356,
"eval_samples_per_second": 23.658,
"eval_steps_per_second": 2.993,
"step": 256
},
{
"epoch": 0.6698872785829307,
"grad_norm": 1.6869653463363647,
"learning_rate": 7.643647442542382e-05,
"loss": 2.1583,
"step": 260
},
{
"epoch": 0.6801932367149759,
"grad_norm": 1.7420066595077515,
"learning_rate": 7.215456393281776e-05,
"loss": 2.1299,
"step": 264
},
{
"epoch": 0.6801932367149759,
"eval_loss": 2.0841996669769287,
"eval_runtime": 10.3628,
"eval_samples_per_second": 23.642,
"eval_steps_per_second": 2.991,
"step": 264
},
{
"epoch": 0.6904991948470209,
"grad_norm": 1.6992385387420654,
"learning_rate": 6.795777628163599e-05,
"loss": 2.1609,
"step": 268
},
{
"epoch": 0.700805152979066,
"grad_norm": 1.638026475906372,
"learning_rate": 6.385070059755846e-05,
"loss": 2.1939,
"step": 272
},
{
"epoch": 0.700805152979066,
"eval_loss": 2.091933012008667,
"eval_runtime": 10.3591,
"eval_samples_per_second": 23.651,
"eval_steps_per_second": 2.993,
"step": 272
},
{
"epoch": 0.7111111111111111,
"grad_norm": 1.6998041868209839,
"learning_rate": 5.983782790754623e-05,
"loss": 2.1079,
"step": 276
},
{
"epoch": 0.7214170692431562,
"grad_norm": 2.0246264934539795,
"learning_rate": 5.592354622896944e-05,
"loss": 2.071,
"step": 280
},
{
"epoch": 0.7214170692431562,
"eval_loss": 2.083048105239868,
"eval_runtime": 10.3561,
"eval_samples_per_second": 23.658,
"eval_steps_per_second": 2.993,
"step": 280
},
{
"epoch": 0.7317230273752013,
"grad_norm": 2.208463191986084,
"learning_rate": 5.211213577137469e-05,
"loss": 2.168,
"step": 284
},
{
"epoch": 0.7420289855072464,
"grad_norm": 1.7109562158584595,
"learning_rate": 4.840776425613886e-05,
"loss": 2.181,
"step": 288
},
{
"epoch": 0.7420289855072464,
"eval_loss": 2.0801281929016113,
"eval_runtime": 10.3534,
"eval_samples_per_second": 23.664,
"eval_steps_per_second": 2.994,
"step": 288
},
{
"epoch": 0.7523349436392914,
"grad_norm": 2.0346124172210693,
"learning_rate": 4.481448235912671e-05,
"loss": 2.1519,
"step": 292
},
{
"epoch": 0.7626409017713366,
"grad_norm": 2.0301458835601807,
"learning_rate": 4.133621928133665e-05,
"loss": 2.1076,
"step": 296
},
{
"epoch": 0.7626409017713366,
"eval_loss": 2.080409526824951,
"eval_runtime": 10.2414,
"eval_samples_per_second": 23.923,
"eval_steps_per_second": 3.027,
"step": 296
},
{
"epoch": 0.7729468599033816,
"grad_norm": 1.697075605392456,
"learning_rate": 3.797677845237696e-05,
"loss": 2.1753,
"step": 300
},
{
"epoch": 0.7832528180354267,
"grad_norm": 1.7235360145568848,
"learning_rate": 3.473983337147118e-05,
"loss": 2.1185,
"step": 304
},
{
"epoch": 0.7832528180354267,
"eval_loss": 2.0761303901672363,
"eval_runtime": 10.246,
"eval_samples_per_second": 23.912,
"eval_steps_per_second": 3.026,
"step": 304
},
{
"epoch": 0.7935587761674718,
"grad_norm": 1.8444669246673584,
"learning_rate": 3.162892359054098e-05,
"loss": 2.0746,
"step": 308
},
{
"epoch": 0.8038647342995169,
"grad_norm": 1.657713770866394,
"learning_rate": 2.8647450843757897e-05,
"loss": 2.1079,
"step": 312
},
{
"epoch": 0.8038647342995169,
"eval_loss": 2.074939727783203,
"eval_runtime": 10.2363,
"eval_samples_per_second": 23.934,
"eval_steps_per_second": 3.028,
"step": 312
},
{
"epoch": 0.814170692431562,
"grad_norm": 1.6545333862304688,
"learning_rate": 2.5798675327796993e-05,
"loss": 2.1359,
"step": 316
},
{
"epoch": 0.8244766505636071,
"grad_norm": 1.5847792625427246,
"learning_rate": 2.3085712136859668e-05,
"loss": 2.1499,
"step": 320
},
{
"epoch": 0.8244766505636071,
"eval_loss": 2.078282356262207,
"eval_runtime": 10.2307,
"eval_samples_per_second": 23.948,
"eval_steps_per_second": 3.03,
"step": 320
},
{
"epoch": 0.8347826086956521,
"grad_norm": 1.5907636880874634,
"learning_rate": 2.0511527856363912e-05,
"loss": 2.175,
"step": 324
},
{
"epoch": 0.8450885668276973,
"grad_norm": 1.3903125524520874,
"learning_rate": 1.8078937319026654e-05,
"loss": 2.1551,
"step": 328
},
{
"epoch": 0.8450885668276973,
"eval_loss": 2.078404426574707,
"eval_runtime": 10.216,
"eval_samples_per_second": 23.982,
"eval_steps_per_second": 3.034,
"step": 328
},
{
"epoch": 0.8553945249597423,
"grad_norm": 1.6229329109191895,
"learning_rate": 1.579060052688548e-05,
"loss": 2.1056,
"step": 332
},
{
"epoch": 0.8657004830917875,
"grad_norm": 1.4519729614257812,
"learning_rate": 1.3649019742625623e-05,
"loss": 2.1117,
"step": 336
},
{
"epoch": 0.8657004830917875,
"eval_loss": 2.0783541202545166,
"eval_runtime": 10.209,
"eval_samples_per_second": 23.998,
"eval_steps_per_second": 3.037,
"step": 336
},
{
"epoch": 0.8760064412238325,
"grad_norm": 1.7576130628585815,
"learning_rate": 1.1656536753392287e-05,
"loss": 2.1531,
"step": 340
},
{
"epoch": 0.8863123993558776,
"grad_norm": 1.834403395652771,
"learning_rate": 9.815330310080887e-06,
"loss": 2.1463,
"step": 344
},
{
"epoch": 0.8863123993558776,
"eval_loss": 2.075045108795166,
"eval_runtime": 10.1804,
"eval_samples_per_second": 24.066,
"eval_steps_per_second": 3.045,
"step": 344
},
{
"epoch": 0.8966183574879227,
"grad_norm": 1.560579776763916,
"learning_rate": 8.127413744904804e-06,
"loss": 2.0872,
"step": 348
},
{
"epoch": 0.9069243156199678,
"grad_norm": 1.516147494316101,
"learning_rate": 6.594632769846353e-06,
"loss": 2.1167,
"step": 352
},
{
"epoch": 0.9069243156199678,
"eval_loss": 2.06960391998291,
"eval_runtime": 10.1667,
"eval_samples_per_second": 24.098,
"eval_steps_per_second": 3.049,
"step": 352
},
{
"epoch": 0.9172302737520129,
"grad_norm": 1.672770619392395,
"learning_rate": 5.218663458397715e-06,
"loss": 2.1874,
"step": 356
},
{
"epoch": 0.927536231884058,
"grad_norm": 1.5704377889633179,
"learning_rate": 4.001010412799138e-06,
"loss": 2.1882,
"step": 360
},
{
"epoch": 0.927536231884058,
"eval_loss": 2.0714492797851562,
"eval_runtime": 10.128,
"eval_samples_per_second": 24.19,
"eval_steps_per_second": 3.061,
"step": 360
},
{
"epoch": 0.937842190016103,
"grad_norm": 1.5680062770843506,
"learning_rate": 2.9430051187785962e-06,
"loss": 2.1521,
"step": 364
},
{
"epoch": 0.9481481481481482,
"grad_norm": 1.4755935668945312,
"learning_rate": 2.0458044895916513e-06,
"loss": 2.1131,
"step": 368
},
{
"epoch": 0.9481481481481482,
"eval_loss": 2.071572780609131,
"eval_runtime": 10.1214,
"eval_samples_per_second": 24.206,
"eval_steps_per_second": 3.063,
"step": 368
},
{
"epoch": 0.9584541062801932,
"grad_norm": 1.6502960920333862,
"learning_rate": 1.3103896009537207e-06,
"loss": 2.0761,
"step": 372
},
{
"epoch": 0.9687600644122383,
"grad_norm": 1.5987725257873535,
"learning_rate": 7.375646182482875e-07,
"loss": 2.1626,
"step": 376
},
{
"epoch": 0.9687600644122383,
"eval_loss": 2.0713953971862793,
"eval_runtime": 10.101,
"eval_samples_per_second": 24.255,
"eval_steps_per_second": 3.069,
"step": 376
},
{
"epoch": 0.9790660225442834,
"grad_norm": 1.648586392402649,
"learning_rate": 3.2795591718381975e-07,
"loss": 2.0639,
"step": 380
},
{
"epoch": 0.9893719806763285,
"grad_norm": 1.6988483667373657,
"learning_rate": 8.201139886109264e-08,
"loss": 2.1141,
"step": 384
},
{
"epoch": 0.9893719806763285,
"eval_loss": 2.0710363388061523,
"eval_runtime": 10.1164,
"eval_samples_per_second": 24.218,
"eval_steps_per_second": 3.064,
"step": 384
},
{
"epoch": 0.9996779388083736,
"grad_norm": 1.7316569089889526,
"learning_rate": 0.0,
"loss": 2.156,
"step": 388
}
],
"logging_steps": 4,
"max_steps": 388,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 8,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.611644452078223e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}