{
"best_metric": 4.443276405334473,
"best_model_checkpoint": "/home/datta0/models/lora_final/Mistral-7B-v0.3_metamath_reverse/checkpoint-572",
"epoch": 0.9995949777237748,
"eval_steps": 13,
"global_step": 617,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0016200891049007696,
"grad_norm": 31.96648597717285,
"learning_rate": 2.3076923076923076e-05,
"loss": 0.9728,
"step": 1
},
{
"epoch": 0.011340623734305387,
"grad_norm": 25.136903762817383,
"learning_rate": 0.00016153846153846153,
"loss": 0.7436,
"step": 7
},
{
"epoch": 0.021061158363710003,
"eval_loss": 7.4054484367370605,
"eval_runtime": 12.8928,
"eval_samples_per_second": 38.781,
"eval_steps_per_second": 4.886,
"step": 13
},
{
"epoch": 0.022681247468610773,
"grad_norm": 6121.2451171875,
"learning_rate": 0.0002999979709808197,
"loss": 1.928,
"step": 14
},
{
"epoch": 0.03402187120291616,
"grad_norm": 30.988468170166016,
"learning_rate": 0.0002998701612152596,
"loss": 9.1068,
"step": 21
},
{
"epoch": 0.042122316727420006,
"eval_loss": 6.97995662689209,
"eval_runtime": 88.3905,
"eval_samples_per_second": 5.657,
"eval_steps_per_second": 0.713,
"step": 26
},
{
"epoch": 0.04536249493722155,
"grad_norm": 5.160550117492676,
"learning_rate": 0.0002995437011859465,
"loss": 7.0452,
"step": 28
},
{
"epoch": 0.056703118671526935,
"grad_norm": 4.622859954833984,
"learning_rate": 0.00029901902360990936,
"loss": 6.6988,
"step": 35
},
{
"epoch": 0.06318347509113001,
"eval_loss": 6.427122116088867,
"eval_runtime": 94.0313,
"eval_samples_per_second": 5.317,
"eval_steps_per_second": 0.67,
"step": 39
},
{
"epoch": 0.06804374240583232,
"grad_norm": 419.7519226074219,
"learning_rate": 0.00029829682393805085,
"loss": 6.4471,
"step": 42
},
{
"epoch": 0.0793843661401377,
"grad_norm": 3.067017078399658,
"learning_rate": 0.0002973780594333385,
"loss": 6.4684,
"step": 49
},
{
"epoch": 0.08424463345484001,
"eval_loss": 6.289278507232666,
"eval_runtime": 12.7505,
"eval_samples_per_second": 39.214,
"eval_steps_per_second": 4.941,
"step": 52
},
{
"epoch": 0.0907249898744431,
"grad_norm": 14.996749877929688,
"learning_rate": 0.00029626394790197025,
"loss": 6.2403,
"step": 56
},
{
"epoch": 0.10206561360874848,
"grad_norm": 13.672094345092773,
"learning_rate": 0.00029495596607919305,
"loss": 6.1245,
"step": 63
},
{
"epoch": 0.10530579181855002,
"eval_loss": 6.124541759490967,
"eval_runtime": 92.7829,
"eval_samples_per_second": 5.389,
"eval_steps_per_second": 0.679,
"step": 65
},
{
"epoch": 0.11340623734305387,
"grad_norm": 8.664388656616211,
"learning_rate": 0.00029345584767191685,
"loss": 6.0178,
"step": 70
},
{
"epoch": 0.12474686107735926,
"grad_norm": 11.606036186218262,
"learning_rate": 0.0002917655810607161,
"loss": 5.9117,
"step": 77
},
{
"epoch": 0.12636695018226002,
"eval_loss": 5.8770036697387695,
"eval_runtime": 84.5458,
"eval_samples_per_second": 5.914,
"eval_steps_per_second": 0.745,
"step": 78
},
{
"epoch": 0.13608748481166463,
"grad_norm": 18.062744140625,
"learning_rate": 0.0002898874066642667,
"loss": 5.8276,
"step": 84
},
{
"epoch": 0.14742810854597002,
"grad_norm": 28.24692153930664,
"learning_rate": 0.00028782381396971003,
"loss": 5.8448,
"step": 91
},
{
"epoch": 0.14742810854597002,
"eval_loss": 5.783429145812988,
"eval_runtime": 12.9352,
"eval_samples_per_second": 38.654,
"eval_steps_per_second": 4.87,
"step": 91
},
{
"epoch": 0.1587687322802754,
"grad_norm": 25.64031982421875,
"learning_rate": 0.00028557753823288173,
"loss": 5.742,
"step": 98
},
{
"epoch": 0.16848926690968002,
"eval_loss": 5.894050598144531,
"eval_runtime": 88.9837,
"eval_samples_per_second": 5.619,
"eval_steps_per_second": 0.708,
"step": 104
},
{
"epoch": 0.1701093560145808,
"grad_norm": 19.707569122314453,
"learning_rate": 0.0002831515568527781,
"loss": 5.7521,
"step": 105
},
{
"epoch": 0.1814499797488862,
"grad_norm": 17.04213523864746,
"learning_rate": 0.00028054908542506627,
"loss": 5.6054,
"step": 112
},
{
"epoch": 0.18955042527339003,
"eval_loss": 6.097167015075684,
"eval_runtime": 84.3979,
"eval_samples_per_second": 5.924,
"eval_steps_per_second": 0.746,
"step": 117
},
{
"epoch": 0.19279060348319157,
"grad_norm": 92.46790313720703,
"learning_rate": 0.00027777357347986823,
"loss": 5.801,
"step": 119
},
{
"epoch": 0.20413122721749696,
"grad_norm": 24.985816955566406,
"learning_rate": 0.00027482869990946986,
"loss": 5.6465,
"step": 126
},
{
"epoch": 0.21061158363710003,
"eval_loss": 5.480843544006348,
"eval_runtime": 12.9876,
"eval_samples_per_second": 38.498,
"eval_steps_per_second": 4.851,
"step": 130
},
{
"epoch": 0.21547185095180235,
"grad_norm": 18.881345748901367,
"learning_rate": 0.0002717183680920135,
"loss": 5.5278,
"step": 133
},
{
"epoch": 0.22681247468610774,
"grad_norm": 21.324201583862305,
"learning_rate": 0.00026844670071763906,
"loss": 5.5659,
"step": 140
},
{
"epoch": 0.23167274200081003,
"eval_loss": 5.537073135375977,
"eval_runtime": 85.8424,
"eval_samples_per_second": 5.825,
"eval_steps_per_second": 0.734,
"step": 143
},
{
"epoch": 0.23815309842041313,
"grad_norm": 26.40949249267578,
"learning_rate": 0.00026501803432393037,
"loss": 5.5108,
"step": 147
},
{
"epoch": 0.24949372215471852,
"grad_norm": 55.612056732177734,
"learning_rate": 0.00026143691354791145,
"loss": 5.4175,
"step": 154
},
{
"epoch": 0.25273390036452004,
"eval_loss": 5.568782329559326,
"eval_runtime": 94.8092,
"eval_samples_per_second": 5.274,
"eval_steps_per_second": 0.664,
"step": 156
},
{
"epoch": 0.2608343458890239,
"grad_norm": 14.32459545135498,
"learning_rate": 0.00025770808510220956,
"loss": 5.3415,
"step": 161
},
{
"epoch": 0.27217496962332927,
"grad_norm": 38.13811111450195,
"learning_rate": 0.00025383649148337105,
"loss": 5.3148,
"step": 168
},
{
"epoch": 0.27379505872823007,
"eval_loss": 5.364619731903076,
"eval_runtime": 12.9741,
"eval_samples_per_second": 38.538,
"eval_steps_per_second": 4.856,
"step": 169
},
{
"epoch": 0.28351559335763465,
"grad_norm": 17.65839385986328,
"learning_rate": 0.0002498272644206695,
"loss": 5.2599,
"step": 175
},
{
"epoch": 0.29485621709194004,
"grad_norm": 9.63137435913086,
"learning_rate": 0.0002456857180740884,
"loss": 5.2376,
"step": 182
},
{
"epoch": 0.29485621709194004,
"eval_loss": 5.205206871032715,
"eval_runtime": 94.3725,
"eval_samples_per_second": 5.298,
"eval_steps_per_second": 0.668,
"step": 182
},
{
"epoch": 0.30619684082624543,
"grad_norm": 8.901846885681152,
"learning_rate": 0.0002414173419904956,
"loss": 5.2313,
"step": 189
},
{
"epoch": 0.3159173754556501,
"eval_loss": 5.147277355194092,
"eval_runtime": 90.8266,
"eval_samples_per_second": 5.505,
"eval_steps_per_second": 0.694,
"step": 195
},
{
"epoch": 0.3175374645605508,
"grad_norm": 16.01649284362793,
"learning_rate": 0.00023702779382734566,
"loss": 5.1989,
"step": 196
},
{
"epoch": 0.3288780882948562,
"grad_norm": 35.22917556762695,
"learning_rate": 0.0002325228918535541,
"loss": 5.1381,
"step": 203
},
{
"epoch": 0.33697853381936005,
"eval_loss": 5.247087001800537,
"eval_runtime": 13.0295,
"eval_samples_per_second": 38.375,
"eval_steps_per_second": 4.835,
"step": 208
},
{
"epoch": 0.3402187120291616,
"grad_norm": 18.41366958618164,
"learning_rate": 0.00022790860723748442,
"loss": 5.2052,
"step": 210
},
{
"epoch": 0.351559335763467,
"grad_norm": 36.26469802856445,
"learning_rate": 0.00022319105613226921,
"loss": 5.0545,
"step": 217
},
{
"epoch": 0.3580396921830701,
"eval_loss": 5.057940483093262,
"eval_runtime": 81.209,
"eval_samples_per_second": 6.157,
"eval_steps_per_second": 0.776,
"step": 221
},
{
"epoch": 0.3628999594977724,
"grad_norm": 35.501441955566406,
"learning_rate": 0.00021837649156895706,
"loss": 5.0624,
"step": 224
},
{
"epoch": 0.37424058323207776,
"grad_norm": 25.91912078857422,
"learning_rate": 0.00021347129516822945,
"loss": 5.0218,
"step": 231
},
{
"epoch": 0.37910085054678005,
"eval_loss": 5.043420314788818,
"eval_runtime": 81.4809,
"eval_samples_per_second": 6.136,
"eval_steps_per_second": 0.773,
"step": 234
},
{
"epoch": 0.38558120696638315,
"grad_norm": 76.39311981201172,
"learning_rate": 0.00020848196868167505,
"loss": 5.0395,
"step": 238
},
{
"epoch": 0.39692183070068854,
"grad_norm": 55.80715560913086,
"learning_rate": 0.000203415125373832,
"loss": 5.1901,
"step": 245
},
{
"epoch": 0.4001620089104901,
"eval_loss": 5.186158180236816,
"eval_runtime": 13.0415,
"eval_samples_per_second": 38.339,
"eval_steps_per_second": 4.831,
"step": 247
},
{
"epoch": 0.4082624544349939,
"grad_norm": 44.12828063964844,
"learning_rate": 0.00019827748125642242,
"loss": 5.2192,
"step": 252
},
{
"epoch": 0.4196030781692993,
"grad_norm": 20.139680862426758,
"learning_rate": 0.0001930758461863965,
"loss": 5.0809,
"step": 259
},
{
"epoch": 0.42122316727420006,
"eval_loss": 5.010337829589844,
"eval_runtime": 89.8841,
"eval_samples_per_second": 5.563,
"eval_steps_per_second": 0.701,
"step": 260
},
{
"epoch": 0.4309437019036047,
"grad_norm": 22.906221389770508,
"learning_rate": 0.0001878171148395872,
"loss": 5.0478,
"step": 266
},
{
"epoch": 0.4422843256379101,
"grad_norm": 30.48619270324707,
"learning_rate": 0.00018250825757193848,
"loss": 5.0357,
"step": 273
},
{
"epoch": 0.4422843256379101,
"eval_loss": 5.04883337020874,
"eval_runtime": 93.6727,
"eval_samples_per_second": 5.338,
"eval_steps_per_second": 0.673,
"step": 273
},
{
"epoch": 0.4536249493722155,
"grad_norm": 69.97283172607422,
"learning_rate": 0.0001771563111804211,
"loss": 5.0375,
"step": 280
},
{
"epoch": 0.46334548400162007,
"eval_loss": 5.002610206604004,
"eval_runtime": 13.0501,
"eval_samples_per_second": 38.314,
"eval_steps_per_second": 4.828,
"step": 286
},
{
"epoch": 0.46496557310652087,
"grad_norm": 69.82508850097656,
"learning_rate": 0.0001717683695758819,
"loss": 4.9778,
"step": 287
},
{
"epoch": 0.47630619684082626,
"grad_norm": 95.04804229736328,
"learning_rate": 0.00016635157438018983,
"loss": 5.0348,
"step": 294
},
{
"epoch": 0.4844066423653301,
"eval_loss": 5.008110523223877,
"eval_runtime": 86.595,
"eval_samples_per_second": 5.774,
"eval_steps_per_second": 0.728,
"step": 299
},
{
"epoch": 0.48764682057513165,
"grad_norm": 62.681434631347656,
"learning_rate": 0.0001609131054601416,
"loss": 5.0078,
"step": 301
},
{
"epoch": 0.49898744430943703,
"grad_norm": 58.10196304321289,
"learning_rate": 0.00015546017141067432,
"loss": 4.8927,
"step": 308
},
{
"epoch": 0.5054678007290401,
"eval_loss": 4.8912458419799805,
"eval_runtime": 77.9462,
"eval_samples_per_second": 6.415,
"eval_steps_per_second": 0.808,
"step": 312
},
{
"epoch": 0.5103280680437424,
"grad_norm": 44.00553512573242,
"learning_rate": 0.00015,
"loss": 4.9021,
"step": 315
},
{
"epoch": 0.5216686917780478,
"grad_norm": 39.55318832397461,
"learning_rate": 0.0001445398285893257,
"loss": 4.878,
"step": 322
},
{
"epoch": 0.5265289590927501,
"eval_loss": 4.866469860076904,
"eval_runtime": 13.1009,
"eval_samples_per_second": 38.165,
"eval_steps_per_second": 4.809,
"step": 325
},
{
"epoch": 0.5330093155123532,
"grad_norm": 22.130632400512695,
"learning_rate": 0.0001390868945398584,
"loss": 4.8018,
"step": 329
},
{
"epoch": 0.5443499392466585,
"grad_norm": 46.44150161743164,
"learning_rate": 0.00013364842561981014,
"loss": 4.8092,
"step": 336
},
{
"epoch": 0.5475901174564601,
"eval_loss": 4.84024715423584,
"eval_runtime": 94.4317,
"eval_samples_per_second": 5.295,
"eval_steps_per_second": 0.667,
"step": 338
},
{
"epoch": 0.555690562980964,
"grad_norm": 28.048892974853516,
"learning_rate": 0.00012823163042411807,
"loss": 4.7609,
"step": 343
},
{
"epoch": 0.5670311867152693,
"grad_norm": 49.06658935546875,
"learning_rate": 0.0001228436888195789,
"loss": 4.8342,
"step": 350
},
{
"epoch": 0.56865127582017,
"eval_loss": 4.768944263458252,
"eval_runtime": 95.9831,
"eval_samples_per_second": 5.209,
"eval_steps_per_second": 0.656,
"step": 351
},
{
"epoch": 0.5783718104495748,
"grad_norm": 51.22993469238281,
"learning_rate": 0.00011749174242806152,
"loss": 4.7839,
"step": 357
},
{
"epoch": 0.5897124341838801,
"grad_norm": 18.450580596923828,
"learning_rate": 0.00011218288516041279,
"loss": 4.7834,
"step": 364
},
{
"epoch": 0.5897124341838801,
"eval_loss": 4.78420352935791,
"eval_runtime": 13.1163,
"eval_samples_per_second": 38.121,
"eval_steps_per_second": 4.803,
"step": 364
},
{
"epoch": 0.6010530579181855,
"grad_norm": 20.635562896728516,
"learning_rate": 0.00010692415381360349,
"loss": 4.7428,
"step": 371
},
{
"epoch": 0.6107735925475901,
"eval_loss": 4.739642143249512,
"eval_runtime": 85.1259,
"eval_samples_per_second": 5.874,
"eval_steps_per_second": 0.74,
"step": 377
},
{
"epoch": 0.6123936816524909,
"grad_norm": 23.58678436279297,
"learning_rate": 0.00010172251874357757,
"loss": 4.7561,
"step": 378
},
{
"epoch": 0.6237343053867963,
"grad_norm": 15.554181098937988,
"learning_rate": 9.658487462616794e-05,
"loss": 4.7318,
"step": 385
},
{
"epoch": 0.6318347509113001,
"eval_loss": 4.698749542236328,
"eval_runtime": 89.829,
"eval_samples_per_second": 5.566,
"eval_steps_per_second": 0.701,
"step": 390
},
{
"epoch": 0.6350749291211016,
"grad_norm": 11.260749816894531,
"learning_rate": 9.151803131832493e-05,
"loss": 4.7366,
"step": 392
},
{
"epoch": 0.6464155528554071,
"grad_norm": 23.6870059967041,
"learning_rate": 8.652870483177049e-05,
"loss": 4.6442,
"step": 399
},
{
"epoch": 0.6528959092750102,
"eval_loss": 4.6853532791137695,
"eval_runtime": 13.1,
"eval_samples_per_second": 38.168,
"eval_steps_per_second": 4.809,
"step": 403
},
{
"epoch": 0.6577561765897124,
"grad_norm": 27.518665313720703,
"learning_rate": 8.162350843104291e-05,
"loss": 4.6589,
"step": 406
},
{
"epoch": 0.6690968003240179,
"grad_norm": 40.192344665527344,
"learning_rate": 7.680894386773072e-05,
"loss": 4.6454,
"step": 413
},
{
"epoch": 0.6739570676387201,
"eval_loss": 4.691702365875244,
"eval_runtime": 89.1902,
"eval_samples_per_second": 5.606,
"eval_steps_per_second": 0.706,
"step": 416
},
{
"epoch": 0.6804374240583232,
"grad_norm": 23.988582611083984,
"learning_rate": 7.209139276251558e-05,
"loss": 4.6533,
"step": 420
},
{
"epoch": 0.6917780477926286,
"grad_norm": 30.93518829345703,
"learning_rate": 6.747710814644589e-05,
"loss": 4.7075,
"step": 427
},
{
"epoch": 0.6950182260024301,
"eval_loss": 4.641927719116211,
"eval_runtime": 91.2485,
"eval_samples_per_second": 5.48,
"eval_steps_per_second": 0.69,
"step": 429
},
{
"epoch": 0.703118671526934,
"grad_norm": 28.248205184936523,
"learning_rate": 6.297220617265435e-05,
"loss": 4.608,
"step": 434
},
{
"epoch": 0.7144592952612394,
"grad_norm": 16.064809799194336,
"learning_rate": 5.858265800950438e-05,
"loss": 4.6744,
"step": 441
},
{
"epoch": 0.7160793843661402,
"eval_loss": 4.58261251449585,
"eval_runtime": 13.1224,
"eval_samples_per_second": 38.103,
"eval_steps_per_second": 4.801,
"step": 442
},
{
"epoch": 0.7257999189955447,
"grad_norm": 12.395214080810547,
"learning_rate": 5.4314281925911634e-05,
"loss": 4.5883,
"step": 448
},
{
"epoch": 0.7371405427298502,
"grad_norm": 15.379476547241211,
"learning_rate": 5.0172735579330526e-05,
"loss": 4.5861,
"step": 455
},
{
"epoch": 0.7371405427298502,
"eval_loss": 4.579273700714111,
"eval_runtime": 84.8776,
"eval_samples_per_second": 5.891,
"eval_steps_per_second": 0.742,
"step": 455
},
{
"epoch": 0.7484811664641555,
"grad_norm": 10.24847412109375,
"learning_rate": 4.616350851662895e-05,
"loss": 4.5707,
"step": 462
},
{
"epoch": 0.7582017010935601,
"eval_loss": 4.59439754486084,
"eval_runtime": 92.5346,
"eval_samples_per_second": 5.403,
"eval_steps_per_second": 0.681,
"step": 468
},
{
"epoch": 0.759821790198461,
"grad_norm": 20.303340911865234,
"learning_rate": 4.229191489779047e-05,
"loss": 4.5872,
"step": 469
},
{
"epoch": 0.7711624139327663,
"grad_norm": 45.71290969848633,
"learning_rate": 3.8563086452088506e-05,
"loss": 4.5675,
"step": 476
},
{
"epoch": 0.7792628594572701,
"eval_loss": 4.561083793640137,
"eval_runtime": 13.1075,
"eval_samples_per_second": 38.146,
"eval_steps_per_second": 4.806,
"step": 481
},
{
"epoch": 0.7825030376670717,
"grad_norm": 39.32904052734375,
"learning_rate": 3.498196567606959e-05,
"loss": 4.5437,
"step": 483
},
{
"epoch": 0.7938436614013771,
"grad_norm": 18.427520751953125,
"learning_rate": 3.1553299282360966e-05,
"loss": 4.5286,
"step": 490
},
{
"epoch": 0.8003240178209802,
"eval_loss": 4.521579265594482,
"eval_runtime": 92.4799,
"eval_samples_per_second": 5.407,
"eval_steps_per_second": 0.681,
"step": 494
},
{
"epoch": 0.8051842851356824,
"grad_norm": 12.541765213012695,
"learning_rate": 2.828163190798644e-05,
"loss": 4.48,
"step": 497
},
{
"epoch": 0.8165249088699879,
"grad_norm": 14.985490798950195,
"learning_rate": 2.5171300090530106e-05,
"loss": 4.5302,
"step": 504
},
{
"epoch": 0.8213851761846902,
"eval_loss": 4.5139875411987305,
"eval_runtime": 88.2227,
"eval_samples_per_second": 5.667,
"eval_steps_per_second": 0.714,
"step": 507
},
{
"epoch": 0.8278655326042932,
"grad_norm": 28.087665557861328,
"learning_rate": 2.2226426520131734e-05,
"loss": 4.464,
"step": 511
},
{
"epoch": 0.8392061563385986,
"grad_norm": 17.76738739013672,
"learning_rate": 1.9450914574933725e-05,
"loss": 4.5191,
"step": 518
},
{
"epoch": 0.8424463345484001,
"eval_loss": 4.508421897888184,
"eval_runtime": 13.1321,
"eval_samples_per_second": 38.075,
"eval_steps_per_second": 4.797,
"step": 520
},
{
"epoch": 0.850546780072904,
"grad_norm": 28.11948585510254,
"learning_rate": 1.6848443147221828e-05,
"loss": 4.4829,
"step": 525
},
{
"epoch": 0.8618874038072094,
"grad_norm": 29.396305084228516,
"learning_rate": 1.4422461767118233e-05,
"loss": 4.5023,
"step": 532
},
{
"epoch": 0.8635074929121102,
"eval_loss": 4.487783432006836,
"eval_runtime": 91.7711,
"eval_samples_per_second": 5.448,
"eval_steps_per_second": 0.686,
"step": 533
},
{
"epoch": 0.8732280275415147,
"grad_norm": 16.643442153930664,
"learning_rate": 1.2176186030289936e-05,
"loss": 4.4684,
"step": 539
},
{
"epoch": 0.8845686512758202,
"grad_norm": 6.223151683807373,
"learning_rate": 1.011259333573326e-05,
"loss": 4.4661,
"step": 546
},
{
"epoch": 0.8845686512758202,
"eval_loss": 4.459321022033691,
"eval_runtime": 80.8756,
"eval_samples_per_second": 6.182,
"eval_steps_per_second": 0.779,
"step": 546
},
{
"epoch": 0.8959092750101255,
"grad_norm": 11.320446968078613,
"learning_rate": 8.234418939283866e-06,
"loss": 4.4942,
"step": 553
},
{
"epoch": 0.9056298096395302,
"eval_loss": 4.465951919555664,
"eval_runtime": 13.1729,
"eval_samples_per_second": 37.957,
"eval_steps_per_second": 4.783,
"step": 559
},
{
"epoch": 0.907249898744431,
"grad_norm": 14.810989379882812,
"learning_rate": 6.544152328083152e-06,
"loss": 4.4924,
"step": 560
},
{
"epoch": 0.9185905224787363,
"grad_norm": 7.798120975494385,
"learning_rate": 5.044033920806933e-06,
"loss": 4.4691,
"step": 567
},
{
"epoch": 0.9266909680032401,
"eval_loss": 4.443276405334473,
"eval_runtime": 87.1678,
"eval_samples_per_second": 5.736,
"eval_steps_per_second": 0.723,
"step": 572
},
{
"epoch": 0.9299311462130417,
"grad_norm": 8.354825973510742,
"learning_rate": 3.7360520980297514e-06,
"loss": 4.4501,
"step": 574
},
{
"epoch": 0.9412717699473471,
"grad_norm": 12.046449661254883,
"learning_rate": 2.6219405666614402e-06,
"loss": 4.4153,
"step": 581
},
{
"epoch": 0.9477521263669502,
"eval_loss": 4.453136920928955,
"eval_runtime": 96.3934,
"eval_samples_per_second": 5.187,
"eval_steps_per_second": 0.654,
"step": 585
},
{
"epoch": 0.9526123936816525,
"grad_norm": 10.31865406036377,
"learning_rate": 1.7031760619491353e-06,
"loss": 4.4468,
"step": 588
},
{
"epoch": 0.9639530174159578,
"grad_norm": 11.857120513916016,
"learning_rate": 9.809763900905875e-07,
"loss": 4.4609,
"step": 595
},
{
"epoch": 0.9688132847306602,
"eval_loss": 4.445005893707275,
"eval_runtime": 13.1103,
"eval_samples_per_second": 38.138,
"eval_steps_per_second": 4.805,
"step": 598
},
{
"epoch": 0.9752936411502633,
"grad_norm": 7.414619445800781,
"learning_rate": 4.562988140535073e-07,
"loss": 4.4396,
"step": 602
},
{
"epoch": 0.9866342648845686,
"grad_norm": 7.68168830871582,
"learning_rate": 1.298387847403437e-07,
"loss": 4.4444,
"step": 609
},
{
"epoch": 0.9898744430943702,
"eval_loss": 4.447805881500244,
"eval_runtime": 94.8799,
"eval_samples_per_second": 5.27,
"eval_steps_per_second": 0.664,
"step": 611
},
{
"epoch": 0.9979748886188741,
"grad_norm": 8.450008392333984,
"learning_rate": 2.029019180288527e-09,
"loss": 4.409,
"step": 616
}
],
"logging_steps": 7,
"max_steps": 617,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 13,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.001836583160381e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}