{
"best_metric": 4.9799017906188965,
"best_model_checkpoint": "/home/datta0/models/lora_final/gemma-2-9b_pct_reverse_r32/checkpoint-8",
"epoch": 0.9889738430583501,
"eval_steps": 8,
"global_step": 384,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002575452716297787,
"grad_norm": 10.302180290222168,
"learning_rate": 1.25e-05,
"loss": 2.1766,
"step": 1
},
{
"epoch": 0.010301810865191148,
"grad_norm": 7.941323280334473,
"learning_rate": 5e-05,
"loss": 2.2338,
"step": 4
},
{
"epoch": 0.020603621730382295,
"grad_norm": 11.684494018554688,
"learning_rate": 0.0001,
"loss": 2.5978,
"step": 8
},
{
"epoch": 0.020603621730382295,
"eval_loss": 4.9799017906188965,
"eval_runtime": 400.2297,
"eval_samples_per_second": 0.612,
"eval_steps_per_second": 0.612,
"step": 8
},
{
"epoch": 0.03090543259557344,
"grad_norm": 11.442267417907715,
"learning_rate": 9.997266286704631e-05,
"loss": 7.9986,
"step": 12
},
{
"epoch": 0.04120724346076459,
"grad_norm": 1.5122904777526855,
"learning_rate": 9.989068136093873e-05,
"loss": 11.0056,
"step": 16
},
{
"epoch": 0.04120724346076459,
"eval_loss": 11.464006423950195,
"eval_runtime": 164.6219,
"eval_samples_per_second": 1.488,
"eval_steps_per_second": 1.488,
"step": 16
},
{
"epoch": 0.05150905432595573,
"grad_norm": 1.6047507524490356,
"learning_rate": 9.975414512725057e-05,
"loss": 11.4322,
"step": 20
},
{
"epoch": 0.06181086519114688,
"grad_norm": 1.1092612743377686,
"learning_rate": 9.956320346634876e-05,
"loss": 11.7215,
"step": 24
},
{
"epoch": 0.06181086519114688,
"eval_loss": 11.87649917602539,
"eval_runtime": 121.4683,
"eval_samples_per_second": 2.017,
"eval_steps_per_second": 2.017,
"step": 24
},
{
"epoch": 0.07211267605633803,
"grad_norm": 0.7658454179763794,
"learning_rate": 9.931806517013612e-05,
"loss": 11.7902,
"step": 28
},
{
"epoch": 0.08241448692152918,
"grad_norm": 0.6134170889854431,
"learning_rate": 9.901899829374047e-05,
"loss": 11.8793,
"step": 32
},
{
"epoch": 0.08241448692152918,
"eval_loss": 11.905890464782715,
"eval_runtime": 166.1275,
"eval_samples_per_second": 1.475,
"eval_steps_per_second": 1.475,
"step": 32
},
{
"epoch": 0.09271629778672032,
"grad_norm": 0.7564122080802917,
"learning_rate": 9.86663298624003e-05,
"loss": 11.9183,
"step": 36
},
{
"epoch": 0.10301810865191147,
"grad_norm": 0.5781177282333374,
"learning_rate": 9.826044551386744e-05,
"loss": 11.8739,
"step": 40
},
{
"epoch": 0.10301810865191147,
"eval_loss": 11.97806167602539,
"eval_runtime": 119.7243,
"eval_samples_per_second": 2.046,
"eval_steps_per_second": 2.046,
"step": 40
},
{
"epoch": 0.11331991951710262,
"grad_norm": 0.399111807346344,
"learning_rate": 9.780178907671789e-05,
"loss": 11.9112,
"step": 44
},
{
"epoch": 0.12362173038229377,
"grad_norm": 0.5412446856498718,
"learning_rate": 9.729086208503174e-05,
"loss": 11.8763,
"step": 48
},
{
"epoch": 0.12362173038229377,
"eval_loss": 11.9486665725708,
"eval_runtime": 167.3012,
"eval_samples_per_second": 1.464,
"eval_steps_per_second": 1.464,
"step": 48
},
{
"epoch": 0.1339235412474849,
"grad_norm": 0.324489563703537,
"learning_rate": 9.672822322997305e-05,
"loss": 11.8606,
"step": 52
},
{
"epoch": 0.14422535211267606,
"grad_norm": 0.22089843451976776,
"learning_rate": 9.611448774886924e-05,
"loss": 11.8231,
"step": 56
},
{
"epoch": 0.14422535211267606,
"eval_loss": 11.828161239624023,
"eval_runtime": 120.687,
"eval_samples_per_second": 2.03,
"eval_steps_per_second": 2.03,
"step": 56
},
{
"epoch": 0.1545271629778672,
"grad_norm": 0.22084373235702515,
"learning_rate": 9.545032675245813e-05,
"loss": 11.7885,
"step": 60
},
{
"epoch": 0.16482897384305836,
"grad_norm": 0.5548378229141235,
"learning_rate": 9.473646649103818e-05,
"loss": 11.7758,
"step": 64
},
{
"epoch": 0.16482897384305836,
"eval_loss": 11.76635456085205,
"eval_runtime": 119.5843,
"eval_samples_per_second": 2.049,
"eval_steps_per_second": 2.049,
"step": 64
},
{
"epoch": 0.1751307847082495,
"grad_norm": 0.22932168841362,
"learning_rate": 9.397368756032445e-05,
"loss": 11.7211,
"step": 68
},
{
"epoch": 0.18543259557344063,
"grad_norm": 0.33755362033843994,
"learning_rate": 9.316282404787871e-05,
"loss": 11.8011,
"step": 72
},
{
"epoch": 0.18543259557344063,
"eval_loss": 11.437198638916016,
"eval_runtime": 162.9209,
"eval_samples_per_second": 1.504,
"eval_steps_per_second": 1.504,
"step": 72
},
{
"epoch": 0.1957344064386318,
"grad_norm": 0.2544913589954376,
"learning_rate": 9.230476262104677e-05,
"loss": 11.7226,
"step": 76
},
{
"epoch": 0.20603621730382293,
"grad_norm": 0.2166949361562729,
"learning_rate": 9.140044155740101e-05,
"loss": 11.6991,
"step": 80
},
{
"epoch": 0.20603621730382293,
"eval_loss": 11.733440399169922,
"eval_runtime": 121.9998,
"eval_samples_per_second": 2.008,
"eval_steps_per_second": 2.008,
"step": 80
},
{
"epoch": 0.2163380281690141,
"grad_norm": 0.1701814830303192,
"learning_rate": 9.045084971874738e-05,
"loss": 11.6466,
"step": 84
},
{
"epoch": 0.22663983903420523,
"grad_norm": 0.2655963599681854,
"learning_rate": 8.945702546981969e-05,
"loss": 11.8108,
"step": 88
},
{
"epoch": 0.22663983903420523,
"eval_loss": 11.500480651855469,
"eval_runtime": 164.3823,
"eval_samples_per_second": 1.49,
"eval_steps_per_second": 1.49,
"step": 88
},
{
"epoch": 0.23694164989939637,
"grad_norm": 0.162321999669075,
"learning_rate": 8.842005554284296e-05,
"loss": 11.6505,
"step": 92
},
{
"epoch": 0.24724346076458753,
"grad_norm": 0.21160480380058289,
"learning_rate": 8.73410738492077e-05,
"loss": 11.6519,
"step": 96
},
{
"epoch": 0.24724346076458753,
"eval_loss": 11.594391822814941,
"eval_runtime": 120.0195,
"eval_samples_per_second": 2.041,
"eval_steps_per_second": 2.041,
"step": 96
},
{
"epoch": 0.2575452716297787,
"grad_norm": 0.2764425277709961,
"learning_rate": 8.622126023955446e-05,
"loss": 11.6293,
"step": 100
},
{
"epoch": 0.2678470824949698,
"grad_norm": 0.179239422082901,
"learning_rate": 8.506183921362443e-05,
"loss": 11.6905,
"step": 104
},
{
"epoch": 0.2678470824949698,
"eval_loss": 11.594428062438965,
"eval_runtime": 166.0172,
"eval_samples_per_second": 1.476,
"eval_steps_per_second": 1.476,
"step": 104
},
{
"epoch": 0.27814889336016096,
"grad_norm": 0.2203957438468933,
"learning_rate": 8.386407858128706e-05,
"loss": 11.564,
"step": 108
},
{
"epoch": 0.28845070422535213,
"grad_norm": 0.1474369317293167,
"learning_rate": 8.262928807620843e-05,
"loss": 11.6003,
"step": 112
},
{
"epoch": 0.28845070422535213,
"eval_loss": 11.591402053833008,
"eval_runtime": 120.0268,
"eval_samples_per_second": 2.041,
"eval_steps_per_second": 2.041,
"step": 112
},
{
"epoch": 0.29875251509054324,
"grad_norm": 0.18921791017055511,
"learning_rate": 8.135881792367686e-05,
"loss": 11.5716,
"step": 116
},
{
"epoch": 0.3090543259557344,
"grad_norm": 0.1806280016899109,
"learning_rate": 8.005405736415126e-05,
"loss": 11.5813,
"step": 120
},
{
"epoch": 0.3090543259557344,
"eval_loss": 11.568363189697266,
"eval_runtime": 121.0475,
"eval_samples_per_second": 2.024,
"eval_steps_per_second": 2.024,
"step": 120
},
{
"epoch": 0.31935613682092556,
"grad_norm": 0.1429259330034256,
"learning_rate": 7.871643313414718e-05,
"loss": 11.5332,
"step": 124
},
{
"epoch": 0.3296579476861167,
"grad_norm": 0.34623807668685913,
"learning_rate": 7.734740790612136e-05,
"loss": 11.5493,
"step": 128
},
{
"epoch": 0.3296579476861167,
"eval_loss": 11.755960464477539,
"eval_runtime": 165.4161,
"eval_samples_per_second": 1.481,
"eval_steps_per_second": 1.481,
"step": 128
},
{
"epoch": 0.33995975855130783,
"grad_norm": 0.12925852835178375,
"learning_rate": 7.594847868906076e-05,
"loss": 11.6452,
"step": 132
},
{
"epoch": 0.350261569416499,
"grad_norm": 0.2058892697095871,
"learning_rate": 7.452117519152542e-05,
"loss": 11.5458,
"step": 136
},
{
"epoch": 0.350261569416499,
"eval_loss": 11.456594467163086,
"eval_runtime": 117.7525,
"eval_samples_per_second": 2.081,
"eval_steps_per_second": 2.081,
"step": 136
},
{
"epoch": 0.36056338028169016,
"grad_norm": 0.1822740137577057,
"learning_rate": 7.30670581489344e-05,
"loss": 11.549,
"step": 140
},
{
"epoch": 0.37086519114688127,
"grad_norm": 0.163137748837471,
"learning_rate": 7.158771761692464e-05,
"loss": 11.5838,
"step": 144
},
{
"epoch": 0.37086519114688127,
"eval_loss": 11.433055877685547,
"eval_runtime": 165.2007,
"eval_samples_per_second": 1.483,
"eval_steps_per_second": 1.483,
"step": 144
},
{
"epoch": 0.38116700201207243,
"grad_norm": 0.15767963230609894,
"learning_rate": 7.008477123264848e-05,
"loss": 11.5418,
"step": 148
},
{
"epoch": 0.3914688128772636,
"grad_norm": 0.14546583592891693,
"learning_rate": 6.855986244591104e-05,
"loss": 11.4815,
"step": 152
},
{
"epoch": 0.3914688128772636,
"eval_loss": 11.51737117767334,
"eval_runtime": 119.769,
"eval_samples_per_second": 2.046,
"eval_steps_per_second": 2.046,
"step": 152
},
{
"epoch": 0.4017706237424547,
"grad_norm": 0.1699499934911728,
"learning_rate": 6.701465872208216e-05,
"loss": 11.5552,
"step": 156
},
{
"epoch": 0.41207243460764587,
"grad_norm": 0.1260180026292801,
"learning_rate": 6.545084971874738e-05,
"loss": 11.5369,
"step": 160
},
{
"epoch": 0.41207243460764587,
"eval_loss": 11.527098655700684,
"eval_runtime": 161.8318,
"eval_samples_per_second": 1.514,
"eval_steps_per_second": 1.514,
"step": 160
},
{
"epoch": 0.42237424547283703,
"grad_norm": 0.15433725714683533,
"learning_rate": 6.387014543809223e-05,
"loss": 11.4842,
"step": 164
},
{
"epoch": 0.4326760563380282,
"grad_norm": 0.12766706943511963,
"learning_rate": 6.227427435703997e-05,
"loss": 11.4617,
"step": 168
},
{
"epoch": 0.4326760563380282,
"eval_loss": 11.539175987243652,
"eval_runtime": 121.7844,
"eval_samples_per_second": 2.012,
"eval_steps_per_second": 2.012,
"step": 168
},
{
"epoch": 0.4429778672032193,
"grad_norm": 0.16595180332660675,
"learning_rate": 6.066498153718735e-05,
"loss": 11.5383,
"step": 172
},
{
"epoch": 0.45327967806841046,
"grad_norm": 0.25058212876319885,
"learning_rate": 5.90440267166055e-05,
"loss": 11.4399,
"step": 176
},
{
"epoch": 0.45327967806841046,
"eval_loss": 11.36911678314209,
"eval_runtime": 121.0957,
"eval_samples_per_second": 2.023,
"eval_steps_per_second": 2.023,
"step": 176
},
{
"epoch": 0.4635814889336016,
"grad_norm": 0.15986469388008118,
"learning_rate": 5.74131823855921e-05,
"loss": 11.4953,
"step": 180
},
{
"epoch": 0.47388329979879273,
"grad_norm": 20.536422729492188,
"learning_rate": 5.577423184847932e-05,
"loss": 11.3199,
"step": 184
},
{
"epoch": 0.47388329979879273,
"eval_loss": 10.98233413696289,
"eval_runtime": 162.7087,
"eval_samples_per_second": 1.506,
"eval_steps_per_second": 1.506,
"step": 184
},
{
"epoch": 0.4841851106639839,
"grad_norm": 5.964752674102783,
"learning_rate": 5.4128967273616625e-05,
"loss": 10.8849,
"step": 188
},
{
"epoch": 0.49448692152917506,
"grad_norm": 6.385639190673828,
"learning_rate": 5.247918773366112e-05,
"loss": 10.6547,
"step": 192
},
{
"epoch": 0.49448692152917506,
"eval_loss": 10.066610336303711,
"eval_runtime": 120.5492,
"eval_samples_per_second": 2.032,
"eval_steps_per_second": 2.032,
"step": 192
},
{
"epoch": 0.5047887323943662,
"grad_norm": 12.043314933776855,
"learning_rate": 5.0826697238317935e-05,
"loss": 9.5631,
"step": 196
},
{
"epoch": 0.5150905432595574,
"grad_norm": 7.537539482116699,
"learning_rate": 4.917330276168208e-05,
"loss": 8.8163,
"step": 200
},
{
"epoch": 0.5150905432595574,
"eval_loss": 8.763765335083008,
"eval_runtime": 164.7981,
"eval_samples_per_second": 1.487,
"eval_steps_per_second": 1.487,
"step": 200
},
{
"epoch": 0.5253923541247485,
"grad_norm": 1.9154856204986572,
"learning_rate": 4.7520812266338885e-05,
"loss": 9.183,
"step": 204
},
{
"epoch": 0.5356941649899396,
"grad_norm": 4.3046746253967285,
"learning_rate": 4.5871032726383386e-05,
"loss": 9.5635,
"step": 208
},
{
"epoch": 0.5356941649899396,
"eval_loss": 8.515321731567383,
"eval_runtime": 121.1046,
"eval_samples_per_second": 2.023,
"eval_steps_per_second": 2.023,
"step": 208
},
{
"epoch": 0.5459959758551308,
"grad_norm": 3.1673686504364014,
"learning_rate": 4.4225768151520694e-05,
"loss": 8.7257,
"step": 212
},
{
"epoch": 0.5562977867203219,
"grad_norm": 3.7206709384918213,
"learning_rate": 4.2586817614407895e-05,
"loss": 8.7862,
"step": 216
},
{
"epoch": 0.5562977867203219,
"eval_loss": 9.218582153320312,
"eval_runtime": 165.2874,
"eval_samples_per_second": 1.482,
"eval_steps_per_second": 1.482,
"step": 216
},
{
"epoch": 0.566599597585513,
"grad_norm": 1.9827094078063965,
"learning_rate": 4.095597328339452e-05,
"loss": 9.8043,
"step": 220
},
{
"epoch": 0.5769014084507043,
"grad_norm": 3.734112501144409,
"learning_rate": 3.933501846281267e-05,
"loss": 10.2774,
"step": 224
},
{
"epoch": 0.5769014084507043,
"eval_loss": 10.383406639099121,
"eval_runtime": 119.8206,
"eval_samples_per_second": 2.045,
"eval_steps_per_second": 2.045,
"step": 224
},
{
"epoch": 0.5872032193158954,
"grad_norm": 8.66118049621582,
"learning_rate": 3.772572564296005e-05,
"loss": 10.1576,
"step": 228
},
{
"epoch": 0.5975050301810865,
"grad_norm": 4.829358100891113,
"learning_rate": 3.612985456190778e-05,
"loss": 9.7932,
"step": 232
},
{
"epoch": 0.5975050301810865,
"eval_loss": 9.797175407409668,
"eval_runtime": 120.075,
"eval_samples_per_second": 2.04,
"eval_steps_per_second": 2.04,
"step": 232
},
{
"epoch": 0.6078068410462777,
"grad_norm": 2.6055920124053955,
"learning_rate": 3.4549150281252636e-05,
"loss": 9.7817,
"step": 236
},
{
"epoch": 0.6181086519114688,
"grad_norm": 5.202971458435059,
"learning_rate": 3.298534127791785e-05,
"loss": 9.5421,
"step": 240
},
{
"epoch": 0.6181086519114688,
"eval_loss": 9.684460639953613,
"eval_runtime": 164.0692,
"eval_samples_per_second": 1.493,
"eval_steps_per_second": 1.493,
"step": 240
},
{
"epoch": 0.6284104627766599,
"grad_norm": 4.3098225593566895,
"learning_rate": 3.144013755408895e-05,
"loss": 9.574,
"step": 244
},
{
"epoch": 0.6387122736418511,
"grad_norm": 4.262834072113037,
"learning_rate": 2.991522876735154e-05,
"loss": 9.5401,
"step": 248
},
{
"epoch": 0.6387122736418511,
"eval_loss": 9.343782424926758,
"eval_runtime": 120.8651,
"eval_samples_per_second": 2.027,
"eval_steps_per_second": 2.027,
"step": 248
},
{
"epoch": 0.6490140845070422,
"grad_norm": 2.082047462463379,
"learning_rate": 2.8412282383075363e-05,
"loss": 9.9613,
"step": 252
},
{
"epoch": 0.6593158953722335,
"grad_norm": 0.8725143074989319,
"learning_rate": 2.693294185106562e-05,
"loss": 10.9001,
"step": 256
},
{
"epoch": 0.6593158953722335,
"eval_loss": 10.607041358947754,
"eval_runtime": 165.0213,
"eval_samples_per_second": 1.485,
"eval_steps_per_second": 1.485,
"step": 256
},
{
"epoch": 0.6696177062374246,
"grad_norm": 5.7337517738342285,
"learning_rate": 2.547882480847461e-05,
"loss": 10.5149,
"step": 260
},
{
"epoch": 0.6799195171026157,
"grad_norm": 2.850618839263916,
"learning_rate": 2.405152131093926e-05,
"loss": 9.959,
"step": 264
},
{
"epoch": 0.6799195171026157,
"eval_loss": 9.617180824279785,
"eval_runtime": 121.3426,
"eval_samples_per_second": 2.019,
"eval_steps_per_second": 2.019,
"step": 264
},
{
"epoch": 0.6902213279678069,
"grad_norm": 4.845343112945557,
"learning_rate": 2.2652592093878666e-05,
"loss": 9.2827,
"step": 268
},
{
"epoch": 0.700523138832998,
"grad_norm": 2.2851219177246094,
"learning_rate": 2.128356686585282e-05,
"loss": 9.5409,
"step": 272
},
{
"epoch": 0.700523138832998,
"eval_loss": 10.476217269897461,
"eval_runtime": 164.2642,
"eval_samples_per_second": 1.491,
"eval_steps_per_second": 1.491,
"step": 272
},
{
"epoch": 0.7108249496981891,
"grad_norm": 1.766412615776062,
"learning_rate": 1.9945942635848748e-05,
"loss": 10.9182,
"step": 276
},
{
"epoch": 0.7211267605633803,
"grad_norm": 1.4967565536499023,
"learning_rate": 1.8641182076323148e-05,
"loss": 10.8074,
"step": 280
},
{
"epoch": 0.7211267605633803,
"eval_loss": 10.487205505371094,
"eval_runtime": 119.2743,
"eval_samples_per_second": 2.054,
"eval_steps_per_second": 2.054,
"step": 280
},
{
"epoch": 0.7314285714285714,
"grad_norm": 6.666006088256836,
"learning_rate": 1.7370711923791567e-05,
"loss": 10.3232,
"step": 284
},
{
"epoch": 0.7417303822937625,
"grad_norm": 4.8097381591796875,
"learning_rate": 1.6135921418712956e-05,
"loss": 9.1645,
"step": 288
},
{
"epoch": 0.7417303822937625,
"eval_loss": 7.656736373901367,
"eval_runtime": 121.5628,
"eval_samples_per_second": 2.015,
"eval_steps_per_second": 2.015,
"step": 288
},
{
"epoch": 0.7520321931589538,
"grad_norm": 5.842891693115234,
"learning_rate": 1.4938160786375572e-05,
"loss": 7.4315,
"step": 292
},
{
"epoch": 0.7623340040241449,
"grad_norm": 4.397885322570801,
"learning_rate": 1.3778739760445552e-05,
"loss": 8.1072,
"step": 296
},
{
"epoch": 0.7623340040241449,
"eval_loss": 8.791058540344238,
"eval_runtime": 165.8273,
"eval_samples_per_second": 1.477,
"eval_steps_per_second": 1.477,
"step": 296
},
{
"epoch": 0.772635814889336,
"grad_norm": 2.3736143112182617,
"learning_rate": 1.2658926150792322e-05,
"loss": 9.0859,
"step": 300
},
{
"epoch": 0.7829376257545272,
"grad_norm": 3.1305336952209473,
"learning_rate": 1.157994445715706e-05,
"loss": 9.7069,
"step": 304
},
{
"epoch": 0.7829376257545272,
"eval_loss": 10.004293441772461,
"eval_runtime": 119.512,
"eval_samples_per_second": 2.05,
"eval_steps_per_second": 2.05,
"step": 304
},
{
"epoch": 0.7932394366197183,
"grad_norm": 1.8686987161636353,
"learning_rate": 1.0542974530180327e-05,
"loss": 10.0589,
"step": 308
},
{
"epoch": 0.8035412474849094,
"grad_norm": 2.3722739219665527,
"learning_rate": 9.549150281252633e-06,
"loss": 10.0752,
"step": 312
},
{
"epoch": 0.8035412474849094,
"eval_loss": 10.165539741516113,
"eval_runtime": 166.7318,
"eval_samples_per_second": 1.469,
"eval_steps_per_second": 1.469,
"step": 312
},
{
"epoch": 0.8138430583501006,
"grad_norm": 2.35557222366333,
"learning_rate": 8.599558442598998e-06,
"loss": 10.2222,
"step": 316
},
{
"epoch": 0.8241448692152917,
"grad_norm": 3.20621395111084,
"learning_rate": 7.695237378953223e-06,
"loss": 9.9734,
"step": 320
},
{
"epoch": 0.8241448692152917,
"eval_loss": 9.964385032653809,
"eval_runtime": 120.9633,
"eval_samples_per_second": 2.025,
"eval_steps_per_second": 2.025,
"step": 320
},
{
"epoch": 0.834446680080483,
"grad_norm": 4.180117130279541,
"learning_rate": 6.837175952121306e-06,
"loss": 10.0431,
"step": 324
},
{
"epoch": 0.8447484909456741,
"grad_norm": 3.4952783584594727,
"learning_rate": 6.026312439675552e-06,
"loss": 9.6722,
"step": 328
},
{
"epoch": 0.8447484909456741,
"eval_loss": 9.780319213867188,
"eval_runtime": 167.9654,
"eval_samples_per_second": 1.459,
"eval_steps_per_second": 1.459,
"step": 328
},
{
"epoch": 0.8550503018108652,
"grad_norm": 3.512146472930908,
"learning_rate": 5.263533508961827e-06,
"loss": 9.7029,
"step": 332
},
{
"epoch": 0.8653521126760564,
"grad_norm": 2.8218512535095215,
"learning_rate": 4.549673247541875e-06,
"loss": 9.8279,
"step": 336
},
{
"epoch": 0.8653521126760564,
"eval_loss": 9.582645416259766,
"eval_runtime": 120.4821,
"eval_samples_per_second": 2.033,
"eval_steps_per_second": 2.033,
"step": 336
},
{
"epoch": 0.8756539235412475,
"grad_norm": 7.174349784851074,
"learning_rate": 3.885512251130763e-06,
"loss": 9.5723,
"step": 340
},
{
"epoch": 0.8859557344064386,
"grad_norm": 2.031242847442627,
"learning_rate": 3.271776770026963e-06,
"loss": 9.6714,
"step": 344
},
{
"epoch": 0.8859557344064386,
"eval_loss": 9.553346633911133,
"eval_runtime": 124.1887,
"eval_samples_per_second": 1.973,
"eval_steps_per_second": 1.973,
"step": 344
},
{
"epoch": 0.8962575452716298,
"grad_norm": 12.41804313659668,
"learning_rate": 2.7091379149682685e-06,
"loss": 9.6239,
"step": 348
},
{
"epoch": 0.9065593561368209,
"grad_norm": 5.308027744293213,
"learning_rate": 2.1982109232821178e-06,
"loss": 9.655,
"step": 352
},
{
"epoch": 0.9065593561368209,
"eval_loss": 9.633858680725098,
"eval_runtime": 169.9501,
"eval_samples_per_second": 1.442,
"eval_steps_per_second": 1.442,
"step": 352
},
{
"epoch": 0.916861167002012,
"grad_norm": 3.773082971572876,
"learning_rate": 1.7395544861325718e-06,
"loss": 9.7297,
"step": 356
},
{
"epoch": 0.9271629778672033,
"grad_norm": 3.001129150390625,
"learning_rate": 1.333670137599713e-06,
"loss": 9.7184,
"step": 360
},
{
"epoch": 0.9271629778672033,
"eval_loss": 9.7650728225708,
"eval_runtime": 119.9288,
"eval_samples_per_second": 2.043,
"eval_steps_per_second": 2.043,
"step": 360
},
{
"epoch": 0.9374647887323944,
"grad_norm": 4.141623497009277,
"learning_rate": 9.810017062595322e-07,
"loss": 9.9158,
"step": 364
},
{
"epoch": 0.9477665995975855,
"grad_norm": 2.350005626678467,
"learning_rate": 6.819348298638839e-07,
"loss": 9.6142,
"step": 368
},
{
"epoch": 0.9477665995975855,
"eval_loss": 9.853599548339844,
"eval_runtime": 167.3149,
"eval_samples_per_second": 1.464,
"eval_steps_per_second": 1.464,
"step": 368
},
{
"epoch": 0.9580684104627767,
"grad_norm": 3.0426127910614014,
"learning_rate": 4.367965336512403e-07,
"loss": 9.7248,
"step": 372
},
{
"epoch": 0.9683702213279678,
"grad_norm": 3.860062599182129,
"learning_rate": 2.458548727494292e-07,
"loss": 9.9249,
"step": 376
},
{
"epoch": 0.9683702213279678,
"eval_loss": 9.89064884185791,
"eval_runtime": 121.6599,
"eval_samples_per_second": 2.014,
"eval_steps_per_second": 2.014,
"step": 376
},
{
"epoch": 0.9786720321931589,
"grad_norm": 1.843009114265442,
"learning_rate": 1.0931863906127327e-07,
"loss": 9.7664,
"step": 380
},
{
"epoch": 0.9889738430583501,
"grad_norm": 5.112157344818115,
"learning_rate": 2.7337132953697554e-08,
"loss": 9.8654,
"step": 384
},
{
"epoch": 0.9889738430583501,
"eval_loss": 9.901961326599121,
"eval_runtime": 166.3605,
"eval_samples_per_second": 1.473,
"eval_steps_per_second": 1.473,
"step": 384
}
],
"logging_steps": 4,
"max_steps": 388,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 8,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.5203300693467136e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}