{
"best_metric": 2.9037797451019287,
"best_model_checkpoint": "/home/datta0/models/lora_final/gemma-2-9b_pct_ortho_r32/checkpoint-8",
"epoch": 0.9889738430583501,
"eval_steps": 8,
"global_step": 384,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002575452716297787,
"grad_norm": 3.382777214050293,
"learning_rate": 1.25e-05,
"loss": 2.1767,
"step": 1
},
{
"epoch": 0.010301810865191148,
"grad_norm": 3.251157760620117,
"learning_rate": 5e-05,
"loss": 2.2653,
"step": 4
},
{
"epoch": 0.020603621730382295,
"grad_norm": 3.237678050994873,
"learning_rate": 0.0001,
"loss": 2.3352,
"step": 8
},
{
"epoch": 0.020603621730382295,
"eval_loss": 2.9037797451019287,
"eval_runtime": 650.9667,
"eval_samples_per_second": 0.376,
"eval_steps_per_second": 0.376,
"step": 8
},
{
"epoch": 0.03090543259557344,
"grad_norm": 5.404860019683838,
"learning_rate": 9.997266286704631e-05,
"loss": 5.7565,
"step": 12
},
{
"epoch": 0.04120724346076459,
"grad_norm": 0.5222948789596558,
"learning_rate": 9.989068136093873e-05,
"loss": 11.3417,
"step": 16
},
{
"epoch": 0.04120724346076459,
"eval_loss": 11.908318519592285,
"eval_runtime": 615.5742,
"eval_samples_per_second": 0.398,
"eval_steps_per_second": 0.398,
"step": 16
},
{
"epoch": 0.05150905432595573,
"grad_norm": 0.5839648842811584,
"learning_rate": 9.975414512725057e-05,
"loss": 11.9397,
"step": 20
},
{
"epoch": 0.06181086519114688,
"grad_norm": 0.2988390028476715,
"learning_rate": 9.956320346634876e-05,
"loss": 11.9918,
"step": 24
},
{
"epoch": 0.06181086519114688,
"eval_loss": 11.977351188659668,
"eval_runtime": 124.2673,
"eval_samples_per_second": 1.972,
"eval_steps_per_second": 1.972,
"step": 24
},
{
"epoch": 0.07211267605633803,
"grad_norm": 0.4247038662433624,
"learning_rate": 9.931806517013612e-05,
"loss": 11.9842,
"step": 28
},
{
"epoch": 0.08241448692152918,
"grad_norm": 0.1009724959731102,
"learning_rate": 9.901899829374047e-05,
"loss": 11.9549,
"step": 32
},
{
"epoch": 0.08241448692152918,
"eval_loss": 11.967530250549316,
"eval_runtime": 155.0418,
"eval_samples_per_second": 1.58,
"eval_steps_per_second": 1.58,
"step": 32
},
{
"epoch": 0.09271629778672032,
"grad_norm": 0.15956225991249084,
"learning_rate": 9.86663298624003e-05,
"loss": 11.9835,
"step": 36
},
{
"epoch": 0.10301810865191147,
"grad_norm": 0.08006568253040314,
"learning_rate": 9.826044551386744e-05,
"loss": 11.974,
"step": 40
},
{
"epoch": 0.10301810865191147,
"eval_loss": 11.973628997802734,
"eval_runtime": 119.2748,
"eval_samples_per_second": 2.054,
"eval_steps_per_second": 2.054,
"step": 40
},
{
"epoch": 0.11331991951710262,
"grad_norm": 0.0759950578212738,
"learning_rate": 9.780178907671789e-05,
"loss": 11.971,
"step": 44
},
{
"epoch": 0.12362173038229377,
"grad_norm": 0.06046704575419426,
"learning_rate": 9.729086208503174e-05,
"loss": 11.9403,
"step": 48
},
{
"epoch": 0.12362173038229377,
"eval_loss": 11.946824073791504,
"eval_runtime": 176.6154,
"eval_samples_per_second": 1.387,
"eval_steps_per_second": 1.387,
"step": 48
},
{
"epoch": 0.1339235412474849,
"grad_norm": 0.07782606780529022,
"learning_rate": 9.672822322997305e-05,
"loss": 11.9184,
"step": 52
},
{
"epoch": 0.14422535211267606,
"grad_norm": 0.04233577847480774,
"learning_rate": 9.611448774886924e-05,
"loss": 11.9321,
"step": 56
},
{
"epoch": 0.14422535211267606,
"eval_loss": 11.880873680114746,
"eval_runtime": 115.3733,
"eval_samples_per_second": 2.124,
"eval_steps_per_second": 2.124,
"step": 56
},
{
"epoch": 0.1545271629778672,
"grad_norm": 0.042002614587545395,
"learning_rate": 9.545032675245813e-05,
"loss": 11.8934,
"step": 60
},
{
"epoch": 0.16482897384305836,
"grad_norm": 0.047231078147888184,
"learning_rate": 9.473646649103818e-05,
"loss": 11.876,
"step": 64
},
{
"epoch": 0.16482897384305836,
"eval_loss": 11.821806907653809,
"eval_runtime": 122.0477,
"eval_samples_per_second": 2.007,
"eval_steps_per_second": 2.007,
"step": 64
},
{
"epoch": 0.1751307847082495,
"grad_norm": 0.05491908639669418,
"learning_rate": 9.397368756032445e-05,
"loss": 11.789,
"step": 68
},
{
"epoch": 0.18543259557344063,
"grad_norm": 0.05340013653039932,
"learning_rate": 9.316282404787871e-05,
"loss": 11.7886,
"step": 72
},
{
"epoch": 0.18543259557344063,
"eval_loss": 11.734496116638184,
"eval_runtime": 177.6856,
"eval_samples_per_second": 1.379,
"eval_steps_per_second": 1.379,
"step": 72
},
{
"epoch": 0.1957344064386318,
"grad_norm": 0.04357607662677765,
"learning_rate": 9.230476262104677e-05,
"loss": 11.7139,
"step": 76
},
{
"epoch": 0.20603621730382293,
"grad_norm": 0.05346447974443436,
"learning_rate": 9.140044155740101e-05,
"loss": 11.6471,
"step": 80
},
{
"epoch": 0.20603621730382293,
"eval_loss": 11.623627662658691,
"eval_runtime": 116.4544,
"eval_samples_per_second": 2.104,
"eval_steps_per_second": 2.104,
"step": 80
},
{
"epoch": 0.2163380281690141,
"grad_norm": 0.071262426674366,
"learning_rate": 9.045084971874738e-05,
"loss": 11.6208,
"step": 84
},
{
"epoch": 0.22663983903420523,
"grad_norm": 0.04146931692957878,
"learning_rate": 8.945702546981969e-05,
"loss": 11.5982,
"step": 88
},
{
"epoch": 0.22663983903420523,
"eval_loss": 11.371801376342773,
"eval_runtime": 163.8682,
"eval_samples_per_second": 1.495,
"eval_steps_per_second": 1.495,
"step": 88
},
{
"epoch": 0.23694164989939637,
"grad_norm": 0.04036790132522583,
"learning_rate": 8.842005554284296e-05,
"loss": 11.6333,
"step": 92
},
{
"epoch": 0.24724346076458753,
"grad_norm": 0.03238769620656967,
"learning_rate": 8.73410738492077e-05,
"loss": 11.7088,
"step": 96
},
{
"epoch": 0.24724346076458753,
"eval_loss": 11.679157257080078,
"eval_runtime": 128.5572,
"eval_samples_per_second": 1.906,
"eval_steps_per_second": 1.906,
"step": 96
},
{
"epoch": 0.2575452716297787,
"grad_norm": 0.06817040592432022,
"learning_rate": 8.622126023955446e-05,
"loss": 11.7336,
"step": 100
},
{
"epoch": 0.2678470824949698,
"grad_norm": 0.042927809059619904,
"learning_rate": 8.506183921362443e-05,
"loss": 11.7296,
"step": 104
},
{
"epoch": 0.2678470824949698,
"eval_loss": 11.688301086425781,
"eval_runtime": 164.6581,
"eval_samples_per_second": 1.488,
"eval_steps_per_second": 1.488,
"step": 104
},
{
"epoch": 0.27814889336016096,
"grad_norm": 0.057764846831560135,
"learning_rate": 8.386407858128706e-05,
"loss": 11.681,
"step": 108
},
{
"epoch": 0.28845070422535213,
"grad_norm": 0.04680996388196945,
"learning_rate": 8.262928807620843e-05,
"loss": 11.6508,
"step": 112
},
{
"epoch": 0.28845070422535213,
"eval_loss": 11.441954612731934,
"eval_runtime": 400.4322,
"eval_samples_per_second": 0.612,
"eval_steps_per_second": 0.612,
"step": 112
},
{
"epoch": 0.29875251509054324,
"grad_norm": 0.0837872251868248,
"learning_rate": 8.135881792367686e-05,
"loss": 11.4868,
"step": 116
},
{
"epoch": 0.3090543259557344,
"grad_norm": 1.4256629943847656,
"learning_rate": 8.005405736415126e-05,
"loss": 10.7655,
"step": 120
},
{
"epoch": 0.3090543259557344,
"eval_loss": 8.817360877990723,
"eval_runtime": 405.6339,
"eval_samples_per_second": 0.604,
"eval_steps_per_second": 0.604,
"step": 120
},
{
"epoch": 0.31935613682092556,
"grad_norm": 0.6819819211959839,
"learning_rate": 7.871643313414718e-05,
"loss": 9.2146,
"step": 124
},
{
"epoch": 0.3296579476861167,
"grad_norm": 1.2133327722549438,
"learning_rate": 7.734740790612136e-05,
"loss": 8.5075,
"step": 128
},
{
"epoch": 0.3296579476861167,
"eval_loss": 9.056794166564941,
"eval_runtime": 582.8769,
"eval_samples_per_second": 0.42,
"eval_steps_per_second": 0.42,
"step": 128
},
{
"epoch": 0.33995975855130783,
"grad_norm": 1.0759503841400146,
"learning_rate": 7.594847868906076e-05,
"loss": 8.4476,
"step": 132
},
{
"epoch": 0.350261569416499,
"grad_norm": 0.6876762509346008,
"learning_rate": 7.452117519152542e-05,
"loss": 8.912,
"step": 136
},
{
"epoch": 0.350261569416499,
"eval_loss": 9.416214942932129,
"eval_runtime": 404.0983,
"eval_samples_per_second": 0.606,
"eval_steps_per_second": 0.606,
"step": 136
},
{
"epoch": 0.36056338028169016,
"grad_norm": 0.4839273691177368,
"learning_rate": 7.30670581489344e-05,
"loss": 10.0255,
"step": 140
},
{
"epoch": 0.37086519114688127,
"grad_norm": 0.5928329229354858,
"learning_rate": 7.158771761692464e-05,
"loss": 11.0052,
"step": 144
},
{
"epoch": 0.37086519114688127,
"eval_loss": 10.347317695617676,
"eval_runtime": 589.7264,
"eval_samples_per_second": 0.415,
"eval_steps_per_second": 0.415,
"step": 144
},
{
"epoch": 0.38116700201207243,
"grad_norm": 0.6406193971633911,
"learning_rate": 7.008477123264848e-05,
"loss": 10.2362,
"step": 148
},
{
"epoch": 0.3914688128772636,
"grad_norm": 0.6814000606536865,
"learning_rate": 6.855986244591104e-05,
"loss": 9.103,
"step": 152
},
{
"epoch": 0.3914688128772636,
"eval_loss": 9.645101547241211,
"eval_runtime": 412.3792,
"eval_samples_per_second": 0.594,
"eval_steps_per_second": 0.594,
"step": 152
},
{
"epoch": 0.4017706237424547,
"grad_norm": 1.015820860862732,
"learning_rate": 6.701465872208216e-05,
"loss": 9.8991,
"step": 156
},
{
"epoch": 0.41207243460764587,
"grad_norm": 1.2941161394119263,
"learning_rate": 6.545084971874738e-05,
"loss": 8.9631,
"step": 160
},
{
"epoch": 0.41207243460764587,
"eval_loss": 8.64919662475586,
"eval_runtime": 589.6714,
"eval_samples_per_second": 0.415,
"eval_steps_per_second": 0.415,
"step": 160
},
{
"epoch": 0.42237424547283703,
"grad_norm": 0.9186323881149292,
"learning_rate": 6.387014543809223e-05,
"loss": 9.4897,
"step": 164
},
{
"epoch": 0.4326760563380282,
"grad_norm": 1.1817784309387207,
"learning_rate": 6.227427435703997e-05,
"loss": 9.9634,
"step": 168
},
{
"epoch": 0.4326760563380282,
"eval_loss": 9.44008731842041,
"eval_runtime": 410.3179,
"eval_samples_per_second": 0.597,
"eval_steps_per_second": 0.597,
"step": 168
},
{
"epoch": 0.4429778672032193,
"grad_norm": 0.6623127460479736,
"learning_rate": 6.066498153718735e-05,
"loss": 9.3577,
"step": 172
},
{
"epoch": 0.45327967806841046,
"grad_norm": 0.612003743648529,
"learning_rate": 5.90440267166055e-05,
"loss": 9.814,
"step": 176
},
{
"epoch": 0.45327967806841046,
"eval_loss": 10.474808692932129,
"eval_runtime": 407.6119,
"eval_samples_per_second": 0.601,
"eval_steps_per_second": 0.601,
"step": 176
},
{
"epoch": 0.4635814889336016,
"grad_norm": 1.1024558544158936,
"learning_rate": 5.74131823855921e-05,
"loss": 10.5504,
"step": 180
},
{
"epoch": 0.47388329979879273,
"grad_norm": 0.6612622737884521,
"learning_rate": 5.577423184847932e-05,
"loss": 10.507,
"step": 184
},
{
"epoch": 0.47388329979879273,
"eval_loss": 10.191047668457031,
"eval_runtime": 619.1618,
"eval_samples_per_second": 0.396,
"eval_steps_per_second": 0.396,
"step": 184
},
{
"epoch": 0.4841851106639839,
"grad_norm": 0.6979438662528992,
"learning_rate": 5.4128967273616625e-05,
"loss": 10.0648,
"step": 188
},
{
"epoch": 0.49448692152917506,
"grad_norm": 1.1083264350891113,
"learning_rate": 5.247918773366112e-05,
"loss": 9.6613,
"step": 192
},
{
"epoch": 0.49448692152917506,
"eval_loss": 9.220105171203613,
"eval_runtime": 407.7177,
"eval_samples_per_second": 0.601,
"eval_steps_per_second": 0.601,
"step": 192
},
{
"epoch": 0.5047887323943662,
"grad_norm": 1.5402456521987915,
"learning_rate": 5.0826697238317935e-05,
"loss": 9.0613,
"step": 196
},
{
"epoch": 0.5150905432595574,
"grad_norm": 0.9043979644775391,
"learning_rate": 4.917330276168208e-05,
"loss": 9.0448,
"step": 200
},
{
"epoch": 0.5150905432595574,
"eval_loss": 10.391300201416016,
"eval_runtime": 612.3848,
"eval_samples_per_second": 0.4,
"eval_steps_per_second": 0.4,
"step": 200
},
{
"epoch": 0.5253923541247485,
"grad_norm": 1.0502400398254395,
"learning_rate": 4.7520812266338885e-05,
"loss": 10.3086,
"step": 204
},
{
"epoch": 0.5356941649899396,
"grad_norm": 1.2477459907531738,
"learning_rate": 4.5871032726383386e-05,
"loss": 9.4984,
"step": 208
},
{
"epoch": 0.5356941649899396,
"eval_loss": 8.543373107910156,
"eval_runtime": 401.1613,
"eval_samples_per_second": 0.611,
"eval_steps_per_second": 0.611,
"step": 208
},
{
"epoch": 0.5459959758551308,
"grad_norm": 1.947261929512024,
"learning_rate": 4.4225768151520694e-05,
"loss": 7.6535,
"step": 212
},
{
"epoch": 0.5562977867203219,
"grad_norm": 1.2403180599212646,
"learning_rate": 4.2586817614407895e-05,
"loss": 7.4393,
"step": 216
},
{
"epoch": 0.5562977867203219,
"eval_loss": 8.435011863708496,
"eval_runtime": 596.6953,
"eval_samples_per_second": 0.411,
"eval_steps_per_second": 0.411,
"step": 216
},
{
"epoch": 0.566599597585513,
"grad_norm": 0.40848633646965027,
"learning_rate": 4.095597328339452e-05,
"loss": 9.1596,
"step": 220
},
{
"epoch": 0.5769014084507043,
"grad_norm": 0.33752307295799255,
"learning_rate": 3.933501846281267e-05,
"loss": 10.0883,
"step": 224
},
{
"epoch": 0.5769014084507043,
"eval_loss": 10.258369445800781,
"eval_runtime": 420.9662,
"eval_samples_per_second": 0.582,
"eval_steps_per_second": 0.582,
"step": 224
},
{
"epoch": 0.5872032193158954,
"grad_norm": 1.32246732711792,
"learning_rate": 3.772572564296005e-05,
"loss": 10.3805,
"step": 228
},
{
"epoch": 0.5975050301810865,
"grad_norm": 0.46579450368881226,
"learning_rate": 3.612985456190778e-05,
"loss": 10.7162,
"step": 232
},
{
"epoch": 0.5975050301810865,
"eval_loss": 10.689937591552734,
"eval_runtime": 393.2211,
"eval_samples_per_second": 0.623,
"eval_steps_per_second": 0.623,
"step": 232
},
{
"epoch": 0.6078068410462777,
"grad_norm": 0.3314819037914276,
"learning_rate": 3.4549150281252636e-05,
"loss": 10.7026,
"step": 236
},
{
"epoch": 0.6181086519114688,
"grad_norm": 0.7479149699211121,
"learning_rate": 3.298534127791785e-05,
"loss": 10.4785,
"step": 240
},
{
"epoch": 0.6181086519114688,
"eval_loss": 10.441695213317871,
"eval_runtime": 587.9582,
"eval_samples_per_second": 0.417,
"eval_steps_per_second": 0.417,
"step": 240
},
{
"epoch": 0.6284104627766599,
"grad_norm": 0.45178794860839844,
"learning_rate": 3.144013755408895e-05,
"loss": 10.4069,
"step": 244
},
{
"epoch": 0.6387122736418511,
"grad_norm": 0.5428746938705444,
"learning_rate": 2.991522876735154e-05,
"loss": 10.023,
"step": 248
},
{
"epoch": 0.6387122736418511,
"eval_loss": 9.624427795410156,
"eval_runtime": 394.3213,
"eval_samples_per_second": 0.621,
"eval_steps_per_second": 0.621,
"step": 248
},
{
"epoch": 0.6490140845070422,
"grad_norm": 1.6642308235168457,
"learning_rate": 2.8412282383075363e-05,
"loss": 9.6059,
"step": 252
},
{
"epoch": 0.6593158953722335,
"grad_norm": 0.7868029475212097,
"learning_rate": 2.693294185106562e-05,
"loss": 9.2272,
"step": 256
},
{
"epoch": 0.6593158953722335,
"eval_loss": 8.930831909179688,
"eval_runtime": 559.2753,
"eval_samples_per_second": 0.438,
"eval_steps_per_second": 0.438,
"step": 256
},
{
"epoch": 0.6696177062374246,
"grad_norm": 0.8221493363380432,
"learning_rate": 2.547882480847461e-05,
"loss": 9.15,
"step": 260
},
{
"epoch": 0.6799195171026157,
"grad_norm": 0.8017754554748535,
"learning_rate": 2.405152131093926e-05,
"loss": 9.1518,
"step": 264
},
{
"epoch": 0.6799195171026157,
"eval_loss": 9.226869583129883,
"eval_runtime": 395.8334,
"eval_samples_per_second": 0.619,
"eval_steps_per_second": 0.619,
"step": 264
},
{
"epoch": 0.6902213279678069,
"grad_norm": 0.8554782867431641,
"learning_rate": 2.2652592093878666e-05,
"loss": 9.1271,
"step": 268
},
{
"epoch": 0.700523138832998,
"grad_norm": 0.8649684190750122,
"learning_rate": 2.128356686585282e-05,
"loss": 9.1733,
"step": 272
},
{
"epoch": 0.700523138832998,
"eval_loss": 9.243361473083496,
"eval_runtime": 580.204,
"eval_samples_per_second": 0.422,
"eval_steps_per_second": 0.422,
"step": 272
},
{
"epoch": 0.7108249496981891,
"grad_norm": 0.5342640280723572,
"learning_rate": 1.9945942635848748e-05,
"loss": 9.5578,
"step": 276
},
{
"epoch": 0.7211267605633803,
"grad_norm": 0.5874167680740356,
"learning_rate": 1.8641182076323148e-05,
"loss": 9.3347,
"step": 280
},
{
"epoch": 0.7211267605633803,
"eval_loss": 9.283143043518066,
"eval_runtime": 401.3056,
"eval_samples_per_second": 0.611,
"eval_steps_per_second": 0.611,
"step": 280
},
{
"epoch": 0.7314285714285714,
"grad_norm": 0.9126715064048767,
"learning_rate": 1.7370711923791567e-05,
"loss": 9.4285,
"step": 284
},
{
"epoch": 0.7417303822937625,
"grad_norm": 1.4541490077972412,
"learning_rate": 1.6135921418712956e-05,
"loss": 9.468,
"step": 288
},
{
"epoch": 0.7417303822937625,
"eval_loss": 9.104644775390625,
"eval_runtime": 406.3059,
"eval_samples_per_second": 0.603,
"eval_steps_per_second": 0.603,
"step": 288
},
{
"epoch": 0.7520321931589538,
"grad_norm": 1.042210578918457,
"learning_rate": 1.4938160786375572e-05,
"loss": 8.9945,
"step": 292
},
{
"epoch": 0.7623340040241449,
"grad_norm": 1.5759915113449097,
"learning_rate": 1.3778739760445552e-05,
"loss": 8.9402,
"step": 296
},
{
"epoch": 0.7623340040241449,
"eval_loss": 9.010215759277344,
"eval_runtime": 586.8102,
"eval_samples_per_second": 0.418,
"eval_steps_per_second": 0.418,
"step": 296
},
{
"epoch": 0.772635814889336,
"grad_norm": 1.4896786212921143,
"learning_rate": 1.2658926150792322e-05,
"loss": 9.0846,
"step": 300
},
{
"epoch": 0.7829376257545272,
"grad_norm": 1.1868205070495605,
"learning_rate": 1.157994445715706e-05,
"loss": 9.1051,
"step": 304
},
{
"epoch": 0.7829376257545272,
"eval_loss": 9.261675834655762,
"eval_runtime": 416.6181,
"eval_samples_per_second": 0.588,
"eval_steps_per_second": 0.588,
"step": 304
},
{
"epoch": 0.7932394366197183,
"grad_norm": 1.3363028764724731,
"learning_rate": 1.0542974530180327e-05,
"loss": 9.2574,
"step": 308
},
{
"epoch": 0.8035412474849094,
"grad_norm": 1.2219579219818115,
"learning_rate": 9.549150281252633e-06,
"loss": 9.2223,
"step": 312
},
{
"epoch": 0.8035412474849094,
"eval_loss": 9.392084121704102,
"eval_runtime": 602.8211,
"eval_samples_per_second": 0.406,
"eval_steps_per_second": 0.406,
"step": 312
},
{
"epoch": 0.8138430583501006,
"grad_norm": 0.9515023827552795,
"learning_rate": 8.599558442598998e-06,
"loss": 9.4958,
"step": 316
},
{
"epoch": 0.8241448692152917,
"grad_norm": 0.989476203918457,
"learning_rate": 7.695237378953223e-06,
"loss": 9.3359,
"step": 320
},
{
"epoch": 0.8241448692152917,
"eval_loss": 9.327733993530273,
"eval_runtime": 392.6991,
"eval_samples_per_second": 0.624,
"eval_steps_per_second": 0.624,
"step": 320
},
{
"epoch": 0.834446680080483,
"grad_norm": 1.3227804899215698,
"learning_rate": 6.837175952121306e-06,
"loss": 9.4079,
"step": 324
},
{
"epoch": 0.8447484909456741,
"grad_norm": 0.998881995677948,
"learning_rate": 6.026312439675552e-06,
"loss": 9.1508,
"step": 328
},
{
"epoch": 0.8447484909456741,
"eval_loss": 9.275510787963867,
"eval_runtime": 607.3633,
"eval_samples_per_second": 0.403,
"eval_steps_per_second": 0.403,
"step": 328
},
{
"epoch": 0.8550503018108652,
"grad_norm": 0.7702223062515259,
"learning_rate": 5.263533508961827e-06,
"loss": 9.2486,
"step": 332
},
{
"epoch": 0.8653521126760564,
"grad_norm": 0.594261646270752,
"learning_rate": 4.549673247541875e-06,
"loss": 9.5364,
"step": 336
},
{
"epoch": 0.8653521126760564,
"eval_loss": 9.303129196166992,
"eval_runtime": 403.5636,
"eval_samples_per_second": 0.607,
"eval_steps_per_second": 0.607,
"step": 336
},
{
"epoch": 0.8756539235412475,
"grad_norm": 1.039624810218811,
"learning_rate": 3.885512251130763e-06,
"loss": 9.3301,
"step": 340
},
{
"epoch": 0.8859557344064386,
"grad_norm": 0.9027475118637085,
"learning_rate": 3.271776770026963e-06,
"loss": 9.4429,
"step": 344
},
{
"epoch": 0.8859557344064386,
"eval_loss": 9.322930335998535,
"eval_runtime": 357.9628,
"eval_samples_per_second": 0.684,
"eval_steps_per_second": 0.684,
"step": 344
},
{
"epoch": 0.8962575452716298,
"grad_norm": 0.9633539319038391,
"learning_rate": 2.7091379149682685e-06,
"loss": 9.3991,
"step": 348
},
{
"epoch": 0.9065593561368209,
"grad_norm": 1.729811191558838,
"learning_rate": 2.1982109232821178e-06,
"loss": 9.3958,
"step": 352
},
{
"epoch": 0.9065593561368209,
"eval_loss": 9.340832710266113,
"eval_runtime": 172.4755,
"eval_samples_per_second": 1.42,
"eval_steps_per_second": 1.42,
"step": 352
},
{
"epoch": 0.916861167002012,
"grad_norm": 0.8728967905044556,
"learning_rate": 1.7395544861325718e-06,
"loss": 9.4181,
"step": 356
},
{
"epoch": 0.9271629778672033,
"grad_norm": 0.8314731121063232,
"learning_rate": 1.333670137599713e-06,
"loss": 9.3778,
"step": 360
},
{
"epoch": 0.9271629778672033,
"eval_loss": 9.357659339904785,
"eval_runtime": 122.9327,
"eval_samples_per_second": 1.993,
"eval_steps_per_second": 1.993,
"step": 360
},
{
"epoch": 0.9374647887323944,
"grad_norm": 1.05840003490448,
"learning_rate": 9.810017062595322e-07,
"loss": 9.5264,
"step": 364
},
{
"epoch": 0.9477665995975855,
"grad_norm": 1.1095401048660278,
"learning_rate": 6.819348298638839e-07,
"loss": 9.1859,
"step": 368
},
{
"epoch": 0.9477665995975855,
"eval_loss": 9.360688209533691,
"eval_runtime": 162.1198,
"eval_samples_per_second": 1.511,
"eval_steps_per_second": 1.511,
"step": 368
},
{
"epoch": 0.9580684104627767,
"grad_norm": 0.8359835743904114,
"learning_rate": 4.367965336512403e-07,
"loss": 9.2367,
"step": 372
},
{
"epoch": 0.9683702213279678,
"grad_norm": 1.236433506011963,
"learning_rate": 2.458548727494292e-07,
"loss": 9.4256,
"step": 376
},
{
"epoch": 0.9683702213279678,
"eval_loss": 9.362174987792969,
"eval_runtime": 122.9652,
"eval_samples_per_second": 1.992,
"eval_steps_per_second": 1.992,
"step": 376
},
{
"epoch": 0.9786720321931589,
"grad_norm": 0.7953875660896301,
"learning_rate": 1.0931863906127327e-07,
"loss": 9.2804,
"step": 380
},
{
"epoch": 0.9889738430583501,
"grad_norm": 1.0977773666381836,
"learning_rate": 2.7337132953697554e-08,
"loss": 9.3454,
"step": 384
},
{
"epoch": 0.9889738430583501,
"eval_loss": 9.3620023727417,
"eval_runtime": 167.8824,
"eval_samples_per_second": 1.459,
"eval_steps_per_second": 1.459,
"step": 384
}
],
"logging_steps": 4,
"max_steps": 388,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 8,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.5203300693467136e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}