{
"best_metric": 7.685921669006348,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.011317178061650328,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 5.6585890308251636e-05,
"grad_norm": 1.3464780455018496e+16,
"learning_rate": 1e-05,
"loss": 7.5297,
"step": 1
},
{
"epoch": 5.6585890308251636e-05,
"eval_loss": 7.660423278808594,
"eval_runtime": 1422.8621,
"eval_samples_per_second": 20.918,
"eval_steps_per_second": 5.23,
"step": 1
},
{
"epoch": 0.00011317178061650327,
"grad_norm": 6189794983936.0,
"learning_rate": 2e-05,
"loss": 7.5521,
"step": 2
},
{
"epoch": 0.00016975767092475492,
"grad_norm": 52357701632.0,
"learning_rate": 3e-05,
"loss": 7.8869,
"step": 3
},
{
"epoch": 0.00022634356123300655,
"grad_norm": 54719025152.0,
"learning_rate": 4e-05,
"loss": 7.6219,
"step": 4
},
{
"epoch": 0.0002829294515412582,
"grad_norm": 1395876036608.0,
"learning_rate": 5e-05,
"loss": 7.4413,
"step": 5
},
{
"epoch": 0.00033951534184950983,
"grad_norm": 3793536417792.0,
"learning_rate": 6e-05,
"loss": 7.8253,
"step": 6
},
{
"epoch": 0.0003961012321577615,
"grad_norm": 3858545508352.0,
"learning_rate": 7e-05,
"loss": 7.625,
"step": 7
},
{
"epoch": 0.0004526871224660131,
"grad_norm": 226193014784.0,
"learning_rate": 8e-05,
"loss": 7.7795,
"step": 8
},
{
"epoch": 0.0005092730127742647,
"grad_norm": 30078451712.0,
"learning_rate": 9e-05,
"loss": 7.949,
"step": 9
},
{
"epoch": 0.0005658589030825163,
"grad_norm": 7235769991168.0,
"learning_rate": 0.0001,
"loss": 7.8256,
"step": 10
},
{
"epoch": 0.000622444793390768,
"grad_norm": 182577790976.0,
"learning_rate": 9.999316524962345e-05,
"loss": 7.8462,
"step": 11
},
{
"epoch": 0.0006790306836990197,
"grad_norm": 1385313075200.0,
"learning_rate": 9.997266286704631e-05,
"loss": 7.9559,
"step": 12
},
{
"epoch": 0.0007356165740072713,
"grad_norm": 11366069248.0,
"learning_rate": 9.993849845741524e-05,
"loss": 7.4311,
"step": 13
},
{
"epoch": 0.000792202464315523,
"grad_norm": 28725604352.0,
"learning_rate": 9.989068136093873e-05,
"loss": 7.409,
"step": 14
},
{
"epoch": 0.0008487883546237745,
"grad_norm": 41975459840.0,
"learning_rate": 9.98292246503335e-05,
"loss": 7.6129,
"step": 15
},
{
"epoch": 0.0009053742449320262,
"grad_norm": 927466688.0,
"learning_rate": 9.975414512725057e-05,
"loss": 7.2984,
"step": 16
},
{
"epoch": 0.0009619601352402778,
"grad_norm": 10077430784.0,
"learning_rate": 9.966546331768191e-05,
"loss": 7.6305,
"step": 17
},
{
"epoch": 0.0010185460255485294,
"grad_norm": 141515767808.0,
"learning_rate": 9.956320346634876e-05,
"loss": 7.4938,
"step": 18
},
{
"epoch": 0.001075131915856781,
"grad_norm": 439352197120.0,
"learning_rate": 9.944739353007344e-05,
"loss": 7.6124,
"step": 19
},
{
"epoch": 0.0011317178061650327,
"grad_norm": 52842606592.0,
"learning_rate": 9.931806517013612e-05,
"loss": 7.873,
"step": 20
},
{
"epoch": 0.0011883036964732844,
"grad_norm": 155040694272.0,
"learning_rate": 9.917525374361912e-05,
"loss": 7.6531,
"step": 21
},
{
"epoch": 0.001244889586781536,
"grad_norm": 4364636160.0,
"learning_rate": 9.901899829374047e-05,
"loss": 7.869,
"step": 22
},
{
"epoch": 0.0013014754770897877,
"grad_norm": 42571476992.0,
"learning_rate": 9.884934153917997e-05,
"loss": 7.6311,
"step": 23
},
{
"epoch": 0.0013580613673980393,
"grad_norm": 13589472256.0,
"learning_rate": 9.86663298624003e-05,
"loss": 7.8285,
"step": 24
},
{
"epoch": 0.001414647257706291,
"grad_norm": 35559387136.0,
"learning_rate": 9.847001329696653e-05,
"loss": 7.7521,
"step": 25
},
{
"epoch": 0.0014712331480145426,
"grad_norm": 156407300096.0,
"learning_rate": 9.826044551386744e-05,
"loss": 7.7867,
"step": 26
},
{
"epoch": 0.0015278190383227943,
"grad_norm": 7682135552.0,
"learning_rate": 9.803768380684242e-05,
"loss": 7.736,
"step": 27
},
{
"epoch": 0.001584404928631046,
"grad_norm": 3697520934912.0,
"learning_rate": 9.780178907671789e-05,
"loss": 7.9694,
"step": 28
},
{
"epoch": 0.0016409908189392974,
"grad_norm": 6141609984.0,
"learning_rate": 9.755282581475769e-05,
"loss": 7.8885,
"step": 29
},
{
"epoch": 0.001697576709247549,
"grad_norm": 63329062912.0,
"learning_rate": 9.729086208503174e-05,
"loss": 7.5311,
"step": 30
},
{
"epoch": 0.0017541625995558007,
"grad_norm": 24582584320.0,
"learning_rate": 9.701596950580806e-05,
"loss": 7.6015,
"step": 31
},
{
"epoch": 0.0018107484898640524,
"grad_norm": 1713643648.0,
"learning_rate": 9.672822322997305e-05,
"loss": 7.635,
"step": 32
},
{
"epoch": 0.001867334380172304,
"grad_norm": 9013058560.0,
"learning_rate": 9.642770192448536e-05,
"loss": 7.5828,
"step": 33
},
{
"epoch": 0.0019239202704805557,
"grad_norm": 1362955599872.0,
"learning_rate": 9.611448774886924e-05,
"loss": 7.716,
"step": 34
},
{
"epoch": 0.001980506160788807,
"grad_norm": 8080517120.0,
"learning_rate": 9.578866633275288e-05,
"loss": 7.7179,
"step": 35
},
{
"epoch": 0.0020370920510970588,
"grad_norm": 85229699072.0,
"learning_rate": 9.545032675245813e-05,
"loss": 7.7285,
"step": 36
},
{
"epoch": 0.0020936779414053104,
"grad_norm": 1127980160.0,
"learning_rate": 9.509956150664796e-05,
"loss": 7.8929,
"step": 37
},
{
"epoch": 0.002150263831713562,
"grad_norm": 432537152.0,
"learning_rate": 9.473646649103818e-05,
"loss": 7.349,
"step": 38
},
{
"epoch": 0.0022068497220218137,
"grad_norm": 7240221184.0,
"learning_rate": 9.43611409721806e-05,
"loss": 7.7718,
"step": 39
},
{
"epoch": 0.0022634356123300654,
"grad_norm": 30932027392.0,
"learning_rate": 9.397368756032445e-05,
"loss": 7.7322,
"step": 40
},
{
"epoch": 0.002320021502638317,
"grad_norm": 26500192256.0,
"learning_rate": 9.357421218136386e-05,
"loss": 7.5366,
"step": 41
},
{
"epoch": 0.0023766073929465687,
"grad_norm": 5944017408.0,
"learning_rate": 9.316282404787871e-05,
"loss": 7.7096,
"step": 42
},
{
"epoch": 0.0024331932832548204,
"grad_norm": 7556184064.0,
"learning_rate": 9.273963562927695e-05,
"loss": 7.7012,
"step": 43
},
{
"epoch": 0.002489779173563072,
"grad_norm": 3215926784.0,
"learning_rate": 9.230476262104677e-05,
"loss": 7.7405,
"step": 44
},
{
"epoch": 0.0025463650638713237,
"grad_norm": 53130964992.0,
"learning_rate": 9.185832391312644e-05,
"loss": 7.7814,
"step": 45
},
{
"epoch": 0.0026029509541795753,
"grad_norm": 679010304000.0,
"learning_rate": 9.140044155740101e-05,
"loss": 7.5326,
"step": 46
},
{
"epoch": 0.002659536844487827,
"grad_norm": 860553664.0,
"learning_rate": 9.093124073433463e-05,
"loss": 7.6186,
"step": 47
},
{
"epoch": 0.0027161227347960787,
"grad_norm": 10030989312.0,
"learning_rate": 9.045084971874738e-05,
"loss": 8.1192,
"step": 48
},
{
"epoch": 0.0027727086251043303,
"grad_norm": 3191266304.0,
"learning_rate": 8.995939984474624e-05,
"loss": 7.9123,
"step": 49
},
{
"epoch": 0.002829294515412582,
"grad_norm": 11409710080.0,
"learning_rate": 8.945702546981969e-05,
"loss": 8.1616,
"step": 50
},
{
"epoch": 0.002829294515412582,
"eval_loss": 8.087163925170898,
"eval_runtime": 1434.9366,
"eval_samples_per_second": 20.742,
"eval_steps_per_second": 5.186,
"step": 50
},
{
"epoch": 0.0028858804057208336,
"grad_norm": 33563346993152.0,
"learning_rate": 8.894386393810563e-05,
"loss": 8.2012,
"step": 51
},
{
"epoch": 0.0029424662960290853,
"grad_norm": 775263879168.0,
"learning_rate": 8.842005554284296e-05,
"loss": 8.3322,
"step": 52
},
{
"epoch": 0.002999052186337337,
"grad_norm": 1375761334272.0,
"learning_rate": 8.788574348801675e-05,
"loss": 8.4271,
"step": 53
},
{
"epoch": 0.0030556380766455886,
"grad_norm": 490233495552.0,
"learning_rate": 8.73410738492077e-05,
"loss": 8.0982,
"step": 54
},
{
"epoch": 0.0031122239669538402,
"grad_norm": 179365467914240.0,
"learning_rate": 8.678619553365659e-05,
"loss": 8.149,
"step": 55
},
{
"epoch": 0.003168809857262092,
"grad_norm": 254015537152.0,
"learning_rate": 8.622126023955446e-05,
"loss": 8.3288,
"step": 56
},
{
"epoch": 0.003225395747570343,
"grad_norm": 32609501184.0,
"learning_rate": 8.564642241456986e-05,
"loss": 8.4534,
"step": 57
},
{
"epoch": 0.003281981637878595,
"grad_norm": 8593976328192.0,
"learning_rate": 8.506183921362443e-05,
"loss": 7.9167,
"step": 58
},
{
"epoch": 0.0033385675281868464,
"grad_norm": 1455979757568.0,
"learning_rate": 8.44676704559283e-05,
"loss": 7.9132,
"step": 59
},
{
"epoch": 0.003395153418495098,
"grad_norm": 192916963328.0,
"learning_rate": 8.386407858128706e-05,
"loss": 8.2538,
"step": 60
},
{
"epoch": 0.0034517393088033498,
"grad_norm": 119358521344.0,
"learning_rate": 8.32512286056924e-05,
"loss": 7.9478,
"step": 61
},
{
"epoch": 0.0035083251991116014,
"grad_norm": 666327842816.0,
"learning_rate": 8.262928807620843e-05,
"loss": 8.2091,
"step": 62
},
{
"epoch": 0.003564911089419853,
"grad_norm": 76416955711488.0,
"learning_rate": 8.199842702516583e-05,
"loss": 8.0151,
"step": 63
},
{
"epoch": 0.0036214969797281047,
"grad_norm": 142597619712.0,
"learning_rate": 8.135881792367686e-05,
"loss": 8.1361,
"step": 64
},
{
"epoch": 0.0036780828700363564,
"grad_norm": 133583110144.0,
"learning_rate": 8.07106356344834e-05,
"loss": 7.9852,
"step": 65
},
{
"epoch": 0.003734668760344608,
"grad_norm": 189700620288.0,
"learning_rate": 8.005405736415126e-05,
"loss": 7.7228,
"step": 66
},
{
"epoch": 0.0037912546506528597,
"grad_norm": 86162983092224.0,
"learning_rate": 7.938926261462366e-05,
"loss": 8.3066,
"step": 67
},
{
"epoch": 0.0038478405409611114,
"grad_norm": 90551926784.0,
"learning_rate": 7.871643313414718e-05,
"loss": 7.9547,
"step": 68
},
{
"epoch": 0.003904426431269363,
"grad_norm": 276256391168.0,
"learning_rate": 7.803575286758364e-05,
"loss": 8.1374,
"step": 69
},
{
"epoch": 0.003961012321577614,
"grad_norm": 7292632064.0,
"learning_rate": 7.734740790612136e-05,
"loss": 7.6305,
"step": 70
},
{
"epoch": 0.004017598211885866,
"grad_norm": 149718597632.0,
"learning_rate": 7.66515864363997e-05,
"loss": 7.7694,
"step": 71
},
{
"epoch": 0.0040741841021941175,
"grad_norm": 28569499648.0,
"learning_rate": 7.594847868906076e-05,
"loss": 8.4468,
"step": 72
},
{
"epoch": 0.00413076999250237,
"grad_norm": 674502017024.0,
"learning_rate": 7.52382768867422e-05,
"loss": 8.2909,
"step": 73
},
{
"epoch": 0.004187355882810621,
"grad_norm": 59237191680.0,
"learning_rate": 7.452117519152542e-05,
"loss": 8.1193,
"step": 74
},
{
"epoch": 0.004243941773118873,
"grad_norm": 83830571008.0,
"learning_rate": 7.379736965185368e-05,
"loss": 8.9375,
"step": 75
},
{
"epoch": 0.004300527663427124,
"grad_norm": 125110149120.0,
"learning_rate": 7.30670581489344e-05,
"loss": 8.2467,
"step": 76
},
{
"epoch": 0.004357113553735376,
"grad_norm": 214836723712.0,
"learning_rate": 7.233044034264034e-05,
"loss": 8.5393,
"step": 77
},
{
"epoch": 0.0044136994440436275,
"grad_norm": 11289925517312.0,
"learning_rate": 7.158771761692464e-05,
"loss": 8.3445,
"step": 78
},
{
"epoch": 0.00447028533435188,
"grad_norm": 4613561344.0,
"learning_rate": 7.083909302476453e-05,
"loss": 8.1811,
"step": 79
},
{
"epoch": 0.004526871224660131,
"grad_norm": 39612252160.0,
"learning_rate": 7.008477123264848e-05,
"loss": 8.4202,
"step": 80
},
{
"epoch": 0.004583457114968383,
"grad_norm": 1477506367488.0,
"learning_rate": 6.932495846462261e-05,
"loss": 8.2343,
"step": 81
},
{
"epoch": 0.004640043005276634,
"grad_norm": 791374462976.0,
"learning_rate": 6.855986244591104e-05,
"loss": 8.3701,
"step": 82
},
{
"epoch": 0.004696628895584886,
"grad_norm": 49816530944.0,
"learning_rate": 6.778969234612584e-05,
"loss": 8.4674,
"step": 83
},
{
"epoch": 0.004753214785893137,
"grad_norm": 19193165824.0,
"learning_rate": 6.701465872208216e-05,
"loss": 8.1124,
"step": 84
},
{
"epoch": 0.0048098006762013895,
"grad_norm": 125424975872.0,
"learning_rate": 6.623497346023418e-05,
"loss": 8.2403,
"step": 85
},
{
"epoch": 0.004866386566509641,
"grad_norm": 696718589952.0,
"learning_rate": 6.545084971874738e-05,
"loss": 7.9518,
"step": 86
},
{
"epoch": 0.004922972456817893,
"grad_norm": 31419672576.0,
"learning_rate": 6.466250186922325e-05,
"loss": 8.3914,
"step": 87
},
{
"epoch": 0.004979558347126144,
"grad_norm": 21970282496.0,
"learning_rate": 6.387014543809223e-05,
"loss": 8.0971,
"step": 88
},
{
"epoch": 0.005036144237434396,
"grad_norm": 4688684544.0,
"learning_rate": 6.307399704769099e-05,
"loss": 8.4327,
"step": 89
},
{
"epoch": 0.005092730127742647,
"grad_norm": 9488765952.0,
"learning_rate": 6.227427435703997e-05,
"loss": 8.2206,
"step": 90
},
{
"epoch": 0.005149316018050899,
"grad_norm": 438148628480.0,
"learning_rate": 6.147119600233758e-05,
"loss": 8.1085,
"step": 91
},
{
"epoch": 0.005205901908359151,
"grad_norm": 3410772736.0,
"learning_rate": 6.066498153718735e-05,
"loss": 8.228,
"step": 92
},
{
"epoch": 0.005262487798667402,
"grad_norm": 25886801920.0,
"learning_rate": 5.985585137257401e-05,
"loss": 7.9752,
"step": 93
},
{
"epoch": 0.005319073688975654,
"grad_norm": 199570079744.0,
"learning_rate": 5.90440267166055e-05,
"loss": 8.0697,
"step": 94
},
{
"epoch": 0.005375659579283905,
"grad_norm": 239521890304.0,
"learning_rate": 5.8229729514036705e-05,
"loss": 8.477,
"step": 95
},
{
"epoch": 0.005432245469592157,
"grad_norm": 51361988608.0,
"learning_rate": 5.74131823855921e-05,
"loss": 8.2511,
"step": 96
},
{
"epoch": 0.0054888313599004085,
"grad_norm": 58866578489344.0,
"learning_rate": 5.6594608567103456e-05,
"loss": 8.5789,
"step": 97
},
{
"epoch": 0.005545417250208661,
"grad_norm": 11555475456.0,
"learning_rate": 5.577423184847932e-05,
"loss": 8.4096,
"step": 98
},
{
"epoch": 0.005602003140516912,
"grad_norm": 2464840448.0,
"learning_rate": 5.495227651252315e-05,
"loss": 8.0886,
"step": 99
},
{
"epoch": 0.005658589030825164,
"grad_norm": 2346306816.0,
"learning_rate": 5.4128967273616625e-05,
"loss": 8.8207,
"step": 100
},
{
"epoch": 0.005658589030825164,
"eval_loss": 8.11878490447998,
"eval_runtime": 1433.9122,
"eval_samples_per_second": 20.757,
"eval_steps_per_second": 5.189,
"step": 100
},
{
"epoch": 0.005715174921133415,
"grad_norm": 11243564826624.0,
"learning_rate": 5.330452921628497e-05,
"loss": 8.6527,
"step": 101
},
{
"epoch": 0.005771760811441667,
"grad_norm": 526651097088.0,
"learning_rate": 5.247918773366112e-05,
"loss": 8.368,
"step": 102
},
{
"epoch": 0.0058283467017499185,
"grad_norm": 311527440384.0,
"learning_rate": 5.165316846586541e-05,
"loss": 7.6277,
"step": 103
},
{
"epoch": 0.0058849325920581706,
"grad_norm": 10869995995136.0,
"learning_rate": 5.0826697238317935e-05,
"loss": 8.0927,
"step": 104
},
{
"epoch": 0.005941518482366422,
"grad_norm": 4208619159552.0,
"learning_rate": 5e-05,
"loss": 7.9168,
"step": 105
},
{
"epoch": 0.005998104372674674,
"grad_norm": 1251409002496.0,
"learning_rate": 4.917330276168208e-05,
"loss": 8.268,
"step": 106
},
{
"epoch": 0.006054690262982925,
"grad_norm": 192926597120.0,
"learning_rate": 4.834683153413459e-05,
"loss": 7.9177,
"step": 107
},
{
"epoch": 0.006111276153291177,
"grad_norm": 18461149184.0,
"learning_rate": 4.7520812266338885e-05,
"loss": 8.1626,
"step": 108
},
{
"epoch": 0.006167862043599428,
"grad_norm": 202418225152.0,
"learning_rate": 4.669547078371504e-05,
"loss": 7.8157,
"step": 109
},
{
"epoch": 0.0062244479339076805,
"grad_norm": 3483542487040.0,
"learning_rate": 4.5871032726383386e-05,
"loss": 7.563,
"step": 110
},
{
"epoch": 0.006281033824215932,
"grad_norm": 38304279953408.0,
"learning_rate": 4.504772348747687e-05,
"loss": 7.8441,
"step": 111
},
{
"epoch": 0.006337619714524184,
"grad_norm": 69473370112.0,
"learning_rate": 4.4225768151520694e-05,
"loss": 7.7472,
"step": 112
},
{
"epoch": 0.006394205604832435,
"grad_norm": 226336194560.0,
"learning_rate": 4.3405391432896555e-05,
"loss": 7.7,
"step": 113
},
{
"epoch": 0.006450791495140686,
"grad_norm": 280025563136.0,
"learning_rate": 4.2586817614407895e-05,
"loss": 7.6503,
"step": 114
},
{
"epoch": 0.006507377385448938,
"grad_norm": 231607533568.0,
"learning_rate": 4.17702704859633e-05,
"loss": 7.6836,
"step": 115
},
{
"epoch": 0.00656396327575719,
"grad_norm": 120242954240.0,
"learning_rate": 4.095597328339452e-05,
"loss": 7.7419,
"step": 116
},
{
"epoch": 0.006620549166065442,
"grad_norm": 107691515904.0,
"learning_rate": 4.0144148627425993e-05,
"loss": 7.5792,
"step": 117
},
{
"epoch": 0.006677135056373693,
"grad_norm": 3400876288.0,
"learning_rate": 3.933501846281267e-05,
"loss": 7.5476,
"step": 118
},
{
"epoch": 0.006733720946681945,
"grad_norm": 105070043136.0,
"learning_rate": 3.852880399766243e-05,
"loss": 7.4885,
"step": 119
},
{
"epoch": 0.006790306836990196,
"grad_norm": 46602653696.0,
"learning_rate": 3.772572564296005e-05,
"loss": 7.629,
"step": 120
},
{
"epoch": 0.006846892727298448,
"grad_norm": 13601972224.0,
"learning_rate": 3.6926002952309016e-05,
"loss": 7.7568,
"step": 121
},
{
"epoch": 0.0069034786176066995,
"grad_norm": 32649140224.0,
"learning_rate": 3.612985456190778e-05,
"loss": 7.7595,
"step": 122
},
{
"epoch": 0.006960064507914952,
"grad_norm": 67683680256.0,
"learning_rate": 3.533749813077677e-05,
"loss": 7.7851,
"step": 123
},
{
"epoch": 0.007016650398223203,
"grad_norm": 10268348416.0,
"learning_rate": 3.4549150281252636e-05,
"loss": 7.5404,
"step": 124
},
{
"epoch": 0.007073236288531455,
"grad_norm": 28669790208.0,
"learning_rate": 3.3765026539765834e-05,
"loss": 7.4454,
"step": 125
},
{
"epoch": 0.007129822178839706,
"grad_norm": 648124628992.0,
"learning_rate": 3.298534127791785e-05,
"loss": 7.8448,
"step": 126
},
{
"epoch": 0.007186408069147958,
"grad_norm": 23976722432.0,
"learning_rate": 3.221030765387417e-05,
"loss": 8.0231,
"step": 127
},
{
"epoch": 0.0072429939594562094,
"grad_norm": 11712155648.0,
"learning_rate": 3.144013755408895e-05,
"loss": 8.0631,
"step": 128
},
{
"epoch": 0.0072995798497644615,
"grad_norm": 43061446967296.0,
"learning_rate": 3.0675041535377405e-05,
"loss": 7.5414,
"step": 129
},
{
"epoch": 0.007356165740072713,
"grad_norm": 12610547712.0,
"learning_rate": 2.991522876735154e-05,
"loss": 7.6006,
"step": 130
},
{
"epoch": 0.007412751630380965,
"grad_norm": 3941680384.0,
"learning_rate": 2.916090697523549e-05,
"loss": 7.814,
"step": 131
},
{
"epoch": 0.007469337520689216,
"grad_norm": 39281377280.0,
"learning_rate": 2.8412282383075363e-05,
"loss": 7.6488,
"step": 132
},
{
"epoch": 0.007525923410997468,
"grad_norm": 3924603904.0,
"learning_rate": 2.766955965735968e-05,
"loss": 7.5749,
"step": 133
},
{
"epoch": 0.007582509301305719,
"grad_norm": 14518933504.0,
"learning_rate": 2.693294185106562e-05,
"loss": 7.5491,
"step": 134
},
{
"epoch": 0.0076390951916139715,
"grad_norm": 335605268480.0,
"learning_rate": 2.6202630348146324e-05,
"loss": 7.7765,
"step": 135
},
{
"epoch": 0.007695681081922223,
"grad_norm": 4555636998144.0,
"learning_rate": 2.547882480847461e-05,
"loss": 7.8293,
"step": 136
},
{
"epoch": 0.007752266972230474,
"grad_norm": 5324872704.0,
"learning_rate": 2.476172311325783e-05,
"loss": 7.8063,
"step": 137
},
{
"epoch": 0.007808852862538726,
"grad_norm": 660998848512.0,
"learning_rate": 2.405152131093926e-05,
"loss": 7.2999,
"step": 138
},
{
"epoch": 0.007865438752846978,
"grad_norm": 12251345920.0,
"learning_rate": 2.3348413563600325e-05,
"loss": 7.8885,
"step": 139
},
{
"epoch": 0.007922024643155228,
"grad_norm": 124330786816.0,
"learning_rate": 2.2652592093878666e-05,
"loss": 7.4189,
"step": 140
},
{
"epoch": 0.00797861053346348,
"grad_norm": 23768709120.0,
"learning_rate": 2.196424713241637e-05,
"loss": 7.4094,
"step": 141
},
{
"epoch": 0.008035196423771733,
"grad_norm": 22947604480.0,
"learning_rate": 2.128356686585282e-05,
"loss": 7.9335,
"step": 142
},
{
"epoch": 0.008091782314079985,
"grad_norm": 514664960.0,
"learning_rate": 2.061073738537635e-05,
"loss": 7.8619,
"step": 143
},
{
"epoch": 0.008148368204388235,
"grad_norm": 2989006336.0,
"learning_rate": 1.9945942635848748e-05,
"loss": 7.6817,
"step": 144
},
{
"epoch": 0.008204954094696487,
"grad_norm": 11938341888.0,
"learning_rate": 1.928936436551661e-05,
"loss": 7.8298,
"step": 145
},
{
"epoch": 0.00826153998500474,
"grad_norm": 921827520.0,
"learning_rate": 1.8641182076323148e-05,
"loss": 7.6586,
"step": 146
},
{
"epoch": 0.008318125875312991,
"grad_norm": 33065818112.0,
"learning_rate": 1.800157297483417e-05,
"loss": 7.5457,
"step": 147
},
{
"epoch": 0.008374711765621242,
"grad_norm": 696290112.0,
"learning_rate": 1.7370711923791567e-05,
"loss": 7.8407,
"step": 148
},
{
"epoch": 0.008431297655929494,
"grad_norm": 128925097984.0,
"learning_rate": 1.6748771394307585e-05,
"loss": 7.994,
"step": 149
},
{
"epoch": 0.008487883546237746,
"grad_norm": 239068416.0,
"learning_rate": 1.6135921418712956e-05,
"loss": 7.6732,
"step": 150
},
{
"epoch": 0.008487883546237746,
"eval_loss": 7.687556743621826,
"eval_runtime": 1433.1393,
"eval_samples_per_second": 20.768,
"eval_steps_per_second": 5.192,
"step": 150
},
{
"epoch": 0.008544469436545998,
"grad_norm": 39819799429120.0,
"learning_rate": 1.553232954407171e-05,
"loss": 7.6837,
"step": 151
},
{
"epoch": 0.008601055326854248,
"grad_norm": 130047696896.0,
"learning_rate": 1.4938160786375572e-05,
"loss": 7.5437,
"step": 152
},
{
"epoch": 0.0086576412171625,
"grad_norm": 354326740992.0,
"learning_rate": 1.435357758543015e-05,
"loss": 7.6707,
"step": 153
},
{
"epoch": 0.008714227107470753,
"grad_norm": 35565076480.0,
"learning_rate": 1.3778739760445552e-05,
"loss": 7.7273,
"step": 154
},
{
"epoch": 0.008770812997779005,
"grad_norm": 88415977472.0,
"learning_rate": 1.3213804466343421e-05,
"loss": 7.4978,
"step": 155
},
{
"epoch": 0.008827398888087255,
"grad_norm": 862532927488.0,
"learning_rate": 1.2658926150792322e-05,
"loss": 7.649,
"step": 156
},
{
"epoch": 0.008883984778395507,
"grad_norm": 34882568192.0,
"learning_rate": 1.2114256511983274e-05,
"loss": 7.5036,
"step": 157
},
{
"epoch": 0.00894057066870376,
"grad_norm": 8665818112.0,
"learning_rate": 1.157994445715706e-05,
"loss": 7.3557,
"step": 158
},
{
"epoch": 0.00899715655901201,
"grad_norm": 4064379404288.0,
"learning_rate": 1.1056136061894384e-05,
"loss": 7.6952,
"step": 159
},
{
"epoch": 0.009053742449320262,
"grad_norm": 4287943424.0,
"learning_rate": 1.0542974530180327e-05,
"loss": 7.6186,
"step": 160
},
{
"epoch": 0.009110328339628514,
"grad_norm": 20435408896.0,
"learning_rate": 1.0040600155253765e-05,
"loss": 7.4872,
"step": 161
},
{
"epoch": 0.009166914229936766,
"grad_norm": 146656444416.0,
"learning_rate": 9.549150281252633e-06,
"loss": 7.832,
"step": 162
},
{
"epoch": 0.009223500120245016,
"grad_norm": 1097668689920.0,
"learning_rate": 9.068759265665384e-06,
"loss": 7.7164,
"step": 163
},
{
"epoch": 0.009280086010553268,
"grad_norm": 87433953280.0,
"learning_rate": 8.599558442598998e-06,
"loss": 7.5766,
"step": 164
},
{
"epoch": 0.00933667190086152,
"grad_norm": 10633688064.0,
"learning_rate": 8.141676086873572e-06,
"loss": 7.7424,
"step": 165
},
{
"epoch": 0.009393257791169772,
"grad_norm": 43142524928.0,
"learning_rate": 7.695237378953223e-06,
"loss": 7.5633,
"step": 166
},
{
"epoch": 0.009449843681478023,
"grad_norm": 26160885760.0,
"learning_rate": 7.260364370723044e-06,
"loss": 7.2769,
"step": 167
},
{
"epoch": 0.009506429571786275,
"grad_norm": 108625256448.0,
"learning_rate": 6.837175952121306e-06,
"loss": 7.5497,
"step": 168
},
{
"epoch": 0.009563015462094527,
"grad_norm": 48824982700032.0,
"learning_rate": 6.425787818636131e-06,
"loss": 7.613,
"step": 169
},
{
"epoch": 0.009619601352402779,
"grad_norm": 37375750144.0,
"learning_rate": 6.026312439675552e-06,
"loss": 8.0403,
"step": 170
},
{
"epoch": 0.00967618724271103,
"grad_norm": 132175814656.0,
"learning_rate": 5.6388590278194096e-06,
"loss": 7.6471,
"step": 171
},
{
"epoch": 0.009732773133019281,
"grad_norm": 6472570880.0,
"learning_rate": 5.263533508961827e-06,
"loss": 8.2212,
"step": 172
},
{
"epoch": 0.009789359023327534,
"grad_norm": 255245168.0,
"learning_rate": 4.900438493352055e-06,
"loss": 7.65,
"step": 173
},
{
"epoch": 0.009845944913635786,
"grad_norm": 7248849408.0,
"learning_rate": 4.549673247541875e-06,
"loss": 7.7135,
"step": 174
},
{
"epoch": 0.009902530803944036,
"grad_norm": 818868992.0,
"learning_rate": 4.2113336672471245e-06,
"loss": 7.6893,
"step": 175
},
{
"epoch": 0.009959116694252288,
"grad_norm": 3411516672.0,
"learning_rate": 3.885512251130763e-06,
"loss": 7.4528,
"step": 176
},
{
"epoch": 0.01001570258456054,
"grad_norm": 87788388352.0,
"learning_rate": 3.5722980755146517e-06,
"loss": 7.3741,
"step": 177
},
{
"epoch": 0.010072288474868792,
"grad_norm": 6130722304.0,
"learning_rate": 3.271776770026963e-06,
"loss": 7.7795,
"step": 178
},
{
"epoch": 0.010128874365177043,
"grad_norm": 15577243648.0,
"learning_rate": 2.9840304941919415e-06,
"loss": 7.4474,
"step": 179
},
{
"epoch": 0.010185460255485295,
"grad_norm": 3313134592.0,
"learning_rate": 2.7091379149682685e-06,
"loss": 7.5848,
"step": 180
},
{
"epoch": 0.010242046145793547,
"grad_norm": 1393064192.0,
"learning_rate": 2.4471741852423237e-06,
"loss": 7.5014,
"step": 181
},
{
"epoch": 0.010298632036101797,
"grad_norm": 82932793344.0,
"learning_rate": 2.1982109232821178e-06,
"loss": 7.8914,
"step": 182
},
{
"epoch": 0.01035521792641005,
"grad_norm": 2305009152.0,
"learning_rate": 1.962316193157593e-06,
"loss": 7.5082,
"step": 183
},
{
"epoch": 0.010411803816718301,
"grad_norm": 56451358720.0,
"learning_rate": 1.7395544861325718e-06,
"loss": 7.5595,
"step": 184
},
{
"epoch": 0.010468389707026553,
"grad_norm": 114768248.0,
"learning_rate": 1.5299867030334814e-06,
"loss": 7.7866,
"step": 185
},
{
"epoch": 0.010524975597334804,
"grad_norm": 35701469184.0,
"learning_rate": 1.333670137599713e-06,
"loss": 7.7829,
"step": 186
},
{
"epoch": 0.010581561487643056,
"grad_norm": 19532906496.0,
"learning_rate": 1.1506584608200367e-06,
"loss": 7.7773,
"step": 187
},
{
"epoch": 0.010638147377951308,
"grad_norm": 5800582656.0,
"learning_rate": 9.810017062595322e-07,
"loss": 7.7188,
"step": 188
},
{
"epoch": 0.01069473326825956,
"grad_norm": 25474985984.0,
"learning_rate": 8.247462563808817e-07,
"loss": 7.2083,
"step": 189
},
{
"epoch": 0.01075131915856781,
"grad_norm": 562073280.0,
"learning_rate": 6.819348298638839e-07,
"loss": 7.8159,
"step": 190
},
{
"epoch": 0.010807905048876063,
"grad_norm": 2541558784.0,
"learning_rate": 5.526064699265753e-07,
"loss": 7.4243,
"step": 191
},
{
"epoch": 0.010864490939184315,
"grad_norm": 873800448.0,
"learning_rate": 4.367965336512403e-07,
"loss": 7.4821,
"step": 192
},
{
"epoch": 0.010921076829492567,
"grad_norm": 6237185024.0,
"learning_rate": 3.3453668231809286e-07,
"loss": 7.859,
"step": 193
},
{
"epoch": 0.010977662719800817,
"grad_norm": 569314240.0,
"learning_rate": 2.458548727494292e-07,
"loss": 7.8358,
"step": 194
},
{
"epoch": 0.01103424861010907,
"grad_norm": 1558759168.0,
"learning_rate": 1.7077534966650766e-07,
"loss": 7.3724,
"step": 195
},
{
"epoch": 0.011090834500417321,
"grad_norm": 16786523136.0,
"learning_rate": 1.0931863906127327e-07,
"loss": 7.8613,
"step": 196
},
{
"epoch": 0.011147420390725573,
"grad_norm": 4647081472.0,
"learning_rate": 6.150154258476315e-08,
"loss": 7.4755,
"step": 197
},
{
"epoch": 0.011204006281033824,
"grad_norm": 1446384768.0,
"learning_rate": 2.7337132953697554e-08,
"loss": 7.7445,
"step": 198
},
{
"epoch": 0.011260592171342076,
"grad_norm": 141445984.0,
"learning_rate": 6.834750376549792e-09,
"loss": 7.6598,
"step": 199
},
{
"epoch": 0.011317178061650328,
"grad_norm": 1335355776.0,
"learning_rate": 0.0,
"loss": 7.8035,
"step": 200
},
{
"epoch": 0.011317178061650328,
"eval_loss": 7.685921669006348,
"eval_runtime": 1433.5662,
"eval_samples_per_second": 20.762,
"eval_steps_per_second": 5.191,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.510781724131328e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}