{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 20.0,
"eval_steps": 500,
"global_step": 310860,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06433764395547835,
"grad_norm": 1.052815556526184,
"learning_rate": 3.125e-05,
"loss": 6.1013,
"step": 1000
},
{
"epoch": 0.1286752879109567,
"grad_norm": 1.0303527116775513,
"learning_rate": 6.25e-05,
"loss": 4.5097,
"step": 2000
},
{
"epoch": 0.19301293186643506,
"grad_norm": 0.9319628477096558,
"learning_rate": 9.375e-05,
"loss": 4.1714,
"step": 3000
},
{
"epoch": 0.2573505758219134,
"grad_norm": 0.8786389827728271,
"learning_rate": 0.000125,
"loss": 3.9371,
"step": 4000
},
{
"epoch": 0.32168821977739176,
"grad_norm": 0.7859598398208618,
"learning_rate": 0.00015625,
"loss": 3.7854,
"step": 5000
},
{
"epoch": 0.3860258637328701,
"grad_norm": 0.7061841487884521,
"learning_rate": 0.0001875,
"loss": 3.6647,
"step": 6000
},
{
"epoch": 0.45036350768834843,
"grad_norm": 0.6750425100326538,
"learning_rate": 0.00021875,
"loss": 3.576,
"step": 7000
},
{
"epoch": 0.5147011516438268,
"grad_norm": 0.6492879986763,
"learning_rate": 0.00025,
"loss": 3.5131,
"step": 8000
},
{
"epoch": 0.5790387955993052,
"grad_norm": 0.639130175113678,
"learning_rate": 0.00028125000000000003,
"loss": 3.4541,
"step": 9000
},
{
"epoch": 0.6433764395547835,
"grad_norm": 0.5938445329666138,
"learning_rate": 0.00031246875000000003,
"loss": 3.4065,
"step": 10000
},
{
"epoch": 0.7077140835102619,
"grad_norm": 0.5456812381744385,
"learning_rate": 0.00034371875,
"loss": 3.3722,
"step": 11000
},
{
"epoch": 0.7720517274657402,
"grad_norm": 0.5338535308837891,
"learning_rate": 0.00037496875000000003,
"loss": 3.3338,
"step": 12000
},
{
"epoch": 0.8363893714212186,
"grad_norm": 0.5021286606788635,
"learning_rate": 0.0004061875,
"loss": 3.3068,
"step": 13000
},
{
"epoch": 0.9007270153766969,
"grad_norm": 0.45191365480422974,
"learning_rate": 0.0004374375,
"loss": 3.2859,
"step": 14000
},
{
"epoch": 0.9650646593321752,
"grad_norm": 0.40418481826782227,
"learning_rate": 0.0004686875,
"loss": 3.26,
"step": 15000
},
{
"epoch": 1.0,
"eval_accuracy": 0.38307077369468,
"eval_loss": 3.3135979175567627,
"eval_runtime": 113.2217,
"eval_samples_per_second": 463.162,
"eval_steps_per_second": 7.242,
"step": 15543
},
{
"epoch": 1.0294023032876536,
"grad_norm": 0.39907780289649963,
"learning_rate": 0.0004999375,
"loss": 3.2167,
"step": 16000
},
{
"epoch": 1.093739947243132,
"grad_norm": 0.380867600440979,
"learning_rate": 0.00053115625,
"loss": 3.1969,
"step": 17000
},
{
"epoch": 1.1580775911986103,
"grad_norm": 0.3618770241737366,
"learning_rate": 0.0005623749999999999,
"loss": 3.1923,
"step": 18000
},
{
"epoch": 1.2224152351540887,
"grad_norm": 0.3256314694881439,
"learning_rate": 0.000593625,
"loss": 3.1767,
"step": 19000
},
{
"epoch": 1.286752879109567,
"grad_norm": 0.31108617782592773,
"learning_rate": 0.000624875,
"loss": 3.1643,
"step": 20000
},
{
"epoch": 1.3510905230650454,
"grad_norm": 0.30397939682006836,
"learning_rate": 0.000656125,
"loss": 3.154,
"step": 21000
},
{
"epoch": 1.4154281670205238,
"grad_norm": 0.2824731171131134,
"learning_rate": 0.00068734375,
"loss": 3.1451,
"step": 22000
},
{
"epoch": 1.4797658109760021,
"grad_norm": 0.28574618697166443,
"learning_rate": 0.00071859375,
"loss": 3.1347,
"step": 23000
},
{
"epoch": 1.5441034549314803,
"grad_norm": 0.26015007495880127,
"learning_rate": 0.0007498125,
"loss": 3.1263,
"step": 24000
},
{
"epoch": 1.6084410988869586,
"grad_norm": 0.2511335611343384,
"learning_rate": 0.0007810312499999999,
"loss": 3.1217,
"step": 25000
},
{
"epoch": 1.672778742842437,
"grad_norm": 0.2407851368188858,
"learning_rate": 0.00081228125,
"loss": 3.0962,
"step": 26000
},
{
"epoch": 1.7371163867979154,
"grad_norm": 0.23978936672210693,
"learning_rate": 0.0008435000000000001,
"loss": 3.1022,
"step": 27000
},
{
"epoch": 1.8014540307533937,
"grad_norm": 0.20840179920196533,
"learning_rate": 0.0008747500000000001,
"loss": 3.0929,
"step": 28000
},
{
"epoch": 1.865791674708872,
"grad_norm": 0.2035219669342041,
"learning_rate": 0.000906,
"loss": 3.0824,
"step": 29000
},
{
"epoch": 1.9301293186643504,
"grad_norm": 0.19087807834148407,
"learning_rate": 0.00093721875,
"loss": 3.0732,
"step": 30000
},
{
"epoch": 1.9944669626198288,
"grad_norm": 0.19520416855812073,
"learning_rate": 0.00096846875,
"loss": 3.0629,
"step": 31000
},
{
"epoch": 2.0,
"eval_accuracy": 0.399560057432584,
"eval_loss": 3.1529746055603027,
"eval_runtime": 113.8443,
"eval_samples_per_second": 460.629,
"eval_steps_per_second": 7.203,
"step": 31086
},
{
"epoch": 2.058804606575307,
"grad_norm": 0.205551877617836,
"learning_rate": 0.00099971875,
"loss": 3.0167,
"step": 32000
},
{
"epoch": 2.1231422505307855,
"grad_norm": 0.18326979875564575,
"learning_rate": 0.000996449831456645,
"loss": 3.0168,
"step": 33000
},
{
"epoch": 2.187479894486264,
"grad_norm": 0.18665415048599243,
"learning_rate": 0.000992863802624973,
"loss": 3.0026,
"step": 34000
},
{
"epoch": 2.2518175384417423,
"grad_norm": 0.186910018324852,
"learning_rate": 0.0009892813598221329,
"loss": 3.0041,
"step": 35000
},
{
"epoch": 2.3161551823972206,
"grad_norm": 0.17289380729198456,
"learning_rate": 0.0009856953309904611,
"loss": 2.9883,
"step": 36000
},
{
"epoch": 2.380492826352699,
"grad_norm": 0.18370352685451508,
"learning_rate": 0.000982112888187621,
"loss": 2.9854,
"step": 37000
},
{
"epoch": 2.4448304703081774,
"grad_norm": 0.17503343522548676,
"learning_rate": 0.0009785268593559492,
"loss": 2.9813,
"step": 38000
},
{
"epoch": 2.5091681142636557,
"grad_norm": 0.19140906631946564,
"learning_rate": 0.0009749408305242774,
"loss": 2.9785,
"step": 39000
},
{
"epoch": 2.573505758219134,
"grad_norm": 0.19327034056186676,
"learning_rate": 0.0009713583877214373,
"loss": 2.9781,
"step": 40000
},
{
"epoch": 2.6378434021746124,
"grad_norm": 0.17439500987529755,
"learning_rate": 0.0009677723588897654,
"loss": 2.964,
"step": 41000
},
{
"epoch": 2.702181046130091,
"grad_norm": 0.16128753125667572,
"learning_rate": 0.0009641899160869255,
"loss": 2.9543,
"step": 42000
},
{
"epoch": 2.766518690085569,
"grad_norm": 0.16443367302417755,
"learning_rate": 0.0009606038872552536,
"loss": 2.9606,
"step": 43000
},
{
"epoch": 2.8308563340410475,
"grad_norm": 0.17449279129505157,
"learning_rate": 0.0009570178584235818,
"loss": 2.9465,
"step": 44000
},
{
"epoch": 2.895193977996526,
"grad_norm": 0.17141327261924744,
"learning_rate": 0.00095343182959191,
"loss": 2.9433,
"step": 45000
},
{
"epoch": 2.9595316219520043,
"grad_norm": 0.15691405534744263,
"learning_rate": 0.0009498493867890699,
"loss": 2.9355,
"step": 46000
},
{
"epoch": 3.0,
"eval_accuracy": 0.4113637995243864,
"eval_loss": 3.042464017868042,
"eval_runtime": 113.33,
"eval_samples_per_second": 462.719,
"eval_steps_per_second": 7.236,
"step": 46629
},
{
"epoch": 3.0238692659074826,
"grad_norm": 0.1875714659690857,
"learning_rate": 0.000946263357957398,
"loss": 2.9059,
"step": 47000
},
{
"epoch": 3.088206909862961,
"grad_norm": 0.18929240107536316,
"learning_rate": 0.0009426773291257262,
"loss": 2.8621,
"step": 48000
},
{
"epoch": 3.1525445538184393,
"grad_norm": 0.169860377907753,
"learning_rate": 0.0009390948863228861,
"loss": 2.8641,
"step": 49000
},
{
"epoch": 3.2168821977739177,
"grad_norm": 0.18511784076690674,
"learning_rate": 0.0009355088574912143,
"loss": 2.867,
"step": 50000
},
{
"epoch": 3.2812198417293956,
"grad_norm": 0.2011699676513672,
"learning_rate": 0.0009319228286595425,
"loss": 2.864,
"step": 51000
},
{
"epoch": 3.345557485684874,
"grad_norm": 0.17710469663143158,
"learning_rate": 0.0009283403858567023,
"loss": 2.8627,
"step": 52000
},
{
"epoch": 3.4098951296403524,
"grad_norm": 0.176873579621315,
"learning_rate": 0.0009247543570250305,
"loss": 2.8658,
"step": 53000
},
{
"epoch": 3.4742327735958307,
"grad_norm": 0.16512398421764374,
"learning_rate": 0.0009211683281933588,
"loss": 2.8703,
"step": 54000
},
{
"epoch": 3.538570417551309,
"grad_norm": 0.15719538927078247,
"learning_rate": 0.0009175858853905186,
"loss": 2.8667,
"step": 55000
},
{
"epoch": 3.6029080615067874,
"grad_norm": 0.15996688604354858,
"learning_rate": 0.0009139998565588467,
"loss": 2.8633,
"step": 56000
},
{
"epoch": 3.667245705462266,
"grad_norm": 0.16842438280582428,
"learning_rate": 0.0009104174137560066,
"loss": 2.8584,
"step": 57000
},
{
"epoch": 3.731583349417744,
"grad_norm": 0.16790573298931122,
"learning_rate": 0.0009068313849243348,
"loss": 2.8594,
"step": 58000
},
{
"epoch": 3.7959209933732225,
"grad_norm": 0.2214391678571701,
"learning_rate": 0.000903245356092663,
"loss": 2.8556,
"step": 59000
},
{
"epoch": 3.860258637328701,
"grad_norm": 0.16262733936309814,
"learning_rate": 0.0008996593272609912,
"loss": 2.86,
"step": 60000
},
{
"epoch": 3.9245962812841793,
"grad_norm": 0.17010310292243958,
"learning_rate": 0.000896076884458151,
"loss": 2.8557,
"step": 61000
},
{
"epoch": 3.9889339252396576,
"grad_norm": 0.1839476078748703,
"learning_rate": 0.0008924908556264793,
"loss": 2.8499,
"step": 62000
},
{
"epoch": 4.0,
"eval_accuracy": 0.41723433690791345,
"eval_loss": 2.993454694747925,
"eval_runtime": 114.1434,
"eval_samples_per_second": 459.422,
"eval_steps_per_second": 7.184,
"step": 62172
},
{
"epoch": 4.053271569195136,
"grad_norm": 0.1961131989955902,
"learning_rate": 0.0008889048267948075,
"loss": 2.7827,
"step": 63000
},
{
"epoch": 4.117609213150614,
"grad_norm": 0.17030374705791473,
"learning_rate": 0.0008853187979631356,
"loss": 2.7733,
"step": 64000
},
{
"epoch": 4.181946857106093,
"grad_norm": 0.1887962967157364,
"learning_rate": 0.0008817363551602955,
"loss": 2.7735,
"step": 65000
},
{
"epoch": 4.246284501061571,
"grad_norm": 0.20483681559562683,
"learning_rate": 0.0008781539123574553,
"loss": 2.7816,
"step": 66000
},
{
"epoch": 4.310622145017049,
"grad_norm": 0.19994668662548065,
"learning_rate": 0.0008745678835257835,
"loss": 2.7895,
"step": 67000
},
{
"epoch": 4.374959788972528,
"grad_norm": 0.17334817349910736,
"learning_rate": 0.0008709818546941118,
"loss": 2.7845,
"step": 68000
},
{
"epoch": 4.439297432928006,
"grad_norm": 0.2339029610157013,
"learning_rate": 0.0008673994118912716,
"loss": 2.7915,
"step": 69000
},
{
"epoch": 4.5036350768834845,
"grad_norm": 0.17018085718154907,
"learning_rate": 0.0008638133830595997,
"loss": 2.7879,
"step": 70000
},
{
"epoch": 4.567972720838963,
"grad_norm": 0.23095539212226868,
"learning_rate": 0.000860227354227928,
"loss": 2.7954,
"step": 71000
},
{
"epoch": 4.632310364794441,
"grad_norm": 0.2322332113981247,
"learning_rate": 0.0008566413253962562,
"loss": 2.7897,
"step": 72000
},
{
"epoch": 4.69664800874992,
"grad_norm": 0.19111447036266327,
"learning_rate": 0.0008530588825934161,
"loss": 2.799,
"step": 73000
},
{
"epoch": 4.760985652705398,
"grad_norm": 0.17069219052791595,
"learning_rate": 0.0008494728537617442,
"loss": 2.7923,
"step": 74000
},
{
"epoch": 4.825323296660876,
"grad_norm": 0.20082932710647583,
"learning_rate": 0.0008458904109589041,
"loss": 2.7956,
"step": 75000
},
{
"epoch": 4.889660940616355,
"grad_norm": 0.20119306445121765,
"learning_rate": 0.000842307968156064,
"loss": 2.7873,
"step": 76000
},
{
"epoch": 4.953998584571833,
"grad_norm": 0.16935457289218903,
"learning_rate": 0.0008387219393243922,
"loss": 2.7951,
"step": 77000
},
{
"epoch": 5.0,
"eval_accuracy": 0.4206606990622336,
"eval_loss": 2.9680817127227783,
"eval_runtime": 113.8277,
"eval_samples_per_second": 460.696,
"eval_steps_per_second": 7.204,
"step": 77715
},
{
"epoch": 5.018336228527311,
"grad_norm": 0.19914424419403076,
"learning_rate": 0.0008351359104927204,
"loss": 2.7629,
"step": 78000
},
{
"epoch": 5.08267387248279,
"grad_norm": 0.19470812380313873,
"learning_rate": 0.0008315534676898803,
"loss": 2.7089,
"step": 79000
},
{
"epoch": 5.147011516438268,
"grad_norm": 0.2205093652009964,
"learning_rate": 0.0008279674388582084,
"loss": 2.7133,
"step": 80000
},
{
"epoch": 5.2113491603937465,
"grad_norm": 0.2074008285999298,
"learning_rate": 0.0008243814100265367,
"loss": 2.723,
"step": 81000
},
{
"epoch": 5.275686804349225,
"grad_norm": 0.3117210566997528,
"learning_rate": 0.0008207989672236965,
"loss": 2.7205,
"step": 82000
},
{
"epoch": 5.340024448304703,
"grad_norm": 0.22853530943393707,
"learning_rate": 0.0008172129383920247,
"loss": 2.7304,
"step": 83000
},
{
"epoch": 5.404362092260182,
"grad_norm": 0.1936911940574646,
"learning_rate": 0.0008136304955891846,
"loss": 2.7259,
"step": 84000
},
{
"epoch": 5.46869973621566,
"grad_norm": 0.19292935729026794,
"learning_rate": 0.0008100444667575127,
"loss": 2.7283,
"step": 85000
},
{
"epoch": 5.533037380171138,
"grad_norm": 0.19306513667106628,
"learning_rate": 0.0008064584379258409,
"loss": 2.7349,
"step": 86000
},
{
"epoch": 5.597375024126617,
"grad_norm": 0.1781722456216812,
"learning_rate": 0.0008028759951230008,
"loss": 2.7367,
"step": 87000
},
{
"epoch": 5.661712668082095,
"grad_norm": 0.19567584991455078,
"learning_rate": 0.000799289966291329,
"loss": 2.7386,
"step": 88000
},
{
"epoch": 5.726050312037573,
"grad_norm": 0.17735910415649414,
"learning_rate": 0.0007957075234884889,
"loss": 2.7434,
"step": 89000
},
{
"epoch": 5.790387955993052,
"grad_norm": 0.18065251410007477,
"learning_rate": 0.000792121494656817,
"loss": 2.7402,
"step": 90000
},
{
"epoch": 5.85472559994853,
"grad_norm": 0.17205560207366943,
"learning_rate": 0.0007885354658251452,
"loss": 2.7437,
"step": 91000
},
{
"epoch": 5.9190632439040085,
"grad_norm": 0.2087228149175644,
"learning_rate": 0.0007849494369934735,
"loss": 2.7442,
"step": 92000
},
{
"epoch": 5.983400887859487,
"grad_norm": 0.21673254668712616,
"learning_rate": 0.0007813669941906333,
"loss": 2.7489,
"step": 93000
},
{
"epoch": 6.0,
"eval_accuracy": 0.42259418794214865,
"eval_loss": 2.9527182579040527,
"eval_runtime": 113.5442,
"eval_samples_per_second": 461.847,
"eval_steps_per_second": 7.222,
"step": 93258
},
{
"epoch": 6.047738531814965,
"grad_norm": 0.22024141252040863,
"learning_rate": 0.0007777809653589614,
"loss": 2.6731,
"step": 94000
},
{
"epoch": 6.112076175770444,
"grad_norm": 0.20330588519573212,
"learning_rate": 0.0007741949365272897,
"loss": 2.663,
"step": 95000
},
{
"epoch": 6.176413819725922,
"grad_norm": 0.2359427958726883,
"learning_rate": 0.0007706124937244495,
"loss": 2.666,
"step": 96000
},
{
"epoch": 6.2407514636814,
"grad_norm": 0.18912553787231445,
"learning_rate": 0.0007670264648927777,
"loss": 2.6751,
"step": 97000
},
{
"epoch": 6.305089107636879,
"grad_norm": 0.18407639861106873,
"learning_rate": 0.0007634440220899376,
"loss": 2.6759,
"step": 98000
},
{
"epoch": 6.369426751592357,
"grad_norm": 0.1985590159893036,
"learning_rate": 0.0007598579932582657,
"loss": 2.6759,
"step": 99000
},
{
"epoch": 6.433764395547835,
"grad_norm": 0.18936724960803986,
"learning_rate": 0.0007562719644265939,
"loss": 2.685,
"step": 100000
},
{
"epoch": 6.498102039503314,
"grad_norm": 0.23301662504673004,
"learning_rate": 0.0007526895216237539,
"loss": 2.6839,
"step": 101000
},
{
"epoch": 6.562439683458791,
"grad_norm": 0.19822722673416138,
"learning_rate": 0.000749103492792082,
"loss": 2.6893,
"step": 102000
},
{
"epoch": 6.6267773274142705,
"grad_norm": 0.18612362444400787,
"learning_rate": 0.0007455174639604102,
"loss": 2.6973,
"step": 103000
},
{
"epoch": 6.691114971369748,
"grad_norm": 0.20918478071689606,
"learning_rate": 0.0007419314351287384,
"loss": 2.691,
"step": 104000
},
{
"epoch": 6.755452615325227,
"grad_norm": 0.18845367431640625,
"learning_rate": 0.0007383489923258983,
"loss": 2.7032,
"step": 105000
},
{
"epoch": 6.819790259280705,
"grad_norm": 0.19247417151927948,
"learning_rate": 0.0007347665495230582,
"loss": 2.6998,
"step": 106000
},
{
"epoch": 6.884127903236184,
"grad_norm": 0.18703711032867432,
"learning_rate": 0.0007311805206913864,
"loss": 2.6992,
"step": 107000
},
{
"epoch": 6.948465547191661,
"grad_norm": 0.21239759027957916,
"learning_rate": 0.0007275944918597146,
"loss": 2.7031,
"step": 108000
},
{
"epoch": 7.0,
"eval_accuracy": 0.42415787978044,
"eval_loss": 2.9438040256500244,
"eval_runtime": 113.2221,
"eval_samples_per_second": 463.16,
"eval_steps_per_second": 7.242,
"step": 108801
},
{
"epoch": 7.01280319114714,
"grad_norm": 0.21643811464309692,
"learning_rate": 0.0007240084630280428,
"loss": 2.6826,
"step": 109000
},
{
"epoch": 7.077140835102618,
"grad_norm": 0.21807773411273956,
"learning_rate": 0.0007204260202252026,
"loss": 2.6153,
"step": 110000
},
{
"epoch": 7.1414784790580965,
"grad_norm": 0.23160989582538605,
"learning_rate": 0.0007168399913935309,
"loss": 2.6234,
"step": 111000
},
{
"epoch": 7.205816123013575,
"grad_norm": 0.25499585270881653,
"learning_rate": 0.0007132539625618591,
"loss": 2.6296,
"step": 112000
},
{
"epoch": 7.270153766969053,
"grad_norm": 0.20166288316249847,
"learning_rate": 0.0007096679337301872,
"loss": 2.6332,
"step": 113000
},
{
"epoch": 7.334491410924532,
"grad_norm": 0.23443199694156647,
"learning_rate": 0.0007060854909273471,
"loss": 2.639,
"step": 114000
},
{
"epoch": 7.39882905488001,
"grad_norm": 0.2512323260307312,
"learning_rate": 0.0007024994620956753,
"loss": 2.6427,
"step": 115000
},
{
"epoch": 7.463166698835488,
"grad_norm": 0.21170075237751007,
"learning_rate": 0.0006989134332640035,
"loss": 2.6516,
"step": 116000
},
{
"epoch": 7.527504342790967,
"grad_norm": 0.216237410902977,
"learning_rate": 0.0006953274044323316,
"loss": 2.6487,
"step": 117000
},
{
"epoch": 7.591841986746445,
"grad_norm": 0.21823635697364807,
"learning_rate": 0.0006917449616294915,
"loss": 2.6499,
"step": 118000
},
{
"epoch": 7.656179630701923,
"grad_norm": 0.23018254339694977,
"learning_rate": 0.0006881625188266513,
"loss": 2.6536,
"step": 119000
},
{
"epoch": 7.720517274657402,
"grad_norm": 0.26946449279785156,
"learning_rate": 0.0006845764899949796,
"loss": 2.6617,
"step": 120000
},
{
"epoch": 7.78485491861288,
"grad_norm": 0.19592489302158356,
"learning_rate": 0.0006809940471921394,
"loss": 2.6571,
"step": 121000
},
{
"epoch": 7.8491925625683585,
"grad_norm": 0.1997816413640976,
"learning_rate": 0.0006774080183604677,
"loss": 2.6551,
"step": 122000
},
{
"epoch": 7.913530206523837,
"grad_norm": 0.21142259240150452,
"learning_rate": 0.0006738219895287958,
"loss": 2.6638,
"step": 123000
},
{
"epoch": 7.977867850479315,
"grad_norm": 0.21287241578102112,
"learning_rate": 0.000670235960697124,
"loss": 2.6642,
"step": 124000
},
{
"epoch": 8.0,
"eval_accuracy": 0.42513700064312526,
"eval_loss": 2.942474126815796,
"eval_runtime": 112.6722,
"eval_samples_per_second": 465.421,
"eval_steps_per_second": 7.278,
"step": 124344
},
{
"epoch": 8.042205494434794,
"grad_norm": 0.20446690917015076,
"learning_rate": 0.0006666535178942839,
"loss": 2.6008,
"step": 125000
},
{
"epoch": 8.106543138390272,
"grad_norm": 0.21123293042182922,
"learning_rate": 0.0006630710750914437,
"loss": 2.5828,
"step": 126000
},
{
"epoch": 8.170880782345751,
"grad_norm": 0.22513774037361145,
"learning_rate": 0.0006594850462597719,
"loss": 2.5876,
"step": 127000
},
{
"epoch": 8.235218426301229,
"grad_norm": 0.2192804217338562,
"learning_rate": 0.0006559026034569318,
"loss": 2.5988,
"step": 128000
},
{
"epoch": 8.299556070256708,
"grad_norm": 0.2216465324163437,
"learning_rate": 0.0006523165746252599,
"loss": 2.6021,
"step": 129000
},
{
"epoch": 8.363893714212185,
"grad_norm": 0.22872082889080048,
"learning_rate": 0.0006487305457935882,
"loss": 2.6028,
"step": 130000
},
{
"epoch": 8.428231358167665,
"grad_norm": 0.2199559062719345,
"learning_rate": 0.0006451481029907481,
"loss": 2.6089,
"step": 131000
},
{
"epoch": 8.492569002123142,
"grad_norm": 0.22230418026447296,
"learning_rate": 0.0006415620741590763,
"loss": 2.6098,
"step": 132000
},
{
"epoch": 8.556906646078621,
"grad_norm": 0.21087001264095306,
"learning_rate": 0.0006379760453274045,
"loss": 2.6168,
"step": 133000
},
{
"epoch": 8.621244290034099,
"grad_norm": 0.2557260990142822,
"learning_rate": 0.0006343936025245643,
"loss": 2.6205,
"step": 134000
},
{
"epoch": 8.685581933989578,
"grad_norm": 0.2882158160209656,
"learning_rate": 0.0006308075736928925,
"loss": 2.6236,
"step": 135000
},
{
"epoch": 8.749919577945056,
"grad_norm": 0.21583206951618195,
"learning_rate": 0.0006272215448612208,
"loss": 2.6192,
"step": 136000
},
{
"epoch": 8.814257221900533,
"grad_norm": 0.2255946397781372,
"learning_rate": 0.0006236355160295489,
"loss": 2.6274,
"step": 137000
},
{
"epoch": 8.878594865856012,
"grad_norm": 0.24251054227352142,
"learning_rate": 0.0006200530732267087,
"loss": 2.6293,
"step": 138000
},
{
"epoch": 8.942932509811492,
"grad_norm": 0.2000625729560852,
"learning_rate": 0.000616467044395037,
"loss": 2.6242,
"step": 139000
},
{
"epoch": 9.0,
"eval_accuracy": 0.42548870043822257,
"eval_loss": 2.944140911102295,
"eval_runtime": 112.9581,
"eval_samples_per_second": 464.243,
"eval_steps_per_second": 7.259,
"step": 139887
},
{
"epoch": 9.007270153766969,
"grad_norm": 0.2379744052886963,
"learning_rate": 0.0006128810155633652,
"loss": 2.6206,
"step": 140000
},
{
"epoch": 9.071607797722447,
"grad_norm": 0.30487170815467834,
"learning_rate": 0.0006092985727605251,
"loss": 2.5374,
"step": 141000
},
{
"epoch": 9.135945441677926,
"grad_norm": 0.2142684906721115,
"learning_rate": 0.0006057125439288532,
"loss": 2.5544,
"step": 142000
},
{
"epoch": 9.200283085633403,
"grad_norm": 0.21194174885749817,
"learning_rate": 0.0006021265150971814,
"loss": 2.56,
"step": 143000
},
{
"epoch": 9.264620729588882,
"grad_norm": 0.2444310337305069,
"learning_rate": 0.0005985440722943413,
"loss": 2.5591,
"step": 144000
},
{
"epoch": 9.32895837354436,
"grad_norm": 0.2151477187871933,
"learning_rate": 0.0005949580434626695,
"loss": 2.5717,
"step": 145000
},
{
"epoch": 9.39329601749984,
"grad_norm": 0.20657171308994293,
"learning_rate": 0.000591379186688661,
"loss": 2.5773,
"step": 146000
},
{
"epoch": 9.457633661455317,
"grad_norm": 0.23294700682163239,
"learning_rate": 0.0005877931578569892,
"loss": 2.5796,
"step": 147000
},
{
"epoch": 9.521971305410796,
"grad_norm": 0.22148990631103516,
"learning_rate": 0.0005842071290253173,
"loss": 2.5794,
"step": 148000
},
{
"epoch": 9.586308949366273,
"grad_norm": 0.23063485324382782,
"learning_rate": 0.0005806211001936455,
"loss": 2.5883,
"step": 149000
},
{
"epoch": 9.650646593321753,
"grad_norm": 0.2963120937347412,
"learning_rate": 0.0005770350713619738,
"loss": 2.591,
"step": 150000
},
{
"epoch": 9.71498423727723,
"grad_norm": 0.22468186914920807,
"learning_rate": 0.0005734526285591336,
"loss": 2.59,
"step": 151000
},
{
"epoch": 9.77932188123271,
"grad_norm": 0.2075439840555191,
"learning_rate": 0.0005698665997274618,
"loss": 2.5922,
"step": 152000
},
{
"epoch": 9.843659525188187,
"grad_norm": 0.26252982020378113,
"learning_rate": 0.00056628057089579,
"loss": 2.5897,
"step": 153000
},
{
"epoch": 9.907997169143666,
"grad_norm": 0.23882432281970978,
"learning_rate": 0.0005626945420641182,
"loss": 2.6002,
"step": 154000
},
{
"epoch": 9.972334813099144,
"grad_norm": 0.23926019668579102,
"learning_rate": 0.0005591156852901097,
"loss": 2.5991,
"step": 155000
},
{
"epoch": 10.0,
"eval_accuracy": 0.42581489956776,
"eval_loss": 2.9460246562957764,
"eval_runtime": 113.0461,
"eval_samples_per_second": 463.882,
"eval_steps_per_second": 7.254,
"step": 155430
},
{
"epoch": 10.036672457054623,
"grad_norm": 0.2680639326572418,
"learning_rate": 0.0005555296564584379,
"loss": 2.5553,
"step": 156000
},
{
"epoch": 10.1010101010101,
"grad_norm": 0.2323896884918213,
"learning_rate": 0.000551943627626766,
"loss": 2.5132,
"step": 157000
},
{
"epoch": 10.16534774496558,
"grad_norm": 0.22753171622753143,
"learning_rate": 0.0005483611848239259,
"loss": 2.522,
"step": 158000
},
{
"epoch": 10.229685388921057,
"grad_norm": 0.27533870935440063,
"learning_rate": 0.000544778742021086,
"loss": 2.531,
"step": 159000
},
{
"epoch": 10.294023032876536,
"grad_norm": 0.23533160984516144,
"learning_rate": 0.0005411927131894141,
"loss": 2.5402,
"step": 160000
},
{
"epoch": 10.358360676832014,
"grad_norm": 0.2618332505226135,
"learning_rate": 0.0005376066843577423,
"loss": 2.5426,
"step": 161000
},
{
"epoch": 10.422698320787493,
"grad_norm": 0.23586297035217285,
"learning_rate": 0.0005340206555260705,
"loss": 2.5444,
"step": 162000
},
{
"epoch": 10.48703596474297,
"grad_norm": 0.24014532566070557,
"learning_rate": 0.0005304382127232304,
"loss": 2.5485,
"step": 163000
},
{
"epoch": 10.55137360869845,
"grad_norm": 0.23357678949832916,
"learning_rate": 0.0005268521838915585,
"loss": 2.553,
"step": 164000
},
{
"epoch": 10.615711252653927,
"grad_norm": 0.22992868721485138,
"learning_rate": 0.0005232661550598867,
"loss": 2.552,
"step": 165000
},
{
"epoch": 10.680048896609406,
"grad_norm": 0.2423822432756424,
"learning_rate": 0.000519680126228215,
"loss": 2.5605,
"step": 166000
},
{
"epoch": 10.744386540564884,
"grad_norm": 0.2182675153017044,
"learning_rate": 0.0005160976834253748,
"loss": 2.5612,
"step": 167000
},
{
"epoch": 10.808724184520363,
"grad_norm": 0.21750390529632568,
"learning_rate": 0.0005125116545937029,
"loss": 2.5638,
"step": 168000
},
{
"epoch": 10.87306182847584,
"grad_norm": 0.22003532946109772,
"learning_rate": 0.0005089292117908628,
"loss": 2.5725,
"step": 169000
},
{
"epoch": 10.93739947243132,
"grad_norm": 0.26017525792121887,
"learning_rate": 0.000505343182959191,
"loss": 2.5731,
"step": 170000
},
{
"epoch": 11.0,
"eval_accuracy": 0.42594449679185176,
"eval_loss": 2.949589490890503,
"eval_runtime": 112.9982,
"eval_samples_per_second": 464.078,
"eval_steps_per_second": 7.257,
"step": 170973
},
{
"epoch": 11.001737116386797,
"grad_norm": 0.25224384665489197,
"learning_rate": 0.0005017607401563509,
"loss": 2.5678,
"step": 171000
},
{
"epoch": 11.066074760342277,
"grad_norm": 0.24872563779354095,
"learning_rate": 0.0004981747113246791,
"loss": 2.4739,
"step": 172000
},
{
"epoch": 11.130412404297754,
"grad_norm": 0.24027609825134277,
"learning_rate": 0.000494592268521839,
"loss": 2.4942,
"step": 173000
},
{
"epoch": 11.194750048253233,
"grad_norm": 0.26473268866539,
"learning_rate": 0.0004910098257189988,
"loss": 2.5027,
"step": 174000
},
{
"epoch": 11.25908769220871,
"grad_norm": 0.2208300679922104,
"learning_rate": 0.00048742379688732697,
"loss": 2.5059,
"step": 175000
},
{
"epoch": 11.32342533616419,
"grad_norm": 0.26882168650627136,
"learning_rate": 0.00048383776805565514,
"loss": 2.5106,
"step": 176000
},
{
"epoch": 11.387762980119668,
"grad_norm": 0.2618742287158966,
"learning_rate": 0.00048025173922398336,
"loss": 2.5153,
"step": 177000
},
{
"epoch": 11.452100624075147,
"grad_norm": 0.2615552842617035,
"learning_rate": 0.0004766657103923115,
"loss": 2.5264,
"step": 178000
},
{
"epoch": 11.516438268030624,
"grad_norm": 0.2439650595188141,
"learning_rate": 0.00047307968156063974,
"loss": 2.5245,
"step": 179000
},
{
"epoch": 11.580775911986104,
"grad_norm": 0.24545331299304962,
"learning_rate": 0.00046949723875779964,
"loss": 2.5316,
"step": 180000
},
{
"epoch": 11.645113555941581,
"grad_norm": 0.2467048317193985,
"learning_rate": 0.0004659183819837912,
"loss": 2.5272,
"step": 181000
},
{
"epoch": 11.70945119989706,
"grad_norm": 0.23762260377407074,
"learning_rate": 0.00046233235315211934,
"loss": 2.5375,
"step": 182000
},
{
"epoch": 11.773788843852538,
"grad_norm": 0.26846447587013245,
"learning_rate": 0.00045874632432044756,
"loss": 2.5393,
"step": 183000
},
{
"epoch": 11.838126487808017,
"grad_norm": 0.2588064968585968,
"learning_rate": 0.0004551602954887757,
"loss": 2.5375,
"step": 184000
},
{
"epoch": 11.902464131763494,
"grad_norm": 0.23051457107067108,
"learning_rate": 0.00045157426665710395,
"loss": 2.5374,
"step": 185000
},
{
"epoch": 11.966801775718974,
"grad_norm": 0.23640917241573334,
"learning_rate": 0.0004479882378254321,
"loss": 2.5387,
"step": 186000
},
{
"epoch": 12.0,
"eval_accuracy": 0.425906432748538,
"eval_loss": 2.9570860862731934,
"eval_runtime": 112.6715,
"eval_samples_per_second": 465.424,
"eval_steps_per_second": 7.278,
"step": 186516
},
{
"epoch": 12.031139419674451,
"grad_norm": 0.3415308892726898,
"learning_rate": 0.00044440579502259196,
"loss": 2.5002,
"step": 187000
},
{
"epoch": 12.09547706362993,
"grad_norm": 0.2506396472454071,
"learning_rate": 0.0004408197661909202,
"loss": 2.4645,
"step": 188000
},
{
"epoch": 12.159814707585408,
"grad_norm": 0.32716119289398193,
"learning_rate": 0.0004372337373592484,
"loss": 2.4685,
"step": 189000
},
{
"epoch": 12.224152351540887,
"grad_norm": 0.2909821569919586,
"learning_rate": 0.00043364770852757657,
"loss": 2.4784,
"step": 190000
},
{
"epoch": 12.288489995496365,
"grad_norm": 0.24312670528888702,
"learning_rate": 0.0004300688517535681,
"loss": 2.4878,
"step": 191000
},
{
"epoch": 12.352827639451844,
"grad_norm": 0.24336646497249603,
"learning_rate": 0.00042648282292189626,
"loss": 2.4843,
"step": 192000
},
{
"epoch": 12.417165283407321,
"grad_norm": 0.26876741647720337,
"learning_rate": 0.0004228967940902245,
"loss": 2.4887,
"step": 193000
},
{
"epoch": 12.4815029273628,
"grad_norm": 0.3456116318702698,
"learning_rate": 0.0004193143512873844,
"loss": 2.4934,
"step": 194000
},
{
"epoch": 12.545840571318278,
"grad_norm": 0.24459022283554077,
"learning_rate": 0.00041573190848454423,
"loss": 2.4995,
"step": 195000
},
{
"epoch": 12.610178215273757,
"grad_norm": 0.236145481467247,
"learning_rate": 0.0004121458796528724,
"loss": 2.5034,
"step": 196000
},
{
"epoch": 12.674515859229235,
"grad_norm": 0.26968687772750854,
"learning_rate": 0.0004085598508212006,
"loss": 2.5049,
"step": 197000
},
{
"epoch": 12.738853503184714,
"grad_norm": 0.24692216515541077,
"learning_rate": 0.00040497382198952884,
"loss": 2.5082,
"step": 198000
},
{
"epoch": 12.803191147140192,
"grad_norm": 0.2576483488082886,
"learning_rate": 0.000401387793157857,
"loss": 2.5093,
"step": 199000
},
{
"epoch": 12.86752879109567,
"grad_norm": 0.24602724611759186,
"learning_rate": 0.00039780535035501685,
"loss": 2.5098,
"step": 200000
},
{
"epoch": 12.931866435051148,
"grad_norm": 0.22797554731369019,
"learning_rate": 0.0003942229075521767,
"loss": 2.5143,
"step": 201000
},
{
"epoch": 12.996204079006628,
"grad_norm": 0.23557151854038239,
"learning_rate": 0.0003906368787205049,
"loss": 2.5106,
"step": 202000
},
{
"epoch": 13.0,
"eval_accuracy": 0.42556520243490226,
"eval_loss": 2.9726738929748535,
"eval_runtime": 112.8886,
"eval_samples_per_second": 464.529,
"eval_steps_per_second": 7.264,
"step": 202059
},
{
"epoch": 13.060541722962105,
"grad_norm": 0.2934684455394745,
"learning_rate": 0.0003870508498888331,
"loss": 2.438,
"step": 203000
},
{
"epoch": 13.124879366917584,
"grad_norm": 0.266162633895874,
"learning_rate": 0.0003834648210571613,
"loss": 2.4391,
"step": 204000
},
{
"epoch": 13.189217010873062,
"grad_norm": 0.2488025277853012,
"learning_rate": 0.00037988237825432115,
"loss": 2.4474,
"step": 205000
},
{
"epoch": 13.253554654828541,
"grad_norm": 0.2948077917098999,
"learning_rate": 0.0003762963494226493,
"loss": 2.4527,
"step": 206000
},
{
"epoch": 13.317892298784018,
"grad_norm": 0.2885175049304962,
"learning_rate": 0.00037271032059097754,
"loss": 2.466,
"step": 207000
},
{
"epoch": 13.382229942739498,
"grad_norm": 0.2502969801425934,
"learning_rate": 0.00036912787778813744,
"loss": 2.4656,
"step": 208000
},
{
"epoch": 13.446567586694975,
"grad_norm": 0.26107537746429443,
"learning_rate": 0.00036554184895646566,
"loss": 2.4676,
"step": 209000
},
{
"epoch": 13.510905230650454,
"grad_norm": 0.2713301181793213,
"learning_rate": 0.0003619594061536255,
"loss": 2.4736,
"step": 210000
},
{
"epoch": 13.575242874605932,
"grad_norm": 0.2778390347957611,
"learning_rate": 0.0003583733773219537,
"loss": 2.4732,
"step": 211000
},
{
"epoch": 13.63958051856141,
"grad_norm": 0.27868494391441345,
"learning_rate": 0.0003547873484902819,
"loss": 2.4785,
"step": 212000
},
{
"epoch": 13.703918162516889,
"grad_norm": 0.24166850745677948,
"learning_rate": 0.00035120131965861006,
"loss": 2.4829,
"step": 213000
},
{
"epoch": 13.768255806472368,
"grad_norm": 0.26391276717185974,
"learning_rate": 0.0003476152908269383,
"loss": 2.4861,
"step": 214000
},
{
"epoch": 13.832593450427845,
"grad_norm": 0.2995707392692566,
"learning_rate": 0.00034403284802409813,
"loss": 2.4851,
"step": 215000
},
{
"epoch": 13.896931094383323,
"grad_norm": 0.2664172947406769,
"learning_rate": 0.0003404468191924263,
"loss": 2.4851,
"step": 216000
},
{
"epoch": 13.961268738338802,
"grad_norm": 0.2712934613227844,
"learning_rate": 0.0003368607903607545,
"loss": 2.4846,
"step": 217000
},
{
"epoch": 14.0,
"eval_accuracy": 0.4257042221923094,
"eval_loss": 2.9749884605407715,
"eval_runtime": 113.0113,
"eval_samples_per_second": 464.024,
"eval_steps_per_second": 7.256,
"step": 217602
},
{
"epoch": 14.02560638229428,
"grad_norm": 0.2511956989765167,
"learning_rate": 0.00033327834755791436,
"loss": 2.4623,
"step": 218000
},
{
"epoch": 14.089944026249759,
"grad_norm": 0.2726948857307434,
"learning_rate": 0.0003296923187262426,
"loss": 2.42,
"step": 219000
},
{
"epoch": 14.154281670205236,
"grad_norm": 0.2592310309410095,
"learning_rate": 0.00032610987592340243,
"loss": 2.4247,
"step": 220000
},
{
"epoch": 14.218619314160716,
"grad_norm": 0.24364928901195526,
"learning_rate": 0.0003225238470917306,
"loss": 2.4293,
"step": 221000
},
{
"epoch": 14.282956958116193,
"grad_norm": 0.26270249485969543,
"learning_rate": 0.0003189414042888905,
"loss": 2.43,
"step": 222000
},
{
"epoch": 14.347294602071672,
"grad_norm": 0.24876274168491364,
"learning_rate": 0.0003153553754572187,
"loss": 2.4434,
"step": 223000
},
{
"epoch": 14.41163224602715,
"grad_norm": 0.27520838379859924,
"learning_rate": 0.0003117693466255469,
"loss": 2.4427,
"step": 224000
},
{
"epoch": 14.475969889982629,
"grad_norm": 0.28508856892585754,
"learning_rate": 0.0003081833177938751,
"loss": 2.4431,
"step": 225000
},
{
"epoch": 14.540307533938106,
"grad_norm": 0.26081016659736633,
"learning_rate": 0.00030460087499103495,
"loss": 2.4414,
"step": 226000
},
{
"epoch": 14.604645177893586,
"grad_norm": 0.24568462371826172,
"learning_rate": 0.0003010184321881948,
"loss": 2.455,
"step": 227000
},
{
"epoch": 14.668982821849063,
"grad_norm": 0.2919858694076538,
"learning_rate": 0.000297432403356523,
"loss": 2.4528,
"step": 228000
},
{
"epoch": 14.733320465804542,
"grad_norm": 0.2552844285964966,
"learning_rate": 0.00029384996055368287,
"loss": 2.4591,
"step": 229000
},
{
"epoch": 14.79765810976002,
"grad_norm": 0.24218083918094635,
"learning_rate": 0.00029026393172201104,
"loss": 2.4593,
"step": 230000
},
{
"epoch": 14.8619957537155,
"grad_norm": 0.2795206904411316,
"learning_rate": 0.00028667790289033926,
"loss": 2.4562,
"step": 231000
},
{
"epoch": 14.926333397670977,
"grad_norm": 0.24688765406608582,
"learning_rate": 0.0002830918740586674,
"loss": 2.4526,
"step": 232000
},
{
"epoch": 14.990671041626456,
"grad_norm": 0.26866546273231506,
"learning_rate": 0.00027950584522699564,
"loss": 2.4638,
"step": 233000
},
{
"epoch": 15.0,
"eval_accuracy": 0.4250153303121401,
"eval_loss": 2.985236644744873,
"eval_runtime": 112.77,
"eval_samples_per_second": 465.017,
"eval_steps_per_second": 7.271,
"step": 233145
},
{
"epoch": 15.055008685581933,
"grad_norm": 0.2907277047634125,
"learning_rate": 0.0002759198163953238,
"loss": 2.4022,
"step": 234000
},
{
"epoch": 15.119346329537413,
"grad_norm": 0.28493550419807434,
"learning_rate": 0.0002723409596213154,
"loss": 2.3966,
"step": 235000
},
{
"epoch": 15.18368397349289,
"grad_norm": 0.2639086842536926,
"learning_rate": 0.00026875493078964356,
"loss": 2.4034,
"step": 236000
},
{
"epoch": 15.24802161744837,
"grad_norm": 0.2657439708709717,
"learning_rate": 0.0002651689019579717,
"loss": 2.4088,
"step": 237000
},
{
"epoch": 15.312359261403847,
"grad_norm": 0.2811715602874756,
"learning_rate": 0.0002615828731262999,
"loss": 2.416,
"step": 238000
},
{
"epoch": 15.376696905359326,
"grad_norm": 0.2565077245235443,
"learning_rate": 0.00025800043032345985,
"loss": 2.4167,
"step": 239000
},
{
"epoch": 15.441034549314804,
"grad_norm": 0.2618282735347748,
"learning_rate": 0.000254414401491788,
"loss": 2.4197,
"step": 240000
},
{
"epoch": 15.505372193270283,
"grad_norm": 0.33380481600761414,
"learning_rate": 0.00025083195868894786,
"loss": 2.4218,
"step": 241000
},
{
"epoch": 15.56970983722576,
"grad_norm": 0.28201743960380554,
"learning_rate": 0.0002472459298572761,
"loss": 2.4304,
"step": 242000
},
{
"epoch": 15.63404748118124,
"grad_norm": 0.24747717380523682,
"learning_rate": 0.00024366348705443593,
"loss": 2.4276,
"step": 243000
},
{
"epoch": 15.698385125136717,
"grad_norm": 0.31805720925331116,
"learning_rate": 0.00024007745822276412,
"loss": 2.4288,
"step": 244000
},
{
"epoch": 15.762722769092196,
"grad_norm": 0.28187525272369385,
"learning_rate": 0.0002364914293910923,
"loss": 2.439,
"step": 245000
},
{
"epoch": 15.827060413047674,
"grad_norm": 0.26413655281066895,
"learning_rate": 0.0002329089865882522,
"loss": 2.4329,
"step": 246000
},
{
"epoch": 15.891398057003153,
"grad_norm": 0.28774192929267883,
"learning_rate": 0.00022932295775658038,
"loss": 2.436,
"step": 247000
},
{
"epoch": 15.95573570095863,
"grad_norm": 0.3739126920700073,
"learning_rate": 0.00022574051495374023,
"loss": 2.4351,
"step": 248000
},
{
"epoch": 16.0,
"eval_accuracy": 0.42470191890638787,
"eval_loss": 2.996845006942749,
"eval_runtime": 112.9222,
"eval_samples_per_second": 464.39,
"eval_steps_per_second": 7.262,
"step": 248688
},
{
"epoch": 16.02007334491411,
"grad_norm": 0.2598513662815094,
"learning_rate": 0.00022215448612206842,
"loss": 2.4157,
"step": 249000
},
{
"epoch": 16.08441098886959,
"grad_norm": 0.2784959673881531,
"learning_rate": 0.00021856845729039662,
"loss": 2.3753,
"step": 250000
},
{
"epoch": 16.148748632825065,
"grad_norm": 0.26741138100624084,
"learning_rate": 0.00021498601448755646,
"loss": 2.383,
"step": 251000
},
{
"epoch": 16.213086276780544,
"grad_norm": 0.2786239981651306,
"learning_rate": 0.00021139998565588466,
"loss": 2.3848,
"step": 252000
},
{
"epoch": 16.277423920736023,
"grad_norm": 0.2728557586669922,
"learning_rate": 0.00020781395682421288,
"loss": 2.3863,
"step": 253000
},
{
"epoch": 16.341761564691502,
"grad_norm": 0.2834782898426056,
"learning_rate": 0.00020422792799254107,
"loss": 2.3946,
"step": 254000
},
{
"epoch": 16.406099208646978,
"grad_norm": 0.2955917418003082,
"learning_rate": 0.0002006490712185326,
"loss": 2.3936,
"step": 255000
},
{
"epoch": 16.470436852602457,
"grad_norm": 0.27384132146835327,
"learning_rate": 0.0001970630423868608,
"loss": 2.3991,
"step": 256000
},
{
"epoch": 16.534774496557937,
"grad_norm": 0.2943706512451172,
"learning_rate": 0.00019347701355518899,
"loss": 2.4013,
"step": 257000
},
{
"epoch": 16.599112140513416,
"grad_norm": 0.263576477766037,
"learning_rate": 0.00018989457075234886,
"loss": 2.4069,
"step": 258000
},
{
"epoch": 16.66344978446889,
"grad_norm": 0.2836902141571045,
"learning_rate": 0.00018630854192067703,
"loss": 2.4024,
"step": 259000
},
{
"epoch": 16.72778742842437,
"grad_norm": 0.30342864990234375,
"learning_rate": 0.00018272251308900522,
"loss": 2.4049,
"step": 260000
},
{
"epoch": 16.79212507237985,
"grad_norm": 0.28382593393325806,
"learning_rate": 0.0001791400702861651,
"loss": 2.4104,
"step": 261000
},
{
"epoch": 16.85646271633533,
"grad_norm": 0.30956143140792847,
"learning_rate": 0.00017555404145449331,
"loss": 2.4126,
"step": 262000
},
{
"epoch": 16.920800360290805,
"grad_norm": 0.29086652398109436,
"learning_rate": 0.0001719680126228215,
"loss": 2.4128,
"step": 263000
},
{
"epoch": 16.985138004246284,
"grad_norm": 0.29363515973091125,
"learning_rate": 0.0001683819837911497,
"loss": 2.4119,
"step": 264000
},
{
"epoch": 17.0,
"eval_accuracy": 0.42441498033233127,
"eval_loss": 3.0043680667877197,
"eval_runtime": 112.7018,
"eval_samples_per_second": 465.299,
"eval_steps_per_second": 7.276,
"step": 264231
},
{
"epoch": 17.049475648201764,
"grad_norm": 0.2627195119857788,
"learning_rate": 0.00016479954098830955,
"loss": 2.3717,
"step": 265000
},
{
"epoch": 17.113813292157243,
"grad_norm": 0.31472471356391907,
"learning_rate": 0.00016121709818546942,
"loss": 2.3624,
"step": 266000
},
{
"epoch": 17.17815093611272,
"grad_norm": 0.30606958270072937,
"learning_rate": 0.00015763106935379762,
"loss": 2.3688,
"step": 267000
},
{
"epoch": 17.242488580068198,
"grad_norm": 0.3092518448829651,
"learning_rate": 0.00015404862655095746,
"loss": 2.3707,
"step": 268000
},
{
"epoch": 17.306826224023677,
"grad_norm": 0.29389309883117676,
"learning_rate": 0.00015046259771928566,
"loss": 2.3684,
"step": 269000
},
{
"epoch": 17.371163867979156,
"grad_norm": 0.26194649934768677,
"learning_rate": 0.00014687656888761385,
"loss": 2.3751,
"step": 270000
},
{
"epoch": 17.435501511934632,
"grad_norm": 0.32368776202201843,
"learning_rate": 0.00014329054005594204,
"loss": 2.3746,
"step": 271000
},
{
"epoch": 17.49983915589011,
"grad_norm": 0.2756269872188568,
"learning_rate": 0.00013970809725310192,
"loss": 2.376,
"step": 272000
},
{
"epoch": 17.56417679984559,
"grad_norm": 0.32635217905044556,
"learning_rate": 0.0001361220684214301,
"loss": 2.3872,
"step": 273000
},
{
"epoch": 17.62851444380107,
"grad_norm": 0.31273502111434937,
"learning_rate": 0.00013253962561859,
"loss": 2.3812,
"step": 274000
},
{
"epoch": 17.692852087756545,
"grad_norm": 0.28867122530937195,
"learning_rate": 0.00012895359678691818,
"loss": 2.3852,
"step": 275000
},
{
"epoch": 17.757189731712025,
"grad_norm": 0.2799566984176636,
"learning_rate": 0.00012536756795524637,
"loss": 2.3825,
"step": 276000
},
{
"epoch": 17.821527375667504,
"grad_norm": 0.2807822525501251,
"learning_rate": 0.00012178153912357455,
"loss": 2.3795,
"step": 277000
},
{
"epoch": 17.885865019622983,
"grad_norm": 0.2853219211101532,
"learning_rate": 0.00011819909632073441,
"loss": 2.3846,
"step": 278000
},
{
"epoch": 17.95020266357846,
"grad_norm": 0.27752065658569336,
"learning_rate": 0.0001146130674890626,
"loss": 2.3894,
"step": 279000
},
{
"epoch": 18.0,
"eval_accuracy": 0.4241119636260301,
"eval_loss": 3.013193130493164,
"eval_runtime": 112.7432,
"eval_samples_per_second": 465.128,
"eval_steps_per_second": 7.273,
"step": 279774
},
{
"epoch": 18.014540307533938,
"grad_norm": 0.30354997515678406,
"learning_rate": 0.00011102703865739081,
"loss": 2.3766,
"step": 280000
},
{
"epoch": 18.078877951489417,
"grad_norm": 0.29635682702064514,
"learning_rate": 0.00010744459585455067,
"loss": 2.3432,
"step": 281000
},
{
"epoch": 18.143215595444893,
"grad_norm": 0.36005303263664246,
"learning_rate": 0.00010385856702287887,
"loss": 2.3509,
"step": 282000
},
{
"epoch": 18.207553239400372,
"grad_norm": 0.2774285674095154,
"learning_rate": 0.00010027253819120706,
"loss": 2.3499,
"step": 283000
},
{
"epoch": 18.27189088335585,
"grad_norm": 0.29461967945098877,
"learning_rate": 9.669009538836692e-05,
"loss": 2.3504,
"step": 284000
},
{
"epoch": 18.33622852731133,
"grad_norm": 0.29665911197662354,
"learning_rate": 9.310406655669513e-05,
"loss": 2.3559,
"step": 285000
},
{
"epoch": 18.400566171266806,
"grad_norm": 0.30571022629737854,
"learning_rate": 8.952162375385499e-05,
"loss": 2.3548,
"step": 286000
},
{
"epoch": 18.464903815222286,
"grad_norm": 0.2917598783969879,
"learning_rate": 8.593559492218318e-05,
"loss": 2.3527,
"step": 287000
},
{
"epoch": 18.529241459177765,
"grad_norm": 0.2724420130252838,
"learning_rate": 8.235315211934303e-05,
"loss": 2.3615,
"step": 288000
},
{
"epoch": 18.593579103133244,
"grad_norm": 0.2798316776752472,
"learning_rate": 7.876712328767124e-05,
"loss": 2.3656,
"step": 289000
},
{
"epoch": 18.65791674708872,
"grad_norm": 0.31976842880249023,
"learning_rate": 7.518109445599943e-05,
"loss": 2.3525,
"step": 290000
},
{
"epoch": 18.7222543910442,
"grad_norm": 0.28387773036956787,
"learning_rate": 7.159865165315929e-05,
"loss": 2.3598,
"step": 291000
},
{
"epoch": 18.78659203499968,
"grad_norm": 0.28037527203559875,
"learning_rate": 6.801262282148748e-05,
"loss": 2.3658,
"step": 292000
},
{
"epoch": 18.850929678955158,
"grad_norm": 0.2663221061229706,
"learning_rate": 6.443018001864735e-05,
"loss": 2.3658,
"step": 293000
},
{
"epoch": 18.915267322910633,
"grad_norm": 0.270049124956131,
"learning_rate": 6.0844151186975546e-05,
"loss": 2.3656,
"step": 294000
},
{
"epoch": 18.979604966866113,
"grad_norm": 0.26364874839782715,
"learning_rate": 5.725812235530374e-05,
"loss": 2.3569,
"step": 295000
},
{
"epoch": 19.0,
"eval_accuracy": 0.4237278832204125,
"eval_loss": 3.027001142501831,
"eval_runtime": 112.7341,
"eval_samples_per_second": 465.165,
"eval_steps_per_second": 7.274,
"step": 295317
},
{
"epoch": 19.043942610821592,
"grad_norm": 0.29353857040405273,
"learning_rate": 5.367209352363193e-05,
"loss": 2.3409,
"step": 296000
},
{
"epoch": 19.10828025477707,
"grad_norm": 0.2566602826118469,
"learning_rate": 5.008965072079179e-05,
"loss": 2.3371,
"step": 297000
},
{
"epoch": 19.172617898732547,
"grad_norm": 0.2797909677028656,
"learning_rate": 4.650362188911999e-05,
"loss": 2.3365,
"step": 298000
},
{
"epoch": 19.236955542688026,
"grad_norm": 0.32513269782066345,
"learning_rate": 4.2921179086279854e-05,
"loss": 2.3378,
"step": 299000
},
{
"epoch": 19.301293186643505,
"grad_norm": 0.3171839416027069,
"learning_rate": 3.933873628343972e-05,
"loss": 2.3325,
"step": 300000
},
{
"epoch": 19.365630830598985,
"grad_norm": 0.3085026443004608,
"learning_rate": 3.575270745176791e-05,
"loss": 2.3356,
"step": 301000
},
{
"epoch": 19.42996847455446,
"grad_norm": 0.32660767436027527,
"learning_rate": 3.21666786200961e-05,
"loss": 2.3407,
"step": 302000
},
{
"epoch": 19.49430611850994,
"grad_norm": 0.3034733235836029,
"learning_rate": 2.8580649788424298e-05,
"loss": 2.3404,
"step": 303000
},
{
"epoch": 19.55864376246542,
"grad_norm": 0.277503103017807,
"learning_rate": 2.4998206985584166e-05,
"loss": 2.3407,
"step": 304000
},
{
"epoch": 19.622981406420898,
"grad_norm": 0.3372737467288971,
"learning_rate": 2.141217815391236e-05,
"loss": 2.339,
"step": 305000
},
{
"epoch": 19.687319050376374,
"grad_norm": 0.3079584240913391,
"learning_rate": 1.7826149322240552e-05,
"loss": 2.3399,
"step": 306000
},
{
"epoch": 19.751656694331853,
"grad_norm": 0.2789517939090729,
"learning_rate": 1.4243706519400415e-05,
"loss": 2.3396,
"step": 307000
},
{
"epoch": 19.815994338287332,
"grad_norm": 0.2706036865711212,
"learning_rate": 1.065767768772861e-05,
"loss": 2.34,
"step": 308000
},
{
"epoch": 19.88033198224281,
"grad_norm": 0.2812829911708832,
"learning_rate": 7.075234884888475e-06,
"loss": 2.3404,
"step": 309000
},
{
"epoch": 19.944669626198287,
"grad_norm": 0.2975556254386902,
"learning_rate": 3.489206053216668e-06,
"loss": 2.3375,
"step": 310000
},
{
"epoch": 20.0,
"eval_accuracy": 0.4232865945768086,
"eval_loss": 3.037959337234497,
"eval_runtime": 112.7925,
"eval_samples_per_second": 464.925,
"eval_steps_per_second": 7.27,
"step": 310860
},
{
"epoch": 20.0,
"step": 310860,
"total_flos": 1.29957250203648e+18,
"train_loss": 2.656636161733025,
"train_runtime": 44071.5037,
"train_samples_per_second": 225.708,
"train_steps_per_second": 7.054
}
],
"logging_steps": 1000,
"max_steps": 310860,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 5000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.29957250203648e+18,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}