{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 28.67383512544803,
"eval_steps": 500,
"global_step": 4000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.14336917562724014,
"grad_norm": 0.8701110482215881,
"learning_rate": 4e-05,
"loss": 2.5146,
"step": 20
},
{
"epoch": 0.2867383512544803,
"grad_norm": 1.018184781074524,
"learning_rate": 8e-05,
"loss": 2.4311,
"step": 40
},
{
"epoch": 0.43010752688172044,
"grad_norm": 3.042095184326172,
"learning_rate": 0.00012,
"loss": 2.1109,
"step": 60
},
{
"epoch": 0.5734767025089605,
"grad_norm": 0.8230174779891968,
"learning_rate": 0.00016,
"loss": 1.8722,
"step": 80
},
{
"epoch": 0.7168458781362007,
"grad_norm": 0.8457050323486328,
"learning_rate": 0.0002,
"loss": 1.6615,
"step": 100
},
{
"epoch": 0.8602150537634409,
"grad_norm": 0.7940322160720825,
"learning_rate": 0.00019988455988455988,
"loss": 1.575,
"step": 120
},
{
"epoch": 1.003584229390681,
"grad_norm": 1.0075829029083252,
"learning_rate": 0.00019976911976911978,
"loss": 1.4759,
"step": 140
},
{
"epoch": 1.146953405017921,
"grad_norm": 0.8754940629005432,
"learning_rate": 0.00019965367965367966,
"loss": 1.4484,
"step": 160
},
{
"epoch": 1.2903225806451613,
"grad_norm": 1.2001057863235474,
"learning_rate": 0.00019953823953823956,
"loss": 1.4163,
"step": 180
},
{
"epoch": 1.4336917562724014,
"grad_norm": 1.1393181085586548,
"learning_rate": 0.00019942279942279943,
"loss": 1.4249,
"step": 200
},
{
"epoch": 1.5770609318996416,
"grad_norm": 1.426618218421936,
"learning_rate": 0.0001993073593073593,
"loss": 1.4,
"step": 220
},
{
"epoch": 1.7204301075268817,
"grad_norm": 1.5125058889389038,
"learning_rate": 0.0001991919191919192,
"loss": 1.4044,
"step": 240
},
{
"epoch": 1.863799283154122,
"grad_norm": 1.490069031715393,
"learning_rate": 0.00019907647907647908,
"loss": 1.3274,
"step": 260
},
{
"epoch": 2.007168458781362,
"grad_norm": 1.1766711473464966,
"learning_rate": 0.00019896103896103898,
"loss": 1.4013,
"step": 280
},
{
"epoch": 2.150537634408602,
"grad_norm": 1.7495911121368408,
"learning_rate": 0.00019884559884559885,
"loss": 1.3134,
"step": 300
},
{
"epoch": 2.293906810035842,
"grad_norm": 1.4539434909820557,
"learning_rate": 0.00019873015873015875,
"loss": 1.3187,
"step": 320
},
{
"epoch": 2.4372759856630823,
"grad_norm": 1.6635984182357788,
"learning_rate": 0.00019861471861471863,
"loss": 1.2624,
"step": 340
},
{
"epoch": 2.5806451612903225,
"grad_norm": 2.0392329692840576,
"learning_rate": 0.00019849927849927853,
"loss": 1.2672,
"step": 360
},
{
"epoch": 2.7240143369175627,
"grad_norm": 1.4355524778366089,
"learning_rate": 0.00019838383838383837,
"loss": 1.2838,
"step": 380
},
{
"epoch": 2.867383512544803,
"grad_norm": 1.7227002382278442,
"learning_rate": 0.00019826839826839827,
"loss": 1.2552,
"step": 400
},
{
"epoch": 3.010752688172043,
"grad_norm": 1.666508436203003,
"learning_rate": 0.00019815295815295815,
"loss": 1.2805,
"step": 420
},
{
"epoch": 3.154121863799283,
"grad_norm": 2.0044264793395996,
"learning_rate": 0.00019803751803751805,
"loss": 1.1947,
"step": 440
},
{
"epoch": 3.2974910394265233,
"grad_norm": 2.1593244075775146,
"learning_rate": 0.00019792207792207792,
"loss": 1.2006,
"step": 460
},
{
"epoch": 3.4408602150537635,
"grad_norm": 2.0706088542938232,
"learning_rate": 0.00019780663780663782,
"loss": 1.2535,
"step": 480
},
{
"epoch": 3.5842293906810037,
"grad_norm": 2.375523567199707,
"learning_rate": 0.0001976911976911977,
"loss": 1.1777,
"step": 500
},
{
"epoch": 3.727598566308244,
"grad_norm": 2.0333664417266846,
"learning_rate": 0.0001975757575757576,
"loss": 1.2199,
"step": 520
},
{
"epoch": 3.870967741935484,
"grad_norm": 2.0638692378997803,
"learning_rate": 0.00019746031746031747,
"loss": 1.241,
"step": 540
},
{
"epoch": 4.014336917562724,
"grad_norm": 1.829522728919983,
"learning_rate": 0.00019734487734487734,
"loss": 1.1603,
"step": 560
},
{
"epoch": 4.157706093189964,
"grad_norm": 2.5862321853637695,
"learning_rate": 0.00019722943722943722,
"loss": 1.1392,
"step": 580
},
{
"epoch": 4.301075268817204,
"grad_norm": 2.028472423553467,
"learning_rate": 0.00019711399711399712,
"loss": 1.078,
"step": 600
},
{
"epoch": 4.444444444444445,
"grad_norm": 1.7103493213653564,
"learning_rate": 0.000196998556998557,
"loss": 1.1751,
"step": 620
},
{
"epoch": 4.587813620071684,
"grad_norm": 2.320739269256592,
"learning_rate": 0.0001968831168831169,
"loss": 1.1617,
"step": 640
},
{
"epoch": 4.731182795698925,
"grad_norm": 2.6019182205200195,
"learning_rate": 0.00019676767676767677,
"loss": 1.1775,
"step": 660
},
{
"epoch": 4.874551971326165,
"grad_norm": 2.342106342315674,
"learning_rate": 0.00019665223665223667,
"loss": 1.0605,
"step": 680
},
{
"epoch": 5.017921146953405,
"grad_norm": 2.3113956451416016,
"learning_rate": 0.00019653679653679654,
"loss": 1.1429,
"step": 700
},
{
"epoch": 5.161290322580645,
"grad_norm": 2.171203851699829,
"learning_rate": 0.00019642135642135644,
"loss": 1.0781,
"step": 720
},
{
"epoch": 5.304659498207886,
"grad_norm": 2.2198007106781006,
"learning_rate": 0.00019630591630591631,
"loss": 1.0734,
"step": 740
},
{
"epoch": 5.448028673835125,
"grad_norm": 2.1445112228393555,
"learning_rate": 0.0001961904761904762,
"loss": 1.0354,
"step": 760
},
{
"epoch": 5.591397849462366,
"grad_norm": 2.0999436378479004,
"learning_rate": 0.0001960750360750361,
"loss": 1.0931,
"step": 780
},
{
"epoch": 5.734767025089606,
"grad_norm": 2.395735740661621,
"learning_rate": 0.00019595959595959596,
"loss": 1.1055,
"step": 800
},
{
"epoch": 5.878136200716845,
"grad_norm": 2.327645778656006,
"learning_rate": 0.00019584415584415586,
"loss": 1.1219,
"step": 820
},
{
"epoch": 6.021505376344086,
"grad_norm": 2.3551368713378906,
"learning_rate": 0.00019572871572871574,
"loss": 1.0061,
"step": 840
},
{
"epoch": 6.164874551971327,
"grad_norm": 2.9338879585266113,
"learning_rate": 0.00019561327561327564,
"loss": 0.9606,
"step": 860
},
{
"epoch": 6.308243727598566,
"grad_norm": 2.6095728874206543,
"learning_rate": 0.0001954978354978355,
"loss": 0.9917,
"step": 880
},
{
"epoch": 6.451612903225806,
"grad_norm": 2.9128003120422363,
"learning_rate": 0.0001953823953823954,
"loss": 1.0127,
"step": 900
},
{
"epoch": 6.594982078853047,
"grad_norm": 2.1806392669677734,
"learning_rate": 0.00019526695526695528,
"loss": 1.0646,
"step": 920
},
{
"epoch": 6.738351254480286,
"grad_norm": 2.0614748001098633,
"learning_rate": 0.00019515151515151516,
"loss": 1.0206,
"step": 940
},
{
"epoch": 6.881720430107527,
"grad_norm": 3.0712902545928955,
"learning_rate": 0.00019503607503607503,
"loss": 1.044,
"step": 960
},
{
"epoch": 7.025089605734767,
"grad_norm": 2.756150960922241,
"learning_rate": 0.00019492063492063493,
"loss": 1.0001,
"step": 980
},
{
"epoch": 7.168458781362007,
"grad_norm": 2.4070088863372803,
"learning_rate": 0.0001948051948051948,
"loss": 1.0062,
"step": 1000
},
{
"epoch": 7.311827956989247,
"grad_norm": 3.246767520904541,
"learning_rate": 0.0001946897546897547,
"loss": 0.9447,
"step": 1020
},
{
"epoch": 7.455197132616488,
"grad_norm": 2.817984104156494,
"learning_rate": 0.00019457431457431458,
"loss": 0.9825,
"step": 1040
},
{
"epoch": 7.598566308243727,
"grad_norm": 2.8855531215667725,
"learning_rate": 0.00019445887445887448,
"loss": 0.9889,
"step": 1060
},
{
"epoch": 7.741935483870968,
"grad_norm": 3.1939897537231445,
"learning_rate": 0.00019434343434343435,
"loss": 0.9244,
"step": 1080
},
{
"epoch": 7.885304659498208,
"grad_norm": 2.6888437271118164,
"learning_rate": 0.00019422799422799425,
"loss": 0.932,
"step": 1100
},
{
"epoch": 8.028673835125447,
"grad_norm": 2.6523380279541016,
"learning_rate": 0.0001941125541125541,
"loss": 0.9283,
"step": 1120
},
{
"epoch": 8.172043010752688,
"grad_norm": 3.123074769973755,
"learning_rate": 0.000193997113997114,
"loss": 0.9142,
"step": 1140
},
{
"epoch": 8.315412186379929,
"grad_norm": 2.7300188541412354,
"learning_rate": 0.00019388167388167387,
"loss": 0.9039,
"step": 1160
},
{
"epoch": 8.45878136200717,
"grad_norm": 2.889723300933838,
"learning_rate": 0.00019376623376623377,
"loss": 0.9093,
"step": 1180
},
{
"epoch": 8.602150537634408,
"grad_norm": 2.390486717224121,
"learning_rate": 0.00019365079365079365,
"loss": 0.9247,
"step": 1200
},
{
"epoch": 8.745519713261649,
"grad_norm": 2.97426700592041,
"learning_rate": 0.00019353535353535355,
"loss": 0.8887,
"step": 1220
},
{
"epoch": 8.88888888888889,
"grad_norm": 3.086162805557251,
"learning_rate": 0.00019341991341991342,
"loss": 0.9477,
"step": 1240
},
{
"epoch": 9.03225806451613,
"grad_norm": 4.036062240600586,
"learning_rate": 0.00019330447330447332,
"loss": 0.8978,
"step": 1260
},
{
"epoch": 9.175627240143369,
"grad_norm": 3.6827592849731445,
"learning_rate": 0.0001931890331890332,
"loss": 0.8826,
"step": 1280
},
{
"epoch": 9.31899641577061,
"grad_norm": 2.9294958114624023,
"learning_rate": 0.00019307359307359307,
"loss": 0.8367,
"step": 1300
},
{
"epoch": 9.46236559139785,
"grad_norm": 2.873551607131958,
"learning_rate": 0.00019295815295815297,
"loss": 0.8517,
"step": 1320
},
{
"epoch": 9.60573476702509,
"grad_norm": 2.599212884902954,
"learning_rate": 0.00019284271284271284,
"loss": 0.8754,
"step": 1340
},
{
"epoch": 9.74910394265233,
"grad_norm": 3.3076424598693848,
"learning_rate": 0.00019272727272727274,
"loss": 0.8371,
"step": 1360
},
{
"epoch": 9.89247311827957,
"grad_norm": 3.347135066986084,
"learning_rate": 0.00019261183261183262,
"loss": 0.8566,
"step": 1380
},
{
"epoch": 10.03584229390681,
"grad_norm": 3.3262321949005127,
"learning_rate": 0.00019249639249639252,
"loss": 0.8665,
"step": 1400
},
{
"epoch": 10.17921146953405,
"grad_norm": 3.3654842376708984,
"learning_rate": 0.0001923809523809524,
"loss": 0.8058,
"step": 1420
},
{
"epoch": 10.32258064516129,
"grad_norm": 3.437192678451538,
"learning_rate": 0.0001922655122655123,
"loss": 0.8227,
"step": 1440
},
{
"epoch": 10.46594982078853,
"grad_norm": 3.4953198432922363,
"learning_rate": 0.00019215007215007217,
"loss": 0.8112,
"step": 1460
},
{
"epoch": 10.609318996415771,
"grad_norm": 3.1842894554138184,
"learning_rate": 0.00019203463203463204,
"loss": 0.7906,
"step": 1480
},
{
"epoch": 10.75268817204301,
"grad_norm": 3.4176406860351562,
"learning_rate": 0.00019191919191919191,
"loss": 0.8452,
"step": 1500
},
{
"epoch": 10.89605734767025,
"grad_norm": 2.7511439323425293,
"learning_rate": 0.00019180375180375181,
"loss": 0.8233,
"step": 1520
},
{
"epoch": 11.039426523297491,
"grad_norm": 3.739851236343384,
"learning_rate": 0.0001916883116883117,
"loss": 0.8293,
"step": 1540
},
{
"epoch": 11.182795698924732,
"grad_norm": 4.09880256652832,
"learning_rate": 0.0001915728715728716,
"loss": 0.7689,
"step": 1560
},
{
"epoch": 11.32616487455197,
"grad_norm": 3.835360288619995,
"learning_rate": 0.00019145743145743146,
"loss": 0.7774,
"step": 1580
},
{
"epoch": 11.469534050179211,
"grad_norm": 4.158806324005127,
"learning_rate": 0.00019134199134199136,
"loss": 0.7507,
"step": 1600
},
{
"epoch": 11.612903225806452,
"grad_norm": 3.1276865005493164,
"learning_rate": 0.00019122655122655124,
"loss": 0.8105,
"step": 1620
},
{
"epoch": 11.756272401433693,
"grad_norm": 3.1346616744995117,
"learning_rate": 0.00019111111111111114,
"loss": 0.78,
"step": 1640
},
{
"epoch": 11.899641577060931,
"grad_norm": 3.3458778858184814,
"learning_rate": 0.00019099567099567098,
"loss": 0.7711,
"step": 1660
},
{
"epoch": 12.043010752688172,
"grad_norm": 3.1722230911254883,
"learning_rate": 0.00019088023088023088,
"loss": 0.7746,
"step": 1680
},
{
"epoch": 12.186379928315413,
"grad_norm": 4.433016777038574,
"learning_rate": 0.00019076479076479076,
"loss": 0.7556,
"step": 1700
},
{
"epoch": 12.329749103942653,
"grad_norm": 3.0202817916870117,
"learning_rate": 0.00019064935064935066,
"loss": 0.7333,
"step": 1720
},
{
"epoch": 12.473118279569892,
"grad_norm": 3.66214656829834,
"learning_rate": 0.00019053391053391053,
"loss": 0.7353,
"step": 1740
},
{
"epoch": 12.616487455197133,
"grad_norm": 3.635371446609497,
"learning_rate": 0.00019041847041847043,
"loss": 0.7186,
"step": 1760
},
{
"epoch": 12.759856630824373,
"grad_norm": 3.1196370124816895,
"learning_rate": 0.0001903030303030303,
"loss": 0.7959,
"step": 1780
},
{
"epoch": 12.903225806451612,
"grad_norm": 4.016541481018066,
"learning_rate": 0.0001901875901875902,
"loss": 0.6809,
"step": 1800
},
{
"epoch": 13.046594982078853,
"grad_norm": 3.472865581512451,
"learning_rate": 0.00019007215007215008,
"loss": 0.7213,
"step": 1820
},
{
"epoch": 13.189964157706093,
"grad_norm": 3.9193198680877686,
"learning_rate": 0.00018995670995670995,
"loss": 0.6724,
"step": 1840
},
{
"epoch": 13.333333333333334,
"grad_norm": 3.6128931045532227,
"learning_rate": 0.00018984126984126985,
"loss": 0.6879,
"step": 1860
},
{
"epoch": 13.476702508960573,
"grad_norm": 3.895617961883545,
"learning_rate": 0.00018972582972582973,
"loss": 0.6845,
"step": 1880
},
{
"epoch": 13.620071684587813,
"grad_norm": 3.7436187267303467,
"learning_rate": 0.00018961038961038963,
"loss": 0.7148,
"step": 1900
},
{
"epoch": 13.763440860215054,
"grad_norm": 2.9357612133026123,
"learning_rate": 0.0001894949494949495,
"loss": 0.7178,
"step": 1920
},
{
"epoch": 13.906810035842295,
"grad_norm": 3.4985647201538086,
"learning_rate": 0.0001893795093795094,
"loss": 0.7267,
"step": 1940
},
{
"epoch": 14.050179211469533,
"grad_norm": 3.0033462047576904,
"learning_rate": 0.00018926406926406928,
"loss": 0.7228,
"step": 1960
},
{
"epoch": 14.193548387096774,
"grad_norm": 3.525641441345215,
"learning_rate": 0.00018914862914862918,
"loss": 0.6547,
"step": 1980
},
{
"epoch": 14.336917562724015,
"grad_norm": 4.64281702041626,
"learning_rate": 0.00018903318903318905,
"loss": 0.6672,
"step": 2000
},
{
"epoch": 14.480286738351255,
"grad_norm": 3.8177640438079834,
"learning_rate": 0.00018891774891774892,
"loss": 0.6672,
"step": 2020
},
{
"epoch": 14.623655913978494,
"grad_norm": 3.5208513736724854,
"learning_rate": 0.0001888023088023088,
"loss": 0.6649,
"step": 2040
},
{
"epoch": 14.767025089605735,
"grad_norm": 4.133231163024902,
"learning_rate": 0.0001886868686868687,
"loss": 0.683,
"step": 2060
},
{
"epoch": 14.910394265232975,
"grad_norm": 3.406191110610962,
"learning_rate": 0.00018857142857142857,
"loss": 0.6852,
"step": 2080
},
{
"epoch": 15.053763440860216,
"grad_norm": 3.38570237159729,
"learning_rate": 0.00018845598845598847,
"loss": 0.6723,
"step": 2100
},
{
"epoch": 15.197132616487455,
"grad_norm": 3.719644546508789,
"learning_rate": 0.00018834054834054834,
"loss": 0.6528,
"step": 2120
},
{
"epoch": 15.340501792114695,
"grad_norm": 3.887092113494873,
"learning_rate": 0.00018822510822510825,
"loss": 0.6116,
"step": 2140
},
{
"epoch": 15.483870967741936,
"grad_norm": 4.1723432540893555,
"learning_rate": 0.00018810966810966812,
"loss": 0.5873,
"step": 2160
},
{
"epoch": 15.627240143369175,
"grad_norm": 4.310718536376953,
"learning_rate": 0.00018799422799422802,
"loss": 0.6705,
"step": 2180
},
{
"epoch": 15.770609318996415,
"grad_norm": 4.029461860656738,
"learning_rate": 0.0001878787878787879,
"loss": 0.6342,
"step": 2200
},
{
"epoch": 15.913978494623656,
"grad_norm": 4.160161972045898,
"learning_rate": 0.00018776334776334777,
"loss": 0.636,
"step": 2220
},
{
"epoch": 16.057347670250895,
"grad_norm": 3.1228997707366943,
"learning_rate": 0.00018764790764790764,
"loss": 0.6491,
"step": 2240
},
{
"epoch": 16.200716845878137,
"grad_norm": 5.732697486877441,
"learning_rate": 0.00018753246753246754,
"loss": 0.5499,
"step": 2260
},
{
"epoch": 16.344086021505376,
"grad_norm": 3.855351448059082,
"learning_rate": 0.00018741702741702741,
"loss": 0.5809,
"step": 2280
},
{
"epoch": 16.487455197132615,
"grad_norm": 4.892947673797607,
"learning_rate": 0.00018730158730158731,
"loss": 0.5962,
"step": 2300
},
{
"epoch": 16.630824372759857,
"grad_norm": 3.2525179386138916,
"learning_rate": 0.0001871861471861472,
"loss": 0.6289,
"step": 2320
},
{
"epoch": 16.774193548387096,
"grad_norm": 3.8182177543640137,
"learning_rate": 0.0001870707070707071,
"loss": 0.6763,
"step": 2340
},
{
"epoch": 16.91756272401434,
"grad_norm": 4.179948329925537,
"learning_rate": 0.00018695526695526696,
"loss": 0.6172,
"step": 2360
},
{
"epoch": 17.060931899641577,
"grad_norm": 4.481749057769775,
"learning_rate": 0.00018683982683982686,
"loss": 0.5805,
"step": 2380
},
{
"epoch": 17.204301075268816,
"grad_norm": 3.459740400314331,
"learning_rate": 0.00018672438672438674,
"loss": 0.525,
"step": 2400
},
{
"epoch": 17.34767025089606,
"grad_norm": 4.321086883544922,
"learning_rate": 0.0001866089466089466,
"loss": 0.578,
"step": 2420
},
{
"epoch": 17.491039426523297,
"grad_norm": 3.591439962387085,
"learning_rate": 0.0001864935064935065,
"loss": 0.6128,
"step": 2440
},
{
"epoch": 17.634408602150536,
"grad_norm": 4.735933780670166,
"learning_rate": 0.00018637806637806638,
"loss": 0.5629,
"step": 2460
},
{
"epoch": 17.77777777777778,
"grad_norm": 4.053983688354492,
"learning_rate": 0.00018626262626262628,
"loss": 0.6315,
"step": 2480
},
{
"epoch": 17.921146953405017,
"grad_norm": 4.15818452835083,
"learning_rate": 0.00018614718614718616,
"loss": 0.5813,
"step": 2500
},
{
"epoch": 18.06451612903226,
"grad_norm": 4.635441303253174,
"learning_rate": 0.00018603174603174606,
"loss": 0.5661,
"step": 2520
},
{
"epoch": 18.2078853046595,
"grad_norm": 4.151346206665039,
"learning_rate": 0.00018591630591630593,
"loss": 0.5588,
"step": 2540
},
{
"epoch": 18.351254480286737,
"grad_norm": 3.2103450298309326,
"learning_rate": 0.00018580086580086583,
"loss": 0.5348,
"step": 2560
},
{
"epoch": 18.49462365591398,
"grad_norm": 3.5987372398376465,
"learning_rate": 0.00018568542568542568,
"loss": 0.5488,
"step": 2580
},
{
"epoch": 18.63799283154122,
"grad_norm": 3.638810873031616,
"learning_rate": 0.00018556998556998558,
"loss": 0.5712,
"step": 2600
},
{
"epoch": 18.781362007168457,
"grad_norm": 4.3519086837768555,
"learning_rate": 0.00018545454545454545,
"loss": 0.5295,
"step": 2620
},
{
"epoch": 18.9247311827957,
"grad_norm": 4.117532253265381,
"learning_rate": 0.00018533910533910535,
"loss": 0.588,
"step": 2640
},
{
"epoch": 19.06810035842294,
"grad_norm": 3.9326632022857666,
"learning_rate": 0.00018522366522366523,
"loss": 0.5676,
"step": 2660
},
{
"epoch": 19.211469534050178,
"grad_norm": 3.967031717300415,
"learning_rate": 0.00018510822510822513,
"loss": 0.5477,
"step": 2680
},
{
"epoch": 19.35483870967742,
"grad_norm": 3.7262048721313477,
"learning_rate": 0.000184992784992785,
"loss": 0.5448,
"step": 2700
},
{
"epoch": 19.49820788530466,
"grad_norm": 6.239511489868164,
"learning_rate": 0.0001848773448773449,
"loss": 0.4948,
"step": 2720
},
{
"epoch": 19.6415770609319,
"grad_norm": 4.259354114532471,
"learning_rate": 0.00018476190476190478,
"loss": 0.5091,
"step": 2740
},
{
"epoch": 19.78494623655914,
"grad_norm": 3.851757526397705,
"learning_rate": 0.00018464646464646465,
"loss": 0.5246,
"step": 2760
},
{
"epoch": 19.92831541218638,
"grad_norm": 4.161237716674805,
"learning_rate": 0.00018453102453102452,
"loss": 0.561,
"step": 2780
},
{
"epoch": 20.07168458781362,
"grad_norm": 3.4905500411987305,
"learning_rate": 0.00018441558441558442,
"loss": 0.5089,
"step": 2800
},
{
"epoch": 20.21505376344086,
"grad_norm": 4.338738918304443,
"learning_rate": 0.0001843001443001443,
"loss": 0.4543,
"step": 2820
},
{
"epoch": 20.3584229390681,
"grad_norm": 3.200549840927124,
"learning_rate": 0.0001841847041847042,
"loss": 0.51,
"step": 2840
},
{
"epoch": 20.50179211469534,
"grad_norm": 4.969254016876221,
"learning_rate": 0.00018406926406926407,
"loss": 0.5493,
"step": 2860
},
{
"epoch": 20.64516129032258,
"grad_norm": 5.882287979125977,
"learning_rate": 0.00018395382395382397,
"loss": 0.5594,
"step": 2880
},
{
"epoch": 20.788530465949822,
"grad_norm": 4.375141143798828,
"learning_rate": 0.00018383838383838384,
"loss": 0.4904,
"step": 2900
},
{
"epoch": 20.93189964157706,
"grad_norm": 5.635257720947266,
"learning_rate": 0.00018372294372294375,
"loss": 0.5015,
"step": 2920
},
{
"epoch": 21.0752688172043,
"grad_norm": 4.052358150482178,
"learning_rate": 0.00018360750360750362,
"loss": 0.5043,
"step": 2940
},
{
"epoch": 21.218637992831543,
"grad_norm": 3.6945507526397705,
"learning_rate": 0.0001834920634920635,
"loss": 0.5193,
"step": 2960
},
{
"epoch": 21.36200716845878,
"grad_norm": 3.5928592681884766,
"learning_rate": 0.0001833766233766234,
"loss": 0.4771,
"step": 2980
},
{
"epoch": 21.50537634408602,
"grad_norm": 3.8914806842803955,
"learning_rate": 0.00018326118326118327,
"loss": 0.4604,
"step": 3000
},
{
"epoch": 21.648745519713263,
"grad_norm": 4.555027961730957,
"learning_rate": 0.00018314574314574317,
"loss": 0.4799,
"step": 3020
},
{
"epoch": 21.7921146953405,
"grad_norm": 4.636914253234863,
"learning_rate": 0.00018303030303030304,
"loss": 0.4556,
"step": 3040
},
{
"epoch": 21.93548387096774,
"grad_norm": 3.740828037261963,
"learning_rate": 0.00018291486291486294,
"loss": 0.5132,
"step": 3060
},
{
"epoch": 22.078853046594983,
"grad_norm": 3.5043046474456787,
"learning_rate": 0.00018279942279942281,
"loss": 0.4834,
"step": 3080
},
{
"epoch": 22.22222222222222,
"grad_norm": 5.914973258972168,
"learning_rate": 0.00018268398268398272,
"loss": 0.4751,
"step": 3100
},
{
"epoch": 22.365591397849464,
"grad_norm": 3.935854434967041,
"learning_rate": 0.00018256854256854256,
"loss": 0.4742,
"step": 3120
},
{
"epoch": 22.508960573476703,
"grad_norm": 4.205220699310303,
"learning_rate": 0.00018245310245310246,
"loss": 0.4726,
"step": 3140
},
{
"epoch": 22.65232974910394,
"grad_norm": 3.5910654067993164,
"learning_rate": 0.00018233766233766234,
"loss": 0.4551,
"step": 3160
},
{
"epoch": 22.795698924731184,
"grad_norm": 4.200057029724121,
"learning_rate": 0.00018222222222222224,
"loss": 0.4513,
"step": 3180
},
{
"epoch": 22.939068100358423,
"grad_norm": 3.9215588569641113,
"learning_rate": 0.0001821067821067821,
"loss": 0.4887,
"step": 3200
},
{
"epoch": 23.08243727598566,
"grad_norm": 5.426336288452148,
"learning_rate": 0.000181991341991342,
"loss": 0.4311,
"step": 3220
},
{
"epoch": 23.225806451612904,
"grad_norm": 5.736265182495117,
"learning_rate": 0.00018187590187590188,
"loss": 0.4004,
"step": 3240
},
{
"epoch": 23.369175627240143,
"grad_norm": 5.350865840911865,
"learning_rate": 0.00018176046176046178,
"loss": 0.4655,
"step": 3260
},
{
"epoch": 23.512544802867385,
"grad_norm": 3.840571165084839,
"learning_rate": 0.00018164502164502166,
"loss": 0.4935,
"step": 3280
},
{
"epoch": 23.655913978494624,
"grad_norm": 4.473032474517822,
"learning_rate": 0.00018152958152958153,
"loss": 0.4499,
"step": 3300
},
{
"epoch": 23.799283154121863,
"grad_norm": 4.150224685668945,
"learning_rate": 0.0001814141414141414,
"loss": 0.43,
"step": 3320
},
{
"epoch": 23.942652329749105,
"grad_norm": 4.022675037384033,
"learning_rate": 0.0001812987012987013,
"loss": 0.4635,
"step": 3340
},
{
"epoch": 24.086021505376344,
"grad_norm": 4.3577961921691895,
"learning_rate": 0.00018118326118326118,
"loss": 0.4399,
"step": 3360
},
{
"epoch": 24.229390681003583,
"grad_norm": 4.307856559753418,
"learning_rate": 0.00018106782106782108,
"loss": 0.4369,
"step": 3380
},
{
"epoch": 24.372759856630825,
"grad_norm": 3.8509104251861572,
"learning_rate": 0.00018095238095238095,
"loss": 0.396,
"step": 3400
},
{
"epoch": 24.516129032258064,
"grad_norm": 4.024786949157715,
"learning_rate": 0.00018083694083694085,
"loss": 0.4283,
"step": 3420
},
{
"epoch": 24.659498207885306,
"grad_norm": 4.666787624359131,
"learning_rate": 0.00018072150072150073,
"loss": 0.4333,
"step": 3440
},
{
"epoch": 24.802867383512545,
"grad_norm": 3.9275686740875244,
"learning_rate": 0.00018060606060606063,
"loss": 0.4453,
"step": 3460
},
{
"epoch": 24.946236559139784,
"grad_norm": 4.202296733856201,
"learning_rate": 0.0001804906204906205,
"loss": 0.4556,
"step": 3480
},
{
"epoch": 25.089605734767026,
"grad_norm": 4.293205738067627,
"learning_rate": 0.00018037518037518038,
"loss": 0.3734,
"step": 3500
},
{
"epoch": 25.232974910394265,
"grad_norm": 4.109980583190918,
"learning_rate": 0.00018025974025974025,
"loss": 0.3765,
"step": 3520
},
{
"epoch": 25.376344086021504,
"grad_norm": 3.595794200897217,
"learning_rate": 0.00018014430014430015,
"loss": 0.4052,
"step": 3540
},
{
"epoch": 25.519713261648747,
"grad_norm": 3.6639111042022705,
"learning_rate": 0.00018002886002886002,
"loss": 0.44,
"step": 3560
},
{
"epoch": 25.663082437275985,
"grad_norm": 4.5247931480407715,
"learning_rate": 0.00017991341991341992,
"loss": 0.4004,
"step": 3580
},
{
"epoch": 25.806451612903224,
"grad_norm": 3.711890697479248,
"learning_rate": 0.0001797979797979798,
"loss": 0.4452,
"step": 3600
},
{
"epoch": 25.949820788530467,
"grad_norm": 5.131597518920898,
"learning_rate": 0.0001796825396825397,
"loss": 0.476,
"step": 3620
},
{
"epoch": 26.093189964157705,
"grad_norm": 3.825037956237793,
"learning_rate": 0.00017956709956709957,
"loss": 0.369,
"step": 3640
},
{
"epoch": 26.236559139784948,
"grad_norm": 4.446529865264893,
"learning_rate": 0.00017945165945165947,
"loss": 0.3898,
"step": 3660
},
{
"epoch": 26.379928315412187,
"grad_norm": 3.0329320430755615,
"learning_rate": 0.00017933621933621934,
"loss": 0.3671,
"step": 3680
},
{
"epoch": 26.523297491039425,
"grad_norm": 4.175711631774902,
"learning_rate": 0.00017922077922077922,
"loss": 0.3932,
"step": 3700
},
{
"epoch": 26.666666666666668,
"grad_norm": 3.5688698291778564,
"learning_rate": 0.00017910533910533912,
"loss": 0.4308,
"step": 3720
},
{
"epoch": 26.810035842293907,
"grad_norm": 5.509890556335449,
"learning_rate": 0.000178989898989899,
"loss": 0.3667,
"step": 3740
},
{
"epoch": 26.953405017921146,
"grad_norm": 5.023184776306152,
"learning_rate": 0.0001788744588744589,
"loss": 0.4775,
"step": 3760
},
{
"epoch": 27.096774193548388,
"grad_norm": 4.032219886779785,
"learning_rate": 0.00017875901875901877,
"loss": 0.4131,
"step": 3780
},
{
"epoch": 27.240143369175627,
"grad_norm": 4.195873737335205,
"learning_rate": 0.00017864357864357867,
"loss": 0.3387,
"step": 3800
},
{
"epoch": 27.38351254480287,
"grad_norm": 4.665581703186035,
"learning_rate": 0.00017852813852813854,
"loss": 0.414,
"step": 3820
},
{
"epoch": 27.526881720430108,
"grad_norm": 4.608016490936279,
"learning_rate": 0.00017841269841269844,
"loss": 0.3583,
"step": 3840
},
{
"epoch": 27.670250896057347,
"grad_norm": 7.6718854904174805,
"learning_rate": 0.0001782972582972583,
"loss": 0.401,
"step": 3860
},
{
"epoch": 27.81362007168459,
"grad_norm": 6.262495994567871,
"learning_rate": 0.0001781818181818182,
"loss": 0.386,
"step": 3880
},
{
"epoch": 27.956989247311828,
"grad_norm": 5.654719829559326,
"learning_rate": 0.00017806637806637806,
"loss": 0.3942,
"step": 3900
},
{
"epoch": 28.100358422939067,
"grad_norm": 3.569892406463623,
"learning_rate": 0.00017795093795093796,
"loss": 0.3701,
"step": 3920
},
{
"epoch": 28.24372759856631,
"grad_norm": 3.7804338932037354,
"learning_rate": 0.00017783549783549784,
"loss": 0.3829,
"step": 3940
},
{
"epoch": 28.387096774193548,
"grad_norm": 5.116115093231201,
"learning_rate": 0.00017772005772005774,
"loss": 0.3774,
"step": 3960
},
{
"epoch": 28.530465949820787,
"grad_norm": 4.543453693389893,
"learning_rate": 0.0001776046176046176,
"loss": 0.3319,
"step": 3980
},
{
"epoch": 28.67383512544803,
"grad_norm": 4.088935852050781,
"learning_rate": 0.0001774891774891775,
"loss": 0.3457,
"step": 4000
}
],
"logging_steps": 20,
"max_steps": 34750,
"num_input_tokens_seen": 0,
"num_train_epochs": 250,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.079524906326426e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}