{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.2728044123886297,
"eval_steps": 600,
"global_step": 1500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00848536274925753,
"grad_norm": 0.4898678891363344,
"learning_rate": 8.488964346349746e-07,
"loss": 1.8056,
"step": 10
},
{
"epoch": 0.01697072549851506,
"grad_norm": 0.3537473179717183,
"learning_rate": 1.6977928692699491e-06,
"loss": 1.7621,
"step": 20
},
{
"epoch": 0.025456088247772592,
"grad_norm": 0.28215953004159977,
"learning_rate": 2.546689303904924e-06,
"loss": 1.7571,
"step": 30
},
{
"epoch": 0.03394145099703012,
"grad_norm": 0.27446565146764923,
"learning_rate": 3.3955857385398982e-06,
"loss": 1.7136,
"step": 40
},
{
"epoch": 0.04242681374628765,
"grad_norm": 0.17051549768176558,
"learning_rate": 4.244482173174873e-06,
"loss": 1.6767,
"step": 50
},
{
"epoch": 0.050912176495545185,
"grad_norm": 0.17763882467320422,
"learning_rate": 5.093378607809848e-06,
"loss": 1.6371,
"step": 60
},
{
"epoch": 0.05939753924480271,
"grad_norm": 0.14311462596290048,
"learning_rate": 5.942275042444822e-06,
"loss": 1.6324,
"step": 70
},
{
"epoch": 0.06788290199406025,
"grad_norm": 0.1659540846071645,
"learning_rate": 6.7911714770797965e-06,
"loss": 1.6062,
"step": 80
},
{
"epoch": 0.07636826474331777,
"grad_norm": 0.20064072815620043,
"learning_rate": 7.640067911714771e-06,
"loss": 1.5832,
"step": 90
},
{
"epoch": 0.0848536274925753,
"grad_norm": 0.2179045681711979,
"learning_rate": 8.488964346349745e-06,
"loss": 1.5898,
"step": 100
},
{
"epoch": 0.09333899024183284,
"grad_norm": 0.23866012053128668,
"learning_rate": 9.337860780984721e-06,
"loss": 1.5924,
"step": 110
},
{
"epoch": 0.10182435299109037,
"grad_norm": 0.18578051776430282,
"learning_rate": 1.0186757215619695e-05,
"loss": 1.5877,
"step": 120
},
{
"epoch": 0.1103097157403479,
"grad_norm": 0.2216509707409362,
"learning_rate": 1.103565365025467e-05,
"loss": 1.5947,
"step": 130
},
{
"epoch": 0.11879507848960542,
"grad_norm": 0.20427142255694086,
"learning_rate": 1.1884550084889643e-05,
"loss": 1.5841,
"step": 140
},
{
"epoch": 0.12728044123886295,
"grad_norm": 0.1765851415675038,
"learning_rate": 1.2733446519524619e-05,
"loss": 1.5878,
"step": 150
},
{
"epoch": 0.1357658039881205,
"grad_norm": 0.1769355117060811,
"learning_rate": 1.3582342954159593e-05,
"loss": 1.5795,
"step": 160
},
{
"epoch": 0.14425116673737803,
"grad_norm": 0.1617675663096666,
"learning_rate": 1.4431239388794569e-05,
"loss": 1.5549,
"step": 170
},
{
"epoch": 0.15273652948663555,
"grad_norm": 0.17302259072151574,
"learning_rate": 1.5280135823429543e-05,
"loss": 1.5808,
"step": 180
},
{
"epoch": 0.1612218922358931,
"grad_norm": 0.16876039012432806,
"learning_rate": 1.6129032258064517e-05,
"loss": 1.5676,
"step": 190
},
{
"epoch": 0.1697072549851506,
"grad_norm": 0.19627360154037596,
"learning_rate": 1.697792869269949e-05,
"loss": 1.5598,
"step": 200
},
{
"epoch": 0.17819261773440814,
"grad_norm": 0.16078510362361015,
"learning_rate": 1.7826825127334465e-05,
"loss": 1.5667,
"step": 210
},
{
"epoch": 0.18667798048366568,
"grad_norm": 0.16044786518959703,
"learning_rate": 1.8675721561969442e-05,
"loss": 1.5815,
"step": 220
},
{
"epoch": 0.1951633432329232,
"grad_norm": 0.15656958873834717,
"learning_rate": 1.9524617996604416e-05,
"loss": 1.5576,
"step": 230
},
{
"epoch": 0.20364870598218074,
"grad_norm": 0.1687290471357602,
"learning_rate": 2.037351443123939e-05,
"loss": 1.5453,
"step": 240
},
{
"epoch": 0.21213406873143828,
"grad_norm": 0.1519017348276184,
"learning_rate": 2.1222410865874364e-05,
"loss": 1.5554,
"step": 250
},
{
"epoch": 0.2206194314806958,
"grad_norm": 0.15761892005160086,
"learning_rate": 2.207130730050934e-05,
"loss": 1.5494,
"step": 260
},
{
"epoch": 0.22910479422995333,
"grad_norm": 0.16857088482977495,
"learning_rate": 2.2920203735144312e-05,
"loss": 1.5794,
"step": 270
},
{
"epoch": 0.23759015697921085,
"grad_norm": 0.1678705209913503,
"learning_rate": 2.3769100169779286e-05,
"loss": 1.5373,
"step": 280
},
{
"epoch": 0.2460755197284684,
"grad_norm": 0.14812649566587394,
"learning_rate": 2.461799660441426e-05,
"loss": 1.5504,
"step": 290
},
{
"epoch": 0.2545608824777259,
"grad_norm": 0.17651916734325857,
"learning_rate": 2.5466893039049238e-05,
"loss": 1.5607,
"step": 300
},
{
"epoch": 0.26304624522698344,
"grad_norm": 0.14883055338507856,
"learning_rate": 2.6315789473684212e-05,
"loss": 1.5311,
"step": 310
},
{
"epoch": 0.271531607976241,
"grad_norm": 0.15787522753231265,
"learning_rate": 2.7164685908319186e-05,
"loss": 1.5656,
"step": 320
},
{
"epoch": 0.2800169707254985,
"grad_norm": 0.1625232940237689,
"learning_rate": 2.801358234295416e-05,
"loss": 1.5686,
"step": 330
},
{
"epoch": 0.28850233347475607,
"grad_norm": 0.18505951289343867,
"learning_rate": 2.8862478777589137e-05,
"loss": 1.5474,
"step": 340
},
{
"epoch": 0.29698769622401355,
"grad_norm": 0.13785772316349984,
"learning_rate": 2.9711375212224108e-05,
"loss": 1.5696,
"step": 350
},
{
"epoch": 0.3054730589732711,
"grad_norm": 0.13531274658248552,
"learning_rate": 3.0560271646859086e-05,
"loss": 1.5551,
"step": 360
},
{
"epoch": 0.31395842172252864,
"grad_norm": 0.1366381415368909,
"learning_rate": 3.140916808149406e-05,
"loss": 1.524,
"step": 370
},
{
"epoch": 0.3224437844717862,
"grad_norm": 0.14587220569353926,
"learning_rate": 3.2258064516129034e-05,
"loss": 1.5515,
"step": 380
},
{
"epoch": 0.3309291472210437,
"grad_norm": 0.13336349383744864,
"learning_rate": 3.310696095076401e-05,
"loss": 1.5457,
"step": 390
},
{
"epoch": 0.3394145099703012,
"grad_norm": 0.1772016947970983,
"learning_rate": 3.395585738539898e-05,
"loss": 1.5582,
"step": 400
},
{
"epoch": 0.34789987271955874,
"grad_norm": 0.13819420575084573,
"learning_rate": 3.4804753820033956e-05,
"loss": 1.5326,
"step": 410
},
{
"epoch": 0.3563852354688163,
"grad_norm": 0.12729862167862188,
"learning_rate": 3.565365025466893e-05,
"loss": 1.5387,
"step": 420
},
{
"epoch": 0.3648705982180738,
"grad_norm": 0.11777082851399363,
"learning_rate": 3.6502546689303904e-05,
"loss": 1.5587,
"step": 430
},
{
"epoch": 0.37335596096733137,
"grad_norm": 0.15372268131323022,
"learning_rate": 3.7351443123938885e-05,
"loss": 1.5362,
"step": 440
},
{
"epoch": 0.3818413237165889,
"grad_norm": 0.12616185572252248,
"learning_rate": 3.820033955857386e-05,
"loss": 1.5548,
"step": 450
},
{
"epoch": 0.3903266864658464,
"grad_norm": 0.1311200786303391,
"learning_rate": 3.904923599320883e-05,
"loss": 1.5409,
"step": 460
},
{
"epoch": 0.39881204921510394,
"grad_norm": 0.1707919112561785,
"learning_rate": 3.989813242784381e-05,
"loss": 1.5509,
"step": 470
},
{
"epoch": 0.4072974119643615,
"grad_norm": 0.14660149264284913,
"learning_rate": 4.074702886247878e-05,
"loss": 1.5433,
"step": 480
},
{
"epoch": 0.415782774713619,
"grad_norm": 0.12478895483834351,
"learning_rate": 4.1595925297113755e-05,
"loss": 1.5382,
"step": 490
},
{
"epoch": 0.42426813746287656,
"grad_norm": 0.12327957445795817,
"learning_rate": 4.244482173174873e-05,
"loss": 1.5515,
"step": 500
},
{
"epoch": 0.43275350021213405,
"grad_norm": 0.12922777738650987,
"learning_rate": 4.32937181663837e-05,
"loss": 1.5688,
"step": 510
},
{
"epoch": 0.4412388629613916,
"grad_norm": 0.12486802189783415,
"learning_rate": 4.414261460101868e-05,
"loss": 1.5452,
"step": 520
},
{
"epoch": 0.44972422571064913,
"grad_norm": 0.1360610874577123,
"learning_rate": 4.499151103565366e-05,
"loss": 1.5493,
"step": 530
},
{
"epoch": 0.45820958845990667,
"grad_norm": 0.1884897685356775,
"learning_rate": 4.5840407470288625e-05,
"loss": 1.5511,
"step": 540
},
{
"epoch": 0.4666949512091642,
"grad_norm": 0.12446302384809525,
"learning_rate": 4.6689303904923606e-05,
"loss": 1.5458,
"step": 550
},
{
"epoch": 0.4751803139584217,
"grad_norm": 0.13169591804768588,
"learning_rate": 4.753820033955857e-05,
"loss": 1.5569,
"step": 560
},
{
"epoch": 0.48366567670767924,
"grad_norm": 0.1343809247449631,
"learning_rate": 4.8387096774193554e-05,
"loss": 1.5408,
"step": 570
},
{
"epoch": 0.4921510394569368,
"grad_norm": 0.14024589853602,
"learning_rate": 4.923599320882852e-05,
"loss": 1.5487,
"step": 580
},
{
"epoch": 0.5006364022061943,
"grad_norm": 0.16240429253875313,
"learning_rate": 4.999999560970061e-05,
"loss": 1.5488,
"step": 590
},
{
"epoch": 0.5091217649554518,
"grad_norm": 0.12575424857894482,
"learning_rate": 4.999946877563971e-05,
"loss": 1.532,
"step": 600
},
{
"epoch": 0.5091217649554518,
"eval_loss": 1.519254446029663,
"eval_runtime": 53.3242,
"eval_samples_per_second": 7.145,
"eval_steps_per_second": 0.9,
"step": 600
},
{
"epoch": 0.5176071277047094,
"grad_norm": 0.18688482756329736,
"learning_rate": 4.999806390290309e-05,
"loss": 1.5544,
"step": 610
},
{
"epoch": 0.5260924904539669,
"grad_norm": 0.12425469431830571,
"learning_rate": 4.999578104083307e-05,
"loss": 1.5443,
"step": 620
},
{
"epoch": 0.5345778532032245,
"grad_norm": 0.1299027485420099,
"learning_rate": 4.999262026960902e-05,
"loss": 1.5569,
"step": 630
},
{
"epoch": 0.543063215952482,
"grad_norm": 0.11441754852508934,
"learning_rate": 4.998858170024449e-05,
"loss": 1.5316,
"step": 640
},
{
"epoch": 0.5515485787017395,
"grad_norm": 0.14888547248976478,
"learning_rate": 4.998366547458326e-05,
"loss": 1.5177,
"step": 650
},
{
"epoch": 0.560033941450997,
"grad_norm": 0.14859292774768867,
"learning_rate": 4.997787176529449e-05,
"loss": 1.5394,
"step": 660
},
{
"epoch": 0.5685193042002545,
"grad_norm": 0.12499154376539734,
"learning_rate": 4.997120077586651e-05,
"loss": 1.5554,
"step": 670
},
{
"epoch": 0.5770046669495121,
"grad_norm": 0.1218974898058821,
"learning_rate": 4.9963652740599774e-05,
"loss": 1.5335,
"step": 680
},
{
"epoch": 0.5854900296987696,
"grad_norm": 0.1273110498715124,
"learning_rate": 4.995522792459859e-05,
"loss": 1.5349,
"step": 690
},
{
"epoch": 0.5939753924480271,
"grad_norm": 0.12115412881719101,
"learning_rate": 4.994592662376183e-05,
"loss": 1.5419,
"step": 700
},
{
"epoch": 0.6024607551972847,
"grad_norm": 0.14855096330233286,
"learning_rate": 4.99357491647725e-05,
"loss": 1.513,
"step": 710
},
{
"epoch": 0.6109461179465422,
"grad_norm": 0.11407988659327956,
"learning_rate": 4.992469590508628e-05,
"loss": 1.5243,
"step": 720
},
{
"epoch": 0.6194314806957998,
"grad_norm": 0.1197712643781127,
"learning_rate": 4.9912767232919035e-05,
"loss": 1.5177,
"step": 730
},
{
"epoch": 0.6279168434450573,
"grad_norm": 0.12400515877262065,
"learning_rate": 4.9899963567233074e-05,
"loss": 1.5619,
"step": 740
},
{
"epoch": 0.6364022061943148,
"grad_norm": 0.12250385257708406,
"learning_rate": 4.988628535772249e-05,
"loss": 1.539,
"step": 750
},
{
"epoch": 0.6448875689435724,
"grad_norm": 0.1262441090496857,
"learning_rate": 4.987173308479738e-05,
"loss": 1.5195,
"step": 760
},
{
"epoch": 0.6533729316928298,
"grad_norm": 0.12459694416473029,
"learning_rate": 4.985630725956694e-05,
"loss": 1.5462,
"step": 770
},
{
"epoch": 0.6618582944420874,
"grad_norm": 0.12985189006106762,
"learning_rate": 4.9840008423821527e-05,
"loss": 1.5113,
"step": 780
},
{
"epoch": 0.6703436571913449,
"grad_norm": 0.12689306141471304,
"learning_rate": 4.9822837150013636e-05,
"loss": 1.5201,
"step": 790
},
{
"epoch": 0.6788290199406024,
"grad_norm": 0.15393156370587963,
"learning_rate": 4.980479404123778e-05,
"loss": 1.5121,
"step": 800
},
{
"epoch": 0.68731438268986,
"grad_norm": 0.13213701895207608,
"learning_rate": 4.978587973120931e-05,
"loss": 1.5307,
"step": 810
},
{
"epoch": 0.6957997454391175,
"grad_norm": 0.11561354931316294,
"learning_rate": 4.9766094884242184e-05,
"loss": 1.5316,
"step": 820
},
{
"epoch": 0.7042851081883751,
"grad_norm": 0.12414772399330044,
"learning_rate": 4.974544019522559e-05,
"loss": 1.5148,
"step": 830
},
{
"epoch": 0.7127704709376326,
"grad_norm": 0.1171652849153521,
"learning_rate": 4.972391638959959e-05,
"loss": 1.5096,
"step": 840
},
{
"epoch": 0.7212558336868902,
"grad_norm": 0.12868937349582316,
"learning_rate": 4.9701524223329585e-05,
"loss": 1.5282,
"step": 850
},
{
"epoch": 0.7297411964361477,
"grad_norm": 0.1200015077117309,
"learning_rate": 4.967826448287981e-05,
"loss": 1.5512,
"step": 860
},
{
"epoch": 0.7382265591854051,
"grad_norm": 0.12340885660045105,
"learning_rate": 4.96541379851857e-05,
"loss": 1.5394,
"step": 870
},
{
"epoch": 0.7467119219346627,
"grad_norm": 0.12976937691467555,
"learning_rate": 4.962914557762517e-05,
"loss": 1.51,
"step": 880
},
{
"epoch": 0.7551972846839202,
"grad_norm": 0.11912878476038466,
"learning_rate": 4.9603288137988905e-05,
"loss": 1.5294,
"step": 890
},
{
"epoch": 0.7636826474331778,
"grad_norm": 0.1299625480337927,
"learning_rate": 4.957656657444947e-05,
"loss": 1.507,
"step": 900
},
{
"epoch": 0.7721680101824353,
"grad_norm": 0.12380144459698468,
"learning_rate": 4.954898182552946e-05,
"loss": 1.5376,
"step": 910
},
{
"epoch": 0.7806533729316928,
"grad_norm": 0.13139339643682763,
"learning_rate": 4.9520534860068535e-05,
"loss": 1.5291,
"step": 920
},
{
"epoch": 0.7891387356809504,
"grad_norm": 0.13088956203983898,
"learning_rate": 4.949122667718935e-05,
"loss": 1.5239,
"step": 930
},
{
"epoch": 0.7976240984302079,
"grad_norm": 0.12586052988453703,
"learning_rate": 4.94610583062625e-05,
"loss": 1.5525,
"step": 940
},
{
"epoch": 0.8061094611794655,
"grad_norm": 0.12020996031652877,
"learning_rate": 4.943003080687035e-05,
"loss": 1.5525,
"step": 950
},
{
"epoch": 0.814594823928723,
"grad_norm": 0.12866375954060869,
"learning_rate": 4.9398145268769856e-05,
"loss": 1.5266,
"step": 960
},
{
"epoch": 0.8230801866779804,
"grad_norm": 0.13166136756817035,
"learning_rate": 4.936540281185423e-05,
"loss": 1.5041,
"step": 970
},
{
"epoch": 0.831565549427238,
"grad_norm": 0.12481946698483787,
"learning_rate": 4.933180458611364e-05,
"loss": 1.5271,
"step": 980
},
{
"epoch": 0.8400509121764955,
"grad_norm": 0.12264463761209114,
"learning_rate": 4.9297351771594844e-05,
"loss": 1.5354,
"step": 990
},
{
"epoch": 0.8485362749257531,
"grad_norm": 0.11985452856537594,
"learning_rate": 4.926204557835968e-05,
"loss": 1.5167,
"step": 1000
},
{
"epoch": 0.8570216376750106,
"grad_norm": 0.13125396521190327,
"learning_rate": 4.9225887246442634e-05,
"loss": 1.5282,
"step": 1010
},
{
"epoch": 0.8655070004242681,
"grad_norm": 0.12730192328072554,
"learning_rate": 4.918887804580725e-05,
"loss": 1.5089,
"step": 1020
},
{
"epoch": 0.8739923631735257,
"grad_norm": 0.12724644219344786,
"learning_rate": 4.915101927630153e-05,
"loss": 1.4964,
"step": 1030
},
{
"epoch": 0.8824777259227832,
"grad_norm": 0.13578611501833232,
"learning_rate": 4.911231226761227e-05,
"loss": 1.5189,
"step": 1040
},
{
"epoch": 0.8909630886720408,
"grad_norm": 0.13577513964986457,
"learning_rate": 4.90727583792184e-05,
"loss": 1.5149,
"step": 1050
},
{
"epoch": 0.8994484514212983,
"grad_norm": 0.1269735011676505,
"learning_rate": 4.903235900034317e-05,
"loss": 1.5066,
"step": 1060
},
{
"epoch": 0.9079338141705557,
"grad_norm": 0.13250058214235566,
"learning_rate": 4.899111554990543e-05,
"loss": 1.5129,
"step": 1070
},
{
"epoch": 0.9164191769198133,
"grad_norm": 0.13130735246433495,
"learning_rate": 4.894902947646975e-05,
"loss": 1.5156,
"step": 1080
},
{
"epoch": 0.9249045396690708,
"grad_norm": 0.1273580180253049,
"learning_rate": 4.890610225819553e-05,
"loss": 1.5324,
"step": 1090
},
{
"epoch": 0.9333899024183284,
"grad_norm": 0.13155314243939242,
"learning_rate": 4.8862335402785136e-05,
"loss": 1.5106,
"step": 1100
},
{
"epoch": 0.9418752651675859,
"grad_norm": 0.13564895211984299,
"learning_rate": 4.88177304474309e-05,
"loss": 1.5067,
"step": 1110
},
{
"epoch": 0.9503606279168434,
"grad_norm": 0.12774735587114736,
"learning_rate": 4.877228895876115e-05,
"loss": 1.5182,
"step": 1120
},
{
"epoch": 0.958845990666101,
"grad_norm": 0.1307997709537685,
"learning_rate": 4.872601253278517e-05,
"loss": 1.4969,
"step": 1130
},
{
"epoch": 0.9673313534153585,
"grad_norm": 0.1304794845040634,
"learning_rate": 4.867890279483717e-05,
"loss": 1.5264,
"step": 1140
},
{
"epoch": 0.9758167161646161,
"grad_norm": 0.13666141796489684,
"learning_rate": 4.8630961399519206e-05,
"loss": 1.5467,
"step": 1150
},
{
"epoch": 0.9843020789138736,
"grad_norm": 0.1370278303190263,
"learning_rate": 4.8582190030643e-05,
"loss": 1.5127,
"step": 1160
},
{
"epoch": 0.9927874416631312,
"grad_norm": 0.1390936629299565,
"learning_rate": 4.8532590401170894e-05,
"loss": 1.5058,
"step": 1170
},
{
"epoch": 1.0012728044123886,
"grad_norm": 0.12934475548108287,
"learning_rate": 4.848216425315561e-05,
"loss": 1.5202,
"step": 1180
},
{
"epoch": 1.0097581671616462,
"grad_norm": 0.13898591683370803,
"learning_rate": 4.843091335767913e-05,
"loss": 1.4563,
"step": 1190
},
{
"epoch": 1.0182435299109036,
"grad_norm": 0.17488231535826249,
"learning_rate": 4.837883951479043e-05,
"loss": 1.4402,
"step": 1200
},
{
"epoch": 1.0182435299109036,
"eval_loss": 1.4955657720565796,
"eval_runtime": 52.424,
"eval_samples_per_second": 7.268,
"eval_steps_per_second": 0.916,
"step": 1200
},
{
"epoch": 1.0267288926601612,
"grad_norm": 0.1536036344095855,
"learning_rate": 4.832594455344229e-05,
"loss": 1.4848,
"step": 1210
},
{
"epoch": 1.0352142554094188,
"grad_norm": 0.15762414421336599,
"learning_rate": 4.827223033142706e-05,
"loss": 1.4567,
"step": 1220
},
{
"epoch": 1.0436996181586762,
"grad_norm": 0.15058229398130366,
"learning_rate": 4.8217698735311414e-05,
"loss": 1.4672,
"step": 1230
},
{
"epoch": 1.0521849809079338,
"grad_norm": 0.16010992835678386,
"learning_rate": 4.8162351680370044e-05,
"loss": 1.4458,
"step": 1240
},
{
"epoch": 1.0606703436571914,
"grad_norm": 0.16758816000341356,
"learning_rate": 4.810619111051847e-05,
"loss": 1.4842,
"step": 1250
},
{
"epoch": 1.069155706406449,
"grad_norm": 0.16559260972674986,
"learning_rate": 4.8049218998244696e-05,
"loss": 1.4556,
"step": 1260
},
{
"epoch": 1.0776410691557063,
"grad_norm": 0.17237632034416966,
"learning_rate": 4.7991437344539966e-05,
"loss": 1.4813,
"step": 1270
},
{
"epoch": 1.086126431904964,
"grad_norm": 0.17112756741722487,
"learning_rate": 4.793284817882845e-05,
"loss": 1.4535,
"step": 1280
},
{
"epoch": 1.0946117946542215,
"grad_norm": 0.16828572707718548,
"learning_rate": 4.787345355889604e-05,
"loss": 1.4344,
"step": 1290
},
{
"epoch": 1.103097157403479,
"grad_norm": 0.15709986047041227,
"learning_rate": 4.7813255570817985e-05,
"loss": 1.4744,
"step": 1300
},
{
"epoch": 1.1115825201527365,
"grad_norm": 0.16651547128146313,
"learning_rate": 4.775225632888568e-05,
"loss": 1.4561,
"step": 1310
},
{
"epoch": 1.120067882901994,
"grad_norm": 0.16750176017515714,
"learning_rate": 4.76904579755324e-05,
"loss": 1.4616,
"step": 1320
},
{
"epoch": 1.1285532456512515,
"grad_norm": 0.1608016567554825,
"learning_rate": 4.7627862681258037e-05,
"loss": 1.4593,
"step": 1330
},
{
"epoch": 1.137038608400509,
"grad_norm": 0.21390766919038295,
"learning_rate": 4.756447264455287e-05,
"loss": 1.4484,
"step": 1340
},
{
"epoch": 1.1455239711497667,
"grad_norm": 0.16826883293172662,
"learning_rate": 4.750029009182038e-05,
"loss": 1.4703,
"step": 1350
},
{
"epoch": 1.1540093338990243,
"grad_norm": 0.17431508867079595,
"learning_rate": 4.7435317277299e-05,
"loss": 1.4701,
"step": 1360
},
{
"epoch": 1.1624946966482816,
"grad_norm": 0.15973851467570443,
"learning_rate": 4.736955648298299e-05,
"loss": 1.4503,
"step": 1370
},
{
"epoch": 1.1709800593975392,
"grad_norm": 0.1887713767970947,
"learning_rate": 4.730301001854225e-05,
"loss": 1.4624,
"step": 1380
},
{
"epoch": 1.1794654221467968,
"grad_norm": 0.16898695344997974,
"learning_rate": 4.7235680221241216e-05,
"loss": 1.4452,
"step": 1390
},
{
"epoch": 1.1879507848960542,
"grad_norm": 0.20014553287073528,
"learning_rate": 4.716756945585681e-05,
"loss": 1.4717,
"step": 1400
},
{
"epoch": 1.1964361476453118,
"grad_norm": 0.17137954325200072,
"learning_rate": 4.709868011459528e-05,
"loss": 1.4403,
"step": 1410
},
{
"epoch": 1.2049215103945694,
"grad_norm": 0.17801721751888322,
"learning_rate": 4.7029014617008294e-05,
"loss": 1.4339,
"step": 1420
},
{
"epoch": 1.213406873143827,
"grad_norm": 0.17139613676642362,
"learning_rate": 4.695857540990789e-05,
"loss": 1.4573,
"step": 1430
},
{
"epoch": 1.2218922358930844,
"grad_norm": 0.16971403514498054,
"learning_rate": 4.688736496728058e-05,
"loss": 1.4282,
"step": 1440
},
{
"epoch": 1.230377598642342,
"grad_norm": 0.17200272420880428,
"learning_rate": 4.681538579020038e-05,
"loss": 1.4434,
"step": 1450
},
{
"epoch": 1.2388629613915996,
"grad_norm": 0.17208160407432616,
"learning_rate": 4.6742640406741106e-05,
"loss": 1.45,
"step": 1460
},
{
"epoch": 1.247348324140857,
"grad_norm": 0.1939626212901777,
"learning_rate": 4.666913137188743e-05,
"loss": 1.4608,
"step": 1470
},
{
"epoch": 1.2558336868901145,
"grad_norm": 0.17291794493304186,
"learning_rate": 4.6594861267445236e-05,
"loss": 1.4671,
"step": 1480
},
{
"epoch": 1.2643190496393721,
"grad_norm": 0.18219792041638924,
"learning_rate": 4.651983270195093e-05,
"loss": 1.4262,
"step": 1490
},
{
"epoch": 1.2728044123886297,
"grad_norm": 0.18086437830489926,
"learning_rate": 4.644404831057979e-05,
"loss": 1.4455,
"step": 1500
}
],
"logging_steps": 10,
"max_steps": 5890,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1260689402363904.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}