{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9827003787491043,
"eval_steps": 200,
"global_step": 1200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.008189169822909202,
"grad_norm": 4.80268669128418,
"learning_rate": 0.0001,
"loss": 1.6951,
"step": 10
},
{
"epoch": 0.016378339645818404,
"grad_norm": 2.047800302505493,
"learning_rate": 0.0001,
"loss": 0.983,
"step": 20
},
{
"epoch": 0.024567509468727607,
"grad_norm": 1.5396971702575684,
"learning_rate": 0.0001,
"loss": 0.9092,
"step": 30
},
{
"epoch": 0.03275667929163681,
"grad_norm": 4.307969093322754,
"learning_rate": 0.0001,
"loss": 0.8544,
"step": 40
},
{
"epoch": 0.04094584911454601,
"grad_norm": 2.1945383548736572,
"learning_rate": 0.0001,
"loss": 0.8015,
"step": 50
},
{
"epoch": 0.04913501893745521,
"grad_norm": 1.5352895259857178,
"learning_rate": 0.0001,
"loss": 0.7917,
"step": 60
},
{
"epoch": 0.057324188760364415,
"grad_norm": 1.9893757104873657,
"learning_rate": 0.0001,
"loss": 0.7561,
"step": 70
},
{
"epoch": 0.06551335858327362,
"grad_norm": 1.067208170890808,
"learning_rate": 0.0001,
"loss": 0.7457,
"step": 80
},
{
"epoch": 0.07370252840618283,
"grad_norm": 1.8355977535247803,
"learning_rate": 0.0001,
"loss": 0.7197,
"step": 90
},
{
"epoch": 0.08189169822909202,
"grad_norm": 0.9938833713531494,
"learning_rate": 0.0001,
"loss": 0.6959,
"step": 100
},
{
"epoch": 0.09008086805200123,
"grad_norm": 1.5569779872894287,
"learning_rate": 0.0001,
"loss": 0.6875,
"step": 110
},
{
"epoch": 0.09827003787491043,
"grad_norm": 1.362318515777588,
"learning_rate": 0.0001,
"loss": 0.7031,
"step": 120
},
{
"epoch": 0.10645920769781964,
"grad_norm": 0.9789265990257263,
"learning_rate": 0.0001,
"loss": 0.6744,
"step": 130
},
{
"epoch": 0.11464837752072883,
"grad_norm": 0.9227413535118103,
"learning_rate": 0.0001,
"loss": 0.6496,
"step": 140
},
{
"epoch": 0.12283754734363804,
"grad_norm": 1.0309714078903198,
"learning_rate": 0.0001,
"loss": 0.6365,
"step": 150
},
{
"epoch": 0.13102671716654724,
"grad_norm": 1.2037794589996338,
"learning_rate": 0.0001,
"loss": 0.6629,
"step": 160
},
{
"epoch": 0.13921588698945644,
"grad_norm": 0.8785035014152527,
"learning_rate": 0.0001,
"loss": 0.6462,
"step": 170
},
{
"epoch": 0.14740505681236565,
"grad_norm": 1.5502973794937134,
"learning_rate": 0.0001,
"loss": 0.6146,
"step": 180
},
{
"epoch": 0.15559422663527486,
"grad_norm": 0.9982532858848572,
"learning_rate": 0.0001,
"loss": 0.6205,
"step": 190
},
{
"epoch": 0.16378339645818404,
"grad_norm": 1.0146996974945068,
"learning_rate": 0.0001,
"loss": 0.6107,
"step": 200
},
{
"epoch": 0.16378339645818404,
"eval_loss": 0.5019610524177551,
"eval_runtime": 993.1256,
"eval_samples_per_second": 36.249,
"eval_steps_per_second": 18.125,
"step": 200
},
{
"epoch": 0.17197256628109325,
"grad_norm": 1.0556907653808594,
"learning_rate": 0.0001,
"loss": 0.6103,
"step": 210
},
{
"epoch": 0.18016173610400246,
"grad_norm": 1.069365382194519,
"learning_rate": 0.0001,
"loss": 0.6309,
"step": 220
},
{
"epoch": 0.18835090592691167,
"grad_norm": 1.2085084915161133,
"learning_rate": 0.0001,
"loss": 0.598,
"step": 230
},
{
"epoch": 0.19654007574982085,
"grad_norm": 0.8124738931655884,
"learning_rate": 0.0001,
"loss": 0.603,
"step": 240
},
{
"epoch": 0.20472924557273006,
"grad_norm": 1.032139778137207,
"learning_rate": 0.0001,
"loss": 0.611,
"step": 250
},
{
"epoch": 0.21291841539563927,
"grad_norm": 0.8203685283660889,
"learning_rate": 0.0001,
"loss": 0.5709,
"step": 260
},
{
"epoch": 0.22110758521854848,
"grad_norm": 0.7408131957054138,
"learning_rate": 0.0001,
"loss": 0.5753,
"step": 270
},
{
"epoch": 0.22929675504145766,
"grad_norm": 0.858842134475708,
"learning_rate": 0.0001,
"loss": 0.5649,
"step": 280
},
{
"epoch": 0.23748592486436687,
"grad_norm": 0.9077485799789429,
"learning_rate": 0.0001,
"loss": 0.5769,
"step": 290
},
{
"epoch": 0.24567509468727608,
"grad_norm": 1.1604504585266113,
"learning_rate": 0.0001,
"loss": 0.5558,
"step": 300
},
{
"epoch": 0.2538642645101853,
"grad_norm": 0.7758939266204834,
"learning_rate": 0.0001,
"loss": 0.5711,
"step": 310
},
{
"epoch": 0.26205343433309447,
"grad_norm": 1.2495347261428833,
"learning_rate": 0.0001,
"loss": 0.5845,
"step": 320
},
{
"epoch": 0.2702426041560037,
"grad_norm": 0.8986139297485352,
"learning_rate": 0.0001,
"loss": 0.5483,
"step": 330
},
{
"epoch": 0.2784317739789129,
"grad_norm": 0.7849992513656616,
"learning_rate": 0.0001,
"loss": 0.5682,
"step": 340
},
{
"epoch": 0.28662094380182207,
"grad_norm": 0.8479835987091064,
"learning_rate": 0.0001,
"loss": 0.5381,
"step": 350
},
{
"epoch": 0.2948101136247313,
"grad_norm": 0.9730711579322815,
"learning_rate": 0.0001,
"loss": 0.5572,
"step": 360
},
{
"epoch": 0.3029992834476405,
"grad_norm": 0.8130871653556824,
"learning_rate": 0.0001,
"loss": 0.5285,
"step": 370
},
{
"epoch": 0.3111884532705497,
"grad_norm": 0.7159671783447266,
"learning_rate": 0.0001,
"loss": 0.529,
"step": 380
},
{
"epoch": 0.3193776230934589,
"grad_norm": 0.7021393775939941,
"learning_rate": 0.0001,
"loss": 0.5371,
"step": 390
},
{
"epoch": 0.3275667929163681,
"grad_norm": 0.9046493172645569,
"learning_rate": 0.0001,
"loss": 0.5412,
"step": 400
},
{
"epoch": 0.3275667929163681,
"eval_loss": 0.43820247054100037,
"eval_runtime": 1014.548,
"eval_samples_per_second": 35.484,
"eval_steps_per_second": 17.742,
"step": 400
},
{
"epoch": 0.3357559627392773,
"grad_norm": 0.9188250303268433,
"learning_rate": 0.0001,
"loss": 0.5567,
"step": 410
},
{
"epoch": 0.3439451325621865,
"grad_norm": 0.6707028150558472,
"learning_rate": 0.0001,
"loss": 0.5303,
"step": 420
},
{
"epoch": 0.3521343023850957,
"grad_norm": 1.1012392044067383,
"learning_rate": 0.0001,
"loss": 0.5356,
"step": 430
},
{
"epoch": 0.3603234722080049,
"grad_norm": 0.6614859104156494,
"learning_rate": 0.0001,
"loss": 0.5443,
"step": 440
},
{
"epoch": 0.3685126420309141,
"grad_norm": 0.8753280639648438,
"learning_rate": 0.0001,
"loss": 0.5278,
"step": 450
},
{
"epoch": 0.37670181185382334,
"grad_norm": 0.713115394115448,
"learning_rate": 0.0001,
"loss": 0.5344,
"step": 460
},
{
"epoch": 0.3848909816767325,
"grad_norm": 1.034822940826416,
"learning_rate": 0.0001,
"loss": 0.511,
"step": 470
},
{
"epoch": 0.3930801514996417,
"grad_norm": 0.7497478723526001,
"learning_rate": 0.0001,
"loss": 0.5143,
"step": 480
},
{
"epoch": 0.40126932132255094,
"grad_norm": 0.9680531024932861,
"learning_rate": 0.0001,
"loss": 0.5372,
"step": 490
},
{
"epoch": 0.4094584911454601,
"grad_norm": 0.6382943391799927,
"learning_rate": 0.0001,
"loss": 0.5242,
"step": 500
},
{
"epoch": 0.4176476609683693,
"grad_norm": 0.7657376527786255,
"learning_rate": 0.0001,
"loss": 0.5125,
"step": 510
},
{
"epoch": 0.42583683079127854,
"grad_norm": 0.9952341914176941,
"learning_rate": 0.0001,
"loss": 0.507,
"step": 520
},
{
"epoch": 0.4340260006141877,
"grad_norm": 0.8375737071037292,
"learning_rate": 0.0001,
"loss": 0.5142,
"step": 530
},
{
"epoch": 0.44221517043709696,
"grad_norm": 0.7849007844924927,
"learning_rate": 0.0001,
"loss": 0.5049,
"step": 540
},
{
"epoch": 0.45040434026000614,
"grad_norm": 0.7818809151649475,
"learning_rate": 0.0001,
"loss": 0.4921,
"step": 550
},
{
"epoch": 0.4585935100829153,
"grad_norm": 0.6086965203285217,
"learning_rate": 0.0001,
"loss": 0.5254,
"step": 560
},
{
"epoch": 0.46678267990582456,
"grad_norm": 0.6038303971290588,
"learning_rate": 0.0001,
"loss": 0.5044,
"step": 570
},
{
"epoch": 0.47497184972873374,
"grad_norm": 0.7107850313186646,
"learning_rate": 0.0001,
"loss": 0.5189,
"step": 580
},
{
"epoch": 0.483161019551643,
"grad_norm": 0.7399956583976746,
"learning_rate": 0.0001,
"loss": 0.4814,
"step": 590
},
{
"epoch": 0.49135018937455216,
"grad_norm": 0.7447919249534607,
"learning_rate": 0.0001,
"loss": 0.5273,
"step": 600
},
{
"epoch": 0.49135018937455216,
"eval_loss": 0.41115716099739075,
"eval_runtime": 974.1489,
"eval_samples_per_second": 36.955,
"eval_steps_per_second": 18.478,
"step": 600
},
{
"epoch": 0.49953935919746134,
"grad_norm": 0.7186319231987,
"learning_rate": 0.0001,
"loss": 0.4964,
"step": 610
},
{
"epoch": 0.5077285290203706,
"grad_norm": 0.6659027338027954,
"learning_rate": 0.0001,
"loss": 0.4813,
"step": 620
},
{
"epoch": 0.5159176988432798,
"grad_norm": 0.8509200811386108,
"learning_rate": 0.0001,
"loss": 0.5007,
"step": 630
},
{
"epoch": 0.5241068686661889,
"grad_norm": 0.7163340449333191,
"learning_rate": 0.0001,
"loss": 0.4918,
"step": 640
},
{
"epoch": 0.5322960384890981,
"grad_norm": 0.6762372851371765,
"learning_rate": 0.0001,
"loss": 0.4721,
"step": 650
},
{
"epoch": 0.5404852083120074,
"grad_norm": 0.741247832775116,
"learning_rate": 0.0001,
"loss": 0.5035,
"step": 660
},
{
"epoch": 0.5486743781349166,
"grad_norm": 0.6757375001907349,
"learning_rate": 0.0001,
"loss": 0.5044,
"step": 670
},
{
"epoch": 0.5568635479578258,
"grad_norm": 0.6605280637741089,
"learning_rate": 0.0001,
"loss": 0.5036,
"step": 680
},
{
"epoch": 0.565052717780735,
"grad_norm": 0.7928522825241089,
"learning_rate": 0.0001,
"loss": 0.4884,
"step": 690
},
{
"epoch": 0.5732418876036441,
"grad_norm": 0.7482662796974182,
"learning_rate": 0.0001,
"loss": 0.4831,
"step": 700
},
{
"epoch": 0.5814310574265534,
"grad_norm": 0.7094814777374268,
"learning_rate": 0.0001,
"loss": 0.493,
"step": 710
},
{
"epoch": 0.5896202272494626,
"grad_norm": 0.708988606929779,
"learning_rate": 0.0001,
"loss": 0.4878,
"step": 720
},
{
"epoch": 0.5978093970723718,
"grad_norm": 0.7677808403968811,
"learning_rate": 0.0001,
"loss": 0.4819,
"step": 730
},
{
"epoch": 0.605998566895281,
"grad_norm": 0.6716774702072144,
"learning_rate": 0.0001,
"loss": 0.505,
"step": 740
},
{
"epoch": 0.6141877367181902,
"grad_norm": 0.6802059412002563,
"learning_rate": 0.0001,
"loss": 0.4612,
"step": 750
},
{
"epoch": 0.6223769065410995,
"grad_norm": 0.7577608227729797,
"learning_rate": 0.0001,
"loss": 0.4814,
"step": 760
},
{
"epoch": 0.6305660763640086,
"grad_norm": 0.6441067457199097,
"learning_rate": 0.0001,
"loss": 0.4838,
"step": 770
},
{
"epoch": 0.6387552461869178,
"grad_norm": 0.666208028793335,
"learning_rate": 0.0001,
"loss": 0.4908,
"step": 780
},
{
"epoch": 0.646944416009827,
"grad_norm": 0.7665196657180786,
"learning_rate": 0.0001,
"loss": 0.4716,
"step": 790
},
{
"epoch": 0.6551335858327362,
"grad_norm": 0.8056005239486694,
"learning_rate": 0.0001,
"loss": 0.4717,
"step": 800
},
{
"epoch": 0.6551335858327362,
"eval_loss": 0.41053226590156555,
"eval_runtime": 952.7571,
"eval_samples_per_second": 37.785,
"eval_steps_per_second": 18.893,
"step": 800
},
{
"epoch": 0.6633227556556454,
"grad_norm": 1.5036696195602417,
"learning_rate": 0.0001,
"loss": 0.4711,
"step": 810
},
{
"epoch": 0.6715119254785546,
"grad_norm": 0.7547106146812439,
"learning_rate": 0.0001,
"loss": 0.476,
"step": 820
},
{
"epoch": 0.6797010953014638,
"grad_norm": 0.7936639785766602,
"learning_rate": 0.0001,
"loss": 0.4762,
"step": 830
},
{
"epoch": 0.687890265124373,
"grad_norm": 0.7227616310119629,
"learning_rate": 0.0001,
"loss": 0.4798,
"step": 840
},
{
"epoch": 0.6960794349472822,
"grad_norm": 0.7741641402244568,
"learning_rate": 0.0001,
"loss": 0.4992,
"step": 850
},
{
"epoch": 0.7042686047701914,
"grad_norm": 0.6723213791847229,
"learning_rate": 0.0001,
"loss": 0.4818,
"step": 860
},
{
"epoch": 0.7124577745931007,
"grad_norm": 0.5631268620491028,
"learning_rate": 0.0001,
"loss": 0.481,
"step": 870
},
{
"epoch": 0.7206469444160098,
"grad_norm": 0.7071460485458374,
"learning_rate": 0.0001,
"loss": 0.4744,
"step": 880
},
{
"epoch": 0.728836114238919,
"grad_norm": 0.6503288745880127,
"learning_rate": 0.0001,
"loss": 0.4466,
"step": 890
},
{
"epoch": 0.7370252840618282,
"grad_norm": 0.6489100456237793,
"learning_rate": 0.0001,
"loss": 0.4708,
"step": 900
},
{
"epoch": 0.7452144538847374,
"grad_norm": 0.7342277765274048,
"learning_rate": 0.0001,
"loss": 0.4687,
"step": 910
},
{
"epoch": 0.7534036237076467,
"grad_norm": 0.6502553224563599,
"learning_rate": 0.0001,
"loss": 0.4649,
"step": 920
},
{
"epoch": 0.7615927935305559,
"grad_norm": 0.6611005663871765,
"learning_rate": 0.0001,
"loss": 0.4771,
"step": 930
},
{
"epoch": 0.769781963353465,
"grad_norm": 0.5838377475738525,
"learning_rate": 0.0001,
"loss": 0.4671,
"step": 940
},
{
"epoch": 0.7779711331763742,
"grad_norm": 0.6084617972373962,
"learning_rate": 0.0001,
"loss": 0.4701,
"step": 950
},
{
"epoch": 0.7861603029992834,
"grad_norm": 0.6489084959030151,
"learning_rate": 0.0001,
"loss": 0.4586,
"step": 960
},
{
"epoch": 0.7943494728221927,
"grad_norm": 0.6316090226173401,
"learning_rate": 0.0001,
"loss": 0.451,
"step": 970
},
{
"epoch": 0.8025386426451019,
"grad_norm": 0.6724498271942139,
"learning_rate": 0.0001,
"loss": 0.4776,
"step": 980
},
{
"epoch": 0.8107278124680111,
"grad_norm": 0.6327974796295166,
"learning_rate": 0.0001,
"loss": 0.4708,
"step": 990
},
{
"epoch": 0.8189169822909202,
"grad_norm": 0.8041887879371643,
"learning_rate": 0.0001,
"loss": 0.4835,
"step": 1000
},
{
"epoch": 0.8189169822909202,
"eval_loss": 0.39249786734580994,
"eval_runtime": 953.0333,
"eval_samples_per_second": 37.774,
"eval_steps_per_second": 18.887,
"step": 1000
},
{
"epoch": 0.8271061521138294,
"grad_norm": 0.7333071827888489,
"learning_rate": 0.0001,
"loss": 0.4662,
"step": 1010
},
{
"epoch": 0.8352953219367386,
"grad_norm": 0.6036032438278198,
"learning_rate": 0.0001,
"loss": 0.449,
"step": 1020
},
{
"epoch": 0.8434844917596479,
"grad_norm": 0.6087955236434937,
"learning_rate": 0.0001,
"loss": 0.4651,
"step": 1030
},
{
"epoch": 0.8516736615825571,
"grad_norm": 0.8231128454208374,
"learning_rate": 0.0001,
"loss": 0.4672,
"step": 1040
},
{
"epoch": 0.8598628314054663,
"grad_norm": 0.6176358461380005,
"learning_rate": 0.0001,
"loss": 0.4616,
"step": 1050
},
{
"epoch": 0.8680520012283754,
"grad_norm": 0.5654678344726562,
"learning_rate": 0.0001,
"loss": 0.437,
"step": 1060
},
{
"epoch": 0.8762411710512846,
"grad_norm": 0.6934413909912109,
"learning_rate": 0.0001,
"loss": 0.4302,
"step": 1070
},
{
"epoch": 0.8844303408741939,
"grad_norm": 0.6048303842544556,
"learning_rate": 0.0001,
"loss": 0.4584,
"step": 1080
},
{
"epoch": 0.8926195106971031,
"grad_norm": 0.6487530469894409,
"learning_rate": 0.0001,
"loss": 0.4647,
"step": 1090
},
{
"epoch": 0.9008086805200123,
"grad_norm": 0.6674748659133911,
"learning_rate": 0.0001,
"loss": 0.467,
"step": 1100
},
{
"epoch": 0.9089978503429215,
"grad_norm": 0.6509521007537842,
"learning_rate": 0.0001,
"loss": 0.4525,
"step": 1110
},
{
"epoch": 0.9171870201658306,
"grad_norm": 0.6949372291564941,
"learning_rate": 0.0001,
"loss": 0.4663,
"step": 1120
},
{
"epoch": 0.9253761899887399,
"grad_norm": 0.7340756058692932,
"learning_rate": 0.0001,
"loss": 0.4297,
"step": 1130
},
{
"epoch": 0.9335653598116491,
"grad_norm": 0.5543187856674194,
"learning_rate": 0.0001,
"loss": 0.4606,
"step": 1140
},
{
"epoch": 0.9417545296345583,
"grad_norm": 0.6061782836914062,
"learning_rate": 0.0001,
"loss": 0.4589,
"step": 1150
},
{
"epoch": 0.9499436994574675,
"grad_norm": 0.6638914942741394,
"learning_rate": 0.0001,
"loss": 0.4252,
"step": 1160
},
{
"epoch": 0.9581328692803767,
"grad_norm": 0.5913025736808777,
"learning_rate": 0.0001,
"loss": 0.44,
"step": 1170
},
{
"epoch": 0.966322039103286,
"grad_norm": 0.5672889947891235,
"learning_rate": 0.0001,
"loss": 0.4659,
"step": 1180
},
{
"epoch": 0.9745112089261951,
"grad_norm": 0.5614264607429504,
"learning_rate": 0.0001,
"loss": 0.4471,
"step": 1190
},
{
"epoch": 0.9827003787491043,
"grad_norm": 0.5809562802314758,
"learning_rate": 0.0001,
"loss": 0.4288,
"step": 1200
},
{
"epoch": 0.9827003787491043,
"eval_loss": 0.37333595752716064,
"eval_runtime": 961.0219,
"eval_samples_per_second": 37.46,
"eval_steps_per_second": 18.73,
"step": 1200
}
],
"logging_steps": 10,
"max_steps": 1221,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 400,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7.304264713469952e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}