{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.71976401179941,
"eval_steps": 100,
"global_step": 600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03933136676499508,
"grad_norm": 0.481609046459198,
"learning_rate": 0.00015,
"loss": 2.0722,
"step": 5
},
{
"epoch": 0.07866273352999016,
"grad_norm": 0.15720224380493164,
"learning_rate": 0.0003,
"loss": 1.4825,
"step": 10
},
{
"epoch": 0.11799410029498525,
"grad_norm": 0.06716315448284149,
"learning_rate": 0.00029759999999999997,
"loss": 1.3333,
"step": 15
},
{
"epoch": 0.15732546705998032,
"grad_norm": 0.06133478134870529,
"learning_rate": 0.00029519999999999997,
"loss": 1.2341,
"step": 20
},
{
"epoch": 0.19665683382497542,
"grad_norm": 0.07264667749404907,
"learning_rate": 0.00029279999999999996,
"loss": 1.1756,
"step": 25
},
{
"epoch": 0.2359882005899705,
"grad_norm": 0.07928217202425003,
"learning_rate": 0.00029039999999999996,
"loss": 1.1197,
"step": 30
},
{
"epoch": 0.2753195673549656,
"grad_norm": 0.09420346468687057,
"learning_rate": 0.00028799999999999995,
"loss": 1.0834,
"step": 35
},
{
"epoch": 0.31465093411996065,
"grad_norm": 0.0862259566783905,
"learning_rate": 0.00028559999999999995,
"loss": 1.044,
"step": 40
},
{
"epoch": 0.35398230088495575,
"grad_norm": 0.09086894243955612,
"learning_rate": 0.00028319999999999994,
"loss": 1.0205,
"step": 45
},
{
"epoch": 0.39331366764995085,
"grad_norm": 0.08469890058040619,
"learning_rate": 0.0002808,
"loss": 0.9798,
"step": 50
},
{
"epoch": 0.4326450344149459,
"grad_norm": 0.10012397915124893,
"learning_rate": 0.0002784,
"loss": 0.9811,
"step": 55
},
{
"epoch": 0.471976401179941,
"grad_norm": 0.08633492887020111,
"learning_rate": 0.000276,
"loss": 0.9556,
"step": 60
},
{
"epoch": 0.511307767944936,
"grad_norm": 0.09879346191883087,
"learning_rate": 0.0002736,
"loss": 0.9446,
"step": 65
},
{
"epoch": 0.5506391347099312,
"grad_norm": 0.08795857429504395,
"learning_rate": 0.0002712,
"loss": 0.9228,
"step": 70
},
{
"epoch": 0.5899705014749262,
"grad_norm": 0.0837111845612526,
"learning_rate": 0.0002688,
"loss": 0.9279,
"step": 75
},
{
"epoch": 0.6293018682399213,
"grad_norm": 0.08551318198442459,
"learning_rate": 0.00026639999999999997,
"loss": 0.9267,
"step": 80
},
{
"epoch": 0.6686332350049164,
"grad_norm": 0.08481767773628235,
"learning_rate": 0.00026399999999999997,
"loss": 0.9082,
"step": 85
},
{
"epoch": 0.7079646017699115,
"grad_norm": 0.100365050137043,
"learning_rate": 0.00026159999999999996,
"loss": 0.9028,
"step": 90
},
{
"epoch": 0.7472959685349065,
"grad_norm": 0.08463772386312485,
"learning_rate": 0.00025919999999999996,
"loss": 0.8866,
"step": 95
},
{
"epoch": 0.7866273352999017,
"grad_norm": 0.09628409892320633,
"learning_rate": 0.00025679999999999995,
"loss": 0.8787,
"step": 100
},
{
"epoch": 0.7866273352999017,
"eval_loss": 0.8853636980056763,
"eval_runtime": 24.3719,
"eval_samples_per_second": 6.729,
"eval_steps_per_second": 0.862,
"step": 100
},
{
"epoch": 0.8259587020648967,
"grad_norm": 0.08835043758153915,
"learning_rate": 0.00025439999999999995,
"loss": 0.8786,
"step": 105
},
{
"epoch": 0.8652900688298918,
"grad_norm": 0.09190791845321655,
"learning_rate": 0.00025199999999999995,
"loss": 0.8693,
"step": 110
},
{
"epoch": 0.904621435594887,
"grad_norm": 0.08965795487165451,
"learning_rate": 0.00024959999999999994,
"loss": 0.8772,
"step": 115
},
{
"epoch": 0.943952802359882,
"grad_norm": 0.09055910259485245,
"learning_rate": 0.0002472,
"loss": 0.867,
"step": 120
},
{
"epoch": 0.983284169124877,
"grad_norm": 0.09172637015581131,
"learning_rate": 0.0002448,
"loss": 0.8536,
"step": 125
},
{
"epoch": 1.022615535889872,
"grad_norm": 0.10374542325735092,
"learning_rate": 0.00024239999999999998,
"loss": 0.9888,
"step": 130
},
{
"epoch": 1.0619469026548674,
"grad_norm": 0.08842068910598755,
"learning_rate": 0.00023999999999999998,
"loss": 0.8443,
"step": 135
},
{
"epoch": 1.1012782694198624,
"grad_norm": 0.0736837387084961,
"learning_rate": 0.0002376,
"loss": 0.8457,
"step": 140
},
{
"epoch": 1.1406096361848574,
"grad_norm": 0.07575016468763351,
"learning_rate": 0.0002352,
"loss": 0.8335,
"step": 145
},
{
"epoch": 1.1799410029498525,
"grad_norm": 0.07092955708503723,
"learning_rate": 0.0002328,
"loss": 0.8246,
"step": 150
},
{
"epoch": 1.2192723697148475,
"grad_norm": 0.077423095703125,
"learning_rate": 0.0002304,
"loss": 0.823,
"step": 155
},
{
"epoch": 1.2586037364798428,
"grad_norm": 0.07389391213655472,
"learning_rate": 0.00022799999999999999,
"loss": 0.819,
"step": 160
},
{
"epoch": 1.2979351032448379,
"grad_norm": 0.08229434490203857,
"learning_rate": 0.00022559999999999998,
"loss": 0.8181,
"step": 165
},
{
"epoch": 1.337266470009833,
"grad_norm": 0.07665972411632538,
"learning_rate": 0.00022319999999999998,
"loss": 0.8118,
"step": 170
},
{
"epoch": 1.376597836774828,
"grad_norm": 0.09001573175191879,
"learning_rate": 0.00022079999999999997,
"loss": 0.8157,
"step": 175
},
{
"epoch": 1.415929203539823,
"grad_norm": 0.07965826243162155,
"learning_rate": 0.00021839999999999997,
"loss": 0.8111,
"step": 180
},
{
"epoch": 1.455260570304818,
"grad_norm": 0.08642959594726562,
"learning_rate": 0.00021599999999999996,
"loss": 0.8003,
"step": 185
},
{
"epoch": 1.494591937069813,
"grad_norm": 0.0749087929725647,
"learning_rate": 0.00021359999999999996,
"loss": 0.7975,
"step": 190
},
{
"epoch": 1.5339233038348081,
"grad_norm": 0.08575734496116638,
"learning_rate": 0.00021119999999999996,
"loss": 0.7888,
"step": 195
},
{
"epoch": 1.5732546705998034,
"grad_norm": 0.0887129157781601,
"learning_rate": 0.00020879999999999998,
"loss": 0.7857,
"step": 200
},
{
"epoch": 1.5732546705998034,
"eval_loss": 0.8026237487792969,
"eval_runtime": 24.2397,
"eval_samples_per_second": 6.766,
"eval_steps_per_second": 0.866,
"step": 200
},
{
"epoch": 1.6125860373647984,
"grad_norm": 0.0926935002207756,
"learning_rate": 0.00020639999999999998,
"loss": 0.7877,
"step": 205
},
{
"epoch": 1.6519174041297935,
"grad_norm": 0.08537031710147858,
"learning_rate": 0.000204,
"loss": 0.7767,
"step": 210
},
{
"epoch": 1.6912487708947888,
"grad_norm": 0.0766814798116684,
"learning_rate": 0.0002016,
"loss": 0.785,
"step": 215
},
{
"epoch": 1.7305801376597838,
"grad_norm": 0.08394207805395126,
"learning_rate": 0.0001992,
"loss": 0.7832,
"step": 220
},
{
"epoch": 1.7699115044247788,
"grad_norm": 0.0813060775399208,
"learning_rate": 0.00019679999999999999,
"loss": 0.7766,
"step": 225
},
{
"epoch": 1.809242871189774,
"grad_norm": 0.08242856711149216,
"learning_rate": 0.00019439999999999998,
"loss": 0.7775,
"step": 230
},
{
"epoch": 1.848574237954769,
"grad_norm": 0.07610878348350525,
"learning_rate": 0.00019199999999999998,
"loss": 0.7736,
"step": 235
},
{
"epoch": 1.887905604719764,
"grad_norm": 0.08326178044080734,
"learning_rate": 0.00018959999999999997,
"loss": 0.7753,
"step": 240
},
{
"epoch": 1.927236971484759,
"grad_norm": 0.09425383061170578,
"learning_rate": 0.0001872,
"loss": 0.7577,
"step": 245
},
{
"epoch": 1.966568338249754,
"grad_norm": 0.08694498240947723,
"learning_rate": 0.0001848,
"loss": 0.7606,
"step": 250
},
{
"epoch": 2.005899705014749,
"grad_norm": 0.22805309295654297,
"learning_rate": 0.0001824,
"loss": 0.8871,
"step": 255
},
{
"epoch": 2.045231071779744,
"grad_norm": 0.09610473364591599,
"learning_rate": 0.00017999999999999998,
"loss": 0.7315,
"step": 260
},
{
"epoch": 2.084562438544739,
"grad_norm": 0.09666857868432999,
"learning_rate": 0.00017759999999999998,
"loss": 0.7315,
"step": 265
},
{
"epoch": 2.1238938053097347,
"grad_norm": 0.09328849613666534,
"learning_rate": 0.00017519999999999998,
"loss": 0.7344,
"step": 270
},
{
"epoch": 2.1632251720747298,
"grad_norm": 0.08137473464012146,
"learning_rate": 0.00017279999999999997,
"loss": 0.7347,
"step": 275
},
{
"epoch": 2.202556538839725,
"grad_norm": 0.08166103810071945,
"learning_rate": 0.00017039999999999997,
"loss": 0.7281,
"step": 280
},
{
"epoch": 2.24188790560472,
"grad_norm": 0.08074019104242325,
"learning_rate": 0.000168,
"loss": 0.7345,
"step": 285
},
{
"epoch": 2.281219272369715,
"grad_norm": 0.08479057997465134,
"learning_rate": 0.0001656,
"loss": 0.726,
"step": 290
},
{
"epoch": 2.32055063913471,
"grad_norm": 0.08091601729393005,
"learning_rate": 0.0001632,
"loss": 0.7184,
"step": 295
},
{
"epoch": 2.359882005899705,
"grad_norm": 0.08470489084720612,
"learning_rate": 0.0001608,
"loss": 0.7233,
"step": 300
},
{
"epoch": 2.359882005899705,
"eval_loss": 0.7612683176994324,
"eval_runtime": 24.27,
"eval_samples_per_second": 6.757,
"eval_steps_per_second": 0.865,
"step": 300
},
{
"epoch": 2.3992133726647,
"grad_norm": 0.08677177131175995,
"learning_rate": 0.0001584,
"loss": 0.721,
"step": 305
},
{
"epoch": 2.438544739429695,
"grad_norm": 0.08474377542734146,
"learning_rate": 0.000156,
"loss": 0.7141,
"step": 310
},
{
"epoch": 2.47787610619469,
"grad_norm": 0.08565227687358856,
"learning_rate": 0.0001536,
"loss": 0.7173,
"step": 315
},
{
"epoch": 2.5172074729596856,
"grad_norm": 0.08714301139116287,
"learning_rate": 0.0001512,
"loss": 0.7274,
"step": 320
},
{
"epoch": 2.5565388397246807,
"grad_norm": 0.0934271439909935,
"learning_rate": 0.00014879999999999998,
"loss": 0.7263,
"step": 325
},
{
"epoch": 2.5958702064896757,
"grad_norm": 0.08581375330686569,
"learning_rate": 0.00014639999999999998,
"loss": 0.7248,
"step": 330
},
{
"epoch": 2.6352015732546707,
"grad_norm": 0.08378680050373077,
"learning_rate": 0.00014399999999999998,
"loss": 0.721,
"step": 335
},
{
"epoch": 2.674532940019666,
"grad_norm": 0.08449660986661911,
"learning_rate": 0.00014159999999999997,
"loss": 0.7156,
"step": 340
},
{
"epoch": 2.713864306784661,
"grad_norm": 0.08646751940250397,
"learning_rate": 0.0001392,
"loss": 0.7094,
"step": 345
},
{
"epoch": 2.753195673549656,
"grad_norm": 0.08911272883415222,
"learning_rate": 0.0001368,
"loss": 0.709,
"step": 350
},
{
"epoch": 2.792527040314651,
"grad_norm": 0.0970829427242279,
"learning_rate": 0.0001344,
"loss": 0.7107,
"step": 355
},
{
"epoch": 2.831858407079646,
"grad_norm": 0.0854572132229805,
"learning_rate": 0.00013199999999999998,
"loss": 0.7148,
"step": 360
},
{
"epoch": 2.871189773844641,
"grad_norm": 0.08210612088441849,
"learning_rate": 0.00012959999999999998,
"loss": 0.7132,
"step": 365
},
{
"epoch": 2.910521140609636,
"grad_norm": 0.0925467386841774,
"learning_rate": 0.00012719999999999997,
"loss": 0.7201,
"step": 370
},
{
"epoch": 2.949852507374631,
"grad_norm": 0.09149914979934692,
"learning_rate": 0.00012479999999999997,
"loss": 0.7086,
"step": 375
},
{
"epoch": 2.989183874139626,
"grad_norm": 0.0827464610338211,
"learning_rate": 0.0001224,
"loss": 0.7102,
"step": 380
},
{
"epoch": 3.0285152409046217,
"grad_norm": 0.09861475974321365,
"learning_rate": 0.00011999999999999999,
"loss": 0.8086,
"step": 385
},
{
"epoch": 3.0678466076696167,
"grad_norm": 0.09810496121644974,
"learning_rate": 0.0001176,
"loss": 0.6784,
"step": 390
},
{
"epoch": 3.1071779744346117,
"grad_norm": 0.08657824248075485,
"learning_rate": 0.0001152,
"loss": 0.6818,
"step": 395
},
{
"epoch": 3.146509341199607,
"grad_norm": 0.08861815184354782,
"learning_rate": 0.00011279999999999999,
"loss": 0.6755,
"step": 400
},
{
"epoch": 3.146509341199607,
"eval_loss": 0.7408613562583923,
"eval_runtime": 24.2895,
"eval_samples_per_second": 6.752,
"eval_steps_per_second": 0.865,
"step": 400
},
{
"epoch": 3.185840707964602,
"grad_norm": 0.09166675060987473,
"learning_rate": 0.00011039999999999999,
"loss": 0.6797,
"step": 405
},
{
"epoch": 3.225172074729597,
"grad_norm": 0.08929497748613358,
"learning_rate": 0.00010799999999999998,
"loss": 0.6803,
"step": 410
},
{
"epoch": 3.264503441494592,
"grad_norm": 0.08206567913293839,
"learning_rate": 0.00010559999999999998,
"loss": 0.674,
"step": 415
},
{
"epoch": 3.303834808259587,
"grad_norm": 0.08605019748210907,
"learning_rate": 0.00010319999999999999,
"loss": 0.671,
"step": 420
},
{
"epoch": 3.343166175024582,
"grad_norm": 0.09013310074806213,
"learning_rate": 0.0001008,
"loss": 0.6741,
"step": 425
},
{
"epoch": 3.382497541789577,
"grad_norm": 0.08961386978626251,
"learning_rate": 9.839999999999999e-05,
"loss": 0.6788,
"step": 430
},
{
"epoch": 3.421828908554572,
"grad_norm": 0.08613158762454987,
"learning_rate": 9.599999999999999e-05,
"loss": 0.6681,
"step": 435
},
{
"epoch": 3.4611602753195676,
"grad_norm": 0.08968936651945114,
"learning_rate": 9.36e-05,
"loss": 0.6726,
"step": 440
},
{
"epoch": 3.5004916420845626,
"grad_norm": 0.0898197814822197,
"learning_rate": 9.12e-05,
"loss": 0.6693,
"step": 445
},
{
"epoch": 3.5398230088495577,
"grad_norm": 0.09239344298839569,
"learning_rate": 8.879999999999999e-05,
"loss": 0.6648,
"step": 450
},
{
"epoch": 3.5791543756145527,
"grad_norm": 0.08533533662557602,
"learning_rate": 8.639999999999999e-05,
"loss": 0.6659,
"step": 455
},
{
"epoch": 3.618485742379548,
"grad_norm": 0.08410927653312683,
"learning_rate": 8.4e-05,
"loss": 0.6725,
"step": 460
},
{
"epoch": 3.657817109144543,
"grad_norm": 0.08881025016307831,
"learning_rate": 8.16e-05,
"loss": 0.67,
"step": 465
},
{
"epoch": 3.697148475909538,
"grad_norm": 0.08324339985847473,
"learning_rate": 7.92e-05,
"loss": 0.6722,
"step": 470
},
{
"epoch": 3.736479842674533,
"grad_norm": 0.09193731844425201,
"learning_rate": 7.68e-05,
"loss": 0.6689,
"step": 475
},
{
"epoch": 3.775811209439528,
"grad_norm": 0.0902351662516594,
"learning_rate": 7.439999999999999e-05,
"loss": 0.6784,
"step": 480
},
{
"epoch": 3.815142576204523,
"grad_norm": 0.08959916979074478,
"learning_rate": 7.199999999999999e-05,
"loss": 0.6808,
"step": 485
},
{
"epoch": 3.854473942969518,
"grad_norm": 0.0888456478714943,
"learning_rate": 6.96e-05,
"loss": 0.6703,
"step": 490
},
{
"epoch": 3.893805309734513,
"grad_norm": 0.08982842415571213,
"learning_rate": 6.72e-05,
"loss": 0.675,
"step": 495
},
{
"epoch": 3.933136676499508,
"grad_norm": 0.0874968022108078,
"learning_rate": 6.479999999999999e-05,
"loss": 0.6673,
"step": 500
},
{
"epoch": 3.933136676499508,
"eval_loss": 0.726395308971405,
"eval_runtime": 24.2191,
"eval_samples_per_second": 6.772,
"eval_steps_per_second": 0.867,
"step": 500
},
{
"epoch": 3.972468043264503,
"grad_norm": 0.08432712405920029,
"learning_rate": 6.239999999999999e-05,
"loss": 0.6747,
"step": 505
},
{
"epoch": 4.011799410029498,
"grad_norm": 0.09498456120491028,
"learning_rate": 5.9999999999999995e-05,
"loss": 0.7806,
"step": 510
},
{
"epoch": 4.051130776794493,
"grad_norm": 0.09371250122785568,
"learning_rate": 5.76e-05,
"loss": 0.6417,
"step": 515
},
{
"epoch": 4.090462143559488,
"grad_norm": 0.09341807663440704,
"learning_rate": 5.519999999999999e-05,
"loss": 0.6399,
"step": 520
},
{
"epoch": 4.129793510324483,
"grad_norm": 0.08755338191986084,
"learning_rate": 5.279999999999999e-05,
"loss": 0.6408,
"step": 525
},
{
"epoch": 4.169124877089478,
"grad_norm": 0.08822200447320938,
"learning_rate": 5.04e-05,
"loss": 0.6407,
"step": 530
},
{
"epoch": 4.208456243854474,
"grad_norm": 0.08952053636312485,
"learning_rate": 4.7999999999999994e-05,
"loss": 0.645,
"step": 535
},
{
"epoch": 4.247787610619469,
"grad_norm": 0.0895063579082489,
"learning_rate": 4.56e-05,
"loss": 0.6409,
"step": 540
},
{
"epoch": 4.2871189773844645,
"grad_norm": 0.08820494264364243,
"learning_rate": 4.319999999999999e-05,
"loss": 0.6422,
"step": 545
},
{
"epoch": 4.3264503441494595,
"grad_norm": 0.08639927208423615,
"learning_rate": 4.08e-05,
"loss": 0.6476,
"step": 550
},
{
"epoch": 4.3657817109144545,
"grad_norm": 0.08915071934461594,
"learning_rate": 3.84e-05,
"loss": 0.6389,
"step": 555
},
{
"epoch": 4.40511307767945,
"grad_norm": 0.08612582087516785,
"learning_rate": 3.5999999999999994e-05,
"loss": 0.6401,
"step": 560
},
{
"epoch": 4.444444444444445,
"grad_norm": 0.08792046457529068,
"learning_rate": 3.36e-05,
"loss": 0.6479,
"step": 565
},
{
"epoch": 4.48377581120944,
"grad_norm": 0.08458750694990158,
"learning_rate": 3.119999999999999e-05,
"loss": 0.6448,
"step": 570
},
{
"epoch": 4.523107177974435,
"grad_norm": 0.0890946015715599,
"learning_rate": 2.88e-05,
"loss": 0.6441,
"step": 575
},
{
"epoch": 4.56243854473943,
"grad_norm": 0.08621781319379807,
"learning_rate": 2.6399999999999995e-05,
"loss": 0.6422,
"step": 580
},
{
"epoch": 4.601769911504425,
"grad_norm": 0.08367058634757996,
"learning_rate": 2.3999999999999997e-05,
"loss": 0.6479,
"step": 585
},
{
"epoch": 4.64110127826942,
"grad_norm": 0.08786690980195999,
"learning_rate": 2.1599999999999996e-05,
"loss": 0.6346,
"step": 590
},
{
"epoch": 4.680432645034415,
"grad_norm": 0.08560498058795929,
"learning_rate": 1.92e-05,
"loss": 0.6413,
"step": 595
},
{
"epoch": 4.71976401179941,
"grad_norm": 0.08328807353973389,
"learning_rate": 1.68e-05,
"loss": 0.6399,
"step": 600
},
{
"epoch": 4.71976401179941,
"eval_loss": 0.7229037880897522,
"eval_runtime": 24.3039,
"eval_samples_per_second": 6.748,
"eval_steps_per_second": 0.864,
"step": 600
}
],
"logging_steps": 5,
"max_steps": 635,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.043845109381595e+19,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}