{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9991075412762158,
"eval_steps": 70,
"global_step": 560,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0178491744756805,
"grad_norm": 1.8217403888702393,
"learning_rate": 2.9761904761904763e-06,
"loss": 2.7425,
"step": 5
},
{
"epoch": 0.035698348951361,
"grad_norm": 2.104698419570923,
"learning_rate": 5.9523809523809525e-06,
"loss": 2.861,
"step": 10
},
{
"epoch": 0.0535475234270415,
"grad_norm": 2.7389333248138428,
"learning_rate": 8.92857142857143e-06,
"loss": 2.8281,
"step": 15
},
{
"epoch": 0.071396697902722,
"grad_norm": 3.9298207759857178,
"learning_rate": 1.1904761904761905e-05,
"loss": 3.1888,
"step": 20
},
{
"epoch": 0.0892458723784025,
"grad_norm": 2.648014783859253,
"learning_rate": 1.4880952380952381e-05,
"loss": 2.6461,
"step": 25
},
{
"epoch": 0.107095046854083,
"grad_norm": 1.587472915649414,
"learning_rate": 1.785714285714286e-05,
"loss": 2.3212,
"step": 30
},
{
"epoch": 0.1249442213297635,
"grad_norm": 0.8390935063362122,
"learning_rate": 2.0833333333333336e-05,
"loss": 1.8036,
"step": 35
},
{
"epoch": 0.142793395805444,
"grad_norm": 0.46670979261398315,
"learning_rate": 2.380952380952381e-05,
"loss": 1.5552,
"step": 40
},
{
"epoch": 0.1606425702811245,
"grad_norm": 0.45171597599983215,
"learning_rate": 2.6785714285714288e-05,
"loss": 1.6626,
"step": 45
},
{
"epoch": 0.178491744756805,
"grad_norm": 0.5605499744415283,
"learning_rate": 2.9761904761904762e-05,
"loss": 1.4897,
"step": 50
},
{
"epoch": 0.1963409192324855,
"grad_norm": 0.5553259253501892,
"learning_rate": 3.273809523809524e-05,
"loss": 1.5373,
"step": 55
},
{
"epoch": 0.214190093708166,
"grad_norm": 0.6260251402854919,
"learning_rate": 3.571428571428572e-05,
"loss": 1.4779,
"step": 60
},
{
"epoch": 0.2320392681838465,
"grad_norm": 0.6063796877861023,
"learning_rate": 3.8690476190476195e-05,
"loss": 1.483,
"step": 65
},
{
"epoch": 0.249888442659527,
"grad_norm": 0.5549850463867188,
"learning_rate": 4.166666666666667e-05,
"loss": 1.5022,
"step": 70
},
{
"epoch": 0.249888442659527,
"eval_loss": 1.451762318611145,
"eval_runtime": 17.7549,
"eval_samples_per_second": 2.591,
"eval_steps_per_second": 2.591,
"step": 70
},
{
"epoch": 0.2677376171352075,
"grad_norm": 0.482930988073349,
"learning_rate": 4.464285714285715e-05,
"loss": 1.4256,
"step": 75
},
{
"epoch": 0.285586791610888,
"grad_norm": 0.4240593910217285,
"learning_rate": 4.761904761904762e-05,
"loss": 1.3655,
"step": 80
},
{
"epoch": 0.3034359660865685,
"grad_norm": 0.4872314929962158,
"learning_rate": 5.05952380952381e-05,
"loss": 1.4478,
"step": 85
},
{
"epoch": 0.321285140562249,
"grad_norm": 0.42132768034935,
"learning_rate": 5.3571428571428575e-05,
"loss": 1.3305,
"step": 90
},
{
"epoch": 0.3391343150379295,
"grad_norm": 0.6932046413421631,
"learning_rate": 5.6547619047619046e-05,
"loss": 1.4279,
"step": 95
},
{
"epoch": 0.35698348951361,
"grad_norm": 0.6714524626731873,
"learning_rate": 5.9523809523809524e-05,
"loss": 1.4967,
"step": 100
},
{
"epoch": 0.3748326639892905,
"grad_norm": 0.5682816505432129,
"learning_rate": 6.25e-05,
"loss": 1.4739,
"step": 105
},
{
"epoch": 0.392681838464971,
"grad_norm": 0.7795937657356262,
"learning_rate": 6.547619047619048e-05,
"loss": 1.3751,
"step": 110
},
{
"epoch": 0.4105310129406515,
"grad_norm": 0.8056842088699341,
"learning_rate": 6.845238095238096e-05,
"loss": 1.3699,
"step": 115
},
{
"epoch": 0.428380187416332,
"grad_norm": 0.8373801112174988,
"learning_rate": 7.142857142857143e-05,
"loss": 1.4696,
"step": 120
},
{
"epoch": 0.4462293618920125,
"grad_norm": 1.0051416158676147,
"learning_rate": 7.440476190476191e-05,
"loss": 1.4059,
"step": 125
},
{
"epoch": 0.464078536367693,
"grad_norm": 0.5304180383682251,
"learning_rate": 7.738095238095239e-05,
"loss": 1.3072,
"step": 130
},
{
"epoch": 0.4819277108433735,
"grad_norm": 0.8797634243965149,
"learning_rate": 8.035714285714287e-05,
"loss": 1.4132,
"step": 135
},
{
"epoch": 0.499776885319054,
"grad_norm": 0.9049625396728516,
"learning_rate": 8.333333333333334e-05,
"loss": 1.4121,
"step": 140
},
{
"epoch": 0.499776885319054,
"eval_loss": 1.3727394342422485,
"eval_runtime": 17.745,
"eval_samples_per_second": 2.592,
"eval_steps_per_second": 2.592,
"step": 140
},
{
"epoch": 0.5176260597947345,
"grad_norm": 0.6793915033340454,
"learning_rate": 8.630952380952382e-05,
"loss": 1.3109,
"step": 145
},
{
"epoch": 0.535475234270415,
"grad_norm": 0.7171015739440918,
"learning_rate": 8.92857142857143e-05,
"loss": 1.3781,
"step": 150
},
{
"epoch": 0.5533244087460955,
"grad_norm": 0.6738716959953308,
"learning_rate": 9.226190476190478e-05,
"loss": 1.3564,
"step": 155
},
{
"epoch": 0.571173583221776,
"grad_norm": 0.699975311756134,
"learning_rate": 9.523809523809524e-05,
"loss": 1.2387,
"step": 160
},
{
"epoch": 0.5890227576974565,
"grad_norm": 0.7659904956817627,
"learning_rate": 9.821428571428572e-05,
"loss": 1.3042,
"step": 165
},
{
"epoch": 0.606871932173137,
"grad_norm": 0.9782125353813171,
"learning_rate": 9.999956828659095e-05,
"loss": 1.3709,
"step": 170
},
{
"epoch": 0.6247211066488175,
"grad_norm": 1.0532957315444946,
"learning_rate": 9.999471159635539e-05,
"loss": 1.3844,
"step": 175
},
{
"epoch": 0.642570281124498,
"grad_norm": 0.7373877167701721,
"learning_rate": 9.998445910004082e-05,
"loss": 1.2852,
"step": 180
},
{
"epoch": 0.6604194556001785,
"grad_norm": 1.0207768678665161,
"learning_rate": 9.996881190417393e-05,
"loss": 1.4652,
"step": 185
},
{
"epoch": 0.678268630075859,
"grad_norm": 0.7943917512893677,
"learning_rate": 9.994777169751806e-05,
"loss": 1.3743,
"step": 190
},
{
"epoch": 0.6961178045515395,
"grad_norm": 0.7461659908294678,
"learning_rate": 9.992134075089084e-05,
"loss": 1.2423,
"step": 195
},
{
"epoch": 0.71396697902722,
"grad_norm": 0.9689913988113403,
"learning_rate": 9.988952191691925e-05,
"loss": 1.3113,
"step": 200
},
{
"epoch": 0.7318161535029005,
"grad_norm": 0.766276478767395,
"learning_rate": 9.985231862973168e-05,
"loss": 1.3524,
"step": 205
},
{
"epoch": 0.749665327978581,
"grad_norm": 0.6728419661521912,
"learning_rate": 9.980973490458728e-05,
"loss": 1.4038,
"step": 210
},
{
"epoch": 0.749665327978581,
"eval_loss": 1.3051044940948486,
"eval_runtime": 17.7559,
"eval_samples_per_second": 2.591,
"eval_steps_per_second": 2.591,
"step": 210
},
{
"epoch": 0.7675145024542614,
"grad_norm": 1.0456575155258179,
"learning_rate": 9.976177533744261e-05,
"loss": 1.3626,
"step": 215
},
{
"epoch": 0.785363676929942,
"grad_norm": 0.9017456769943237,
"learning_rate": 9.97084451044556e-05,
"loss": 1.3232,
"step": 220
},
{
"epoch": 0.8032128514056225,
"grad_norm": 0.9113703966140747,
"learning_rate": 9.964974996142698e-05,
"loss": 1.2826,
"step": 225
},
{
"epoch": 0.821062025881303,
"grad_norm": 0.7177279591560364,
"learning_rate": 9.958569624317893e-05,
"loss": 1.2794,
"step": 230
},
{
"epoch": 0.8389112003569835,
"grad_norm": 0.9058728814125061,
"learning_rate": 9.951629086287151e-05,
"loss": 1.3853,
"step": 235
},
{
"epoch": 0.856760374832664,
"grad_norm": 0.6813459992408752,
"learning_rate": 9.944154131125642e-05,
"loss": 1.3533,
"step": 240
},
{
"epoch": 0.8746095493083444,
"grad_norm": 0.7113555073738098,
"learning_rate": 9.936145565586871e-05,
"loss": 1.3395,
"step": 245
},
{
"epoch": 0.892458723784025,
"grad_norm": 1.243597149848938,
"learning_rate": 9.927604254015585e-05,
"loss": 1.443,
"step": 250
},
{
"epoch": 0.9103078982597055,
"grad_norm": 0.8651953339576721,
"learning_rate": 9.918531118254507e-05,
"loss": 1.398,
"step": 255
},
{
"epoch": 0.928157072735386,
"grad_norm": 0.8877395987510681,
"learning_rate": 9.90892713754483e-05,
"loss": 1.346,
"step": 260
},
{
"epoch": 0.9460062472110665,
"grad_norm": 0.8857008814811707,
"learning_rate": 9.898793348420536e-05,
"loss": 1.3921,
"step": 265
},
{
"epoch": 0.963855421686747,
"grad_norm": 0.8319969177246094,
"learning_rate": 9.888130844596524e-05,
"loss": 1.3838,
"step": 270
},
{
"epoch": 0.9817045961624274,
"grad_norm": 0.7452044486999512,
"learning_rate": 9.876940776850569e-05,
"loss": 1.3529,
"step": 275
},
{
"epoch": 0.999553770638108,
"grad_norm": 0.7535015940666199,
"learning_rate": 9.865224352899119e-05,
"loss": 1.2739,
"step": 280
},
{
"epoch": 0.999553770638108,
"eval_loss": 1.289029836654663,
"eval_runtime": 17.7491,
"eval_samples_per_second": 2.592,
"eval_steps_per_second": 2.592,
"step": 280
},
{
"epoch": 1.0174029451137885,
"grad_norm": 0.7779117226600647,
"learning_rate": 9.852982837266955e-05,
"loss": 1.2339,
"step": 285
},
{
"epoch": 1.035252119589469,
"grad_norm": 0.8113610744476318,
"learning_rate": 9.840217551150706e-05,
"loss": 1.0982,
"step": 290
},
{
"epoch": 1.0531012940651494,
"grad_norm": 1.004701852798462,
"learning_rate": 9.826929872276255e-05,
"loss": 1.2537,
"step": 295
},
{
"epoch": 1.07095046854083,
"grad_norm": 1.524734616279602,
"learning_rate": 9.81312123475006e-05,
"loss": 1.1664,
"step": 300
},
{
"epoch": 1.0887996430165106,
"grad_norm": 1.5680856704711914,
"learning_rate": 9.798793128904356e-05,
"loss": 1.08,
"step": 305
},
{
"epoch": 1.106648817492191,
"grad_norm": 1.4838035106658936,
"learning_rate": 9.78394710113631e-05,
"loss": 1.1029,
"step": 310
},
{
"epoch": 1.1244979919678715,
"grad_norm": 1.522316575050354,
"learning_rate": 9.768584753741134e-05,
"loss": 1.1524,
"step": 315
},
{
"epoch": 1.142347166443552,
"grad_norm": 1.3976528644561768,
"learning_rate": 9.752707744739145e-05,
"loss": 1.1328,
"step": 320
},
{
"epoch": 1.1601963409192324,
"grad_norm": 1.4764764308929443,
"learning_rate": 9.736317787696816e-05,
"loss": 1.1174,
"step": 325
},
{
"epoch": 1.178045515394913,
"grad_norm": 1.3623173236846924,
"learning_rate": 9.719416651541839e-05,
"loss": 1.0493,
"step": 330
},
{
"epoch": 1.1958946898705936,
"grad_norm": 1.3625001907348633,
"learning_rate": 9.702006160372209e-05,
"loss": 1.0479,
"step": 335
},
{
"epoch": 1.213743864346274,
"grad_norm": 1.7509726285934448,
"learning_rate": 9.684088193259355e-05,
"loss": 1.1043,
"step": 340
},
{
"epoch": 1.2315930388219545,
"grad_norm": 1.5920188426971436,
"learning_rate": 9.665664684045333e-05,
"loss": 1.1096,
"step": 345
},
{
"epoch": 1.249442213297635,
"grad_norm": 1.6554943323135376,
"learning_rate": 9.646737621134112e-05,
"loss": 1.1436,
"step": 350
},
{
"epoch": 1.249442213297635,
"eval_loss": 1.3194608688354492,
"eval_runtime": 17.7382,
"eval_samples_per_second": 2.593,
"eval_steps_per_second": 2.593,
"step": 350
},
{
"epoch": 1.2672913877733154,
"grad_norm": 1.881818175315857,
"learning_rate": 9.627309047276974e-05,
"loss": 1.0549,
"step": 355
},
{
"epoch": 1.285140562248996,
"grad_norm": 1.8770464658737183,
"learning_rate": 9.607381059352038e-05,
"loss": 1.1576,
"step": 360
},
{
"epoch": 1.3029897367246766,
"grad_norm": 1.6901912689208984,
"learning_rate": 9.586955808137958e-05,
"loss": 1.1246,
"step": 365
},
{
"epoch": 1.320838911200357,
"grad_norm": 1.7667070627212524,
"learning_rate": 9.566035498081784e-05,
"loss": 1.125,
"step": 370
},
{
"epoch": 1.3386880856760375,
"grad_norm": 1.6150933504104614,
"learning_rate": 9.544622387061055e-05,
"loss": 1.1687,
"step": 375
},
{
"epoch": 1.356537260151718,
"grad_norm": 1.5824884176254272,
"learning_rate": 9.522718786140097e-05,
"loss": 0.9699,
"step": 380
},
{
"epoch": 1.3743864346273984,
"grad_norm": 1.5410280227661133,
"learning_rate": 9.500327059320606e-05,
"loss": 1.1379,
"step": 385
},
{
"epoch": 1.392235609103079,
"grad_norm": 2.264235496520996,
"learning_rate": 9.477449623286505e-05,
"loss": 1.0511,
"step": 390
},
{
"epoch": 1.4100847835787595,
"grad_norm": 1.7440612316131592,
"learning_rate": 9.454088947143116e-05,
"loss": 1.0003,
"step": 395
},
{
"epoch": 1.42793395805444,
"grad_norm": 1.770466923713684,
"learning_rate": 9.430247552150673e-05,
"loss": 1.1631,
"step": 400
},
{
"epoch": 1.4457831325301205,
"grad_norm": 1.9537169933319092,
"learning_rate": 9.405928011452211e-05,
"loss": 1.045,
"step": 405
},
{
"epoch": 1.463632307005801,
"grad_norm": 1.452445387840271,
"learning_rate": 9.381132949795861e-05,
"loss": 1.0511,
"step": 410
},
{
"epoch": 1.4814814814814814,
"grad_norm": 2.176547050476074,
"learning_rate": 9.35586504325155e-05,
"loss": 1.1637,
"step": 415
},
{
"epoch": 1.499330655957162,
"grad_norm": 2.15567684173584,
"learning_rate": 9.330127018922194e-05,
"loss": 1.0783,
"step": 420
},
{
"epoch": 1.499330655957162,
"eval_loss": 1.3106330633163452,
"eval_runtime": 17.7447,
"eval_samples_per_second": 2.592,
"eval_steps_per_second": 2.592,
"step": 420
},
{
"epoch": 1.5171798304328425,
"grad_norm": 1.6800014972686768,
"learning_rate": 9.303921654649362e-05,
"loss": 1.0406,
"step": 425
},
{
"epoch": 1.5350290049085231,
"grad_norm": 1.926607370376587,
"learning_rate": 9.277251778713474e-05,
"loss": 1.1469,
"step": 430
},
{
"epoch": 1.5528781793842035,
"grad_norm": 1.7155028581619263,
"learning_rate": 9.250120269528546e-05,
"loss": 1.0453,
"step": 435
},
{
"epoch": 1.5707273538598838,
"grad_norm": 1.9001247882843018,
"learning_rate": 9.22253005533154e-05,
"loss": 1.0611,
"step": 440
},
{
"epoch": 1.5885765283355644,
"grad_norm": 2.2804248332977295,
"learning_rate": 9.194484113866313e-05,
"loss": 1.082,
"step": 445
},
{
"epoch": 1.606425702811245,
"grad_norm": 1.9318439960479736,
"learning_rate": 9.165985472062246e-05,
"loss": 1.2404,
"step": 450
},
{
"epoch": 1.6242748772869255,
"grad_norm": 1.6018136739730835,
"learning_rate": 9.137037205707552e-05,
"loss": 1.0436,
"step": 455
},
{
"epoch": 1.6421240517626061,
"grad_norm": 2.1986541748046875,
"learning_rate": 9.107642439117321e-05,
"loss": 1.1227,
"step": 460
},
{
"epoch": 1.6599732262382865,
"grad_norm": 1.5558295249938965,
"learning_rate": 9.077804344796302e-05,
"loss": 1.0858,
"step": 465
},
{
"epoch": 1.6778224007139668,
"grad_norm": 1.8423618078231812,
"learning_rate": 9.04752614309652e-05,
"loss": 1.0998,
"step": 470
},
{
"epoch": 1.6956715751896474,
"grad_norm": 1.9065622091293335,
"learning_rate": 9.01681110186971e-05,
"loss": 1.0433,
"step": 475
},
{
"epoch": 1.713520749665328,
"grad_norm": 2.0103020668029785,
"learning_rate": 8.985662536114613e-05,
"loss": 1.0798,
"step": 480
},
{
"epoch": 1.7313699241410085,
"grad_norm": 1.5299313068389893,
"learning_rate": 8.954083807619208e-05,
"loss": 1.1012,
"step": 485
},
{
"epoch": 1.7492190986166891,
"grad_norm": 1.6331924200057983,
"learning_rate": 8.922078324597879e-05,
"loss": 1.1219,
"step": 490
},
{
"epoch": 1.7492190986166891,
"eval_loss": 1.3044873476028442,
"eval_runtime": 17.7401,
"eval_samples_per_second": 2.593,
"eval_steps_per_second": 2.593,
"step": 490
},
{
"epoch": 1.7670682730923695,
"grad_norm": 1.6050705909729004,
"learning_rate": 8.889649541323574e-05,
"loss": 1.16,
"step": 495
},
{
"epoch": 1.7849174475680498,
"grad_norm": 1.7604998350143433,
"learning_rate": 8.856800957755e-05,
"loss": 1.091,
"step": 500
},
{
"epoch": 1.8027666220437304,
"grad_norm": 1.6485258340835571,
"learning_rate": 8.823536119158864e-05,
"loss": 1.072,
"step": 505
},
{
"epoch": 1.820615796519411,
"grad_norm": 1.8173716068267822,
"learning_rate": 8.789858615727265e-05,
"loss": 1.0635,
"step": 510
},
{
"epoch": 1.8384649709950915,
"grad_norm": 1.468127965927124,
"learning_rate": 8.755772082190194e-05,
"loss": 1.0258,
"step": 515
},
{
"epoch": 1.8563141454707721,
"grad_norm": 1.4476536512374878,
"learning_rate": 8.721280197423258e-05,
"loss": 1.2011,
"step": 520
},
{
"epoch": 1.8741633199464525,
"grad_norm": 2.054915189743042,
"learning_rate": 8.68638668405062e-05,
"loss": 1.0539,
"step": 525
},
{
"epoch": 1.8920124944221328,
"grad_norm": 1.8471094369888306,
"learning_rate": 8.651095308043232e-05,
"loss": 1.0948,
"step": 530
},
{
"epoch": 1.9098616688978134,
"grad_norm": 1.7790355682373047,
"learning_rate": 8.61540987831238e-05,
"loss": 1.1245,
"step": 535
},
{
"epoch": 1.927710843373494,
"grad_norm": 1.6644902229309082,
"learning_rate": 8.579334246298593e-05,
"loss": 1.2039,
"step": 540
},
{
"epoch": 1.9455600178491745,
"grad_norm": 1.9952303171157837,
"learning_rate": 8.542872305555978e-05,
"loss": 1.1077,
"step": 545
},
{
"epoch": 1.9634091923248551,
"grad_norm": 2.225977659225464,
"learning_rate": 8.50602799133199e-05,
"loss": 1.0603,
"step": 550
},
{
"epoch": 1.9812583668005355,
"grad_norm": 1.777342438697815,
"learning_rate": 8.468805280142709e-05,
"loss": 1.1376,
"step": 555
},
{
"epoch": 1.9991075412762158,
"grad_norm": 2.2195017337799072,
"learning_rate": 8.43120818934367e-05,
"loss": 1.0966,
"step": 560
},
{
"epoch": 1.9991075412762158,
"eval_loss": 1.3094360828399658,
"eval_runtime": 17.7539,
"eval_samples_per_second": 2.591,
"eval_steps_per_second": 2.591,
"step": 560
}
],
"logging_steps": 5,
"max_steps": 1680,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 70,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.899812762314342e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}