{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.997322623828648,
  "eval_steps": 70,
  "global_step": 1680,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0178491744756805,
      "grad_norm": 1.8217403888702393,
      "learning_rate": 2.9761904761904763e-06,
      "loss": 2.7425,
      "step": 5
    },
    {
      "epoch": 0.035698348951361,
      "grad_norm": 2.104698419570923,
      "learning_rate": 5.9523809523809525e-06,
      "loss": 2.861,
      "step": 10
    },
    {
      "epoch": 0.0535475234270415,
      "grad_norm": 2.7389333248138428,
      "learning_rate": 8.92857142857143e-06,
      "loss": 2.8281,
      "step": 15
    },
    {
      "epoch": 0.071396697902722,
      "grad_norm": 3.9298207759857178,
      "learning_rate": 1.1904761904761905e-05,
      "loss": 3.1888,
      "step": 20
    },
    {
      "epoch": 0.0892458723784025,
      "grad_norm": 2.648014783859253,
      "learning_rate": 1.4880952380952381e-05,
      "loss": 2.6461,
      "step": 25
    },
    {
      "epoch": 0.107095046854083,
      "grad_norm": 1.587472915649414,
      "learning_rate": 1.785714285714286e-05,
      "loss": 2.3212,
      "step": 30
    },
    {
      "epoch": 0.1249442213297635,
      "grad_norm": 0.8390935063362122,
      "learning_rate": 2.0833333333333336e-05,
      "loss": 1.8036,
      "step": 35
    },
    {
      "epoch": 0.142793395805444,
      "grad_norm": 0.46670979261398315,
      "learning_rate": 2.380952380952381e-05,
      "loss": 1.5552,
      "step": 40
    },
    {
      "epoch": 0.1606425702811245,
      "grad_norm": 0.45171597599983215,
      "learning_rate": 2.6785714285714288e-05,
      "loss": 1.6626,
      "step": 45
    },
    {
      "epoch": 0.178491744756805,
      "grad_norm": 0.5605499744415283,
      "learning_rate": 2.9761904761904762e-05,
      "loss": 1.4897,
      "step": 50
    },
    {
      "epoch": 0.1963409192324855,
      "grad_norm": 0.5553259253501892,
      "learning_rate": 3.273809523809524e-05,
      "loss": 1.5373,
      "step": 55
    },
    {
      "epoch": 0.214190093708166,
      "grad_norm": 0.6260251402854919,
      "learning_rate": 3.571428571428572e-05,
      "loss": 1.4779,
      "step": 60
    },
    {
      "epoch": 0.2320392681838465,
      "grad_norm": 0.6063796877861023,
      "learning_rate": 3.8690476190476195e-05,
      "loss": 1.483,
      "step": 65
    },
    {
      "epoch": 0.249888442659527,
      "grad_norm": 0.5549850463867188,
      "learning_rate": 4.166666666666667e-05,
      "loss": 1.5022,
      "step": 70
    },
    {
      "epoch": 0.249888442659527,
      "eval_loss": 1.451762318611145,
      "eval_runtime": 17.7549,
      "eval_samples_per_second": 2.591,
      "eval_steps_per_second": 2.591,
      "step": 70
    },
    {
      "epoch": 0.2677376171352075,
      "grad_norm": 0.482930988073349,
      "learning_rate": 4.464285714285715e-05,
      "loss": 1.4256,
      "step": 75
    },
    {
      "epoch": 0.285586791610888,
      "grad_norm": 0.4240593910217285,
      "learning_rate": 4.761904761904762e-05,
      "loss": 1.3655,
      "step": 80
    },
    {
      "epoch": 0.3034359660865685,
      "grad_norm": 0.4872314929962158,
      "learning_rate": 5.05952380952381e-05,
      "loss": 1.4478,
      "step": 85
    },
    {
      "epoch": 0.321285140562249,
      "grad_norm": 0.42132768034935,
      "learning_rate": 5.3571428571428575e-05,
      "loss": 1.3305,
      "step": 90
    },
    {
      "epoch": 0.3391343150379295,
      "grad_norm": 0.6932046413421631,
      "learning_rate": 5.6547619047619046e-05,
      "loss": 1.4279,
      "step": 95
    },
    {
      "epoch": 0.35698348951361,
      "grad_norm": 0.6714524626731873,
      "learning_rate": 5.9523809523809524e-05,
      "loss": 1.4967,
      "step": 100
    },
    {
      "epoch": 0.3748326639892905,
      "grad_norm": 0.5682816505432129,
      "learning_rate": 6.25e-05,
      "loss": 1.4739,
      "step": 105
    },
    {
      "epoch": 0.392681838464971,
      "grad_norm": 0.7795937657356262,
      "learning_rate": 6.547619047619048e-05,
      "loss": 1.3751,
      "step": 110
    },
    {
      "epoch": 0.4105310129406515,
      "grad_norm": 0.8056842088699341,
      "learning_rate": 6.845238095238096e-05,
      "loss": 1.3699,
      "step": 115
    },
    {
      "epoch": 0.428380187416332,
      "grad_norm": 0.8373801112174988,
      "learning_rate": 7.142857142857143e-05,
      "loss": 1.4696,
      "step": 120
    },
    {
      "epoch": 0.4462293618920125,
      "grad_norm": 1.0051416158676147,
      "learning_rate": 7.440476190476191e-05,
      "loss": 1.4059,
      "step": 125
    },
    {
      "epoch": 0.464078536367693,
      "grad_norm": 0.5304180383682251,
      "learning_rate": 7.738095238095239e-05,
      "loss": 1.3072,
      "step": 130
    },
    {
      "epoch": 0.4819277108433735,
      "grad_norm": 0.8797634243965149,
      "learning_rate": 8.035714285714287e-05,
      "loss": 1.4132,
      "step": 135
    },
    {
      "epoch": 0.499776885319054,
      "grad_norm": 0.9049625396728516,
      "learning_rate": 8.333333333333334e-05,
      "loss": 1.4121,
      "step": 140
    },
    {
      "epoch": 0.499776885319054,
      "eval_loss": 1.3727394342422485,
      "eval_runtime": 17.745,
      "eval_samples_per_second": 2.592,
      "eval_steps_per_second": 2.592,
      "step": 140
    },
    {
      "epoch": 0.5176260597947345,
      "grad_norm": 0.6793915033340454,
      "learning_rate": 8.630952380952382e-05,
      "loss": 1.3109,
      "step": 145
    },
    {
      "epoch": 0.535475234270415,
      "grad_norm": 0.7171015739440918,
      "learning_rate": 8.92857142857143e-05,
      "loss": 1.3781,
      "step": 150
    },
    {
      "epoch": 0.5533244087460955,
      "grad_norm": 0.6738716959953308,
      "learning_rate": 9.226190476190478e-05,
      "loss": 1.3564,
      "step": 155
    },
    {
      "epoch": 0.571173583221776,
      "grad_norm": 0.699975311756134,
      "learning_rate": 9.523809523809524e-05,
      "loss": 1.2387,
      "step": 160
    },
    {
      "epoch": 0.5890227576974565,
      "grad_norm": 0.7659904956817627,
      "learning_rate": 9.821428571428572e-05,
      "loss": 1.3042,
      "step": 165
    },
    {
      "epoch": 0.606871932173137,
      "grad_norm": 0.9782125353813171,
      "learning_rate": 9.999956828659095e-05,
      "loss": 1.3709,
      "step": 170
    },
    {
      "epoch": 0.6247211066488175,
      "grad_norm": 1.0532957315444946,
      "learning_rate": 9.999471159635539e-05,
      "loss": 1.3844,
      "step": 175
    },
    {
      "epoch": 0.642570281124498,
      "grad_norm": 0.7373877167701721,
      "learning_rate": 9.998445910004082e-05,
      "loss": 1.2852,
      "step": 180
    },
    {
      "epoch": 0.6604194556001785,
      "grad_norm": 1.0207768678665161,
      "learning_rate": 9.996881190417393e-05,
      "loss": 1.4652,
      "step": 185
    },
    {
      "epoch": 0.678268630075859,
      "grad_norm": 0.7943917512893677,
      "learning_rate": 9.994777169751806e-05,
      "loss": 1.3743,
      "step": 190
    },
    {
      "epoch": 0.6961178045515395,
      "grad_norm": 0.7461659908294678,
      "learning_rate": 9.992134075089084e-05,
      "loss": 1.2423,
      "step": 195
    },
    {
      "epoch": 0.71396697902722,
      "grad_norm": 0.9689913988113403,
      "learning_rate": 9.988952191691925e-05,
      "loss": 1.3113,
      "step": 200
    },
    {
      "epoch": 0.7318161535029005,
      "grad_norm": 0.766276478767395,
      "learning_rate": 9.985231862973168e-05,
      "loss": 1.3524,
      "step": 205
    },
    {
      "epoch": 0.749665327978581,
      "grad_norm": 0.6728419661521912,
      "learning_rate": 9.980973490458728e-05,
      "loss": 1.4038,
      "step": 210
    },
    {
      "epoch": 0.749665327978581,
      "eval_loss": 1.3051044940948486,
      "eval_runtime": 17.7559,
      "eval_samples_per_second": 2.591,
      "eval_steps_per_second": 2.591,
      "step": 210
    },
    {
      "epoch": 0.7675145024542614,
      "grad_norm": 1.0456575155258179,
      "learning_rate": 9.976177533744261e-05,
      "loss": 1.3626,
      "step": 215
    },
    {
      "epoch": 0.785363676929942,
      "grad_norm": 0.9017456769943237,
      "learning_rate": 9.97084451044556e-05,
      "loss": 1.3232,
      "step": 220
    },
    {
      "epoch": 0.8032128514056225,
      "grad_norm": 0.9113703966140747,
      "learning_rate": 9.964974996142698e-05,
      "loss": 1.2826,
      "step": 225
    },
    {
      "epoch": 0.821062025881303,
      "grad_norm": 0.7177279591560364,
      "learning_rate": 9.958569624317893e-05,
      "loss": 1.2794,
      "step": 230
    },
    {
      "epoch": 0.8389112003569835,
      "grad_norm": 0.9058728814125061,
      "learning_rate": 9.951629086287151e-05,
      "loss": 1.3853,
      "step": 235
    },
    {
      "epoch": 0.856760374832664,
      "grad_norm": 0.6813459992408752,
      "learning_rate": 9.944154131125642e-05,
      "loss": 1.3533,
      "step": 240
    },
    {
      "epoch": 0.8746095493083444,
      "grad_norm": 0.7113555073738098,
      "learning_rate": 9.936145565586871e-05,
      "loss": 1.3395,
      "step": 245
    },
    {
      "epoch": 0.892458723784025,
      "grad_norm": 1.243597149848938,
      "learning_rate": 9.927604254015585e-05,
      "loss": 1.443,
      "step": 250
    },
    {
      "epoch": 0.9103078982597055,
      "grad_norm": 0.8651953339576721,
      "learning_rate": 9.918531118254507e-05,
      "loss": 1.398,
      "step": 255
    },
    {
      "epoch": 0.928157072735386,
      "grad_norm": 0.8877395987510681,
      "learning_rate": 9.90892713754483e-05,
      "loss": 1.346,
      "step": 260
    },
    {
      "epoch": 0.9460062472110665,
      "grad_norm": 0.8857008814811707,
      "learning_rate": 9.898793348420536e-05,
      "loss": 1.3921,
      "step": 265
    },
    {
      "epoch": 0.963855421686747,
      "grad_norm": 0.8319969177246094,
      "learning_rate": 9.888130844596524e-05,
      "loss": 1.3838,
      "step": 270
    },
    {
      "epoch": 0.9817045961624274,
      "grad_norm": 0.7452044486999512,
      "learning_rate": 9.876940776850569e-05,
      "loss": 1.3529,
      "step": 275
    },
    {
      "epoch": 0.999553770638108,
      "grad_norm": 0.7535015940666199,
      "learning_rate": 9.865224352899119e-05,
      "loss": 1.2739,
      "step": 280
    },
    {
      "epoch": 0.999553770638108,
      "eval_loss": 1.289029836654663,
      "eval_runtime": 17.7491,
      "eval_samples_per_second": 2.592,
      "eval_steps_per_second": 2.592,
      "step": 280
    },
    {
      "epoch": 1.0174029451137885,
      "grad_norm": 0.7779117226600647,
      "learning_rate": 9.852982837266955e-05,
      "loss": 1.2339,
      "step": 285
    },
    {
      "epoch": 1.035252119589469,
      "grad_norm": 0.8113610744476318,
      "learning_rate": 9.840217551150706e-05,
      "loss": 1.0982,
      "step": 290
    },
    {
      "epoch": 1.0531012940651494,
      "grad_norm": 1.004701852798462,
      "learning_rate": 9.826929872276255e-05,
      "loss": 1.2537,
      "step": 295
    },
    {
      "epoch": 1.07095046854083,
      "grad_norm": 1.524734616279602,
      "learning_rate": 9.81312123475006e-05,
      "loss": 1.1664,
      "step": 300
    },
    {
      "epoch": 1.0887996430165106,
      "grad_norm": 1.5680856704711914,
      "learning_rate": 9.798793128904356e-05,
      "loss": 1.08,
      "step": 305
    },
    {
      "epoch": 1.106648817492191,
      "grad_norm": 1.4838035106658936,
      "learning_rate": 9.78394710113631e-05,
      "loss": 1.1029,
      "step": 310
    },
    {
      "epoch": 1.1244979919678715,
      "grad_norm": 1.522316575050354,
      "learning_rate": 9.768584753741134e-05,
      "loss": 1.1524,
      "step": 315
    },
    {
      "epoch": 1.142347166443552,
      "grad_norm": 1.3976528644561768,
      "learning_rate": 9.752707744739145e-05,
      "loss": 1.1328,
      "step": 320
    },
    {
      "epoch": 1.1601963409192324,
      "grad_norm": 1.4764764308929443,
      "learning_rate": 9.736317787696816e-05,
      "loss": 1.1174,
      "step": 325
    },
    {
      "epoch": 1.178045515394913,
      "grad_norm": 1.3623173236846924,
      "learning_rate": 9.719416651541839e-05,
      "loss": 1.0493,
      "step": 330
    },
    {
      "epoch": 1.1958946898705936,
      "grad_norm": 1.3625001907348633,
      "learning_rate": 9.702006160372209e-05,
      "loss": 1.0479,
      "step": 335
    },
    {
      "epoch": 1.213743864346274,
      "grad_norm": 1.7509726285934448,
      "learning_rate": 9.684088193259355e-05,
      "loss": 1.1043,
      "step": 340
    },
    {
      "epoch": 1.2315930388219545,
      "grad_norm": 1.5920188426971436,
      "learning_rate": 9.665664684045333e-05,
      "loss": 1.1096,
      "step": 345
    },
    {
      "epoch": 1.249442213297635,
      "grad_norm": 1.6554943323135376,
      "learning_rate": 9.646737621134112e-05,
      "loss": 1.1436,
      "step": 350
    },
    {
      "epoch": 1.249442213297635,
      "eval_loss": 1.3194608688354492,
      "eval_runtime": 17.7382,
      "eval_samples_per_second": 2.593,
      "eval_steps_per_second": 2.593,
      "step": 350
    },
    {
      "epoch": 1.2672913877733154,
      "grad_norm": 1.881818175315857,
      "learning_rate": 9.627309047276974e-05,
      "loss": 1.0549,
      "step": 355
    },
    {
      "epoch": 1.285140562248996,
      "grad_norm": 1.8770464658737183,
      "learning_rate": 9.607381059352038e-05,
      "loss": 1.1576,
      "step": 360
    },
    {
      "epoch": 1.3029897367246766,
      "grad_norm": 1.6901912689208984,
      "learning_rate": 9.586955808137958e-05,
      "loss": 1.1246,
      "step": 365
    },
    {
      "epoch": 1.320838911200357,
      "grad_norm": 1.7667070627212524,
      "learning_rate": 9.566035498081784e-05,
      "loss": 1.125,
      "step": 370
    },
    {
      "epoch": 1.3386880856760375,
      "grad_norm": 1.6150933504104614,
      "learning_rate": 9.544622387061055e-05,
      "loss": 1.1687,
      "step": 375
    },
    {
      "epoch": 1.356537260151718,
      "grad_norm": 1.5824884176254272,
      "learning_rate": 9.522718786140097e-05,
      "loss": 0.9699,
      "step": 380
    },
    {
      "epoch": 1.3743864346273984,
      "grad_norm": 1.5410280227661133,
      "learning_rate": 9.500327059320606e-05,
      "loss": 1.1379,
      "step": 385
    },
    {
      "epoch": 1.392235609103079,
      "grad_norm": 2.264235496520996,
      "learning_rate": 9.477449623286505e-05,
      "loss": 1.0511,
      "step": 390
    },
    {
      "epoch": 1.4100847835787595,
      "grad_norm": 1.7440612316131592,
      "learning_rate": 9.454088947143116e-05,
      "loss": 1.0003,
      "step": 395
    },
    {
      "epoch": 1.42793395805444,
      "grad_norm": 1.770466923713684,
      "learning_rate": 9.430247552150673e-05,
      "loss": 1.1631,
      "step": 400
    },
    {
      "epoch": 1.4457831325301205,
      "grad_norm": 1.9537169933319092,
      "learning_rate": 9.405928011452211e-05,
      "loss": 1.045,
      "step": 405
    },
    {
      "epoch": 1.463632307005801,
      "grad_norm": 1.452445387840271,
      "learning_rate": 9.381132949795861e-05,
      "loss": 1.0511,
      "step": 410
    },
    {
      "epoch": 1.4814814814814814,
      "grad_norm": 2.176547050476074,
      "learning_rate": 9.35586504325155e-05,
      "loss": 1.1637,
      "step": 415
    },
    {
      "epoch": 1.499330655957162,
      "grad_norm": 2.15567684173584,
      "learning_rate": 9.330127018922194e-05,
      "loss": 1.0783,
      "step": 420
    },
    {
      "epoch": 1.499330655957162,
      "eval_loss": 1.3106330633163452,
      "eval_runtime": 17.7447,
      "eval_samples_per_second": 2.592,
      "eval_steps_per_second": 2.592,
      "step": 420
    },
    {
      "epoch": 1.5171798304328425,
      "grad_norm": 1.6800014972686768,
      "learning_rate": 9.303921654649362e-05,
      "loss": 1.0406,
      "step": 425
    },
    {
      "epoch": 1.5350290049085231,
      "grad_norm": 1.926607370376587,
      "learning_rate": 9.277251778713474e-05,
      "loss": 1.1469,
      "step": 430
    },
    {
      "epoch": 1.5528781793842035,
      "grad_norm": 1.7155028581619263,
      "learning_rate": 9.250120269528546e-05,
      "loss": 1.0453,
      "step": 435
    },
    {
      "epoch": 1.5707273538598838,
      "grad_norm": 1.9001247882843018,
      "learning_rate": 9.22253005533154e-05,
      "loss": 1.0611,
      "step": 440
    },
    {
      "epoch": 1.5885765283355644,
      "grad_norm": 2.2804248332977295,
      "learning_rate": 9.194484113866313e-05,
      "loss": 1.082,
      "step": 445
    },
    {
      "epoch": 1.606425702811245,
      "grad_norm": 1.9318439960479736,
      "learning_rate": 9.165985472062246e-05,
      "loss": 1.2404,
      "step": 450
    },
    {
      "epoch": 1.6242748772869255,
      "grad_norm": 1.6018136739730835,
      "learning_rate": 9.137037205707552e-05,
      "loss": 1.0436,
      "step": 455
    },
    {
      "epoch": 1.6421240517626061,
      "grad_norm": 2.1986541748046875,
      "learning_rate": 9.107642439117321e-05,
      "loss": 1.1227,
      "step": 460
    },
    {
      "epoch": 1.6599732262382865,
      "grad_norm": 1.5558295249938965,
      "learning_rate": 9.077804344796302e-05,
      "loss": 1.0858,
      "step": 465
    },
    {
      "epoch": 1.6778224007139668,
      "grad_norm": 1.8423618078231812,
      "learning_rate": 9.04752614309652e-05,
      "loss": 1.0998,
      "step": 470
    },
    {
      "epoch": 1.6956715751896474,
      "grad_norm": 1.9065622091293335,
      "learning_rate": 9.01681110186971e-05,
      "loss": 1.0433,
      "step": 475
    },
    {
      "epoch": 1.713520749665328,
      "grad_norm": 2.0103020668029785,
      "learning_rate": 8.985662536114613e-05,
      "loss": 1.0798,
      "step": 480
    },
    {
      "epoch": 1.7313699241410085,
      "grad_norm": 1.5299313068389893,
      "learning_rate": 8.954083807619208e-05,
      "loss": 1.1012,
      "step": 485
    },
    {
      "epoch": 1.7492190986166891,
      "grad_norm": 1.6331924200057983,
      "learning_rate": 8.922078324597879e-05,
      "loss": 1.1219,
      "step": 490
    },
    {
      "epoch": 1.7492190986166891,
      "eval_loss": 1.3044873476028442,
      "eval_runtime": 17.7401,
      "eval_samples_per_second": 2.593,
      "eval_steps_per_second": 2.593,
      "step": 490
    },
    {
      "epoch": 1.7670682730923695,
      "grad_norm": 1.6050705909729004,
      "learning_rate": 8.889649541323574e-05,
      "loss": 1.16,
      "step": 495
    },
    {
      "epoch": 1.7849174475680498,
      "grad_norm": 1.7604998350143433,
      "learning_rate": 8.856800957755e-05,
      "loss": 1.091,
      "step": 500
    },
    {
      "epoch": 1.8027666220437304,
      "grad_norm": 1.6485258340835571,
      "learning_rate": 8.823536119158864e-05,
      "loss": 1.072,
      "step": 505
    },
    {
      "epoch": 1.820615796519411,
      "grad_norm": 1.8173716068267822,
      "learning_rate": 8.789858615727265e-05,
      "loss": 1.0635,
      "step": 510
    },
    {
      "epoch": 1.8384649709950915,
      "grad_norm": 1.468127965927124,
      "learning_rate": 8.755772082190194e-05,
      "loss": 1.0258,
      "step": 515
    },
    {
      "epoch": 1.8563141454707721,
      "grad_norm": 1.4476536512374878,
      "learning_rate": 8.721280197423258e-05,
      "loss": 1.2011,
      "step": 520
    },
    {
      "epoch": 1.8741633199464525,
      "grad_norm": 2.054915189743042,
      "learning_rate": 8.68638668405062e-05,
      "loss": 1.0539,
      "step": 525
    },
    {
      "epoch": 1.8920124944221328,
      "grad_norm": 1.8471094369888306,
      "learning_rate": 8.651095308043232e-05,
      "loss": 1.0948,
      "step": 530
    },
    {
      "epoch": 1.9098616688978134,
      "grad_norm": 1.7790355682373047,
      "learning_rate": 8.61540987831238e-05,
      "loss": 1.1245,
      "step": 535
    },
    {
      "epoch": 1.927710843373494,
      "grad_norm": 1.6644902229309082,
      "learning_rate": 8.579334246298593e-05,
      "loss": 1.2039,
      "step": 540
    },
    {
      "epoch": 1.9455600178491745,
      "grad_norm": 1.9952303171157837,
      "learning_rate": 8.542872305555978e-05,
      "loss": 1.1077,
      "step": 545
    },
    {
      "epoch": 1.9634091923248551,
      "grad_norm": 2.225977659225464,
      "learning_rate": 8.50602799133199e-05,
      "loss": 1.0603,
      "step": 550
    },
    {
      "epoch": 1.9812583668005355,
      "grad_norm": 1.777342438697815,
      "learning_rate": 8.468805280142709e-05,
      "loss": 1.1376,
      "step": 555
    },
    {
      "epoch": 1.9991075412762158,
      "grad_norm": 2.2195017337799072,
      "learning_rate": 8.43120818934367e-05,
      "loss": 1.0966,
      "step": 560
    },
    {
      "epoch": 1.9991075412762158,
      "eval_loss": 1.3094360828399658,
      "eval_runtime": 17.7539,
      "eval_samples_per_second": 2.591,
      "eval_steps_per_second": 2.591,
      "step": 560
    },
    {
      "epoch": 2.0169567157518964,
      "grad_norm": 2.012312173843384,
      "learning_rate": 8.393240776696274e-05,
      "loss": 0.6867,
      "step": 565
    },
    {
      "epoch": 2.034805890227577,
      "grad_norm": 3.092951774597168,
      "learning_rate": 8.354907139929851e-05,
      "loss": 0.6025,
      "step": 570
    },
    {
      "epoch": 2.0526550647032575,
      "grad_norm": 4.8303399085998535,
      "learning_rate": 8.316211416299397e-05,
      "loss": 0.6497,
      "step": 575
    },
    {
      "epoch": 2.070504239178938,
      "grad_norm": 3.1457698345184326,
      "learning_rate": 8.27715778213905e-05,
      "loss": 0.5803,
      "step": 580
    },
    {
      "epoch": 2.0883534136546187,
      "grad_norm": 2.5240321159362793,
      "learning_rate": 8.237750452411353e-05,
      "loss": 0.494,
      "step": 585
    },
    {
      "epoch": 2.106202588130299,
      "grad_norm": 2.630946636199951,
      "learning_rate": 8.197993680252334e-05,
      "loss": 0.6428,
      "step": 590
    },
    {
      "epoch": 2.1240517626059794,
      "grad_norm": 2.9942588806152344,
      "learning_rate": 8.157891756512488e-05,
      "loss": 0.6612,
      "step": 595
    },
    {
      "epoch": 2.14190093708166,
      "grad_norm": 2.8771650791168213,
      "learning_rate": 8.117449009293668e-05,
      "loss": 0.5783,
      "step": 600
    },
    {
      "epoch": 2.1597501115573405,
      "grad_norm": 3.1111013889312744,
      "learning_rate": 8.076669803481965e-05,
      "loss": 0.5799,
      "step": 605
    },
    {
      "epoch": 2.177599286033021,
      "grad_norm": 3.715027093887329,
      "learning_rate": 8.035558540276618e-05,
      "loss": 0.5344,
      "step": 610
    },
    {
      "epoch": 2.1954484605087012,
      "grad_norm": 2.936890125274658,
      "learning_rate": 7.994119656715002e-05,
      "loss": 0.5605,
      "step": 615
    },
    {
      "epoch": 2.213297634984382,
      "grad_norm": 2.79441499710083,
      "learning_rate": 7.952357625193749e-05,
      "loss": 0.5923,
      "step": 620
    },
    {
      "epoch": 2.2311468094600624,
      "grad_norm": 3.444474697113037,
      "learning_rate": 7.91027695298606e-05,
      "loss": 0.6067,
      "step": 625
    },
    {
      "epoch": 2.248995983935743,
      "grad_norm": 3.034071445465088,
      "learning_rate": 7.86788218175523e-05,
      "loss": 0.6134,
      "step": 630
    },
    {
      "epoch": 2.248995983935743,
      "eval_loss": 1.4945974349975586,
      "eval_runtime": 17.7423,
      "eval_samples_per_second": 2.593,
      "eval_steps_per_second": 2.593,
      "step": 630
    },
    {
      "epoch": 2.2668451584114235,
      "grad_norm": 3.0743188858032227,
      "learning_rate": 7.8251778870645e-05,
      "loss": 0.5798,
      "step": 635
    },
    {
      "epoch": 2.284694332887104,
      "grad_norm": 3.250493049621582,
      "learning_rate": 7.782168677883206e-05,
      "loss": 0.5705,
      "step": 640
    },
    {
      "epoch": 2.3025435073627847,
      "grad_norm": 2.4863390922546387,
      "learning_rate": 7.738859196089358e-05,
      "loss": 0.6119,
      "step": 645
    },
    {
      "epoch": 2.320392681838465,
      "grad_norm": 3.1027884483337402,
      "learning_rate": 7.695254115968648e-05,
      "loss": 0.6352,
      "step": 650
    },
    {
      "epoch": 2.3382418563141454,
      "grad_norm": 2.840583562850952,
      "learning_rate": 7.651358143709972e-05,
      "loss": 0.6341,
      "step": 655
    },
    {
      "epoch": 2.356091030789826,
      "grad_norm": 3.057770252227783,
      "learning_rate": 7.60717601689749e-05,
      "loss": 0.6695,
      "step": 660
    },
    {
      "epoch": 2.3739402052655065,
      "grad_norm": 3.563372850418091,
      "learning_rate": 7.562712503999327e-05,
      "loss": 0.5715,
      "step": 665
    },
    {
      "epoch": 2.391789379741187,
      "grad_norm": 3.2286486625671387,
      "learning_rate": 7.517972403852905e-05,
      "loss": 0.7753,
      "step": 670
    },
    {
      "epoch": 2.4096385542168672,
      "grad_norm": 2.9088051319122314,
      "learning_rate": 7.472960545147038e-05,
      "loss": 0.5529,
      "step": 675
    },
    {
      "epoch": 2.427487728692548,
      "grad_norm": 2.9432833194732666,
      "learning_rate": 7.427681785900761e-05,
      "loss": 0.5715,
      "step": 680
    },
    {
      "epoch": 2.4453369031682284,
      "grad_norm": 2.483222723007202,
      "learning_rate": 7.382141012939034e-05,
      "loss": 0.6085,
      "step": 685
    },
    {
      "epoch": 2.463186077643909,
      "grad_norm": 2.9013617038726807,
      "learning_rate": 7.33634314136531e-05,
      "loss": 0.627,
      "step": 690
    },
    {
      "epoch": 2.4810352521195895,
      "grad_norm": 2.746309995651245,
      "learning_rate": 7.290293114031061e-05,
      "loss": 0.6403,
      "step": 695
    },
    {
      "epoch": 2.49888442659527,
      "grad_norm": 2.8350794315338135,
      "learning_rate": 7.243995901002312e-05,
      "loss": 0.6342,
      "step": 700
    },
    {
      "epoch": 2.49888442659527,
      "eval_loss": 1.4858874082565308,
      "eval_runtime": 17.7385,
      "eval_samples_per_second": 2.593,
      "eval_steps_per_second": 2.593,
      "step": 700
    },
    {
      "epoch": 2.5167336010709507,
      "grad_norm": 3.006899833679199,
      "learning_rate": 7.197456499023225e-05,
      "loss": 0.5921,
      "step": 705
    },
    {
      "epoch": 2.534582775546631,
      "grad_norm": 2.9739573001861572,
      "learning_rate": 7.150679930976825e-05,
      "loss": 0.5873,
      "step": 710
    },
    {
      "epoch": 2.5524319500223114,
      "grad_norm": 3.7028846740722656,
      "learning_rate": 7.103671245342887e-05,
      "loss": 0.6661,
      "step": 715
    },
    {
      "epoch": 2.570281124497992,
      "grad_norm": 3.090599775314331,
      "learning_rate": 7.056435515653059e-05,
      "loss": 0.5388,
      "step": 720
    },
    {
      "epoch": 2.5881302989736725,
      "grad_norm": 2.799252986907959,
      "learning_rate": 7.008977839943299e-05,
      "loss": 0.6641,
      "step": 725
    },
    {
      "epoch": 2.605979473449353,
      "grad_norm": 2.8093032836914062,
      "learning_rate": 6.961303340203653e-05,
      "loss": 0.6221,
      "step": 730
    },
    {
      "epoch": 2.6238286479250332,
      "grad_norm": 3.6351985931396484,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.599,
      "step": 735
    },
    {
      "epoch": 2.641677822400714,
      "grad_norm": 2.6190829277038574,
      "learning_rate": 6.86532447304597e-05,
      "loss": 0.6047,
      "step": 740
    },
    {
      "epoch": 2.6595269968763944,
      "grad_norm": 3.227262020111084,
      "learning_rate": 6.817030464390656e-05,
      "loss": 0.614,
      "step": 745
    },
    {
      "epoch": 2.677376171352075,
      "grad_norm": 2.5810439586639404,
      "learning_rate": 6.768540348112907e-05,
      "loss": 0.6367,
      "step": 750
    },
    {
      "epoch": 2.6952253458277555,
      "grad_norm": 3.030888557434082,
      "learning_rate": 6.719859357631535e-05,
      "loss": 0.5681,
      "step": 755
    },
    {
      "epoch": 2.713074520303436,
      "grad_norm": 3.1176657676696777,
      "learning_rate": 6.670992746965938e-05,
      "loss": 0.5723,
      "step": 760
    },
    {
      "epoch": 2.7309236947791167,
      "grad_norm": 3.0151100158691406,
      "learning_rate": 6.621945790169036e-05,
      "loss": 0.6385,
      "step": 765
    },
    {
      "epoch": 2.748772869254797,
      "grad_norm": 3.4799766540527344,
      "learning_rate": 6.572723780758069e-05,
      "loss": 0.6665,
      "step": 770
    },
    {
      "epoch": 2.748772869254797,
      "eval_loss": 1.5236101150512695,
      "eval_runtime": 17.7462,
      "eval_samples_per_second": 2.592,
      "eval_steps_per_second": 2.592,
      "step": 770
    },
    {
      "epoch": 2.7666220437304774,
      "grad_norm": 3.1448163986206055,
      "learning_rate": 6.523332031143272e-05,
      "loss": 0.6083,
      "step": 775
    },
    {
      "epoch": 2.784471218206158,
      "grad_norm": 2.874833106994629,
      "learning_rate": 6.473775872054521e-05,
      "loss": 0.6493,
      "step": 780
    },
    {
      "epoch": 2.8023203926818385,
      "grad_norm": 3.2550127506256104,
      "learning_rate": 6.424060651966007e-05,
      "loss": 0.5722,
      "step": 785
    },
    {
      "epoch": 2.820169567157519,
      "grad_norm": 3.066908121109009,
      "learning_rate": 6.374191736518974e-05,
      "loss": 0.611,
      "step": 790
    },
    {
      "epoch": 2.8380187416331992,
      "grad_norm": 3.05871319770813,
      "learning_rate": 6.324174507942637e-05,
      "loss": 0.6202,
      "step": 795
    },
    {
      "epoch": 2.85586791610888,
      "grad_norm": 3.2599833011627197,
      "learning_rate": 6.274014364473274e-05,
      "loss": 0.5593,
      "step": 800
    },
    {
      "epoch": 2.8737170905845604,
      "grad_norm": 2.897418260574341,
      "learning_rate": 6.22371671977162e-05,
      "loss": 0.7415,
      "step": 805
    },
    {
      "epoch": 2.891566265060241,
      "grad_norm": 3.032317876815796,
      "learning_rate": 6.173287002338577e-05,
      "loss": 0.6544,
      "step": 810
    },
    {
      "epoch": 2.9094154395359215,
      "grad_norm": 2.7111008167266846,
      "learning_rate": 6.122730654929334e-05,
      "loss": 0.6421,
      "step": 815
    },
    {
      "epoch": 2.927264614011602,
      "grad_norm": 2.7735886573791504,
      "learning_rate": 6.072053133965938e-05,
      "loss": 0.6332,
      "step": 820
    },
    {
      "epoch": 2.9451137884872827,
      "grad_norm": 3.4417500495910645,
      "learning_rate": 6.021259908948402e-05,
      "loss": 0.6508,
      "step": 825
    },
    {
      "epoch": 2.962962962962963,
      "grad_norm": 3.432999849319458,
      "learning_rate": 5.970356461864391e-05,
      "loss": 0.621,
      "step": 830
    },
    {
      "epoch": 2.9808121374386434,
      "grad_norm": 3.470132827758789,
      "learning_rate": 5.919348286597569e-05,
      "loss": 0.6347,
      "step": 835
    },
    {
      "epoch": 2.998661311914324,
      "grad_norm": 3.153116226196289,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.6101,
      "step": 840
    },
    {
      "epoch": 2.998661311914324,
      "eval_loss": 1.5220016241073608,
      "eval_runtime": 17.7399,
      "eval_samples_per_second": 2.593,
      "eval_steps_per_second": 2.593,
      "step": 840
    },
    {
      "epoch": 3.0165104863900045,
      "grad_norm": 2.5395278930664062,
      "learning_rate": 5.8170397829712485e-05,
      "loss": 0.4183,
      "step": 845
    },
    {
      "epoch": 3.034359660865685,
      "grad_norm": 2.833970308303833,
      "learning_rate": 5.765750496516547e-05,
      "loss": 0.1667,
      "step": 850
    },
    {
      "epoch": 3.0522088353413657,
      "grad_norm": 3.447057008743286,
      "learning_rate": 5.714378564496901e-05,
      "loss": 0.255,
      "step": 855
    },
    {
      "epoch": 3.070058009817046,
      "grad_norm": 3.9993224143981934,
      "learning_rate": 5.6629295313583974e-05,
      "loss": 0.2424,
      "step": 860
    },
    {
      "epoch": 3.0879071842927264,
      "grad_norm": 3.626281499862671,
      "learning_rate": 5.611408949868457e-05,
      "loss": 0.2097,
      "step": 865
    },
    {
      "epoch": 3.105756358768407,
      "grad_norm": 2.693284034729004,
      "learning_rate": 5.559822380516539e-05,
      "loss": 0.2271,
      "step": 870
    },
    {
      "epoch": 3.1236055332440875,
      "grad_norm": 2.439389705657959,
      "learning_rate": 5.5081753909140096e-05,
      "loss": 0.1982,
      "step": 875
    },
    {
      "epoch": 3.141454707719768,
      "grad_norm": 2.6163575649261475,
      "learning_rate": 5.456473555193242e-05,
      "loss": 0.2192,
      "step": 880
    },
    {
      "epoch": 3.1593038821954487,
      "grad_norm": 2.405829668045044,
      "learning_rate": 5.404722453406017e-05,
      "loss": 0.2097,
      "step": 885
    },
    {
      "epoch": 3.177153056671129,
      "grad_norm": 2.819413423538208,
      "learning_rate": 5.3529276709212816e-05,
      "loss": 0.2213,
      "step": 890
    },
    {
      "epoch": 3.1950022311468094,
      "grad_norm": 3.6370203495025635,
      "learning_rate": 5.30109479782233e-05,
      "loss": 0.2559,
      "step": 895
    },
    {
      "epoch": 3.21285140562249,
      "grad_norm": 3.4090726375579834,
      "learning_rate": 5.249229428303486e-05,
      "loss": 0.1955,
      "step": 900
    },
    {
      "epoch": 3.2307005800981705,
      "grad_norm": 2.8171908855438232,
      "learning_rate": 5.197337160066331e-05,
      "loss": 0.2642,
      "step": 905
    },
    {
      "epoch": 3.248549754573851,
      "grad_norm": 3.926447629928589,
      "learning_rate": 5.145423593715557e-05,
      "loss": 0.2467,
      "step": 910
    },
    {
      "epoch": 3.248549754573851,
      "eval_loss": 1.8390079736709595,
      "eval_runtime": 17.7348,
      "eval_samples_per_second": 2.594,
      "eval_steps_per_second": 2.594,
      "step": 910
    },
    {
      "epoch": 3.266398929049531,
      "grad_norm": 2.7143030166625977,
      "learning_rate": 5.0934943321545115e-05,
      "loss": 0.2239,
      "step": 915
    },
    {
      "epoch": 3.284248103525212,
      "grad_norm": 2.717496871948242,
      "learning_rate": 5.041554979980486e-05,
      "loss": 0.1545,
      "step": 920
    },
    {
      "epoch": 3.3020972780008924,
      "grad_norm": 3.516397714614868,
      "learning_rate": 4.9896111428798254e-05,
      "loss": 0.2819,
      "step": 925
    },
    {
      "epoch": 3.319946452476573,
      "grad_norm": 3.3290677070617676,
      "learning_rate": 4.9376684270229254e-05,
      "loss": 0.3043,
      "step": 930
    },
    {
      "epoch": 3.3377956269522535,
      "grad_norm": 2.914736032485962,
      "learning_rate": 4.8857324384591653e-05,
      "loss": 0.2494,
      "step": 935
    },
    {
      "epoch": 3.355644801427934,
      "grad_norm": 3.37791109085083,
      "learning_rate": 4.8338087825118675e-05,
      "loss": 0.2271,
      "step": 940
    },
    {
      "epoch": 3.3734939759036147,
      "grad_norm": 3.295100688934326,
      "learning_rate": 4.781903063173321e-05,
      "loss": 0.242,
      "step": 945
    },
    {
      "epoch": 3.391343150379295,
      "grad_norm": 2.5792458057403564,
      "learning_rate": 4.730020882499964e-05,
      "loss": 0.2244,
      "step": 950
    },
    {
      "epoch": 3.4091923248549754,
      "grad_norm": 3.0014591217041016,
      "learning_rate": 4.678167840007767e-05,
      "loss": 0.2552,
      "step": 955
    },
    {
      "epoch": 3.427041499330656,
      "grad_norm": 3.207282066345215,
      "learning_rate": 4.626349532067879e-05,
      "loss": 0.2542,
      "step": 960
    },
    {
      "epoch": 3.4448906738063365,
      "grad_norm": 3.85109543800354,
      "learning_rate": 4.574571551302647e-05,
      "loss": 0.3249,
      "step": 965
    },
    {
      "epoch": 3.462739848282017,
      "grad_norm": 3.3335843086242676,
      "learning_rate": 4.522839485981994e-05,
      "loss": 0.2729,
      "step": 970
    },
    {
      "epoch": 3.480589022757697,
      "grad_norm": 2.885708808898926,
      "learning_rate": 4.471158919420312e-05,
      "loss": 0.2595,
      "step": 975
    },
    {
      "epoch": 3.498438197233378,
      "grad_norm": 3.215789556503296,
      "learning_rate": 4.4195354293738484e-05,
      "loss": 0.2284,
      "step": 980
    },
    {
      "epoch": 3.498438197233378,
      "eval_loss": 1.82525634765625,
      "eval_runtime": 17.7537,
      "eval_samples_per_second": 2.591,
      "eval_steps_per_second": 2.591,
      "step": 980
    },
    {
      "epoch": 3.5162873717090584,
      "grad_norm": 3.4772818088531494,
      "learning_rate": 4.367974587438733e-05,
      "loss": 0.1947,
      "step": 985
    },
    {
      "epoch": 3.534136546184739,
      "grad_norm": 2.6401774883270264,
      "learning_rate": 4.316481958449634e-05,
      "loss": 0.2352,
      "step": 990
    },
    {
      "epoch": 3.5519857206604195,
      "grad_norm": 3.997591733932495,
      "learning_rate": 4.2650630998791615e-05,
      "loss": 0.2047,
      "step": 995
    },
    {
      "epoch": 3.5698348951361,
      "grad_norm": 2.5615384578704834,
      "learning_rate": 4.213723561238074e-05,
      "loss": 0.2369,
      "step": 1000
    },
    {
      "epoch": 3.5876840696117807,
      "grad_norm": 2.5114736557006836,
      "learning_rate": 4.162468883476319e-05,
      "loss": 0.2416,
      "step": 1005
    },
    {
      "epoch": 3.605533244087461,
      "grad_norm": 4.23993444442749,
      "learning_rate": 4.111304598385018e-05,
      "loss": 0.2353,
      "step": 1010
    },
    {
      "epoch": 3.6233824185631414,
      "grad_norm": 3.239319324493408,
      "learning_rate": 4.060236227999441e-05,
      "loss": 0.2155,
      "step": 1015
    },
    {
      "epoch": 3.641231593038822,
      "grad_norm": 2.030393600463867,
      "learning_rate": 4.0092692840030134e-05,
      "loss": 0.2241,
      "step": 1020
    },
    {
      "epoch": 3.6590807675145025,
      "grad_norm": 3.636963367462158,
      "learning_rate": 3.9584092671324606e-05,
      "loss": 0.2408,
      "step": 1025
    },
    {
      "epoch": 3.676929941990183,
      "grad_norm": 4.295063495635986,
      "learning_rate": 3.907661666584131e-05,
      "loss": 0.2423,
      "step": 1030
    },
    {
      "epoch": 3.694779116465863,
      "grad_norm": 3.268596887588501,
      "learning_rate": 3.857031959421553e-05,
      "loss": 0.2581,
      "step": 1035
    },
    {
      "epoch": 3.7126282909415442,
      "grad_norm": 3.0428457260131836,
      "learning_rate": 3.806525609984312e-05,
      "loss": 0.206,
      "step": 1040
    },
    {
      "epoch": 3.7304774654172244,
      "grad_norm": 3.523777484893799,
      "learning_rate": 3.7561480692983006e-05,
      "loss": 0.1956,
      "step": 1045
    },
    {
      "epoch": 3.748326639892905,
      "grad_norm": 2.972714900970459,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.2839,
      "step": 1050
    },
    {
      "epoch": 3.748326639892905,
      "eval_loss": 1.8687995672225952,
      "eval_runtime": 17.732,
      "eval_samples_per_second": 2.594,
      "eval_steps_per_second": 2.594,
      "step": 1050
    },
    {
      "epoch": 3.7661758143685855,
      "grad_norm": 3.9769251346588135,
      "learning_rate": 3.655801148186655e-05,
      "loss": 0.2433,
      "step": 1055
    },
    {
      "epoch": 3.784024988844266,
      "grad_norm": 3.03606915473938,
      "learning_rate": 3.6058425979570485e-05,
      "loss": 0.2085,
      "step": 1060
    },
    {
      "epoch": 3.8018741633199467,
      "grad_norm": 3.5858893394470215,
      "learning_rate": 3.556034515701852e-05,
      "loss": 0.2277,
      "step": 1065
    },
    {
      "epoch": 3.819723337795627,
      "grad_norm": 2.5949602127075195,
      "learning_rate": 3.506382277084696e-05,
      "loss": 0.2497,
      "step": 1070
    },
    {
      "epoch": 3.8375725122713074,
      "grad_norm": 2.8706088066101074,
      "learning_rate": 3.4568912409493945e-05,
      "loss": 0.2462,
      "step": 1075
    },
    {
      "epoch": 3.855421686746988,
      "grad_norm": 3.238346576690674,
      "learning_rate": 3.4075667487415785e-05,
      "loss": 0.2004,
      "step": 1080
    },
    {
      "epoch": 3.8732708612226685,
      "grad_norm": 3.36478590965271,
      "learning_rate": 3.358414123932195e-05,
      "loss": 0.226,
      "step": 1085
    },
    {
      "epoch": 3.891120035698349,
      "grad_norm": 3.0954155921936035,
      "learning_rate": 3.3094386714429724e-05,
      "loss": 0.2114,
      "step": 1090
    },
    {
      "epoch": 3.908969210174029,
      "grad_norm": 3.016141891479492,
      "learning_rate": 3.2606456770738636e-05,
      "loss": 0.2694,
      "step": 1095
    },
    {
      "epoch": 3.9268183846497102,
      "grad_norm": 2.976658821105957,
      "learning_rate": 3.212040406932569e-05,
      "loss": 0.1828,
      "step": 1100
    },
    {
      "epoch": 3.9446675591253904,
      "grad_norm": 2.8186426162719727,
      "learning_rate": 3.163628106866172e-05,
      "loss": 0.1451,
      "step": 1105
    },
    {
      "epoch": 3.962516733601071,
      "grad_norm": 2.959024429321289,
      "learning_rate": 3.115414001894974e-05,
      "loss": 0.2349,
      "step": 1110
    },
    {
      "epoch": 3.9803659080767515,
      "grad_norm": 2.9852728843688965,
      "learning_rate": 3.067403295648566e-05,
      "loss": 0.2235,
      "step": 1115
    },
    {
      "epoch": 3.998215082552432,
      "grad_norm": 2.79172945022583,
      "learning_rate": 3.019601169804216e-05,
      "loss": 0.2111,
      "step": 1120
    },
    {
      "epoch": 3.998215082552432,
      "eval_loss": 1.891045093536377,
      "eval_runtime": 17.7382,
      "eval_samples_per_second": 2.593,
      "eval_steps_per_second": 2.593,
      "step": 1120
    },
    {
      "epoch": 4.016064257028113,
      "grad_norm": 1.1968103647232056,
      "learning_rate": 2.9720127835276256e-05,
      "loss": 0.1074,
      "step": 1125
    },
    {
      "epoch": 4.033913431503793,
      "grad_norm": 1.4865480661392212,
      "learning_rate": 2.9246432729161055e-05,
      "loss": 0.0628,
      "step": 1130
    },
    {
      "epoch": 4.051762605979474,
      "grad_norm": 2.913541078567505,
      "learning_rate": 2.8774977504442647e-05,
      "loss": 0.0615,
      "step": 1135
    },
    {
      "epoch": 4.069611780455154,
      "grad_norm": 2.1043801307678223,
      "learning_rate": 2.8305813044122097e-05,
      "loss": 0.0658,
      "step": 1140
    },
    {
      "epoch": 4.087460954930834,
      "grad_norm": 1.942076325416565,
      "learning_rate": 2.7838989983964065e-05,
      "loss": 0.0458,
      "step": 1145
    },
    {
      "epoch": 4.105310129406515,
      "grad_norm": 2.3953213691711426,
      "learning_rate": 2.737455870703155e-05,
      "loss": 0.0877,
      "step": 1150
    },
    {
      "epoch": 4.123159303882195,
      "grad_norm": 1.9993913173675537,
      "learning_rate": 2.6912569338248315e-05,
      "loss": 0.0567,
      "step": 1155
    },
    {
      "epoch": 4.141008478357876,
      "grad_norm": 2.4731192588806152,
      "learning_rate": 2.645307173898901e-05,
      "loss": 0.0817,
      "step": 1160
    },
    {
      "epoch": 4.158857652833556,
      "grad_norm": 2.3913474082946777,
      "learning_rate": 2.5996115501697694e-05,
      "loss": 0.0517,
      "step": 1165
    },
    {
      "epoch": 4.176706827309237,
      "grad_norm": 4.154366493225098,
      "learning_rate": 2.5541749944535554e-05,
      "loss": 0.0649,
      "step": 1170
    },
    {
      "epoch": 4.1945560017849175,
      "grad_norm": 1.4376811981201172,
      "learning_rate": 2.5090024106057962e-05,
      "loss": 0.0613,
      "step": 1175
    },
    {
      "epoch": 4.212405176260598,
      "grad_norm": 2.038010835647583,
      "learning_rate": 2.464098673992205e-05,
      "loss": 0.0763,
      "step": 1180
    },
    {
      "epoch": 4.230254350736279,
      "grad_norm": 1.862741470336914,
      "learning_rate": 2.4194686309624663e-05,
      "loss": 0.0733,
      "step": 1185
    },
    {
      "epoch": 4.248103525211959,
      "grad_norm": 2.7354800701141357,
      "learning_rate": 2.3751170983272e-05,
      "loss": 0.0753,
      "step": 1190
    },
    {
      "epoch": 4.248103525211959,
      "eval_loss": 2.2224178314208984,
      "eval_runtime": 17.7489,
      "eval_samples_per_second": 2.592,
      "eval_steps_per_second": 2.592,
      "step": 1190
    },
    {
      "epoch": 4.26595269968764,
      "grad_norm": 1.102008581161499,
      "learning_rate": 2.3310488628380757e-05,
      "loss": 0.0839,
      "step": 1195
    },
    {
      "epoch": 4.28380187416332,
      "grad_norm": 4.02572774887085,
      "learning_rate": 2.2872686806712035e-05,
      "loss": 0.0811,
      "step": 1200
    },
    {
      "epoch": 4.301651048639,
      "grad_norm": 1.9711402654647827,
      "learning_rate": 2.243781276913811e-05,
      "loss": 0.0783,
      "step": 1205
    },
    {
      "epoch": 4.319500223114681,
      "grad_norm": 2.0151891708374023,
      "learning_rate": 2.200591345054267e-05,
      "loss": 0.0488,
      "step": 1210
    },
    {
      "epoch": 4.337349397590361,
      "grad_norm": 4.591026782989502,
      "learning_rate": 2.157703546475539e-05,
      "loss": 0.0704,
      "step": 1215
    },
    {
      "epoch": 4.355198572066042,
      "grad_norm": 1.2874963283538818,
      "learning_rate": 2.115122509952085e-05,
      "loss": 0.0653,
      "step": 1220
    },
    {
      "epoch": 4.373047746541722,
      "grad_norm": 2.7136454582214355,
      "learning_rate": 2.0728528311502976e-05,
      "loss": 0.0471,
      "step": 1225
    },
    {
      "epoch": 4.3908969210174025,
      "grad_norm": 2.6785166263580322,
      "learning_rate": 2.0308990721324927e-05,
      "loss": 0.0757,
      "step": 1230
    },
    {
      "epoch": 4.4087460954930835,
      "grad_norm": 1.6510692834854126,
      "learning_rate": 1.989265760864542e-05,
      "loss": 0.0456,
      "step": 1235
    },
    {
      "epoch": 4.426595269968764,
      "grad_norm": 1.2233620882034302,
      "learning_rate": 1.947957390727185e-05,
      "loss": 0.0555,
      "step": 1240
    },
    {
      "epoch": 4.444444444444445,
      "grad_norm": 2.3564908504486084,
      "learning_rate": 1.906978420031059e-05,
      "loss": 0.0559,
      "step": 1245
    },
    {
      "epoch": 4.462293618920125,
      "grad_norm": 1.9344422817230225,
      "learning_rate": 1.8663332715355396e-05,
      "loss": 0.0395,
      "step": 1250
    },
    {
      "epoch": 4.480142793395806,
      "grad_norm": 1.6214028596878052,
      "learning_rate": 1.8260263319713844e-05,
      "loss": 0.0681,
      "step": 1255
    },
    {
      "epoch": 4.497991967871486,
      "grad_norm": 2.0569422245025635,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 0.072,
      "step": 1260
    },
    {
      "epoch": 4.497991967871486,
      "eval_loss": 2.309265613555908,
      "eval_runtime": 17.7487,
      "eval_samples_per_second": 2.592,
      "eval_steps_per_second": 2.592,
      "step": 1260
    },
    {
      "epoch": 4.515841142347167,
      "grad_norm": 2.3488802909851074,
      "learning_rate": 1.746444443580433e-05,
      "loss": 0.0614,
      "step": 1265
    },
    {
      "epoch": 4.533690316822847,
      "grad_norm": 2.056544303894043,
      "learning_rate": 1.7071780838308288e-05,
      "loss": 0.0644,
      "step": 1270
    },
    {
      "epoch": 4.551539491298527,
      "grad_norm": 2.576493740081787,
      "learning_rate": 1.6682671102399805e-05,
      "loss": 0.0678,
      "step": 1275
    },
    {
      "epoch": 4.569388665774208,
      "grad_norm": 1.5977071523666382,
      "learning_rate": 1.629715722373423e-05,
      "loss": 0.0474,
      "step": 1280
    },
    {
      "epoch": 4.587237840249888,
      "grad_norm": 3.0858843326568604,
      "learning_rate": 1.5915280809874932e-05,
      "loss": 0.0813,
      "step": 1285
    },
    {
      "epoch": 4.605087014725569,
      "grad_norm": 2.914644241333008,
      "learning_rate": 1.553708307580265e-05,
      "loss": 0.0483,
      "step": 1290
    },
    {
      "epoch": 4.6229361892012495,
      "grad_norm": 2.8291921615600586,
      "learning_rate": 1.5162604839467265e-05,
      "loss": 0.0644,
      "step": 1295
    },
    {
      "epoch": 4.64078536367693,
      "grad_norm": 2.5296852588653564,
      "learning_rate": 1.4791886517382413e-05,
      "loss": 0.0581,
      "step": 1300
    },
    {
      "epoch": 4.658634538152611,
      "grad_norm": 1.3932641744613647,
      "learning_rate": 1.4424968120263504e-05,
      "loss": 0.0569,
      "step": 1305
    },
    {
      "epoch": 4.676483712628291,
      "grad_norm": 1.6407183408737183,
      "learning_rate": 1.4061889248709343e-05,
      "loss": 0.0645,
      "step": 1310
    },
    {
      "epoch": 4.694332887103972,
      "grad_norm": 2.565559148788452,
      "learning_rate": 1.370268908892825e-05,
      "loss": 0.0588,
      "step": 1315
    },
    {
      "epoch": 4.712182061579652,
      "grad_norm": 2.400225877761841,
      "learning_rate": 1.3347406408508695e-05,
      "loss": 0.078,
      "step": 1320
    },
    {
      "epoch": 4.730031236055332,
      "grad_norm": 3.091597318649292,
      "learning_rate": 1.2996079552235263e-05,
      "loss": 0.0948,
      "step": 1325
    },
    {
      "epoch": 4.747880410531013,
      "grad_norm": 2.0770254135131836,
      "learning_rate": 1.264874643795021e-05,
      "loss": 0.0351,
      "step": 1330
    },
    {
      "epoch": 4.747880410531013,
      "eval_loss": 2.2220773696899414,
      "eval_runtime": 17.7459,
      "eval_samples_per_second": 2.592,
      "eval_steps_per_second": 2.592,
      "step": 1330
    },
    {
      "epoch": 4.765729585006693,
      "grad_norm": 1.403196930885315,
      "learning_rate": 1.230544455246101e-05,
      "loss": 0.031,
      "step": 1335
    },
    {
      "epoch": 4.783578759482374,
      "grad_norm": 2.3339104652404785,
      "learning_rate": 1.1966210947494583e-05,
      "loss": 0.0584,
      "step": 1340
    },
    {
      "epoch": 4.801427933958054,
      "grad_norm": 2.0965840816497803,
      "learning_rate": 1.1631082235698316e-05,
      "loss": 0.0633,
      "step": 1345
    },
    {
      "epoch": 4.8192771084337345,
      "grad_norm": 1.8118559122085571,
      "learning_rate": 1.130009458668863e-05,
      "loss": 0.0485,
      "step": 1350
    },
    {
      "epoch": 4.8371262829094155,
      "grad_norm": 1.4843353033065796,
      "learning_rate": 1.097328372314721e-05,
      "loss": 0.0271,
      "step": 1355
    },
    {
      "epoch": 4.854975457385096,
      "grad_norm": 2.7621233463287354,
      "learning_rate": 1.0650684916965559e-05,
      "loss": 0.0559,
      "step": 1360
    },
    {
      "epoch": 4.872824631860777,
      "grad_norm": 0.8147066831588745,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 0.0582,
      "step": 1365
    },
    {
      "epoch": 4.890673806336457,
      "grad_norm": 2.686469316482544,
      "learning_rate": 1.0018262287505086e-05,
      "loss": 0.0965,
      "step": 1370
    },
    {
      "epoch": 4.908522980812138,
      "grad_norm": 1.0777071714401245,
      "learning_rate": 9.708506720042932e-06,
      "loss": 0.0565,
      "step": 1375
    },
    {
      "epoch": 4.926372155287818,
      "grad_norm": 3.4182119369506836,
      "learning_rate": 9.403099714207175e-06,
      "loss": 0.0542,
      "step": 1380
    },
    {
      "epoch": 4.944221329763499,
      "grad_norm": 1.8600770235061646,
      "learning_rate": 9.102074231823727e-06,
      "loss": 0.0856,
      "step": 1385
    },
    {
      "epoch": 4.962070504239179,
      "grad_norm": 2.112198829650879,
      "learning_rate": 8.805462761831418e-06,
      "loss": 0.0524,
      "step": 1390
    },
    {
      "epoch": 4.979919678714859,
      "grad_norm": 1.6986050605773926,
      "learning_rate": 8.513297316775625e-06,
      "loss": 0.0641,
      "step": 1395
    },
    {
      "epoch": 4.99776885319054,
      "grad_norm": 1.5771281719207764,
      "learning_rate": 8.225609429353187e-06,
      "loss": 0.0644,
      "step": 1400
    },
    {
      "epoch": 4.99776885319054,
      "eval_loss": 2.2804083824157715,
      "eval_runtime": 17.7438,
      "eval_samples_per_second": 2.592,
      "eval_steps_per_second": 2.592,
      "step": 1400
    },
    {
      "epoch": 5.01561802766622,
      "grad_norm": 0.8998332023620605,
      "learning_rate": 7.942430149009161e-06,
      "loss": 0.0227,
      "step": 1405
    },
    {
      "epoch": 5.033467202141901,
      "grad_norm": 0.6817569136619568,
      "learning_rate": 7.663790038585793e-06,
      "loss": 0.0175,
      "step": 1410
    },
    {
      "epoch": 5.0513163766175815,
      "grad_norm": 0.35046374797821045,
      "learning_rate": 7.389719171023857e-06,
      "loss": 0.0161,
      "step": 1415
    },
    {
      "epoch": 5.069165551093262,
      "grad_norm": 0.34632906317710876,
      "learning_rate": 7.1202471261170245e-06,
      "loss": 0.0268,
      "step": 1420
    },
    {
      "epoch": 5.087014725568943,
      "grad_norm": 0.5170720219612122,
      "learning_rate": 6.855402987319348e-06,
      "loss": 0.0089,
      "step": 1425
    },
    {
      "epoch": 5.104863900044623,
      "grad_norm": 0.4268277585506439,
      "learning_rate": 6.595215338606397e-06,
      "loss": 0.0178,
      "step": 1430
    },
    {
      "epoch": 5.122713074520304,
      "grad_norm": 0.7124648094177246,
      "learning_rate": 6.339712261390213e-06,
      "loss": 0.0132,
      "step": 1435
    },
    {
      "epoch": 5.140562248995984,
      "grad_norm": 0.5214135050773621,
      "learning_rate": 6.088921331488568e-06,
      "loss": 0.0173,
      "step": 1440
    },
    {
      "epoch": 5.158411423471664,
      "grad_norm": 0.3924752473831177,
      "learning_rate": 5.8428696161488215e-06,
      "loss": 0.0064,
      "step": 1445
    },
    {
      "epoch": 5.176260597947345,
      "grad_norm": 0.33278706669807434,
      "learning_rate": 5.601583671126531e-06,
      "loss": 0.021,
      "step": 1450
    },
    {
      "epoch": 5.194109772423025,
      "grad_norm": 1.2323592901229858,
      "learning_rate": 5.365089537819434e-06,
      "loss": 0.0508,
      "step": 1455
    },
    {
      "epoch": 5.211958946898706,
      "grad_norm": 0.3533659875392914,
      "learning_rate": 5.133412740456806e-06,
      "loss": 0.0196,
      "step": 1460
    },
    {
      "epoch": 5.229808121374386,
      "grad_norm": 0.837640643119812,
      "learning_rate": 4.906578283344759e-06,
      "loss": 0.0109,
      "step": 1465
    },
    {
      "epoch": 5.247657295850067,
      "grad_norm": 0.5542824268341064,
      "learning_rate": 4.684610648167503e-06,
      "loss": 0.0257,
      "step": 1470
    },
    {
      "epoch": 5.247657295850067,
      "eval_loss": 2.559340238571167,
      "eval_runtime": 17.7441,
      "eval_samples_per_second": 2.592,
      "eval_steps_per_second": 2.592,
      "step": 1470
    },
    {
      "epoch": 5.2655064703257475,
      "grad_norm": 1.8598344326019287,
      "learning_rate": 4.467533791345191e-06,
      "loss": 0.0159,
      "step": 1475
    },
    {
      "epoch": 5.283355644801428,
      "grad_norm": 0.721049427986145,
      "learning_rate": 4.255371141448272e-06,
      "loss": 0.0117,
      "step": 1480
    },
    {
      "epoch": 5.301204819277109,
      "grad_norm": 0.5821325778961182,
      "learning_rate": 4.048145596668967e-06,
      "loss": 0.0092,
      "step": 1485
    },
    {
      "epoch": 5.319053993752789,
      "grad_norm": 1.502493977546692,
      "learning_rate": 3.84587952234991e-06,
      "loss": 0.0214,
      "step": 1490
    },
    {
      "epoch": 5.33690316822847,
      "grad_norm": 2.6551506519317627,
      "learning_rate": 3.6485947485702832e-06,
      "loss": 0.0087,
      "step": 1495
    },
    {
      "epoch": 5.35475234270415,
      "grad_norm": 0.7094867825508118,
      "learning_rate": 3.4563125677897932e-06,
      "loss": 0.0193,
      "step": 1500
    },
    {
      "epoch": 5.37260151717983,
      "grad_norm": 0.48095235228538513,
      "learning_rate": 3.269053732550581e-06,
      "loss": 0.0181,
      "step": 1505
    },
    {
      "epoch": 5.390450691655511,
      "grad_norm": 1.0630472898483276,
      "learning_rate": 3.086838453237506e-06,
      "loss": 0.0184,
      "step": 1510
    },
    {
      "epoch": 5.408299866131191,
      "grad_norm": 1.2398452758789062,
      "learning_rate": 2.9096863958968268e-06,
      "loss": 0.0083,
      "step": 1515
    },
    {
      "epoch": 5.426149040606872,
      "grad_norm": 1.2862337827682495,
      "learning_rate": 2.737616680113758e-06,
      "loss": 0.0129,
      "step": 1520
    },
    {
      "epoch": 5.443998215082552,
      "grad_norm": 1.1500790119171143,
      "learning_rate": 2.570647876948895e-06,
      "loss": 0.0176,
      "step": 1525
    },
    {
      "epoch": 5.461847389558233,
      "grad_norm": 1.017544150352478,
      "learning_rate": 2.408798006933882e-06,
      "loss": 0.026,
      "step": 1530
    },
    {
      "epoch": 5.4796965640339135,
      "grad_norm": 0.36417996883392334,
      "learning_rate": 2.252084538126542e-06,
      "loss": 0.0495,
      "step": 1535
    },
    {
      "epoch": 5.497545738509594,
      "grad_norm": 0.6736142039299011,
      "learning_rate": 2.100524384225555e-06,
      "loss": 0.0249,
      "step": 1540
    },
    {
      "epoch": 5.497545738509594,
      "eval_loss": 2.6220109462738037,
      "eval_runtime": 17.7422,
      "eval_samples_per_second": 2.593,
      "eval_steps_per_second": 2.593,
      "step": 1540
    },
    {
      "epoch": 5.515394912985275,
      "grad_norm": 0.5455936193466187,
      "learning_rate": 1.9541339027450256e-06,
      "loss": 0.0197,
      "step": 1545
    },
    {
      "epoch": 5.533244087460955,
      "grad_norm": 1.3337368965148926,
      "learning_rate": 1.8129288932490274e-06,
      "loss": 0.0154,
      "step": 1550
    },
    {
      "epoch": 5.551093261936636,
      "grad_norm": 0.9104143381118774,
      "learning_rate": 1.6769245956464396e-06,
      "loss": 0.0192,
      "step": 1555
    },
    {
      "epoch": 5.568942436412316,
      "grad_norm": 0.9945054650306702,
      "learning_rate": 1.5461356885461075e-06,
      "loss": 0.0271,
      "step": 1560
    },
    {
      "epoch": 5.586791610887996,
      "grad_norm": 1.1372507810592651,
      "learning_rate": 1.4205762876726092e-06,
      "loss": 0.0128,
      "step": 1565
    },
    {
      "epoch": 5.604640785363677,
      "grad_norm": 0.292233943939209,
      "learning_rate": 1.3002599443428243e-06,
      "loss": 0.0167,
      "step": 1570
    },
    {
      "epoch": 5.622489959839357,
      "grad_norm": 0.8667420148849487,
      "learning_rate": 1.1851996440033319e-06,
      "loss": 0.0196,
      "step": 1575
    },
    {
      "epoch": 5.640339134315038,
      "grad_norm": 0.6354473233222961,
      "learning_rate": 1.0754078048289374e-06,
      "loss": 0.0141,
      "step": 1580
    },
    {
      "epoch": 5.658188308790718,
      "grad_norm": 1.5247339010238647,
      "learning_rate": 9.708962763824048e-07,
      "loss": 0.0289,
      "step": 1585
    },
    {
      "epoch": 5.6760374832663985,
      "grad_norm": 1.2465256452560425,
      "learning_rate": 8.716763383355864e-07,
      "loss": 0.0161,
      "step": 1590
    },
    {
      "epoch": 5.6938866577420795,
      "grad_norm": 1.1474500894546509,
      "learning_rate": 7.777586992519959e-07,
      "loss": 0.0133,
      "step": 1595
    },
    {
      "epoch": 5.71173583221776,
      "grad_norm": 1.2113944292068481,
      "learning_rate": 6.891534954310885e-07,
      "loss": 0.0272,
      "step": 1600
    },
    {
      "epoch": 5.729585006693441,
      "grad_norm": 0.8237090706825256,
      "learning_rate": 6.058702898142643e-07,
      "loss": 0.0084,
      "step": 1605
    },
    {
      "epoch": 5.747434181169121,
      "grad_norm": 1.4685379266738892,
      "learning_rate": 5.279180709527765e-07,
      "loss": 0.0238,
      "step": 1610
    },
    {
      "epoch": 5.747434181169121,
      "eval_loss": 2.618908643722534,
      "eval_runtime": 17.7484,
      "eval_samples_per_second": 2.592,
      "eval_steps_per_second": 2.592,
      "step": 1610
    },
    {
      "epoch": 5.765283355644802,
      "grad_norm": 1.3691818714141846,
      "learning_rate": 4.553052520375911e-07,
      "loss": 0.0246,
      "step": 1615
    },
    {
      "epoch": 5.783132530120482,
      "grad_norm": 0.16103225946426392,
      "learning_rate": 3.8803966999139684e-07,
      "loss": 0.0098,
      "step": 1620
    },
    {
      "epoch": 5.800981704596163,
      "grad_norm": 1.2005606889724731,
      "learning_rate": 3.261285846227868e-07,
      "loss": 0.0179,
      "step": 1625
    },
    {
      "epoch": 5.818830879071843,
      "grad_norm": 0.3046216070652008,
      "learning_rate": 2.6957867784270787e-07,
      "loss": 0.0139,
      "step": 1630
    },
    {
      "epoch": 5.836680053547523,
      "grad_norm": 0.48873192071914673,
      "learning_rate": 2.1839605294330933e-07,
      "loss": 0.0117,
      "step": 1635
    },
    {
      "epoch": 5.854529228023204,
      "grad_norm": 0.46313759684562683,
      "learning_rate": 1.725862339392259e-07,
      "loss": 0.0191,
      "step": 1640
    },
    {
      "epoch": 5.872378402498884,
      "grad_norm": 0.34420424699783325,
      "learning_rate": 1.3215416497138754e-07,
      "loss": 0.0147,
      "step": 1645
    },
    {
      "epoch": 5.890227576974565,
      "grad_norm": 0.7899921536445618,
      "learning_rate": 9.710420977340762e-08,
      "loss": 0.014,
      "step": 1650
    },
    {
      "epoch": 5.9080767514502455,
      "grad_norm": 0.9719728827476501,
      "learning_rate": 6.744015120061509e-08,
      "loss": 0.0146,
      "step": 1655
    },
    {
      "epoch": 5.925925925925926,
      "grad_norm": 0.30600279569625854,
      "learning_rate": 4.316519082179227e-08,
      "loss": 0.0223,
      "step": 1660
    },
    {
      "epoch": 5.943775100401607,
      "grad_norm": 0.7548239231109619,
      "learning_rate": 2.4281948573617874e-08,
      "loss": 0.0112,
      "step": 1665
    },
    {
      "epoch": 5.961624274877287,
      "grad_norm": 0.9639114141464233,
      "learning_rate": 1.0792462477909882e-08,
      "loss": 0.0273,
      "step": 1670
    },
    {
      "epoch": 5.979473449352968,
      "grad_norm": 2.499755382537842,
      "learning_rate": 2.6981884216847884e-09,
      "loss": 0.0282,
      "step": 1675
    },
    {
      "epoch": 5.997322623828648,
      "grad_norm": 2.1217262744903564,
      "learning_rate": 0.0,
      "loss": 0.0262,
      "step": 1680
    },
    {
      "epoch": 5.997322623828648,
      "eval_loss": 2.630276679992676,
      "eval_runtime": 17.7504,
      "eval_samples_per_second": 2.591,
      "eval_steps_per_second": 2.591,
      "step": 1680
    },
    {
      "epoch": 5.997322623828648,
      "step": 1680,
      "total_flos": 1.7687758754493235e+18,
      "train_loss": 0.5969825589208908,
      "train_runtime": 20472.2071,
      "train_samples_per_second": 1.314,
      "train_steps_per_second": 0.082
    }
  ],
  "logging_steps": 5,
  "max_steps": 1680,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 70,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.7687758754493235e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
} | |