{
"best_metric": 5.759253978729248,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.05201222287237501,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.000260061114361875,
"grad_norm": 4.700757026672363,
"learning_rate": 5e-06,
"loss": 6.7348,
"step": 1
},
{
"epoch": 0.000260061114361875,
"eval_loss": 6.605810642242432,
"eval_runtime": 49.1131,
"eval_samples_per_second": 131.879,
"eval_steps_per_second": 65.95,
"step": 1
},
{
"epoch": 0.00052012222872375,
"grad_norm": 4.639775276184082,
"learning_rate": 1e-05,
"loss": 6.8599,
"step": 2
},
{
"epoch": 0.0007801833430856252,
"grad_norm": 5.271045684814453,
"learning_rate": 1.5e-05,
"loss": 6.6529,
"step": 3
},
{
"epoch": 0.0010402444574475,
"grad_norm": 5.043214797973633,
"learning_rate": 2e-05,
"loss": 6.4242,
"step": 4
},
{
"epoch": 0.001300305571809375,
"grad_norm": 4.489984512329102,
"learning_rate": 2.5e-05,
"loss": 6.4993,
"step": 5
},
{
"epoch": 0.0015603666861712503,
"grad_norm": 4.286848545074463,
"learning_rate": 3e-05,
"loss": 6.7653,
"step": 6
},
{
"epoch": 0.0018204278005331254,
"grad_norm": 4.734979152679443,
"learning_rate": 3.5e-05,
"loss": 6.7299,
"step": 7
},
{
"epoch": 0.002080488914895,
"grad_norm": 4.462507724761963,
"learning_rate": 4e-05,
"loss": 6.3463,
"step": 8
},
{
"epoch": 0.0023405500292568754,
"grad_norm": 3.9474422931671143,
"learning_rate": 4.5e-05,
"loss": 6.5243,
"step": 9
},
{
"epoch": 0.00260061114361875,
"grad_norm": 4.359175682067871,
"learning_rate": 5e-05,
"loss": 6.7619,
"step": 10
},
{
"epoch": 0.0028606722579806254,
"grad_norm": 3.6640963554382324,
"learning_rate": 5.500000000000001e-05,
"loss": 6.4133,
"step": 11
},
{
"epoch": 0.0031207333723425007,
"grad_norm": 3.34222412109375,
"learning_rate": 6e-05,
"loss": 6.0977,
"step": 12
},
{
"epoch": 0.0033807944867043755,
"grad_norm": 3.309803009033203,
"learning_rate": 6.500000000000001e-05,
"loss": 6.1704,
"step": 13
},
{
"epoch": 0.0036408556010662507,
"grad_norm": 3.5255839824676514,
"learning_rate": 7e-05,
"loss": 6.4846,
"step": 14
},
{
"epoch": 0.0039009167154281255,
"grad_norm": 2.9046642780303955,
"learning_rate": 7.500000000000001e-05,
"loss": 6.4176,
"step": 15
},
{
"epoch": 0.00416097782979,
"grad_norm": 3.0866973400115967,
"learning_rate": 8e-05,
"loss": 5.9843,
"step": 16
},
{
"epoch": 0.004421038944151876,
"grad_norm": 2.954580545425415,
"learning_rate": 8.5e-05,
"loss": 6.2319,
"step": 17
},
{
"epoch": 0.004681100058513751,
"grad_norm": 2.644932746887207,
"learning_rate": 9e-05,
"loss": 6.5849,
"step": 18
},
{
"epoch": 0.004941161172875626,
"grad_norm": 2.613598346710205,
"learning_rate": 9.5e-05,
"loss": 6.1687,
"step": 19
},
{
"epoch": 0.0052012222872375,
"grad_norm": 2.6821439266204834,
"learning_rate": 0.0001,
"loss": 6.3548,
"step": 20
},
{
"epoch": 0.005461283401599376,
"grad_norm": 2.6434664726257324,
"learning_rate": 9.999238475781957e-05,
"loss": 6.1017,
"step": 21
},
{
"epoch": 0.005721344515961251,
"grad_norm": 2.6511456966400146,
"learning_rate": 9.99695413509548e-05,
"loss": 6.0292,
"step": 22
},
{
"epoch": 0.005981405630323126,
"grad_norm": 2.6858441829681396,
"learning_rate": 9.99314767377287e-05,
"loss": 6.1429,
"step": 23
},
{
"epoch": 0.006241466744685001,
"grad_norm": 2.4335083961486816,
"learning_rate": 9.987820251299122e-05,
"loss": 5.9954,
"step": 24
},
{
"epoch": 0.006501527859046876,
"grad_norm": 2.553853988647461,
"learning_rate": 9.980973490458728e-05,
"loss": 6.2822,
"step": 25
},
{
"epoch": 0.006761588973408751,
"grad_norm": 2.6574552059173584,
"learning_rate": 9.972609476841367e-05,
"loss": 6.0658,
"step": 26
},
{
"epoch": 0.007021650087770626,
"grad_norm": 2.649275779724121,
"learning_rate": 9.962730758206611e-05,
"loss": 6.1537,
"step": 27
},
{
"epoch": 0.0072817112021325014,
"grad_norm": 2.73947811126709,
"learning_rate": 9.951340343707852e-05,
"loss": 6.2567,
"step": 28
},
{
"epoch": 0.007541772316494376,
"grad_norm": 2.5566582679748535,
"learning_rate": 9.938441702975689e-05,
"loss": 6.0508,
"step": 29
},
{
"epoch": 0.007801833430856251,
"grad_norm": 2.3408713340759277,
"learning_rate": 9.924038765061042e-05,
"loss": 6.1741,
"step": 30
},
{
"epoch": 0.008061894545218126,
"grad_norm": 2.44429087638855,
"learning_rate": 9.908135917238321e-05,
"loss": 6.0916,
"step": 31
},
{
"epoch": 0.00832195565958,
"grad_norm": 2.625361442565918,
"learning_rate": 9.890738003669029e-05,
"loss": 5.9718,
"step": 32
},
{
"epoch": 0.008582016773941877,
"grad_norm": 2.38539457321167,
"learning_rate": 9.871850323926177e-05,
"loss": 6.0312,
"step": 33
},
{
"epoch": 0.008842077888303752,
"grad_norm": 2.4743857383728027,
"learning_rate": 9.851478631379982e-05,
"loss": 6.0379,
"step": 34
},
{
"epoch": 0.009102139002665627,
"grad_norm": 2.776047468185425,
"learning_rate": 9.829629131445342e-05,
"loss": 6.1597,
"step": 35
},
{
"epoch": 0.009362200117027502,
"grad_norm": 2.8223772048950195,
"learning_rate": 9.806308479691595e-05,
"loss": 6.0539,
"step": 36
},
{
"epoch": 0.009622261231389376,
"grad_norm": 2.7097480297088623,
"learning_rate": 9.781523779815179e-05,
"loss": 5.7932,
"step": 37
},
{
"epoch": 0.009882322345751251,
"grad_norm": 2.4470372200012207,
"learning_rate": 9.755282581475769e-05,
"loss": 6.0691,
"step": 38
},
{
"epoch": 0.010142383460113126,
"grad_norm": 2.8650152683258057,
"learning_rate": 9.727592877996585e-05,
"loss": 6.1383,
"step": 39
},
{
"epoch": 0.010402444574475,
"grad_norm": 2.1736435890197754,
"learning_rate": 9.698463103929542e-05,
"loss": 5.8534,
"step": 40
},
{
"epoch": 0.010662505688836877,
"grad_norm": 2.3826494216918945,
"learning_rate": 9.667902132486009e-05,
"loss": 5.8372,
"step": 41
},
{
"epoch": 0.010922566803198752,
"grad_norm": 2.3414485454559326,
"learning_rate": 9.635919272833938e-05,
"loss": 5.9025,
"step": 42
},
{
"epoch": 0.011182627917560627,
"grad_norm": 3.2391109466552734,
"learning_rate": 9.602524267262203e-05,
"loss": 6.2408,
"step": 43
},
{
"epoch": 0.011442689031922502,
"grad_norm": 2.560239791870117,
"learning_rate": 9.567727288213005e-05,
"loss": 5.844,
"step": 44
},
{
"epoch": 0.011702750146284377,
"grad_norm": 4.429433345794678,
"learning_rate": 9.53153893518325e-05,
"loss": 5.9313,
"step": 45
},
{
"epoch": 0.011962811260646251,
"grad_norm": 5.025948524475098,
"learning_rate": 9.493970231495835e-05,
"loss": 6.1156,
"step": 46
},
{
"epoch": 0.012222872375008126,
"grad_norm": 4.9493727684021,
"learning_rate": 9.45503262094184e-05,
"loss": 6.1575,
"step": 47
},
{
"epoch": 0.012482933489370003,
"grad_norm": 2.9699819087982178,
"learning_rate": 9.414737964294636e-05,
"loss": 5.8556,
"step": 48
},
{
"epoch": 0.012742994603731878,
"grad_norm": 2.35396409034729,
"learning_rate": 9.373098535696979e-05,
"loss": 5.7382,
"step": 49
},
{
"epoch": 0.013003055718093752,
"grad_norm": 1.974317193031311,
"learning_rate": 9.330127018922194e-05,
"loss": 6.0416,
"step": 50
},
{
"epoch": 0.013003055718093752,
"eval_loss": 5.998004913330078,
"eval_runtime": 48.629,
"eval_samples_per_second": 133.192,
"eval_steps_per_second": 66.606,
"step": 50
},
{
"epoch": 0.013263116832455627,
"grad_norm": 4.363042831420898,
"learning_rate": 9.285836503510562e-05,
"loss": 5.9608,
"step": 51
},
{
"epoch": 0.013523177946817502,
"grad_norm": 4.278781414031982,
"learning_rate": 9.24024048078213e-05,
"loss": 6.3781,
"step": 52
},
{
"epoch": 0.013783239061179377,
"grad_norm": 3.778721332550049,
"learning_rate": 9.193352839727121e-05,
"loss": 6.2402,
"step": 53
},
{
"epoch": 0.014043300175541252,
"grad_norm": 3.498566150665283,
"learning_rate": 9.145187862775209e-05,
"loss": 5.8672,
"step": 54
},
{
"epoch": 0.014303361289903128,
"grad_norm": 2.5575318336486816,
"learning_rate": 9.09576022144496e-05,
"loss": 6.2251,
"step": 55
},
{
"epoch": 0.014563422404265003,
"grad_norm": 2.7712318897247314,
"learning_rate": 9.045084971874738e-05,
"loss": 5.8533,
"step": 56
},
{
"epoch": 0.014823483518626878,
"grad_norm": 2.6722428798675537,
"learning_rate": 8.993177550236464e-05,
"loss": 6.2118,
"step": 57
},
{
"epoch": 0.015083544632988753,
"grad_norm": 2.6320745944976807,
"learning_rate": 8.940053768033609e-05,
"loss": 5.9229,
"step": 58
},
{
"epoch": 0.015343605747350627,
"grad_norm": 3.009500503540039,
"learning_rate": 8.885729807284856e-05,
"loss": 6.1045,
"step": 59
},
{
"epoch": 0.015603666861712502,
"grad_norm": 2.7028679847717285,
"learning_rate": 8.83022221559489e-05,
"loss": 5.9121,
"step": 60
},
{
"epoch": 0.01586372797607438,
"grad_norm": 2.4579215049743652,
"learning_rate": 8.773547901113862e-05,
"loss": 5.6692,
"step": 61
},
{
"epoch": 0.01612378909043625,
"grad_norm": 2.4235904216766357,
"learning_rate": 8.715724127386972e-05,
"loss": 6.0567,
"step": 62
},
{
"epoch": 0.016383850204798128,
"grad_norm": 2.282179594039917,
"learning_rate": 8.656768508095853e-05,
"loss": 5.9487,
"step": 63
},
{
"epoch": 0.01664391131916,
"grad_norm": 2.4681618213653564,
"learning_rate": 8.596699001693255e-05,
"loss": 5.8193,
"step": 64
},
{
"epoch": 0.016903972433521878,
"grad_norm": 2.345444440841675,
"learning_rate": 8.535533905932738e-05,
"loss": 5.7292,
"step": 65
},
{
"epoch": 0.017164033547883754,
"grad_norm": 2.1743881702423096,
"learning_rate": 8.473291852294987e-05,
"loss": 5.7868,
"step": 66
},
{
"epoch": 0.017424094662245627,
"grad_norm": 2.0475363731384277,
"learning_rate": 8.409991800312493e-05,
"loss": 5.8631,
"step": 67
},
{
"epoch": 0.017684155776607504,
"grad_norm": 2.0247652530670166,
"learning_rate": 8.345653031794292e-05,
"loss": 5.9889,
"step": 68
},
{
"epoch": 0.017944216890969377,
"grad_norm": 2.2418599128723145,
"learning_rate": 8.280295144952536e-05,
"loss": 6.0085,
"step": 69
},
{
"epoch": 0.018204278005331254,
"grad_norm": 2.2492949962615967,
"learning_rate": 8.213938048432697e-05,
"loss": 5.8585,
"step": 70
},
{
"epoch": 0.018464339119693127,
"grad_norm": 2.267843008041382,
"learning_rate": 8.146601955249188e-05,
"loss": 5.6249,
"step": 71
},
{
"epoch": 0.018724400234055003,
"grad_norm": 2.3722450733184814,
"learning_rate": 8.07830737662829e-05,
"loss": 5.8605,
"step": 72
},
{
"epoch": 0.018984461348416876,
"grad_norm": 2.5645058155059814,
"learning_rate": 8.009075115760243e-05,
"loss": 5.6836,
"step": 73
},
{
"epoch": 0.019244522462778753,
"grad_norm": 1.8278781175613403,
"learning_rate": 7.938926261462366e-05,
"loss": 5.7605,
"step": 74
},
{
"epoch": 0.01950458357714063,
"grad_norm": 1.9225795269012451,
"learning_rate": 7.86788218175523e-05,
"loss": 6.0398,
"step": 75
},
{
"epoch": 0.019764644691502502,
"grad_norm": 2.0590267181396484,
"learning_rate": 7.795964517353735e-05,
"loss": 5.787,
"step": 76
},
{
"epoch": 0.02002470580586438,
"grad_norm": 2.1128058433532715,
"learning_rate": 7.723195175075136e-05,
"loss": 5.5382,
"step": 77
},
{
"epoch": 0.020284766920226252,
"grad_norm": 1.947939157485962,
"learning_rate": 7.649596321166024e-05,
"loss": 5.8782,
"step": 78
},
{
"epoch": 0.02054482803458813,
"grad_norm": 2.0571346282958984,
"learning_rate": 7.575190374550272e-05,
"loss": 6.0179,
"step": 79
},
{
"epoch": 0.02080488914895,
"grad_norm": 2.2099356651306152,
"learning_rate": 7.500000000000001e-05,
"loss": 5.6631,
"step": 80
},
{
"epoch": 0.021064950263311878,
"grad_norm": 2.370307207107544,
"learning_rate": 7.424048101231686e-05,
"loss": 5.5898,
"step": 81
},
{
"epoch": 0.021325011377673755,
"grad_norm": 2.1935746669769287,
"learning_rate": 7.347357813929454e-05,
"loss": 5.9512,
"step": 82
},
{
"epoch": 0.021585072492035628,
"grad_norm": 2.0591251850128174,
"learning_rate": 7.269952498697734e-05,
"loss": 5.8631,
"step": 83
},
{
"epoch": 0.021845133606397504,
"grad_norm": 2.2366886138916016,
"learning_rate": 7.191855733945387e-05,
"loss": 5.7982,
"step": 84
},
{
"epoch": 0.022105194720759377,
"grad_norm": 2.359128475189209,
"learning_rate": 7.113091308703498e-05,
"loss": 5.8195,
"step": 85
},
{
"epoch": 0.022365255835121254,
"grad_norm": 2.3458919525146484,
"learning_rate": 7.033683215379002e-05,
"loss": 5.8264,
"step": 86
},
{
"epoch": 0.022625316949483127,
"grad_norm": 2.4972236156463623,
"learning_rate": 6.953655642446368e-05,
"loss": 5.7646,
"step": 87
},
{
"epoch": 0.022885378063845004,
"grad_norm": 2.109933853149414,
"learning_rate": 6.873032967079561e-05,
"loss": 5.9329,
"step": 88
},
{
"epoch": 0.02314543917820688,
"grad_norm": 2.2802915573120117,
"learning_rate": 6.7918397477265e-05,
"loss": 5.6465,
"step": 89
},
{
"epoch": 0.023405500292568753,
"grad_norm": 2.1097252368927,
"learning_rate": 6.710100716628344e-05,
"loss": 5.9014,
"step": 90
},
{
"epoch": 0.02366556140693063,
"grad_norm": 2.1285314559936523,
"learning_rate": 6.627840772285784e-05,
"loss": 5.8136,
"step": 91
},
{
"epoch": 0.023925622521292503,
"grad_norm": 2.2824885845184326,
"learning_rate": 6.545084971874738e-05,
"loss": 5.786,
"step": 92
},
{
"epoch": 0.02418568363565438,
"grad_norm": 2.440842866897583,
"learning_rate": 6.461858523613684e-05,
"loss": 5.747,
"step": 93
},
{
"epoch": 0.024445744750016252,
"grad_norm": 2.4772531986236572,
"learning_rate": 6.378186779084995e-05,
"loss": 5.7615,
"step": 94
},
{
"epoch": 0.02470580586437813,
"grad_norm": 1.9417319297790527,
"learning_rate": 6.294095225512603e-05,
"loss": 5.6267,
"step": 95
},
{
"epoch": 0.024965866978740005,
"grad_norm": 2.200648784637451,
"learning_rate": 6.209609477998338e-05,
"loss": 5.4595,
"step": 96
},
{
"epoch": 0.02522592809310188,
"grad_norm": 1.9711750745773315,
"learning_rate": 6.124755271719325e-05,
"loss": 5.8309,
"step": 97
},
{
"epoch": 0.025485989207463755,
"grad_norm": 2.5392403602600098,
"learning_rate": 6.0395584540887963e-05,
"loss": 5.8212,
"step": 98
},
{
"epoch": 0.025746050321825628,
"grad_norm": 1.87886643409729,
"learning_rate": 5.9540449768827246e-05,
"loss": 5.7186,
"step": 99
},
{
"epoch": 0.026006111436187505,
"grad_norm": 2.1092851161956787,
"learning_rate": 5.868240888334653e-05,
"loss": 5.8411,
"step": 100
},
{
"epoch": 0.026006111436187505,
"eval_loss": 5.840994834899902,
"eval_runtime": 48.6683,
"eval_samples_per_second": 133.085,
"eval_steps_per_second": 66.553,
"step": 100
},
{
"epoch": 0.026266172550549378,
"grad_norm": 2.57004714012146,
"learning_rate": 5.782172325201155e-05,
"loss": 5.9867,
"step": 101
},
{
"epoch": 0.026526233664911254,
"grad_norm": 2.1859915256500244,
"learning_rate": 5.695865504800327e-05,
"loss": 6.0568,
"step": 102
},
{
"epoch": 0.02678629477927313,
"grad_norm": 2.2751128673553467,
"learning_rate": 5.6093467170257374e-05,
"loss": 5.8706,
"step": 103
},
{
"epoch": 0.027046355893635004,
"grad_norm": 2.1009652614593506,
"learning_rate": 5.522642316338268e-05,
"loss": 5.9817,
"step": 104
},
{
"epoch": 0.02730641700799688,
"grad_norm": 2.266655445098877,
"learning_rate": 5.435778713738292e-05,
"loss": 5.6929,
"step": 105
},
{
"epoch": 0.027566478122358753,
"grad_norm": 1.9885139465332031,
"learning_rate": 5.348782368720626e-05,
"loss": 5.978,
"step": 106
},
{
"epoch": 0.02782653923672063,
"grad_norm": 2.028961420059204,
"learning_rate": 5.26167978121472e-05,
"loss": 5.8351,
"step": 107
},
{
"epoch": 0.028086600351082503,
"grad_norm": 1.9886070489883423,
"learning_rate": 5.174497483512506e-05,
"loss": 5.8824,
"step": 108
},
{
"epoch": 0.02834666146544438,
"grad_norm": 2.343224048614502,
"learning_rate": 5.0872620321864185e-05,
"loss": 5.9582,
"step": 109
},
{
"epoch": 0.028606722579806256,
"grad_norm": 2.5639307498931885,
"learning_rate": 5e-05,
"loss": 5.8952,
"step": 110
},
{
"epoch": 0.02886678369416813,
"grad_norm": 2.0477116107940674,
"learning_rate": 4.912737967813583e-05,
"loss": 5.9346,
"step": 111
},
{
"epoch": 0.029126844808530006,
"grad_norm": 1.8866691589355469,
"learning_rate": 4.825502516487497e-05,
"loss": 6.0466,
"step": 112
},
{
"epoch": 0.02938690592289188,
"grad_norm": 1.9691741466522217,
"learning_rate": 4.738320218785281e-05,
"loss": 5.5029,
"step": 113
},
{
"epoch": 0.029646967037253755,
"grad_norm": 1.8502812385559082,
"learning_rate": 4.6512176312793736e-05,
"loss": 5.7583,
"step": 114
},
{
"epoch": 0.02990702815161563,
"grad_norm": 2.5639379024505615,
"learning_rate": 4.564221286261709e-05,
"loss": 5.3675,
"step": 115
},
{
"epoch": 0.030167089265977505,
"grad_norm": 1.9151495695114136,
"learning_rate": 4.477357683661734e-05,
"loss": 5.7421,
"step": 116
},
{
"epoch": 0.030427150380339378,
"grad_norm": 1.8344489336013794,
"learning_rate": 4.390653282974264e-05,
"loss": 5.871,
"step": 117
},
{
"epoch": 0.030687211494701255,
"grad_norm": 1.7897135019302368,
"learning_rate": 4.3041344951996746e-05,
"loss": 5.7288,
"step": 118
},
{
"epoch": 0.03094727260906313,
"grad_norm": 2.029815196990967,
"learning_rate": 4.2178276747988446e-05,
"loss": 5.9199,
"step": 119
},
{
"epoch": 0.031207333723425004,
"grad_norm": 1.9268794059753418,
"learning_rate": 4.131759111665349e-05,
"loss": 5.8271,
"step": 120
},
{
"epoch": 0.03146739483778688,
"grad_norm": 1.8820242881774902,
"learning_rate": 4.045955023117276e-05,
"loss": 5.7487,
"step": 121
},
{
"epoch": 0.03172745595214876,
"grad_norm": 1.9745794534683228,
"learning_rate": 3.960441545911204e-05,
"loss": 5.6484,
"step": 122
},
{
"epoch": 0.03198751706651063,
"grad_norm": 1.8738607168197632,
"learning_rate": 3.875244728280676e-05,
"loss": 5.9072,
"step": 123
},
{
"epoch": 0.0322475781808725,
"grad_norm": 2.0338311195373535,
"learning_rate": 3.790390522001662e-05,
"loss": 5.5293,
"step": 124
},
{
"epoch": 0.03250763929523438,
"grad_norm": 2.4185123443603516,
"learning_rate": 3.705904774487396e-05,
"loss": 5.8123,
"step": 125
},
{
"epoch": 0.032767700409596257,
"grad_norm": 2.2349812984466553,
"learning_rate": 3.6218132209150045e-05,
"loss": 5.9097,
"step": 126
},
{
"epoch": 0.03302776152395813,
"grad_norm": 2.378011703491211,
"learning_rate": 3.5381414763863166e-05,
"loss": 5.8261,
"step": 127
},
{
"epoch": 0.03328782263832,
"grad_norm": 2.0955443382263184,
"learning_rate": 3.4549150281252636e-05,
"loss": 5.8907,
"step": 128
},
{
"epoch": 0.03354788375268188,
"grad_norm": 2.049766778945923,
"learning_rate": 3.372159227714218e-05,
"loss": 5.6951,
"step": 129
},
{
"epoch": 0.033807944867043756,
"grad_norm": 2.211913585662842,
"learning_rate": 3.289899283371657e-05,
"loss": 5.6701,
"step": 130
},
{
"epoch": 0.03406800598140563,
"grad_norm": 2.1762237548828125,
"learning_rate": 3.2081602522734986e-05,
"loss": 5.8482,
"step": 131
},
{
"epoch": 0.03432806709576751,
"grad_norm": 1.9880789518356323,
"learning_rate": 3.12696703292044e-05,
"loss": 5.8196,
"step": 132
},
{
"epoch": 0.03458812821012938,
"grad_norm": 2.4828646183013916,
"learning_rate": 3.046344357553632e-05,
"loss": 5.5032,
"step": 133
},
{
"epoch": 0.034848189324491255,
"grad_norm": 2.043897867202759,
"learning_rate": 2.9663167846209998e-05,
"loss": 5.712,
"step": 134
},
{
"epoch": 0.03510825043885313,
"grad_norm": 2.0667715072631836,
"learning_rate": 2.886908691296504e-05,
"loss": 5.7623,
"step": 135
},
{
"epoch": 0.03536831155321501,
"grad_norm": 1.8965405225753784,
"learning_rate": 2.8081442660546125e-05,
"loss": 6.0675,
"step": 136
},
{
"epoch": 0.03562837266757688,
"grad_norm": 2.2518186569213867,
"learning_rate": 2.7300475013022663e-05,
"loss": 5.9103,
"step": 137
},
{
"epoch": 0.035888433781938754,
"grad_norm": 2.1538784503936768,
"learning_rate": 2.6526421860705473e-05,
"loss": 6.0621,
"step": 138
},
{
"epoch": 0.03614849489630063,
"grad_norm": 2.6064345836639404,
"learning_rate": 2.575951898768315e-05,
"loss": 5.2939,
"step": 139
},
{
"epoch": 0.03640855601066251,
"grad_norm": 2.093737840652466,
"learning_rate": 2.500000000000001e-05,
"loss": 5.6876,
"step": 140
},
{
"epoch": 0.03666861712502438,
"grad_norm": 1.951988697052002,
"learning_rate": 2.4248096254497288e-05,
"loss": 5.6047,
"step": 141
},
{
"epoch": 0.03692867823938625,
"grad_norm": 2.254817247390747,
"learning_rate": 2.350403678833976e-05,
"loss": 5.9507,
"step": 142
},
{
"epoch": 0.03718873935374813,
"grad_norm": 2.305563449859619,
"learning_rate": 2.2768048249248648e-05,
"loss": 6.1418,
"step": 143
},
{
"epoch": 0.037448800468110006,
"grad_norm": 2.025839328765869,
"learning_rate": 2.2040354826462668e-05,
"loss": 5.6341,
"step": 144
},
{
"epoch": 0.03770886158247188,
"grad_norm": 2.292506456375122,
"learning_rate": 2.132117818244771e-05,
"loss": 5.681,
"step": 145
},
{
"epoch": 0.03796892269683375,
"grad_norm": 2.450554132461548,
"learning_rate": 2.061073738537635e-05,
"loss": 6.2452,
"step": 146
},
{
"epoch": 0.03822898381119563,
"grad_norm": 1.7618736028671265,
"learning_rate": 1.9909248842397584e-05,
"loss": 5.5073,
"step": 147
},
{
"epoch": 0.038489044925557506,
"grad_norm": 1.966064453125,
"learning_rate": 1.9216926233717085e-05,
"loss": 5.7759,
"step": 148
},
{
"epoch": 0.03874910603991938,
"grad_norm": 2.008685350418091,
"learning_rate": 1.8533980447508137e-05,
"loss": 5.9435,
"step": 149
},
{
"epoch": 0.03900916715428126,
"grad_norm": 2.194380521774292,
"learning_rate": 1.7860619515673033e-05,
"loss": 6.1454,
"step": 150
},
{
"epoch": 0.03900916715428126,
"eval_loss": 5.7715630531311035,
"eval_runtime": 48.9592,
"eval_samples_per_second": 132.294,
"eval_steps_per_second": 66.157,
"step": 150
},
{
"epoch": 0.03926922826864313,
"grad_norm": 2.1486167907714844,
"learning_rate": 1.7197048550474643e-05,
"loss": 5.9314,
"step": 151
},
{
"epoch": 0.039529289383005005,
"grad_norm": 2.113476276397705,
"learning_rate": 1.6543469682057106e-05,
"loss": 5.6945,
"step": 152
},
{
"epoch": 0.03978935049736688,
"grad_norm": 2.3845126628875732,
"learning_rate": 1.5900081996875083e-05,
"loss": 6.0969,
"step": 153
},
{
"epoch": 0.04004941161172876,
"grad_norm": 2.2422235012054443,
"learning_rate": 1.526708147705013e-05,
"loss": 5.7678,
"step": 154
},
{
"epoch": 0.04030947272609063,
"grad_norm": 2.3688573837280273,
"learning_rate": 1.4644660940672627e-05,
"loss": 5.7291,
"step": 155
},
{
"epoch": 0.040569533840452504,
"grad_norm": 2.266603469848633,
"learning_rate": 1.4033009983067452e-05,
"loss": 6.0137,
"step": 156
},
{
"epoch": 0.040829594954814384,
"grad_norm": 2.143157482147217,
"learning_rate": 1.3432314919041478e-05,
"loss": 5.6486,
"step": 157
},
{
"epoch": 0.04108965606917626,
"grad_norm": 2.3287534713745117,
"learning_rate": 1.2842758726130283e-05,
"loss": 5.6967,
"step": 158
},
{
"epoch": 0.04134971718353813,
"grad_norm": 2.0474727153778076,
"learning_rate": 1.22645209888614e-05,
"loss": 5.9549,
"step": 159
},
{
"epoch": 0.0416097782979,
"grad_norm": 2.322507619857788,
"learning_rate": 1.1697777844051105e-05,
"loss": 5.6328,
"step": 160
},
{
"epoch": 0.04186983941226188,
"grad_norm": 2.4897751808166504,
"learning_rate": 1.1142701927151456e-05,
"loss": 5.7139,
"step": 161
},
{
"epoch": 0.042129900526623756,
"grad_norm": 1.8576676845550537,
"learning_rate": 1.0599462319663905e-05,
"loss": 6.0336,
"step": 162
},
{
"epoch": 0.04238996164098563,
"grad_norm": 2.2130608558654785,
"learning_rate": 1.006822449763537e-05,
"loss": 5.5478,
"step": 163
},
{
"epoch": 0.04265002275534751,
"grad_norm": 2.381807327270508,
"learning_rate": 9.549150281252633e-06,
"loss": 5.6933,
"step": 164
},
{
"epoch": 0.04291008386970938,
"grad_norm": 2.003005027770996,
"learning_rate": 9.042397785550405e-06,
"loss": 5.9695,
"step": 165
},
{
"epoch": 0.043170144984071256,
"grad_norm": 1.9043495655059814,
"learning_rate": 8.548121372247918e-06,
"loss": 5.8895,
"step": 166
},
{
"epoch": 0.04343020609843313,
"grad_norm": 1.9111559391021729,
"learning_rate": 8.066471602728803e-06,
"loss": 5.8929,
"step": 167
},
{
"epoch": 0.04369026721279501,
"grad_norm": 1.8936721086502075,
"learning_rate": 7.597595192178702e-06,
"loss": 5.9478,
"step": 168
},
{
"epoch": 0.04395032832715688,
"grad_norm": 2.007209300994873,
"learning_rate": 7.1416349648943894e-06,
"loss": 5.9119,
"step": 169
},
{
"epoch": 0.044210389441518755,
"grad_norm": 2.0411062240600586,
"learning_rate": 6.698729810778065e-06,
"loss": 5.9512,
"step": 170
},
{
"epoch": 0.044470450555880635,
"grad_norm": 1.808025598526001,
"learning_rate": 6.269014643030213e-06,
"loss": 5.82,
"step": 171
},
{
"epoch": 0.04473051167024251,
"grad_norm": 1.875134825706482,
"learning_rate": 5.852620357053651e-06,
"loss": 5.835,
"step": 172
},
{
"epoch": 0.04499057278460438,
"grad_norm": 1.7879668474197388,
"learning_rate": 5.449673790581611e-06,
"loss": 5.6259,
"step": 173
},
{
"epoch": 0.045250633898966254,
"grad_norm": 2.106720209121704,
"learning_rate": 5.060297685041659e-06,
"loss": 5.7261,
"step": 174
},
{
"epoch": 0.045510695013328134,
"grad_norm": 1.7821826934814453,
"learning_rate": 4.684610648167503e-06,
"loss": 5.6791,
"step": 175
},
{
"epoch": 0.04577075612769001,
"grad_norm": 1.8573359251022339,
"learning_rate": 4.322727117869951e-06,
"loss": 5.9206,
"step": 176
},
{
"epoch": 0.04603081724205188,
"grad_norm": 1.8888030052185059,
"learning_rate": 3.974757327377981e-06,
"loss": 5.6468,
"step": 177
},
{
"epoch": 0.04629087835641376,
"grad_norm": 1.8285032510757446,
"learning_rate": 3.6408072716606346e-06,
"loss": 5.5881,
"step": 178
},
{
"epoch": 0.04655093947077563,
"grad_norm": 1.9863262176513672,
"learning_rate": 3.3209786751399187e-06,
"loss": 5.722,
"step": 179
},
{
"epoch": 0.046811000585137506,
"grad_norm": 2.2431719303131104,
"learning_rate": 3.0153689607045845e-06,
"loss": 5.6575,
"step": 180
},
{
"epoch": 0.04707106169949938,
"grad_norm": 2.047182321548462,
"learning_rate": 2.724071220034158e-06,
"loss": 5.4315,
"step": 181
},
{
"epoch": 0.04733112281386126,
"grad_norm": 1.8060697317123413,
"learning_rate": 2.4471741852423237e-06,
"loss": 5.8507,
"step": 182
},
{
"epoch": 0.04759118392822313,
"grad_norm": 1.9971647262573242,
"learning_rate": 2.1847622018482283e-06,
"loss": 5.7017,
"step": 183
},
{
"epoch": 0.047851245042585006,
"grad_norm": 2.5897560119628906,
"learning_rate": 1.9369152030840556e-06,
"loss": 5.5973,
"step": 184
},
{
"epoch": 0.048111306156946886,
"grad_norm": 2.2017526626586914,
"learning_rate": 1.70370868554659e-06,
"loss": 5.8183,
"step": 185
},
{
"epoch": 0.04837136727130876,
"grad_norm": 2.3036229610443115,
"learning_rate": 1.4852136862001764e-06,
"loss": 5.7241,
"step": 186
},
{
"epoch": 0.04863142838567063,
"grad_norm": 2.3444440364837646,
"learning_rate": 1.2814967607382432e-06,
"loss": 5.7225,
"step": 187
},
{
"epoch": 0.048891489500032505,
"grad_norm": 2.1423869132995605,
"learning_rate": 1.0926199633097157e-06,
"loss": 5.7248,
"step": 188
},
{
"epoch": 0.049151550614394385,
"grad_norm": 2.1155991554260254,
"learning_rate": 9.186408276168013e-07,
"loss": 5.5723,
"step": 189
},
{
"epoch": 0.04941161172875626,
"grad_norm": 2.301825761795044,
"learning_rate": 7.596123493895991e-07,
"loss": 5.7194,
"step": 190
},
{
"epoch": 0.04967167284311813,
"grad_norm": 2.186354160308838,
"learning_rate": 6.15582970243117e-07,
"loss": 5.5791,
"step": 191
},
{
"epoch": 0.04993173395748001,
"grad_norm": 2.2333290576934814,
"learning_rate": 4.865965629214819e-07,
"loss": 5.763,
"step": 192
},
{
"epoch": 0.050191795071841884,
"grad_norm": 2.092721700668335,
"learning_rate": 3.7269241793390085e-07,
"loss": 5.76,
"step": 193
},
{
"epoch": 0.05045185618620376,
"grad_norm": 2.0245654582977295,
"learning_rate": 2.7390523158633554e-07,
"loss": 5.5846,
"step": 194
},
{
"epoch": 0.05071191730056563,
"grad_norm": 2.045283079147339,
"learning_rate": 1.9026509541272275e-07,
"loss": 5.6608,
"step": 195
},
{
"epoch": 0.05097197841492751,
"grad_norm": 2.104609727859497,
"learning_rate": 1.2179748700879012e-07,
"loss": 6.1454,
"step": 196
},
{
"epoch": 0.05123203952928938,
"grad_norm": 2.0057427883148193,
"learning_rate": 6.852326227130834e-08,
"loss": 5.6056,
"step": 197
},
{
"epoch": 0.051492100643651256,
"grad_norm": 1.982209324836731,
"learning_rate": 3.04586490452119e-08,
"loss": 5.5759,
"step": 198
},
{
"epoch": 0.051752161758013136,
"grad_norm": 2.1231882572174072,
"learning_rate": 7.615242180436522e-09,
"loss": 5.8412,
"step": 199
},
{
"epoch": 0.05201222287237501,
"grad_norm": 2.233738660812378,
"learning_rate": 0.0,
"loss": 6.0729,
"step": 200
},
{
"epoch": 0.05201222287237501,
"eval_loss": 5.759253978729248,
"eval_runtime": 49.0339,
"eval_samples_per_second": 132.092,
"eval_steps_per_second": 66.056,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 140636376268800.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}