{
"best_metric": 1.503947377204895,
"best_model_checkpoint": "miner_id_24/checkpoint-150",
"epoch": 3.009009009009009,
"eval_steps": 50,
"global_step": 167,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.018018018018018018,
"grad_norm": 0.20334728062152863,
"learning_rate": 1e-05,
"loss": 1.7238,
"step": 1
},
{
"epoch": 0.018018018018018018,
"eval_loss": 2.4445245265960693,
"eval_runtime": 1.9335,
"eval_samples_per_second": 48.617,
"eval_steps_per_second": 12.413,
"step": 1
},
{
"epoch": 0.036036036036036036,
"grad_norm": 0.2328772246837616,
"learning_rate": 2e-05,
"loss": 2.3157,
"step": 2
},
{
"epoch": 0.05405405405405406,
"grad_norm": 0.17818941175937653,
"learning_rate": 3e-05,
"loss": 1.8891,
"step": 3
},
{
"epoch": 0.07207207207207207,
"grad_norm": 0.18887093663215637,
"learning_rate": 4e-05,
"loss": 1.8112,
"step": 4
},
{
"epoch": 0.09009009009009009,
"grad_norm": 0.28661012649536133,
"learning_rate": 5e-05,
"loss": 2.5251,
"step": 5
},
{
"epoch": 0.10810810810810811,
"grad_norm": 0.5507867932319641,
"learning_rate": 6e-05,
"loss": 2.5171,
"step": 6
},
{
"epoch": 0.12612612612612611,
"grad_norm": 0.5678154230117798,
"learning_rate": 7e-05,
"loss": 2.6257,
"step": 7
},
{
"epoch": 0.14414414414414414,
"grad_norm": 0.507682204246521,
"learning_rate": 8e-05,
"loss": 2.5335,
"step": 8
},
{
"epoch": 0.16216216216216217,
"grad_norm": 0.3488699495792389,
"learning_rate": 9e-05,
"loss": 2.6651,
"step": 9
},
{
"epoch": 0.18018018018018017,
"grad_norm": 0.2555129826068878,
"learning_rate": 0.0001,
"loss": 1.9695,
"step": 10
},
{
"epoch": 0.1981981981981982,
"grad_norm": 0.2776542603969574,
"learning_rate": 9.998999018714263e-05,
"loss": 1.7186,
"step": 11
},
{
"epoch": 0.21621621621621623,
"grad_norm": 0.3397429883480072,
"learning_rate": 9.995996475642466e-05,
"loss": 2.1329,
"step": 12
},
{
"epoch": 0.23423423423423423,
"grad_norm": 0.5618317127227783,
"learning_rate": 9.990993572980378e-05,
"loss": 3.0633,
"step": 13
},
{
"epoch": 0.25225225225225223,
"grad_norm": 0.1377403289079666,
"learning_rate": 9.983992313852774e-05,
"loss": 1.7843,
"step": 14
},
{
"epoch": 0.2702702702702703,
"grad_norm": 0.15059250593185425,
"learning_rate": 9.974995501511404e-05,
"loss": 2.2153,
"step": 15
},
{
"epoch": 0.2882882882882883,
"grad_norm": 0.1395379900932312,
"learning_rate": 9.964006738212575e-05,
"loss": 1.8151,
"step": 16
},
{
"epoch": 0.3063063063063063,
"grad_norm": 0.17120715975761414,
"learning_rate": 9.951030423774859e-05,
"loss": 2.0037,
"step": 17
},
{
"epoch": 0.32432432432432434,
"grad_norm": 0.45357421040534973,
"learning_rate": 9.936071753817415e-05,
"loss": 2.1388,
"step": 18
},
{
"epoch": 0.34234234234234234,
"grad_norm": 0.4322102665901184,
"learning_rate": 9.919136717679722e-05,
"loss": 2.2311,
"step": 19
},
{
"epoch": 0.36036036036036034,
"grad_norm": 0.4717254638671875,
"learning_rate": 9.900232096023477e-05,
"loss": 2.157,
"step": 20
},
{
"epoch": 0.3783783783783784,
"grad_norm": 0.3566138744354248,
"learning_rate": 9.879365458117678e-05,
"loss": 2.1204,
"step": 21
},
{
"epoch": 0.3963963963963964,
"grad_norm": 0.26241886615753174,
"learning_rate": 9.856545158807938e-05,
"loss": 2.4735,
"step": 22
},
{
"epoch": 0.4144144144144144,
"grad_norm": 0.20926542580127716,
"learning_rate": 9.831780335171279e-05,
"loss": 1.7209,
"step": 23
},
{
"epoch": 0.43243243243243246,
"grad_norm": 0.31854745745658875,
"learning_rate": 9.805080902857699e-05,
"loss": 2.2578,
"step": 24
},
{
"epoch": 0.45045045045045046,
"grad_norm": 0.3495607376098633,
"learning_rate": 9.776457552120033e-05,
"loss": 2.2969,
"step": 25
},
{
"epoch": 0.46846846846846846,
"grad_norm": 0.6214109659194946,
"learning_rate": 9.745921743533653e-05,
"loss": 2.1911,
"step": 26
},
{
"epoch": 0.4864864864864865,
"grad_norm": 0.14884573221206665,
"learning_rate": 9.713485703407731e-05,
"loss": 1.6914,
"step": 27
},
{
"epoch": 0.5045045045045045,
"grad_norm": 0.2149585634469986,
"learning_rate": 9.679162418889931e-05,
"loss": 2.1283,
"step": 28
},
{
"epoch": 0.5225225225225225,
"grad_norm": 0.20466195046901703,
"learning_rate": 9.642965632766436e-05,
"loss": 1.8882,
"step": 29
},
{
"epoch": 0.5405405405405406,
"grad_norm": 0.22452042996883392,
"learning_rate": 9.604909837959455e-05,
"loss": 2.1373,
"step": 30
},
{
"epoch": 0.5585585585585585,
"grad_norm": 0.29129326343536377,
"learning_rate": 9.565010271724352e-05,
"loss": 1.6579,
"step": 31
},
{
"epoch": 0.5765765765765766,
"grad_norm": 0.4822368621826172,
"learning_rate": 9.523282909548773e-05,
"loss": 1.7155,
"step": 32
},
{
"epoch": 0.5945945945945946,
"grad_norm": 0.5174642205238342,
"learning_rate": 9.47974445875617e-05,
"loss": 1.6795,
"step": 33
},
{
"epoch": 0.6126126126126126,
"grad_norm": 0.5645754933357239,
"learning_rate": 9.434412351816328e-05,
"loss": 1.5836,
"step": 34
},
{
"epoch": 0.6306306306306306,
"grad_norm": 0.2343388795852661,
"learning_rate": 9.387304739365523e-05,
"loss": 2.0405,
"step": 35
},
{
"epoch": 0.6486486486486487,
"grad_norm": 0.2349058836698532,
"learning_rate": 9.338440482939146e-05,
"loss": 1.9695,
"step": 36
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.23872889578342438,
"learning_rate": 9.287839147419686e-05,
"loss": 1.8085,
"step": 37
},
{
"epoch": 0.6846846846846847,
"grad_norm": 0.37950703501701355,
"learning_rate": 9.2355209932031e-05,
"loss": 2.3962,
"step": 38
},
{
"epoch": 0.7027027027027027,
"grad_norm": 0.5464185476303101,
"learning_rate": 9.181506968086697e-05,
"loss": 2.802,
"step": 39
},
{
"epoch": 0.7207207207207207,
"grad_norm": 0.13629977405071259,
"learning_rate": 9.125818698881798e-05,
"loss": 1.6469,
"step": 40
},
{
"epoch": 0.7387387387387387,
"grad_norm": 0.2170867770910263,
"learning_rate": 9.068478482754532e-05,
"loss": 2.544,
"step": 41
},
{
"epoch": 0.7567567567567568,
"grad_norm": 0.19846679270267487,
"learning_rate": 9.0095092782982e-05,
"loss": 1.7291,
"step": 42
},
{
"epoch": 0.7747747747747747,
"grad_norm": 0.19683820009231567,
"learning_rate": 8.948934696340843e-05,
"loss": 1.8553,
"step": 43
},
{
"epoch": 0.7927927927927928,
"grad_norm": 2.0814313888549805,
"learning_rate": 8.886778990491631e-05,
"loss": 1.4285,
"step": 44
},
{
"epoch": 0.8108108108108109,
"grad_norm": 1.3856438398361206,
"learning_rate": 8.823067047429907e-05,
"loss": 1.3423,
"step": 45
},
{
"epoch": 0.8288288288288288,
"grad_norm": 0.75543612241745,
"learning_rate": 8.757824376940746e-05,
"loss": 1.167,
"step": 46
},
{
"epoch": 0.8468468468468469,
"grad_norm": 0.53647381067276,
"learning_rate": 8.691077101701024e-05,
"loss": 1.3325,
"step": 47
},
{
"epoch": 0.8648648648648649,
"grad_norm": 0.23286841809749603,
"learning_rate": 8.622851946820095e-05,
"loss": 2.2234,
"step": 48
},
{
"epoch": 0.8828828828828829,
"grad_norm": 0.20140407979488373,
"learning_rate": 8.553176229139261e-05,
"loss": 1.7397,
"step": 49
},
{
"epoch": 0.9009009009009009,
"grad_norm": 0.27651888132095337,
"learning_rate": 8.482077846294308e-05,
"loss": 2.145,
"step": 50
},
{
"epoch": 0.9009009009009009,
"eval_loss": 1.7854279279708862,
"eval_runtime": 1.9269,
"eval_samples_per_second": 48.782,
"eval_steps_per_second": 12.455,
"step": 50
},
{
"epoch": 0.918918918918919,
"grad_norm": 0.30430641770362854,
"learning_rate": 8.409585265545509e-05,
"loss": 2.0709,
"step": 51
},
{
"epoch": 0.9369369369369369,
"grad_norm": 0.5866444110870361,
"learning_rate": 8.335727512379534e-05,
"loss": 2.8888,
"step": 52
},
{
"epoch": 0.954954954954955,
"grad_norm": 0.13216109573841095,
"learning_rate": 8.260534158887876e-05,
"loss": 1.7877,
"step": 53
},
{
"epoch": 0.972972972972973,
"grad_norm": 0.7443955540657043,
"learning_rate": 8.184035311926396e-05,
"loss": 1.1889,
"step": 54
},
{
"epoch": 0.990990990990991,
"grad_norm": 0.31532421708106995,
"learning_rate": 8.106261601060772e-05,
"loss": 1.7782,
"step": 55
},
{
"epoch": 1.009009009009009,
"grad_norm": 0.5412552356719971,
"learning_rate": 8.027244166302642e-05,
"loss": 3.1035,
"step": 56
},
{
"epoch": 1.027027027027027,
"grad_norm": 0.21196092665195465,
"learning_rate": 7.947014645641379e-05,
"loss": 2.5004,
"step": 57
},
{
"epoch": 1.045045045045045,
"grad_norm": 0.18983232975006104,
"learning_rate": 7.865605162376486e-05,
"loss": 1.9003,
"step": 58
},
{
"epoch": 1.063063063063063,
"grad_norm": 0.2055334746837616,
"learning_rate": 7.783048312255653e-05,
"loss": 1.7878,
"step": 59
},
{
"epoch": 1.0810810810810811,
"grad_norm": 0.2857997715473175,
"learning_rate": 7.699377150423672e-05,
"loss": 1.5162,
"step": 60
},
{
"epoch": 1.0990990990990992,
"grad_norm": 0.7451598048210144,
"learning_rate": 7.614625178187402e-05,
"loss": 0.8527,
"step": 61
},
{
"epoch": 1.117117117117117,
"grad_norm": 0.6373011469841003,
"learning_rate": 7.528826329602099e-05,
"loss": 0.8998,
"step": 62
},
{
"epoch": 1.135135135135135,
"grad_norm": 0.5723519325256348,
"learning_rate": 7.442014957884472e-05,
"loss": 0.8521,
"step": 63
},
{
"epoch": 1.1531531531531531,
"grad_norm": 0.27271348237991333,
"learning_rate": 7.354225821657914e-05,
"loss": 2.143,
"step": 64
},
{
"epoch": 1.1711711711711712,
"grad_norm": 0.23478277027606964,
"learning_rate": 7.265494071035401e-05,
"loss": 1.7943,
"step": 65
},
{
"epoch": 1.1891891891891893,
"grad_norm": 0.28418228030204773,
"learning_rate": 7.175855233545668e-05,
"loss": 1.7495,
"step": 66
},
{
"epoch": 1.2072072072072073,
"grad_norm": 0.35334891080856323,
"learning_rate": 7.085345199908235e-05,
"loss": 2.0141,
"step": 67
},
{
"epoch": 1.2252252252252251,
"grad_norm": 0.5336344242095947,
"learning_rate": 6.994000209663036e-05,
"loss": 2.6846,
"step": 68
},
{
"epoch": 1.2432432432432432,
"grad_norm": 0.25877565145492554,
"learning_rate": 6.901856836660386e-05,
"loss": 1.2072,
"step": 69
},
{
"epoch": 1.2612612612612613,
"grad_norm": 0.18422676622867584,
"learning_rate": 6.808951974417078e-05,
"loss": 2.24,
"step": 70
},
{
"epoch": 1.2792792792792793,
"grad_norm": 0.2145569920539856,
"learning_rate": 6.715322821344494e-05,
"loss": 2.0781,
"step": 71
},
{
"epoch": 1.2972972972972974,
"grad_norm": 0.2036983072757721,
"learning_rate": 6.621006865854644e-05,
"loss": 1.6265,
"step": 72
},
{
"epoch": 1.3153153153153152,
"grad_norm": 0.2321735918521881,
"learning_rate": 6.526041871350086e-05,
"loss": 1.5306,
"step": 73
},
{
"epoch": 1.3333333333333333,
"grad_norm": 1.8508878946304321,
"learning_rate": 6.43046586110374e-05,
"loss": 0.7388,
"step": 74
},
{
"epoch": 1.3513513513513513,
"grad_norm": 1.0745056867599487,
"learning_rate": 6.334317103034652e-05,
"loss": 0.6945,
"step": 75
},
{
"epoch": 1.3693693693693694,
"grad_norm": 0.642358124256134,
"learning_rate": 6.237634094385813e-05,
"loss": 0.7758,
"step": 76
},
{
"epoch": 1.3873873873873874,
"grad_norm": 0.2824864685535431,
"learning_rate": 6.140455546310148e-05,
"loss": 2.0093,
"step": 77
},
{
"epoch": 1.4054054054054055,
"grad_norm": 0.35498249530792236,
"learning_rate": 6.042820368370854e-05,
"loss": 2.0257,
"step": 78
},
{
"epoch": 1.4234234234234235,
"grad_norm": 0.26013150811195374,
"learning_rate": 5.944767652962309e-05,
"loss": 1.6146,
"step": 79
},
{
"epoch": 1.4414414414414414,
"grad_norm": 0.29302212595939636,
"learning_rate": 5.8463366596577706e-05,
"loss": 2.1383,
"step": 80
},
{
"epoch": 1.4594594594594594,
"grad_norm": 0.3925705850124359,
"learning_rate": 5.747566799490132e-05,
"loss": 2.3427,
"step": 81
},
{
"epoch": 1.4774774774774775,
"grad_norm": 0.3041682839393616,
"learning_rate": 5.648497619172042e-05,
"loss": 1.6635,
"step": 82
},
{
"epoch": 1.4954954954954955,
"grad_norm": 0.19049213826656342,
"learning_rate": 5.549168785261698e-05,
"loss": 2.3609,
"step": 83
},
{
"epoch": 1.5135135135135136,
"grad_norm": 0.15779972076416016,
"learning_rate": 5.4496200682806495e-05,
"loss": 1.6961,
"step": 84
},
{
"epoch": 1.5315315315315314,
"grad_norm": 0.19497588276863098,
"learning_rate": 5.3498913267899864e-05,
"loss": 1.9126,
"step": 85
},
{
"epoch": 1.5495495495495497,
"grad_norm": 0.17658522725105286,
"learning_rate": 5.250022491431259e-05,
"loss": 1.8209,
"step": 86
},
{
"epoch": 1.5675675675675675,
"grad_norm": 1.0437264442443848,
"learning_rate": 5.150053548938557e-05,
"loss": 0.6212,
"step": 87
},
{
"epoch": 1.5855855855855856,
"grad_norm": 0.8192980885505676,
"learning_rate": 5.0500245261281175e-05,
"loss": 0.7097,
"step": 88
},
{
"epoch": 1.6036036036036037,
"grad_norm": 0.643914520740509,
"learning_rate": 4.949975473871884e-05,
"loss": 0.5015,
"step": 89
},
{
"epoch": 1.6216216216216215,
"grad_norm": 0.2833811044692993,
"learning_rate": 4.849946451061443e-05,
"loss": 1.706,
"step": 90
},
{
"epoch": 1.6396396396396398,
"grad_norm": 0.25219082832336426,
"learning_rate": 4.749977508568742e-05,
"loss": 2.0273,
"step": 91
},
{
"epoch": 1.6576576576576576,
"grad_norm": 0.2850624620914459,
"learning_rate": 4.650108673210015e-05,
"loss": 1.5778,
"step": 92
},
{
"epoch": 1.6756756756756757,
"grad_norm": 0.37064990401268005,
"learning_rate": 4.550379931719351e-05,
"loss": 1.9609,
"step": 93
},
{
"epoch": 1.6936936936936937,
"grad_norm": 0.4350699186325073,
"learning_rate": 4.4508312147383036e-05,
"loss": 1.9915,
"step": 94
},
{
"epoch": 1.7117117117117115,
"grad_norm": 0.27014970779418945,
"learning_rate": 4.3515023808279586e-05,
"loss": 1.7927,
"step": 95
},
{
"epoch": 1.7297297297297298,
"grad_norm": 0.1641526073217392,
"learning_rate": 4.252433200509869e-05,
"loss": 2.053,
"step": 96
},
{
"epoch": 1.7477477477477477,
"grad_norm": 0.1500796228647232,
"learning_rate": 4.1536633403422306e-05,
"loss": 1.5443,
"step": 97
},
{
"epoch": 1.7657657657657657,
"grad_norm": 0.19446995854377747,
"learning_rate": 4.0552323470376916e-05,
"loss": 2.1779,
"step": 98
},
{
"epoch": 1.7837837837837838,
"grad_norm": 0.18095970153808594,
"learning_rate": 3.9571796316291476e-05,
"loss": 1.5048,
"step": 99
},
{
"epoch": 1.8018018018018018,
"grad_norm": 0.5774995684623718,
"learning_rate": 3.859544453689853e-05,
"loss": 0.5426,
"step": 100
},
{
"epoch": 1.8018018018018018,
"eval_loss": 1.561373233795166,
"eval_runtime": 1.8948,
"eval_samples_per_second": 49.611,
"eval_steps_per_second": 12.667,
"step": 100
},
{
"epoch": 1.8198198198198199,
"grad_norm": 0.4646216034889221,
"learning_rate": 3.762365905614187e-05,
"loss": 0.6454,
"step": 101
},
{
"epoch": 1.8378378378378377,
"grad_norm": 0.4651610851287842,
"learning_rate": 3.665682896965349e-05,
"loss": 0.6244,
"step": 102
},
{
"epoch": 1.855855855855856,
"grad_norm": 0.3666508197784424,
"learning_rate": 3.5695341388962614e-05,
"loss": 0.8421,
"step": 103
},
{
"epoch": 1.8738738738738738,
"grad_norm": 0.20598755776882172,
"learning_rate": 3.473958128649915e-05,
"loss": 1.7942,
"step": 104
},
{
"epoch": 1.8918918918918919,
"grad_norm": 0.24941736459732056,
"learning_rate": 3.378993134145356e-05,
"loss": 1.8199,
"step": 105
},
{
"epoch": 1.90990990990991,
"grad_norm": 0.2905518412590027,
"learning_rate": 3.284677178655507e-05,
"loss": 1.7551,
"step": 106
},
{
"epoch": 1.9279279279279278,
"grad_norm": 0.3726566731929779,
"learning_rate": 3.1910480255829237e-05,
"loss": 2.3136,
"step": 107
},
{
"epoch": 1.945945945945946,
"grad_norm": 0.300400972366333,
"learning_rate": 3.098143163339615e-05,
"loss": 2.2299,
"step": 108
},
{
"epoch": 1.9639639639639639,
"grad_norm": 0.1326655149459839,
"learning_rate": 3.0059997903369656e-05,
"loss": 1.2973,
"step": 109
},
{
"epoch": 1.981981981981982,
"grad_norm": 0.19319520890712738,
"learning_rate": 2.914654800091768e-05,
"loss": 1.0709,
"step": 110
},
{
"epoch": 2.0,
"grad_norm": 0.7764068245887756,
"learning_rate": 2.824144766454333e-05,
"loss": 2.9477,
"step": 111
},
{
"epoch": 2.018018018018018,
"grad_norm": 0.1426096111536026,
"learning_rate": 2.7345059289646008e-05,
"loss": 1.6133,
"step": 112
},
{
"epoch": 2.036036036036036,
"grad_norm": 0.19993425905704498,
"learning_rate": 2.6457741783420886e-05,
"loss": 2.1937,
"step": 113
},
{
"epoch": 2.054054054054054,
"grad_norm": 0.1575198620557785,
"learning_rate": 2.5579850421155293e-05,
"loss": 1.5695,
"step": 114
},
{
"epoch": 2.0720720720720722,
"grad_norm": 0.17316873371601105,
"learning_rate": 2.4711736703979018e-05,
"loss": 1.8888,
"step": 115
},
{
"epoch": 2.09009009009009,
"grad_norm": 0.1797063797712326,
"learning_rate": 2.3853748218126e-05,
"loss": 0.7036,
"step": 116
},
{
"epoch": 2.108108108108108,
"grad_norm": 0.17858926951885223,
"learning_rate": 2.3006228495763295e-05,
"loss": 0.4263,
"step": 117
},
{
"epoch": 2.126126126126126,
"grad_norm": 0.18058860301971436,
"learning_rate": 2.2169516877443485e-05,
"loss": 0.4322,
"step": 118
},
{
"epoch": 2.144144144144144,
"grad_norm": 0.18085725605487823,
"learning_rate": 2.1343948376235144e-05,
"loss": 1.5653,
"step": 119
},
{
"epoch": 2.1621621621621623,
"grad_norm": 0.2323240488767624,
"learning_rate": 2.052985354358622e-05,
"loss": 1.9138,
"step": 120
},
{
"epoch": 2.18018018018018,
"grad_norm": 0.24628163874149323,
"learning_rate": 1.9727558336973595e-05,
"loss": 1.7247,
"step": 121
},
{
"epoch": 2.1981981981981984,
"grad_norm": 0.2715443968772888,
"learning_rate": 1.8937383989392294e-05,
"loss": 1.701,
"step": 122
},
{
"epoch": 2.2162162162162162,
"grad_norm": 0.3751051723957062,
"learning_rate": 1.8159646880736036e-05,
"loss": 1.9297,
"step": 123
},
{
"epoch": 2.234234234234234,
"grad_norm": 0.45991936326026917,
"learning_rate": 1.739465841112125e-05,
"loss": 2.0372,
"step": 124
},
{
"epoch": 2.2522522522522523,
"grad_norm": 0.1275320202112198,
"learning_rate": 1.664272487620466e-05,
"loss": 1.4902,
"step": 125
},
{
"epoch": 2.27027027027027,
"grad_norm": 0.15491849184036255,
"learning_rate": 1.590414734454493e-05,
"loss": 2.1465,
"step": 126
},
{
"epoch": 2.2882882882882885,
"grad_norm": 0.16037805378437042,
"learning_rate": 1.517922153705692e-05,
"loss": 1.8524,
"step": 127
},
{
"epoch": 2.3063063063063063,
"grad_norm": 0.19121010601520538,
"learning_rate": 1.4468237708607397e-05,
"loss": 1.7979,
"step": 128
},
{
"epoch": 2.3243243243243246,
"grad_norm": 0.19223779439926147,
"learning_rate": 1.3771480531799052e-05,
"loss": 1.6058,
"step": 129
},
{
"epoch": 2.3423423423423424,
"grad_norm": 0.14566901326179504,
"learning_rate": 1.308922898298977e-05,
"loss": 0.3995,
"step": 130
},
{
"epoch": 2.3603603603603602,
"grad_norm": 0.16846461594104767,
"learning_rate": 1.2421756230592534e-05,
"loss": 0.4075,
"step": 131
},
{
"epoch": 2.3783783783783785,
"grad_norm": 0.1482275426387787,
"learning_rate": 1.1769329525700935e-05,
"loss": 0.4448,
"step": 132
},
{
"epoch": 2.3963963963963963,
"grad_norm": 0.24686311185359955,
"learning_rate": 1.1132210095083694e-05,
"loss": 1.5153,
"step": 133
},
{
"epoch": 2.4144144144144146,
"grad_norm": 0.2111864537000656,
"learning_rate": 1.0510653036591583e-05,
"loss": 1.839,
"step": 134
},
{
"epoch": 2.4324324324324325,
"grad_norm": 0.2346525639295578,
"learning_rate": 9.904907217018e-06,
"loss": 1.3728,
"step": 135
},
{
"epoch": 2.4504504504504503,
"grad_norm": 0.32989177107810974,
"learning_rate": 9.31521517245469e-06,
"loss": 2.1364,
"step": 136
},
{
"epoch": 2.4684684684684686,
"grad_norm": 0.47966253757476807,
"learning_rate": 8.741813011182014e-06,
"loss": 2.2386,
"step": 137
},
{
"epoch": 2.4864864864864864,
"grad_norm": 0.13256965577602386,
"learning_rate": 8.18493031913305e-06,
"loss": 1.5215,
"step": 138
},
{
"epoch": 2.5045045045045047,
"grad_norm": 0.1745923012495041,
"learning_rate": 7.644790067969005e-06,
"loss": 2.0639,
"step": 139
},
{
"epoch": 2.5225225225225225,
"grad_norm": 0.2047472447156906,
"learning_rate": 7.1216085258031414e-06,
"loss": 2.0853,
"step": 140
},
{
"epoch": 2.5405405405405403,
"grad_norm": 0.1631387323141098,
"learning_rate": 6.6155951706085405e-06,
"loss": 1.4938,
"step": 141
},
{
"epoch": 2.5585585585585586,
"grad_norm": 0.14580529928207397,
"learning_rate": 6.1269526063447765e-06,
"loss": 0.5077,
"step": 142
},
{
"epoch": 2.5765765765765765,
"grad_norm": 0.17946287989616394,
"learning_rate": 5.6558764818367195e-06,
"loss": 0.6124,
"step": 143
},
{
"epoch": 2.5945945945945947,
"grad_norm": 0.137393519282341,
"learning_rate": 5.2025554124383095e-06,
"loss": 0.4831,
"step": 144
},
{
"epoch": 2.6126126126126126,
"grad_norm": 0.19595730304718018,
"learning_rate": 4.767170904512292e-06,
"loss": 1.3957,
"step": 145
},
{
"epoch": 2.6306306306306304,
"grad_norm": 0.2703566551208496,
"learning_rate": 4.349897282756487e-06,
"loss": 2.0737,
"step": 146
},
{
"epoch": 2.6486486486486487,
"grad_norm": 0.2205105572938919,
"learning_rate": 3.95090162040545e-06,
"loss": 1.8425,
"step": 147
},
{
"epoch": 2.6666666666666665,
"grad_norm": 0.2561565041542053,
"learning_rate": 3.570343672335641e-06,
"loss": 1.7804,
"step": 148
},
{
"epoch": 2.684684684684685,
"grad_norm": 0.3843667805194855,
"learning_rate": 3.2083758111006945e-06,
"loss": 1.705,
"step": 149
},
{
"epoch": 2.7027027027027026,
"grad_norm": 0.4763340353965759,
"learning_rate": 2.86514296592269e-06,
"loss": 2.2889,
"step": 150
},
{
"epoch": 2.7027027027027026,
"eval_loss": 1.503947377204895,
"eval_runtime": 1.8948,
"eval_samples_per_second": 49.609,
"eval_steps_per_second": 12.666,
"step": 150
},
{
"epoch": 2.7207207207207205,
"grad_norm": 0.17292317748069763,
"learning_rate": 2.54078256466348e-06,
"loss": 1.8323,
"step": 151
},
{
"epoch": 2.7387387387387387,
"grad_norm": 0.17607711255550385,
"learning_rate": 2.2354244787996748e-06,
"loss": 2.1262,
"step": 152
},
{
"epoch": 2.756756756756757,
"grad_norm": 0.1533748358488083,
"learning_rate": 1.9491909714230204e-06,
"loss": 1.8804,
"step": 153
},
{
"epoch": 2.774774774774775,
"grad_norm": 0.16682887077331543,
"learning_rate": 1.6821966482872264e-06,
"loss": 1.7182,
"step": 154
},
{
"epoch": 2.7927927927927927,
"grad_norm": 0.16856834292411804,
"learning_rate": 1.434548411920622e-06,
"loss": 1.1743,
"step": 155
},
{
"epoch": 2.810810810810811,
"grad_norm": 0.13685716688632965,
"learning_rate": 1.206345418823235e-06,
"loss": 0.4568,
"step": 156
},
{
"epoch": 2.828828828828829,
"grad_norm": 0.1583159863948822,
"learning_rate": 9.976790397652315e-07,
"loss": 0.4332,
"step": 157
},
{
"epoch": 2.846846846846847,
"grad_norm": 0.20499488711357117,
"learning_rate": 8.086328232027873e-07,
"loss": 0.5182,
"step": 158
},
{
"epoch": 2.864864864864865,
"grad_norm": 0.22555167973041534,
"learning_rate": 6.392824618258519e-07,
"loss": 1.8121,
"step": 159
},
{
"epoch": 2.8828828828828827,
"grad_norm": 0.22527030110359192,
"learning_rate": 4.896957622514298e-07,
"loss": 1.6064,
"step": 160
},
{
"epoch": 2.900900900900901,
"grad_norm": 0.2471853345632553,
"learning_rate": 3.599326178742535e-07,
"loss": 1.5183,
"step": 161
},
{
"epoch": 2.918918918918919,
"grad_norm": 0.3479529321193695,
"learning_rate": 2.500449848859776e-07,
"loss": 1.9306,
"step": 162
},
{
"epoch": 2.936936936936937,
"grad_norm": 0.47280144691467285,
"learning_rate": 1.6007686147225254e-07,
"loss": 2.3789,
"step": 163
},
{
"epoch": 2.954954954954955,
"grad_norm": 0.1545385867357254,
"learning_rate": 9.006427019622176e-08,
"loss": 1.9687,
"step": 164
},
{
"epoch": 2.972972972972973,
"grad_norm": 0.15617170929908752,
"learning_rate": 4.0035243575342605e-08,
"loss": 0.727,
"step": 165
},
{
"epoch": 2.990990990990991,
"grad_norm": 0.20388685166835785,
"learning_rate": 1.0009812857370016e-08,
"loss": 1.3957,
"step": 166
},
{
"epoch": 3.009009009009009,
"grad_norm": 0.4190056324005127,
"learning_rate": 0.0,
"loss": 2.7015,
"step": 167
}
],
"logging_steps": 1,
"max_steps": 167,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.268381537599488e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}