{
"best_metric": 1.8791134357452393,
"best_model_checkpoint": "miner_id_24/checkpoint-150",
"epoch": 0.084942393642113,
"eval_steps": 25,
"global_step": 166,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.000511701166518753,
"grad_norm": 0.46404364705085754,
"learning_rate": 1.6666666666666667e-05,
"loss": 1.9735,
"step": 1
},
{
"epoch": 0.000511701166518753,
"eval_loss": 4.42933464050293,
"eval_runtime": 1.5342,
"eval_samples_per_second": 32.589,
"eval_steps_per_second": 8.473,
"step": 1
},
{
"epoch": 0.001023402333037506,
"grad_norm": 0.8488529324531555,
"learning_rate": 3.3333333333333335e-05,
"loss": 2.7654,
"step": 2
},
{
"epoch": 0.001535103499556259,
"grad_norm": 1.001631736755371,
"learning_rate": 5e-05,
"loss": 3.0228,
"step": 3
},
{
"epoch": 0.002046804666075012,
"grad_norm": 1.149634838104248,
"learning_rate": 6.666666666666667e-05,
"loss": 3.2201,
"step": 4
},
{
"epoch": 0.002558505832593765,
"grad_norm": 1.4123715162277222,
"learning_rate": 8.333333333333334e-05,
"loss": 3.3185,
"step": 5
},
{
"epoch": 0.003070206999112518,
"grad_norm": 1.9225883483886719,
"learning_rate": 0.0001,
"loss": 3.4594,
"step": 6
},
{
"epoch": 0.0035819081656312713,
"grad_norm": 1.8795751333236694,
"learning_rate": 9.999132582169292e-05,
"loss": 3.501,
"step": 7
},
{
"epoch": 0.004093609332150024,
"grad_norm": 2.5900070667266846,
"learning_rate": 9.996530663083255e-05,
"loss": 3.7938,
"step": 8
},
{
"epoch": 0.004605310498668778,
"grad_norm": 2.747680187225342,
"learning_rate": 9.992195245831223e-05,
"loss": 3.7944,
"step": 9
},
{
"epoch": 0.00511701166518753,
"grad_norm": 2.346953868865967,
"learning_rate": 9.986128001799077e-05,
"loss": 3.8185,
"step": 10
},
{
"epoch": 0.005628712831706284,
"grad_norm": 2.9237945079803467,
"learning_rate": 9.978331270024886e-05,
"loss": 3.9366,
"step": 11
},
{
"epoch": 0.006140413998225036,
"grad_norm": 3.622086524963379,
"learning_rate": 9.96880805629717e-05,
"loss": 4.2799,
"step": 12
},
{
"epoch": 0.00665211516474379,
"grad_norm": 2.3093104362487793,
"learning_rate": 9.957562031996097e-05,
"loss": 1.8889,
"step": 13
},
{
"epoch": 0.0071638163312625425,
"grad_norm": 3.3800673484802246,
"learning_rate": 9.94459753267812e-05,
"loss": 2.3106,
"step": 14
},
{
"epoch": 0.007675517497781296,
"grad_norm": 3.375108242034912,
"learning_rate": 9.929919556404513e-05,
"loss": 2.3937,
"step": 15
},
{
"epoch": 0.008187218664300049,
"grad_norm": 2.4301416873931885,
"learning_rate": 9.913533761814537e-05,
"loss": 2.4152,
"step": 16
},
{
"epoch": 0.008698919830818802,
"grad_norm": 1.324080228805542,
"learning_rate": 9.895446465943926e-05,
"loss": 2.3565,
"step": 17
},
{
"epoch": 0.009210620997337556,
"grad_norm": 1.267471194267273,
"learning_rate": 9.875664641789545e-05,
"loss": 2.3992,
"step": 18
},
{
"epoch": 0.009722322163856307,
"grad_norm": 1.367044448852539,
"learning_rate": 9.85419591562117e-05,
"loss": 2.4876,
"step": 19
},
{
"epoch": 0.01023402333037506,
"grad_norm": 1.5106232166290283,
"learning_rate": 9.831048564041413e-05,
"loss": 2.6029,
"step": 20
},
{
"epoch": 0.010745724496893814,
"grad_norm": 1.4716838598251343,
"learning_rate": 9.80623151079494e-05,
"loss": 2.6521,
"step": 21
},
{
"epoch": 0.011257425663412568,
"grad_norm": 1.2971686124801636,
"learning_rate": 9.779754323328192e-05,
"loss": 2.6698,
"step": 22
},
{
"epoch": 0.01176912682993132,
"grad_norm": 1.4295047521591187,
"learning_rate": 9.751627209100952e-05,
"loss": 2.8462,
"step": 23
},
{
"epoch": 0.012280827996450073,
"grad_norm": 1.995835781097412,
"learning_rate": 9.72186101165118e-05,
"loss": 3.1823,
"step": 24
},
{
"epoch": 0.012792529162968826,
"grad_norm": 3.6110293865203857,
"learning_rate": 9.690467206414616e-05,
"loss": 3.855,
"step": 25
},
{
"epoch": 0.012792529162968826,
"eval_loss": 2.3177919387817383,
"eval_runtime": 1.5336,
"eval_samples_per_second": 32.603,
"eval_steps_per_second": 8.477,
"step": 25
},
{
"epoch": 0.01330423032948758,
"grad_norm": 0.8223591446876526,
"learning_rate": 9.657457896300791e-05,
"loss": 1.6149,
"step": 26
},
{
"epoch": 0.013815931496006332,
"grad_norm": 1.0949565172195435,
"learning_rate": 9.622845807027113e-05,
"loss": 2.0488,
"step": 27
},
{
"epoch": 0.014327632662525085,
"grad_norm": 1.1367841958999634,
"learning_rate": 9.586644282212866e-05,
"loss": 2.1151,
"step": 28
},
{
"epoch": 0.014839333829043838,
"grad_norm": 0.9595118761062622,
"learning_rate": 9.548867278234998e-05,
"loss": 2.1702,
"step": 29
},
{
"epoch": 0.015351034995562592,
"grad_norm": 0.8941548466682434,
"learning_rate": 9.509529358847655e-05,
"loss": 2.2371,
"step": 30
},
{
"epoch": 0.015862736162081344,
"grad_norm": 0.9041470289230347,
"learning_rate": 9.468645689567598e-05,
"loss": 2.2612,
"step": 31
},
{
"epoch": 0.016374437328600097,
"grad_norm": 0.9832100868225098,
"learning_rate": 9.426232031827588e-05,
"loss": 2.3107,
"step": 32
},
{
"epoch": 0.01688613849511885,
"grad_norm": 1.1351114511489868,
"learning_rate": 9.382304736900063e-05,
"loss": 2.3543,
"step": 33
},
{
"epoch": 0.017397839661637604,
"grad_norm": 1.2356266975402832,
"learning_rate": 9.336880739593416e-05,
"loss": 2.4014,
"step": 34
},
{
"epoch": 0.017909540828156358,
"grad_norm": 1.3643877506256104,
"learning_rate": 9.28997755172329e-05,
"loss": 2.5325,
"step": 35
},
{
"epoch": 0.01842124199467511,
"grad_norm": 1.4238357543945312,
"learning_rate": 9.241613255361455e-05,
"loss": 2.75,
"step": 36
},
{
"epoch": 0.018932943161193865,
"grad_norm": 1.6282374858856201,
"learning_rate": 9.191806495864812e-05,
"loss": 2.8755,
"step": 37
},
{
"epoch": 0.019444644327712615,
"grad_norm": 0.45179080963134766,
"learning_rate": 9.140576474687264e-05,
"loss": 1.6157,
"step": 38
},
{
"epoch": 0.019956345494231368,
"grad_norm": 0.4603005051612854,
"learning_rate": 9.087942941977182e-05,
"loss": 1.8998,
"step": 39
},
{
"epoch": 0.02046804666075012,
"grad_norm": 0.5871985554695129,
"learning_rate": 9.033926188963352e-05,
"loss": 2.0722,
"step": 40
},
{
"epoch": 0.020979747827268875,
"grad_norm": 0.602536141872406,
"learning_rate": 8.978547040132317e-05,
"loss": 2.054,
"step": 41
},
{
"epoch": 0.02149144899378763,
"grad_norm": 0.72366863489151,
"learning_rate": 8.921826845200139e-05,
"loss": 2.1352,
"step": 42
},
{
"epoch": 0.022003150160306382,
"grad_norm": 0.7915085554122925,
"learning_rate": 8.863787470881686e-05,
"loss": 2.0386,
"step": 43
},
{
"epoch": 0.022514851326825135,
"grad_norm": 0.8590556979179382,
"learning_rate": 8.804451292460585e-05,
"loss": 2.1996,
"step": 44
},
{
"epoch": 0.02302655249334389,
"grad_norm": 0.9212091565132141,
"learning_rate": 8.743841185163136e-05,
"loss": 2.2756,
"step": 45
},
{
"epoch": 0.02353825365986264,
"grad_norm": 0.9804298281669617,
"learning_rate": 8.681980515339464e-05,
"loss": 2.295,
"step": 46
},
{
"epoch": 0.024049954826381392,
"grad_norm": 1.0321789979934692,
"learning_rate": 8.618893131455363e-05,
"loss": 2.3681,
"step": 47
},
{
"epoch": 0.024561655992900146,
"grad_norm": 1.3598474264144897,
"learning_rate": 8.554603354898238e-05,
"loss": 2.4684,
"step": 48
},
{
"epoch": 0.0250733571594189,
"grad_norm": 1.5326489210128784,
"learning_rate": 8.489135970600768e-05,
"loss": 2.6954,
"step": 49
},
{
"epoch": 0.025585058325937653,
"grad_norm": 2.6125943660736084,
"learning_rate": 8.422516217485826e-05,
"loss": 3.1982,
"step": 50
},
{
"epoch": 0.025585058325937653,
"eval_loss": 2.026942729949951,
"eval_runtime": 1.535,
"eval_samples_per_second": 32.573,
"eval_steps_per_second": 8.469,
"step": 50
},
{
"epoch": 0.026096759492456406,
"grad_norm": 0.4928167164325714,
"learning_rate": 8.354769778736406e-05,
"loss": 1.5405,
"step": 51
},
{
"epoch": 0.02660846065897516,
"grad_norm": 0.6747775673866272,
"learning_rate": 8.285922771894254e-05,
"loss": 1.8902,
"step": 52
},
{
"epoch": 0.027120161825493913,
"grad_norm": 0.761574923992157,
"learning_rate": 8.216001738791072e-05,
"loss": 2.0376,
"step": 53
},
{
"epoch": 0.027631862992012663,
"grad_norm": 0.7025596499443054,
"learning_rate": 8.14503363531613e-05,
"loss": 2.0866,
"step": 54
},
{
"epoch": 0.028143564158531417,
"grad_norm": 0.7048724889755249,
"learning_rate": 8.073045821024256e-05,
"loss": 2.0865,
"step": 55
},
{
"epoch": 0.02865526532505017,
"grad_norm": 0.8263847231864929,
"learning_rate": 8.000066048588211e-05,
"loss": 2.0262,
"step": 56
},
{
"epoch": 0.029166966491568923,
"grad_norm": 0.8577947616577148,
"learning_rate": 7.926122453099503e-05,
"loss": 2.1969,
"step": 57
},
{
"epoch": 0.029678667658087677,
"grad_norm": 1.095415711402893,
"learning_rate": 7.85124354122177e-05,
"loss": 2.2191,
"step": 58
},
{
"epoch": 0.03019036882460643,
"grad_norm": 1.0914173126220703,
"learning_rate": 7.775458180200932e-05,
"loss": 2.2533,
"step": 59
},
{
"epoch": 0.030702069991125184,
"grad_norm": 1.104080080986023,
"learning_rate": 7.698795586736298e-05,
"loss": 2.3613,
"step": 60
},
{
"epoch": 0.031213771157643937,
"grad_norm": 1.2872527837753296,
"learning_rate": 7.62128531571699e-05,
"loss": 2.4725,
"step": 61
},
{
"epoch": 0.03172547232416269,
"grad_norm": 1.6460251808166504,
"learning_rate": 7.542957248827961e-05,
"loss": 2.7487,
"step": 62
},
{
"epoch": 0.03223717349068144,
"grad_norm": 0.5120904445648193,
"learning_rate": 7.46384158303004e-05,
"loss": 1.4998,
"step": 63
},
{
"epoch": 0.032748874657200194,
"grad_norm": 0.5734795928001404,
"learning_rate": 7.383968818918426e-05,
"loss": 1.8322,
"step": 64
},
{
"epoch": 0.03326057582371895,
"grad_norm": 0.6029512286186218,
"learning_rate": 7.303369748964134e-05,
"loss": 1.9155,
"step": 65
},
{
"epoch": 0.0337722769902377,
"grad_norm": 0.6455990076065063,
"learning_rate": 7.222075445642904e-05,
"loss": 2.0008,
"step": 66
},
{
"epoch": 0.034283978156756455,
"grad_norm": 0.6918196082115173,
"learning_rate": 7.140117249456176e-05,
"loss": 2.0541,
"step": 67
},
{
"epoch": 0.03479567932327521,
"grad_norm": 0.7618705630302429,
"learning_rate": 7.057526756848719e-05,
"loss": 2.062,
"step": 68
},
{
"epoch": 0.03530738048979396,
"grad_norm": 0.7683627605438232,
"learning_rate": 6.97433580802762e-05,
"loss": 2.0749,
"step": 69
},
{
"epoch": 0.035819081656312715,
"grad_norm": 0.8340547680854797,
"learning_rate": 6.890576474687263e-05,
"loss": 2.108,
"step": 70
},
{
"epoch": 0.03633078282283147,
"grad_norm": 0.9197708368301392,
"learning_rate": 6.80628104764508e-05,
"loss": 2.1471,
"step": 71
},
{
"epoch": 0.03684248398935022,
"grad_norm": 1.2023193836212158,
"learning_rate": 6.721482024392835e-05,
"loss": 2.2415,
"step": 72
},
{
"epoch": 0.037354185155868976,
"grad_norm": 1.1722882986068726,
"learning_rate": 6.63621209656821e-05,
"loss": 2.3475,
"step": 73
},
{
"epoch": 0.03786588632238773,
"grad_norm": 1.4040749073028564,
"learning_rate": 6.550504137351576e-05,
"loss": 2.6166,
"step": 74
},
{
"epoch": 0.038377587488906476,
"grad_norm": 2.434393882751465,
"learning_rate": 6.46439118879273e-05,
"loss": 2.8818,
"step": 75
},
{
"epoch": 0.038377587488906476,
"eval_loss": 1.9559780359268188,
"eval_runtime": 1.5335,
"eval_samples_per_second": 32.605,
"eval_steps_per_second": 8.477,
"step": 75
},
{
"epoch": 0.03888928865542523,
"grad_norm": 0.4053546190261841,
"learning_rate": 6.377906449072578e-05,
"loss": 1.5548,
"step": 76
},
{
"epoch": 0.03940098982194398,
"grad_norm": 0.550913393497467,
"learning_rate": 6.291083259704596e-05,
"loss": 1.8655,
"step": 77
},
{
"epoch": 0.039912690988462736,
"grad_norm": 0.5964261889457703,
"learning_rate": 6.203955092681039e-05,
"loss": 1.9327,
"step": 78
},
{
"epoch": 0.04042439215498149,
"grad_norm": 0.7430030107498169,
"learning_rate": 6.116555537568857e-05,
"loss": 2.0133,
"step": 79
},
{
"epoch": 0.04093609332150024,
"grad_norm": 0.7743514180183411,
"learning_rate": 6.0289182885602704e-05,
"loss": 2.0308,
"step": 80
},
{
"epoch": 0.041447794488018996,
"grad_norm": 0.8544523119926453,
"learning_rate": 5.941077131483025e-05,
"loss": 2.0096,
"step": 81
},
{
"epoch": 0.04195949565453775,
"grad_norm": 0.7836202383041382,
"learning_rate": 5.8530659307753036e-05,
"loss": 2.0285,
"step": 82
},
{
"epoch": 0.0424711968210565,
"grad_norm": 0.9040354490280151,
"learning_rate": 5.7649186164303506e-05,
"loss": 2.0367,
"step": 83
},
{
"epoch": 0.04298289798757526,
"grad_norm": 0.9767117500305176,
"learning_rate": 5.6766691709158096e-05,
"loss": 2.1512,
"step": 84
},
{
"epoch": 0.04349459915409401,
"grad_norm": 1.108844518661499,
"learning_rate": 5.5883516160728274e-05,
"loss": 2.2661,
"step": 85
},
{
"epoch": 0.044006300320612764,
"grad_norm": 1.3114573955535889,
"learning_rate": 5.500000000000001e-05,
"loss": 2.374,
"step": 86
},
{
"epoch": 0.04451800148713152,
"grad_norm": 1.5609687566757202,
"learning_rate": 5.4116483839271736e-05,
"loss": 2.6349,
"step": 87
},
{
"epoch": 0.04502970265365027,
"grad_norm": 0.42394495010375977,
"learning_rate": 5.3233308290841935e-05,
"loss": 1.4049,
"step": 88
},
{
"epoch": 0.045541403820169024,
"grad_norm": 0.524941086769104,
"learning_rate": 5.235081383569649e-05,
"loss": 1.8021,
"step": 89
},
{
"epoch": 0.04605310498668778,
"grad_norm": 0.7207713723182678,
"learning_rate": 5.1469340692246995e-05,
"loss": 1.8878,
"step": 90
},
{
"epoch": 0.046564806153206524,
"grad_norm": 0.6740505695343018,
"learning_rate": 5.058922868516978e-05,
"loss": 1.9676,
"step": 91
},
{
"epoch": 0.04707650731972528,
"grad_norm": 0.71189945936203,
"learning_rate": 4.9710817114397314e-05,
"loss": 1.9829,
"step": 92
},
{
"epoch": 0.04758820848624403,
"grad_norm": 0.6990759372711182,
"learning_rate": 4.883444462431145e-05,
"loss": 1.9317,
"step": 93
},
{
"epoch": 0.048099909652762785,
"grad_norm": 0.7979338765144348,
"learning_rate": 4.7960449073189606e-05,
"loss": 1.9789,
"step": 94
},
{
"epoch": 0.04861161081928154,
"grad_norm": 0.8845726847648621,
"learning_rate": 4.708916740295405e-05,
"loss": 2.0438,
"step": 95
},
{
"epoch": 0.04912331198580029,
"grad_norm": 0.9462182521820068,
"learning_rate": 4.6220935509274235e-05,
"loss": 2.0737,
"step": 96
},
{
"epoch": 0.049635013152319045,
"grad_norm": 1.0222724676132202,
"learning_rate": 4.535608811207272e-05,
"loss": 2.0986,
"step": 97
},
{
"epoch": 0.0501467143188378,
"grad_norm": 1.254888892173767,
"learning_rate": 4.4494958626484276e-05,
"loss": 2.3094,
"step": 98
},
{
"epoch": 0.05065841548535655,
"grad_norm": 1.4137556552886963,
"learning_rate": 4.36378790343179e-05,
"loss": 2.4861,
"step": 99
},
{
"epoch": 0.051170116651875305,
"grad_norm": 2.0688607692718506,
"learning_rate": 4.278517975607167e-05,
"loss": 2.9678,
"step": 100
},
{
"epoch": 0.051170116651875305,
"eval_loss": 1.9229816198349,
"eval_runtime": 1.5335,
"eval_samples_per_second": 32.605,
"eval_steps_per_second": 8.477,
"step": 100
},
{
"epoch": 0.05168181781839406,
"grad_norm": 0.40056556463241577,
"learning_rate": 4.19371895235492e-05,
"loss": 1.532,
"step": 101
},
{
"epoch": 0.05219351898491281,
"grad_norm": 0.6019428372383118,
"learning_rate": 4.109423525312738e-05,
"loss": 1.8149,
"step": 102
},
{
"epoch": 0.052705220151431566,
"grad_norm": 0.7042158842086792,
"learning_rate": 4.0256641919723806e-05,
"loss": 1.8967,
"step": 103
},
{
"epoch": 0.05321692131795032,
"grad_norm": 0.725395917892456,
"learning_rate": 3.942473243151281e-05,
"loss": 1.939,
"step": 104
},
{
"epoch": 0.05372862248446907,
"grad_norm": 0.8081313371658325,
"learning_rate": 3.859882750543826e-05,
"loss": 2.0155,
"step": 105
},
{
"epoch": 0.054240323650987826,
"grad_norm": 0.7761573195457458,
"learning_rate": 3.777924554357096e-05,
"loss": 2.0242,
"step": 106
},
{
"epoch": 0.05475202481750658,
"grad_norm": 0.7627308368682861,
"learning_rate": 3.6966302510358666e-05,
"loss": 2.0408,
"step": 107
},
{
"epoch": 0.055263725984025326,
"grad_norm": 0.954046905040741,
"learning_rate": 3.616031181081575e-05,
"loss": 2.0613,
"step": 108
},
{
"epoch": 0.05577542715054408,
"grad_norm": 1.0556938648223877,
"learning_rate": 3.53615841696996e-05,
"loss": 2.1036,
"step": 109
},
{
"epoch": 0.05628712831706283,
"grad_norm": 1.131365418434143,
"learning_rate": 3.45704275117204e-05,
"loss": 2.1926,
"step": 110
},
{
"epoch": 0.05679882948358159,
"grad_norm": 1.4441267251968384,
"learning_rate": 3.378714684283011e-05,
"loss": 2.3588,
"step": 111
},
{
"epoch": 0.05731053065010034,
"grad_norm": 1.711294412612915,
"learning_rate": 3.301204413263704e-05,
"loss": 2.615,
"step": 112
},
{
"epoch": 0.057822231816619094,
"grad_norm": 0.3940889239311218,
"learning_rate": 3.224541819799071e-05,
"loss": 1.4173,
"step": 113
},
{
"epoch": 0.05833393298313785,
"grad_norm": 0.5546037554740906,
"learning_rate": 3.1487564587782306e-05,
"loss": 1.6893,
"step": 114
},
{
"epoch": 0.0588456341496566,
"grad_norm": 0.6007658243179321,
"learning_rate": 3.0738775469004985e-05,
"loss": 1.8685,
"step": 115
},
{
"epoch": 0.059357335316175354,
"grad_norm": 0.675112247467041,
"learning_rate": 2.9999339514117912e-05,
"loss": 1.9225,
"step": 116
},
{
"epoch": 0.05986903648269411,
"grad_norm": 0.7189555168151855,
"learning_rate": 2.926954178975746e-05,
"loss": 1.9709,
"step": 117
},
{
"epoch": 0.06038073764921286,
"grad_norm": 0.7457597255706787,
"learning_rate": 2.854966364683872e-05,
"loss": 1.9753,
"step": 118
},
{
"epoch": 0.060892438815731614,
"grad_norm": 0.869260847568512,
"learning_rate": 2.783998261208929e-05,
"loss": 2.0098,
"step": 119
},
{
"epoch": 0.06140413998225037,
"grad_norm": 0.9872080087661743,
"learning_rate": 2.7140772281057468e-05,
"loss": 2.0447,
"step": 120
},
{
"epoch": 0.06191584114876912,
"grad_norm": 1.0225969552993774,
"learning_rate": 2.645230221263596e-05,
"loss": 2.0648,
"step": 121
},
{
"epoch": 0.062427542315287875,
"grad_norm": 1.0745333433151245,
"learning_rate": 2.577483782514174e-05,
"loss": 2.1066,
"step": 122
},
{
"epoch": 0.06293924348180663,
"grad_norm": 1.2674683332443237,
"learning_rate": 2.5108640293992337e-05,
"loss": 2.2943,
"step": 123
},
{
"epoch": 0.06345094464832537,
"grad_norm": 1.3520656824111938,
"learning_rate": 2.445396645101762e-05,
"loss": 2.3662,
"step": 124
},
{
"epoch": 0.06396264581484414,
"grad_norm": 1.9982293844223022,
"learning_rate": 2.3811068685446395e-05,
"loss": 2.8426,
"step": 125
},
{
"epoch": 0.06396264581484414,
"eval_loss": 1.8993180990219116,
"eval_runtime": 1.5346,
"eval_samples_per_second": 32.582,
"eval_steps_per_second": 8.471,
"step": 125
},
{
"epoch": 0.06447434698136288,
"grad_norm": 0.36793649196624756,
"learning_rate": 2.3180194846605367e-05,
"loss": 1.4921,
"step": 126
},
{
"epoch": 0.06498604814788164,
"grad_norm": 0.5427601337432861,
"learning_rate": 2.2561588148368657e-05,
"loss": 1.8148,
"step": 127
},
{
"epoch": 0.06549774931440039,
"grad_norm": 0.6203567385673523,
"learning_rate": 2.195548707539416e-05,
"loss": 1.8883,
"step": 128
},
{
"epoch": 0.06600945048091915,
"grad_norm": 0.6660148501396179,
"learning_rate": 2.136212529118314e-05,
"loss": 1.9282,
"step": 129
},
{
"epoch": 0.0665211516474379,
"grad_norm": 0.7544979453086853,
"learning_rate": 2.0781731547998614e-05,
"loss": 1.9454,
"step": 130
},
{
"epoch": 0.06703285281395666,
"grad_norm": 0.7683319449424744,
"learning_rate": 2.0214529598676836e-05,
"loss": 1.9783,
"step": 131
},
{
"epoch": 0.0675445539804754,
"grad_norm": 0.8448789715766907,
"learning_rate": 1.966073811036649e-05,
"loss": 1.9902,
"step": 132
},
{
"epoch": 0.06805625514699415,
"grad_norm": 0.9101701378822327,
"learning_rate": 1.9120570580228186e-05,
"loss": 2.0101,
"step": 133
},
{
"epoch": 0.06856795631351291,
"grad_norm": 1.002808928489685,
"learning_rate": 1.8594235253127375e-05,
"loss": 2.1164,
"step": 134
},
{
"epoch": 0.06907965748003166,
"grad_norm": 1.1789017915725708,
"learning_rate": 1.8081935041351887e-05,
"loss": 2.2391,
"step": 135
},
{
"epoch": 0.06959135864655042,
"grad_norm": 1.4245861768722534,
"learning_rate": 1.758386744638546e-05,
"loss": 2.3897,
"step": 136
},
{
"epoch": 0.07010305981306916,
"grad_norm": 1.6070505380630493,
"learning_rate": 1.7100224482767114e-05,
"loss": 2.5421,
"step": 137
},
{
"epoch": 0.07061476097958792,
"grad_norm": 0.4116235673427582,
"learning_rate": 1.6631192604065855e-05,
"loss": 1.4141,
"step": 138
},
{
"epoch": 0.07112646214610667,
"grad_norm": 0.5044823288917542,
"learning_rate": 1.617695263099937e-05,
"loss": 1.8181,
"step": 139
},
{
"epoch": 0.07163816331262543,
"grad_norm": 0.5927360653877258,
"learning_rate": 1.573767968172413e-05,
"loss": 1.8575,
"step": 140
},
{
"epoch": 0.07214986447914418,
"grad_norm": 0.6108676791191101,
"learning_rate": 1.531354310432403e-05,
"loss": 1.9401,
"step": 141
},
{
"epoch": 0.07266156564566294,
"grad_norm": 0.6655164361000061,
"learning_rate": 1.490470641152345e-05,
"loss": 1.9377,
"step": 142
},
{
"epoch": 0.07317326681218168,
"grad_norm": 0.758387565612793,
"learning_rate": 1.4511327217650046e-05,
"loss": 1.9332,
"step": 143
},
{
"epoch": 0.07368496797870044,
"grad_norm": 0.8023284673690796,
"learning_rate": 1.413355717787134e-05,
"loss": 1.9804,
"step": 144
},
{
"epoch": 0.07419666914521919,
"grad_norm": 0.8641353249549866,
"learning_rate": 1.3771541929728886e-05,
"loss": 1.9505,
"step": 145
},
{
"epoch": 0.07470837031173795,
"grad_norm": 1.0415940284729004,
"learning_rate": 1.3425421036992098e-05,
"loss": 2.0321,
"step": 146
},
{
"epoch": 0.0752200714782567,
"grad_norm": 1.1581557989120483,
"learning_rate": 1.309532793585384e-05,
"loss": 2.0956,
"step": 147
},
{
"epoch": 0.07573177264477546,
"grad_norm": 1.1987535953521729,
"learning_rate": 1.2781389883488218e-05,
"loss": 2.2152,
"step": 148
},
{
"epoch": 0.0762434738112942,
"grad_norm": 1.6775120496749878,
"learning_rate": 1.248372790899049e-05,
"loss": 2.586,
"step": 149
},
{
"epoch": 0.07675517497781295,
"grad_norm": 2.6760313510894775,
"learning_rate": 1.2202456766718093e-05,
"loss": 3.0532,
"step": 150
},
{
"epoch": 0.07675517497781295,
"eval_loss": 1.8791134357452393,
"eval_runtime": 1.5354,
"eval_samples_per_second": 32.565,
"eval_steps_per_second": 8.467,
"step": 150
},
{
"epoch": 0.07726687614433171,
"grad_norm": 0.33062976598739624,
"learning_rate": 1.1937684892050604e-05,
"loss": 1.3588,
"step": 151
},
{
"epoch": 0.07777857731085046,
"grad_norm": 0.5277450084686279,
"learning_rate": 1.168951435958588e-05,
"loss": 1.7291,
"step": 152
},
{
"epoch": 0.07829027847736922,
"grad_norm": 0.5764089226722717,
"learning_rate": 1.1458040843788312e-05,
"loss": 1.8712,
"step": 153
},
{
"epoch": 0.07880197964388796,
"grad_norm": 0.6534531712532043,
"learning_rate": 1.1243353582104556e-05,
"loss": 1.9575,
"step": 154
},
{
"epoch": 0.07931368081040673,
"grad_norm": 0.7147605419158936,
"learning_rate": 1.1045535340560744e-05,
"loss": 1.9211,
"step": 155
},
{
"epoch": 0.07982538197692547,
"grad_norm": 0.7351657748222351,
"learning_rate": 1.0864662381854632e-05,
"loss": 1.9539,
"step": 156
},
{
"epoch": 0.08033708314344423,
"grad_norm": 0.8152849674224854,
"learning_rate": 1.070080443595488e-05,
"loss": 2.0232,
"step": 157
},
{
"epoch": 0.08084878430996298,
"grad_norm": 0.9168938398361206,
"learning_rate": 1.0554024673218807e-05,
"loss": 1.9743,
"step": 158
},
{
"epoch": 0.08136048547648174,
"grad_norm": 1.0510733127593994,
"learning_rate": 1.0424379680039025e-05,
"loss": 2.0778,
"step": 159
},
{
"epoch": 0.08187218664300049,
"grad_norm": 1.2245820760726929,
"learning_rate": 1.0311919437028318e-05,
"loss": 2.2151,
"step": 160
},
{
"epoch": 0.08238388780951925,
"grad_norm": 1.3239753246307373,
"learning_rate": 1.0216687299751144e-05,
"loss": 2.2195,
"step": 161
},
{
"epoch": 0.08289558897603799,
"grad_norm": 1.7548948526382446,
"learning_rate": 1.0138719982009242e-05,
"loss": 2.5937,
"step": 162
},
{
"epoch": 0.08340729014255675,
"grad_norm": 0.4118451774120331,
"learning_rate": 1.007804754168779e-05,
"loss": 1.4343,
"step": 163
},
{
"epoch": 0.0839189913090755,
"grad_norm": 0.5209285616874695,
"learning_rate": 1.003469336916747e-05,
"loss": 1.7331,
"step": 164
},
{
"epoch": 0.08443069247559425,
"grad_norm": 0.5564490556716919,
"learning_rate": 1.0008674178307085e-05,
"loss": 1.8639,
"step": 165
},
{
"epoch": 0.084942393642113,
"grad_norm": 0.6351719498634338,
"learning_rate": 1e-05,
"loss": 1.9287,
"step": 166
}
],
"logging_steps": 1,
"max_steps": 166,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 30,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.944151006450811e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}