{{GR_DELETE_SPAN}}
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 40,
"global_step": 198,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005050505050505051,
"grad_norm": 1.934841513633728,
"learning_rate": 2e-05,
"loss": 2.149,
"step": 1
},
{
"epoch": 0.010101010101010102,
"grad_norm": 2.4779138565063477,
"learning_rate": 4e-05,
"loss": 2.7234,
"step": 2
},
{
"epoch": 0.015151515151515152,
"grad_norm": 3.3968937397003174,
"learning_rate": 6e-05,
"loss": 2.9434,
"step": 3
},
{
"epoch": 0.020202020202020204,
"grad_norm": 2.6924426555633545,
"learning_rate": 8e-05,
"loss": 2.9018,
"step": 4
},
{
"epoch": 0.025252525252525252,
"grad_norm": 2.9207985401153564,
"learning_rate": 0.0001,
"loss": 2.8698,
"step": 5
},
{
"epoch": 0.030303030303030304,
"grad_norm": 2.5849769115448,
"learning_rate": 0.00012,
"loss": 2.6004,
"step": 6
},
{
"epoch": 0.03535353535353535,
"grad_norm": 2.265801191329956,
"learning_rate": 0.00014,
"loss": 1.9435,
"step": 7
},
{
"epoch": 0.04040404040404041,
"grad_norm": 2.9821760654449463,
"learning_rate": 0.00016,
"loss": 2.3524,
"step": 8
},
{
"epoch": 0.045454545454545456,
"grad_norm": 2.209989547729492,
"learning_rate": 0.00018,
"loss": 1.7031,
"step": 9
},
{
"epoch": 0.050505050505050504,
"grad_norm": 2.4582831859588623,
"learning_rate": 0.0002,
"loss": 1.858,
"step": 10
},
{
"epoch": 0.05555555555555555,
"grad_norm": 2.465235471725464,
"learning_rate": 0.00019893617021276595,
"loss": 1.9792,
"step": 11
},
{
"epoch": 0.06060606060606061,
"grad_norm": 2.106245756149292,
"learning_rate": 0.00019787234042553193,
"loss": 1.6035,
"step": 12
},
{
"epoch": 0.06565656565656566,
"grad_norm": 2.0670032501220703,
"learning_rate": 0.00019680851063829787,
"loss": 1.4089,
"step": 13
},
{
"epoch": 0.0707070707070707,
"grad_norm": 1.869535207748413,
"learning_rate": 0.00019574468085106384,
"loss": 1.5282,
"step": 14
},
{
"epoch": 0.07575757575757576,
"grad_norm": 2.4417788982391357,
"learning_rate": 0.00019468085106382982,
"loss": 2.0049,
"step": 15
},
{
"epoch": 0.08080808080808081,
"grad_norm": 2.03873610496521,
"learning_rate": 0.00019361702127659576,
"loss": 1.5979,
"step": 16
},
{
"epoch": 0.08585858585858586,
"grad_norm": 2.6959407329559326,
"learning_rate": 0.0001925531914893617,
"loss": 1.7192,
"step": 17
},
{
"epoch": 0.09090909090909091,
"grad_norm": 2.447222948074341,
"learning_rate": 0.00019148936170212768,
"loss": 1.9945,
"step": 18
},
{
"epoch": 0.09595959595959595,
"grad_norm": 2.0988619327545166,
"learning_rate": 0.00019042553191489362,
"loss": 1.5905,
"step": 19
},
{
"epoch": 0.10101010101010101,
"grad_norm": 2.3013782501220703,
"learning_rate": 0.00018936170212765957,
"loss": 1.5003,
"step": 20
},
{
"epoch": 0.10606060606060606,
"grad_norm": 2.080165147781372,
"learning_rate": 0.00018829787234042554,
"loss": 1.4736,
"step": 21
},
{
"epoch": 0.1111111111111111,
"grad_norm": 4.3709940910339355,
"learning_rate": 0.0001872340425531915,
"loss": 0.9107,
"step": 22
},
{
"epoch": 0.11616161616161616,
"grad_norm": 5.782566070556641,
"learning_rate": 0.00018617021276595746,
"loss": 1.3649,
"step": 23
},
{
"epoch": 0.12121212121212122,
"grad_norm": 2.43636417388916,
"learning_rate": 0.0001851063829787234,
"loss": 1.2051,
"step": 24
},
{
"epoch": 0.12626262626262627,
"grad_norm": 2.063096761703491,
"learning_rate": 0.00018404255319148937,
"loss": 1.1806,
"step": 25
},
{
"epoch": 0.13131313131313133,
"grad_norm": 2.052663564682007,
"learning_rate": 0.00018297872340425532,
"loss": 1.1073,
"step": 26
},
{
"epoch": 0.13636363636363635,
"grad_norm": 2.293409585952759,
"learning_rate": 0.0001819148936170213,
"loss": 1.3441,
"step": 27
},
{
"epoch": 0.1414141414141414,
"grad_norm": 2.016857147216797,
"learning_rate": 0.00018085106382978726,
"loss": 1.1819,
"step": 28
},
{
"epoch": 0.14646464646464646,
"grad_norm": 2.0556704998016357,
"learning_rate": 0.0001797872340425532,
"loss": 1.2543,
"step": 29
},
{
"epoch": 0.15151515151515152,
"grad_norm": 2.157916784286499,
"learning_rate": 0.00017872340425531915,
"loss": 1.1889,
"step": 30
},
{
"epoch": 0.15656565656565657,
"grad_norm": 2.284308671951294,
"learning_rate": 0.00017765957446808512,
"loss": 1.3205,
"step": 31
},
{
"epoch": 0.16161616161616163,
"grad_norm": 2.55147123336792,
"learning_rate": 0.00017659574468085107,
"loss": 1.1276,
"step": 32
},
{
"epoch": 0.16666666666666666,
"grad_norm": 2.595245599746704,
"learning_rate": 0.000175531914893617,
"loss": 1.2703,
"step": 33
},
{
"epoch": 0.1717171717171717,
"grad_norm": 2.480912685394287,
"learning_rate": 0.00017446808510638298,
"loss": 0.9982,
"step": 34
},
{
"epoch": 0.17676767676767677,
"grad_norm": 2.379316806793213,
"learning_rate": 0.00017340425531914896,
"loss": 1.1423,
"step": 35
},
{
"epoch": 0.18181818181818182,
"grad_norm": 2.62664794921875,
"learning_rate": 0.0001723404255319149,
"loss": 1.1058,
"step": 36
},
{
"epoch": 0.18686868686868688,
"grad_norm": 3.0421292781829834,
"learning_rate": 0.00017127659574468087,
"loss": 1.3554,
"step": 37
},
{
"epoch": 0.1919191919191919,
"grad_norm": 2.338921308517456,
"learning_rate": 0.00017021276595744682,
"loss": 1.0576,
"step": 38
},
{
"epoch": 0.19696969696969696,
"grad_norm": 2.2380709648132324,
"learning_rate": 0.00016914893617021276,
"loss": 0.96,
"step": 39
},
{
"epoch": 0.20202020202020202,
"grad_norm": 2.5871872901916504,
"learning_rate": 0.00016808510638297873,
"loss": 1.1717,
"step": 40
},
{
"epoch": 0.20202020202020202,
"eval_loss": 1.2117999792099,
"eval_runtime": 35.7116,
"eval_samples_per_second": 2.8,
"eval_steps_per_second": 2.8,
"step": 40
},
{
"epoch": 0.20707070707070707,
"grad_norm": 3.1568801403045654,
"learning_rate": 0.00016702127659574468,
"loss": 1.2266,
"step": 41
},
{
"epoch": 0.21212121212121213,
"grad_norm": 2.2659265995025635,
"learning_rate": 0.00016595744680851065,
"loss": 0.9661,
"step": 42
},
{
"epoch": 0.21717171717171718,
"grad_norm": 2.648207664489746,
"learning_rate": 0.00016489361702127662,
"loss": 1.4721,
"step": 43
},
{
"epoch": 0.2222222222222222,
"grad_norm": 2.391132116317749,
"learning_rate": 0.00016382978723404257,
"loss": 1.1877,
"step": 44
},
{
"epoch": 0.22727272727272727,
"grad_norm": 2.216590166091919,
"learning_rate": 0.0001627659574468085,
"loss": 1.1156,
"step": 45
},
{
"epoch": 0.23232323232323232,
"grad_norm": 2.7304837703704834,
"learning_rate": 0.00016170212765957446,
"loss": 1.0745,
"step": 46
},
{
"epoch": 0.23737373737373738,
"grad_norm": 2.449009418487549,
"learning_rate": 0.00016063829787234043,
"loss": 0.8261,
"step": 47
},
{
"epoch": 0.24242424242424243,
"grad_norm": 2.8799259662628174,
"learning_rate": 0.00015957446808510637,
"loss": 1.3139,
"step": 48
},
{
"epoch": 0.2474747474747475,
"grad_norm": 3.0433382987976074,
"learning_rate": 0.00015851063829787235,
"loss": 0.8179,
"step": 49
},
{
"epoch": 0.25252525252525254,
"grad_norm": 1.923201322555542,
"learning_rate": 0.00015744680851063832,
"loss": 1.3833,
"step": 50
},
{
"epoch": 0.25757575757575757,
"grad_norm": 2.040708303451538,
"learning_rate": 0.00015638297872340426,
"loss": 1.7501,
"step": 51
},
{
"epoch": 0.26262626262626265,
"grad_norm": 1.4778690338134766,
"learning_rate": 0.0001553191489361702,
"loss": 0.9487,
"step": 52
},
{
"epoch": 0.2676767676767677,
"grad_norm": 1.7248860597610474,
"learning_rate": 0.00015425531914893618,
"loss": 1.0579,
"step": 53
},
{
"epoch": 0.2727272727272727,
"grad_norm": 1.6652686595916748,
"learning_rate": 0.00015319148936170213,
"loss": 0.9751,
"step": 54
},
{
"epoch": 0.2777777777777778,
"grad_norm": 1.6558713912963867,
"learning_rate": 0.0001521276595744681,
"loss": 1.0705,
"step": 55
},
{
"epoch": 0.2828282828282828,
"grad_norm": 2.045135259628296,
"learning_rate": 0.00015106382978723407,
"loss": 1.4438,
"step": 56
},
{
"epoch": 0.2878787878787879,
"grad_norm": 2.0178723335266113,
"learning_rate": 0.00015000000000000001,
"loss": 1.1453,
"step": 57
},
{
"epoch": 0.29292929292929293,
"grad_norm": 1.7096202373504639,
"learning_rate": 0.00014893617021276596,
"loss": 1.0412,
"step": 58
},
{
"epoch": 0.29797979797979796,
"grad_norm": 2.039254665374756,
"learning_rate": 0.00014787234042553193,
"loss": 1.3789,
"step": 59
},
{
"epoch": 0.30303030303030304,
"grad_norm": 2.7002921104431152,
"learning_rate": 0.00014680851063829788,
"loss": 1.9902,
"step": 60
},
{
"epoch": 0.30808080808080807,
"grad_norm": 2.1051278114318848,
"learning_rate": 0.00014574468085106382,
"loss": 1.2287,
"step": 61
},
{
"epoch": 0.31313131313131315,
"grad_norm": 2.6454594135284424,
"learning_rate": 0.0001446808510638298,
"loss": 1.3612,
"step": 62
},
{
"epoch": 0.3181818181818182,
"grad_norm": 1.9458284378051758,
"learning_rate": 0.00014361702127659576,
"loss": 1.1879,
"step": 63
},
{
"epoch": 0.32323232323232326,
"grad_norm": 2.09489369392395,
"learning_rate": 0.0001425531914893617,
"loss": 1.1563,
"step": 64
},
{
"epoch": 0.3282828282828283,
"grad_norm": 1.8626444339752197,
"learning_rate": 0.00014148936170212768,
"loss": 0.8783,
"step": 65
},
{
"epoch": 0.3333333333333333,
"grad_norm": 2.056874990463257,
"learning_rate": 0.00014042553191489363,
"loss": 1.1404,
"step": 66
},
{
"epoch": 0.3383838383838384,
"grad_norm": 2.202636241912842,
"learning_rate": 0.00013936170212765957,
"loss": 1.2132,
"step": 67
},
{
"epoch": 0.3434343434343434,
"grad_norm": 2.452498197555542,
"learning_rate": 0.00013829787234042554,
"loss": 1.451,
"step": 68
},
{
"epoch": 0.3484848484848485,
"grad_norm": 3.745300769805908,
"learning_rate": 0.0001372340425531915,
"loss": 1.4522,
"step": 69
},
{
"epoch": 0.35353535353535354,
"grad_norm": 2.425330638885498,
"learning_rate": 0.00013617021276595746,
"loss": 1.1973,
"step": 70
},
{
"epoch": 0.35858585858585856,
"grad_norm": 3.9445061683654785,
"learning_rate": 0.0001351063829787234,
"loss": 0.6733,
"step": 71
},
{
"epoch": 0.36363636363636365,
"grad_norm": 1.9044334888458252,
"learning_rate": 0.00013404255319148938,
"loss": 0.9999,
"step": 72
},
{
"epoch": 0.3686868686868687,
"grad_norm": 2.4740421772003174,
"learning_rate": 0.00013297872340425532,
"loss": 1.2382,
"step": 73
},
{
"epoch": 0.37373737373737376,
"grad_norm": 2.201266288757324,
"learning_rate": 0.00013191489361702127,
"loss": 0.9838,
"step": 74
},
{
"epoch": 0.3787878787878788,
"grad_norm": 1.951281189918518,
"learning_rate": 0.00013085106382978724,
"loss": 0.888,
"step": 75
},
{
"epoch": 0.3838383838383838,
"grad_norm": 1.6007189750671387,
"learning_rate": 0.00012978723404255318,
"loss": 0.7822,
"step": 76
},
{
"epoch": 0.3888888888888889,
"grad_norm": 2.144460916519165,
"learning_rate": 0.00012872340425531915,
"loss": 0.9619,
"step": 77
},
{
"epoch": 0.3939393939393939,
"grad_norm": 2.2907423973083496,
"learning_rate": 0.00012765957446808513,
"loss": 1.1213,
"step": 78
},
{
"epoch": 0.398989898989899,
"grad_norm": 2.3168041706085205,
"learning_rate": 0.00012659574468085107,
"loss": 1.0892,
"step": 79
},
{
"epoch": 0.40404040404040403,
"grad_norm": 2.5245144367218018,
"learning_rate": 0.00012553191489361702,
"loss": 1.1852,
"step": 80
},
{
"epoch": 0.40404040404040403,
"eval_loss": 1.081834077835083,
"eval_runtime": 35.7648,
"eval_samples_per_second": 2.796,
"eval_steps_per_second": 2.796,
"step": 80
},
{
"epoch": 0.4090909090909091,
"grad_norm": 3.342719316482544,
"learning_rate": 0.000124468085106383,
"loss": 1.5945,
"step": 81
},
{
"epoch": 0.41414141414141414,
"grad_norm": 1.9120285511016846,
"learning_rate": 0.00012340425531914893,
"loss": 0.8123,
"step": 82
},
{
"epoch": 0.41919191919191917,
"grad_norm": 2.2346291542053223,
"learning_rate": 0.0001223404255319149,
"loss": 0.7541,
"step": 83
},
{
"epoch": 0.42424242424242425,
"grad_norm": 2.395176649093628,
"learning_rate": 0.00012127659574468086,
"loss": 1.2209,
"step": 84
},
{
"epoch": 0.4292929292929293,
"grad_norm": 2.275162696838379,
"learning_rate": 0.00012021276595744682,
"loss": 1.06,
"step": 85
},
{
"epoch": 0.43434343434343436,
"grad_norm": 2.27367901802063,
"learning_rate": 0.00011914893617021277,
"loss": 1.0375,
"step": 86
},
{
"epoch": 0.4393939393939394,
"grad_norm": 2.6139156818389893,
"learning_rate": 0.00011808510638297874,
"loss": 1.1906,
"step": 87
},
{
"epoch": 0.4444444444444444,
"grad_norm": 2.0727851390838623,
"learning_rate": 0.00011702127659574468,
"loss": 0.9182,
"step": 88
},
{
"epoch": 0.4494949494949495,
"grad_norm": 2.6906633377075195,
"learning_rate": 0.00011595744680851064,
"loss": 1.0941,
"step": 89
},
{
"epoch": 0.45454545454545453,
"grad_norm": 2.9968316555023193,
"learning_rate": 0.00011489361702127661,
"loss": 0.9502,
"step": 90
},
{
"epoch": 0.4595959595959596,
"grad_norm": 1.800660490989685,
"learning_rate": 0.00011382978723404256,
"loss": 0.7118,
"step": 91
},
{
"epoch": 0.46464646464646464,
"grad_norm": 1.7916479110717773,
"learning_rate": 0.00011276595744680852,
"loss": 0.5925,
"step": 92
},
{
"epoch": 0.4696969696969697,
"grad_norm": 2.894232988357544,
"learning_rate": 0.00011170212765957446,
"loss": 1.1137,
"step": 93
},
{
"epoch": 0.47474747474747475,
"grad_norm": 2.5062479972839355,
"learning_rate": 0.00011063829787234043,
"loss": 0.9837,
"step": 94
},
{
"epoch": 0.4797979797979798,
"grad_norm": 2.5845303535461426,
"learning_rate": 0.00010957446808510638,
"loss": 1.1791,
"step": 95
},
{
"epoch": 0.48484848484848486,
"grad_norm": 2.2351789474487305,
"learning_rate": 0.00010851063829787234,
"loss": 0.7262,
"step": 96
},
{
"epoch": 0.4898989898989899,
"grad_norm": 2.842940330505371,
"learning_rate": 0.00010744680851063831,
"loss": 1.115,
"step": 97
},
{
"epoch": 0.494949494949495,
"grad_norm": 2.390490770339966,
"learning_rate": 0.00010638297872340425,
"loss": 0.7784,
"step": 98
},
{
"epoch": 0.5,
"grad_norm": 1.5551937818527222,
"learning_rate": 0.00010531914893617021,
"loss": 1.323,
"step": 99
},
{
"epoch": 0.5050505050505051,
"grad_norm": 1.6265314817428589,
"learning_rate": 0.00010425531914893618,
"loss": 0.9718,
"step": 100
},
{
"epoch": 0.51010101010101,
"grad_norm": 1.7489873170852661,
"learning_rate": 0.00010319148936170213,
"loss": 1.1333,
"step": 101
},
{
"epoch": 0.5151515151515151,
"grad_norm": 1.857285737991333,
"learning_rate": 0.00010212765957446809,
"loss": 1.2764,
"step": 102
},
{
"epoch": 0.5202020202020202,
"grad_norm": 1.7422451972961426,
"learning_rate": 0.00010106382978723406,
"loss": 1.3637,
"step": 103
},
{
"epoch": 0.5252525252525253,
"grad_norm": 1.1181117296218872,
"learning_rate": 0.0001,
"loss": 0.8533,
"step": 104
},
{
"epoch": 0.5303030303030303,
"grad_norm": 1.7180590629577637,
"learning_rate": 9.893617021276596e-05,
"loss": 1.231,
"step": 105
},
{
"epoch": 0.5353535353535354,
"grad_norm": 1.6332601308822632,
"learning_rate": 9.787234042553192e-05,
"loss": 1.1621,
"step": 106
},
{
"epoch": 0.5404040404040404,
"grad_norm": 2.0528311729431152,
"learning_rate": 9.680851063829788e-05,
"loss": 1.3896,
"step": 107
},
{
"epoch": 0.5454545454545454,
"grad_norm": 1.4702327251434326,
"learning_rate": 9.574468085106384e-05,
"loss": 0.9355,
"step": 108
},
{
"epoch": 0.5505050505050505,
"grad_norm": 1.9625461101531982,
"learning_rate": 9.468085106382978e-05,
"loss": 1.5606,
"step": 109
},
{
"epoch": 0.5555555555555556,
"grad_norm": 1.784745693206787,
"learning_rate": 9.361702127659576e-05,
"loss": 1.1102,
"step": 110
},
{
"epoch": 0.5606060606060606,
"grad_norm": 2.0343940258026123,
"learning_rate": 9.25531914893617e-05,
"loss": 1.2201,
"step": 111
},
{
"epoch": 0.5656565656565656,
"grad_norm": 1.4346604347229004,
"learning_rate": 9.148936170212766e-05,
"loss": 0.7823,
"step": 112
},
{
"epoch": 0.5707070707070707,
"grad_norm": 1.9893382787704468,
"learning_rate": 9.042553191489363e-05,
"loss": 1.0692,
"step": 113
},
{
"epoch": 0.5757575757575758,
"grad_norm": 2.1041550636291504,
"learning_rate": 8.936170212765958e-05,
"loss": 1.4054,
"step": 114
},
{
"epoch": 0.5808080808080808,
"grad_norm": 2.3176534175872803,
"learning_rate": 8.829787234042553e-05,
"loss": 1.4864,
"step": 115
},
{
"epoch": 0.5858585858585859,
"grad_norm": 1.7559845447540283,
"learning_rate": 8.723404255319149e-05,
"loss": 0.8039,
"step": 116
},
{
"epoch": 0.5909090909090909,
"grad_norm": 1.7945481538772583,
"learning_rate": 8.617021276595745e-05,
"loss": 0.9355,
"step": 117
},
{
"epoch": 0.5959595959595959,
"grad_norm": 1.5018421411514282,
"learning_rate": 8.510638297872341e-05,
"loss": 0.8087,
"step": 118
},
{
"epoch": 0.601010101010101,
"grad_norm": 1.6970769166946411,
"learning_rate": 8.404255319148937e-05,
"loss": 0.8562,
"step": 119
},
{
"epoch": 0.6060606060606061,
"grad_norm": 2.8440420627593994,
"learning_rate": 8.297872340425533e-05,
"loss": 1.2596,
"step": 120
},
{
"epoch": 0.6060606060606061,
"eval_loss": 1.025399088859558,
"eval_runtime": 35.717,
"eval_samples_per_second": 2.8,
"eval_steps_per_second": 2.8,
"step": 120
},
{
"epoch": 0.6111111111111112,
"grad_norm": 2.990412712097168,
"learning_rate": 8.191489361702128e-05,
"loss": 1.1597,
"step": 121
},
{
"epoch": 0.6161616161616161,
"grad_norm": 1.3564014434814453,
"learning_rate": 8.085106382978723e-05,
"loss": 0.7068,
"step": 122
},
{
"epoch": 0.6212121212121212,
"grad_norm": 2.5022084712982178,
"learning_rate": 7.978723404255319e-05,
"loss": 1.6883,
"step": 123
},
{
"epoch": 0.6262626262626263,
"grad_norm": 1.8736674785614014,
"learning_rate": 7.872340425531916e-05,
"loss": 0.6924,
"step": 124
},
{
"epoch": 0.6313131313131313,
"grad_norm": 2.2267813682556152,
"learning_rate": 7.76595744680851e-05,
"loss": 1.1193,
"step": 125
},
{
"epoch": 0.6363636363636364,
"grad_norm": 2.134420156478882,
"learning_rate": 7.659574468085106e-05,
"loss": 0.9352,
"step": 126
},
{
"epoch": 0.6414141414141414,
"grad_norm": 1.9499320983886719,
"learning_rate": 7.553191489361703e-05,
"loss": 0.8882,
"step": 127
},
{
"epoch": 0.6464646464646465,
"grad_norm": 2.1963298320770264,
"learning_rate": 7.446808510638298e-05,
"loss": 0.8756,
"step": 128
},
{
"epoch": 0.6515151515151515,
"grad_norm": 2.303020715713501,
"learning_rate": 7.340425531914894e-05,
"loss": 0.903,
"step": 129
},
{
"epoch": 0.6565656565656566,
"grad_norm": 2.4283013343811035,
"learning_rate": 7.23404255319149e-05,
"loss": 1.2209,
"step": 130
},
{
"epoch": 0.6616161616161617,
"grad_norm": 1.8416250944137573,
"learning_rate": 7.127659574468085e-05,
"loss": 0.8485,
"step": 131
},
{
"epoch": 0.6666666666666666,
"grad_norm": 1.958123803138733,
"learning_rate": 7.021276595744681e-05,
"loss": 0.955,
"step": 132
},
{
"epoch": 0.6717171717171717,
"grad_norm": 2.2823963165283203,
"learning_rate": 6.914893617021277e-05,
"loss": 0.9205,
"step": 133
},
{
"epoch": 0.6767676767676768,
"grad_norm": 2.2370712757110596,
"learning_rate": 6.808510638297873e-05,
"loss": 0.7799,
"step": 134
},
{
"epoch": 0.6818181818181818,
"grad_norm": 2.6467502117156982,
"learning_rate": 6.702127659574469e-05,
"loss": 1.0801,
"step": 135
},
{
"epoch": 0.6868686868686869,
"grad_norm": 2.9221208095550537,
"learning_rate": 6.595744680851063e-05,
"loss": 1.2855,
"step": 136
},
{
"epoch": 0.6919191919191919,
"grad_norm": 2.024229049682617,
"learning_rate": 6.489361702127659e-05,
"loss": 0.7463,
"step": 137
},
{
"epoch": 0.696969696969697,
"grad_norm": 2.125941276550293,
"learning_rate": 6.382978723404256e-05,
"loss": 0.8951,
"step": 138
},
{
"epoch": 0.702020202020202,
"grad_norm": 2.9005520343780518,
"learning_rate": 6.276595744680851e-05,
"loss": 0.7212,
"step": 139
},
{
"epoch": 0.7070707070707071,
"grad_norm": 2.088212251663208,
"learning_rate": 6.170212765957447e-05,
"loss": 0.6987,
"step": 140
},
{
"epoch": 0.7121212121212122,
"grad_norm": 2.779832124710083,
"learning_rate": 6.063829787234043e-05,
"loss": 1.2843,
"step": 141
},
{
"epoch": 0.7171717171717171,
"grad_norm": 1.8998260498046875,
"learning_rate": 5.9574468085106384e-05,
"loss": 0.7472,
"step": 142
},
{
"epoch": 0.7222222222222222,
"grad_norm": 2.266793727874756,
"learning_rate": 5.851063829787234e-05,
"loss": 0.7037,
"step": 143
},
{
"epoch": 0.7272727272727273,
"grad_norm": 2.3124566078186035,
"learning_rate": 5.744680851063831e-05,
"loss": 1.0372,
"step": 144
},
{
"epoch": 0.7323232323232324,
"grad_norm": 3.8273227214813232,
"learning_rate": 5.638297872340426e-05,
"loss": 1.1066,
"step": 145
},
{
"epoch": 0.7373737373737373,
"grad_norm": 1.7611407041549683,
"learning_rate": 5.531914893617022e-05,
"loss": 0.5528,
"step": 146
},
{
"epoch": 0.7424242424242424,
"grad_norm": 1.3345385789871216,
"learning_rate": 5.425531914893617e-05,
"loss": 0.533,
"step": 147
},
{
"epoch": 0.7474747474747475,
"grad_norm": 1.5304288864135742,
"learning_rate": 5.319148936170213e-05,
"loss": 1.1614,
"step": 148
},
{
"epoch": 0.7525252525252525,
"grad_norm": 2.2307026386260986,
"learning_rate": 5.212765957446809e-05,
"loss": 1.7027,
"step": 149
},
{
"epoch": 0.7575757575757576,
"grad_norm": 2.0783305168151855,
"learning_rate": 5.1063829787234044e-05,
"loss": 1.5548,
"step": 150
},
{
"epoch": 0.7626262626262627,
"grad_norm": 1.9879854917526245,
"learning_rate": 5e-05,
"loss": 1.3791,
"step": 151
},
{
"epoch": 0.7676767676767676,
"grad_norm": 1.6240109205245972,
"learning_rate": 4.893617021276596e-05,
"loss": 1.0744,
"step": 152
},
{
"epoch": 0.7727272727272727,
"grad_norm": 2.711182117462158,
"learning_rate": 4.787234042553192e-05,
"loss": 1.5953,
"step": 153
},
{
"epoch": 0.7777777777777778,
"grad_norm": 1.7542715072631836,
"learning_rate": 4.680851063829788e-05,
"loss": 0.8801,
"step": 154
},
{
"epoch": 0.7828282828282829,
"grad_norm": 2.7338082790374756,
"learning_rate": 4.574468085106383e-05,
"loss": 2.0285,
"step": 155
},
{
"epoch": 0.7878787878787878,
"grad_norm": 1.4537471532821655,
"learning_rate": 4.468085106382979e-05,
"loss": 0.7417,
"step": 156
},
{
"epoch": 0.7929292929292929,
"grad_norm": 1.9007015228271484,
"learning_rate": 4.3617021276595746e-05,
"loss": 1.1665,
"step": 157
},
{
"epoch": 0.797979797979798,
"grad_norm": 1.6936357021331787,
"learning_rate": 4.2553191489361704e-05,
"loss": 0.9482,
"step": 158
},
{
"epoch": 0.803030303030303,
"grad_norm": 1.923112392425537,
"learning_rate": 4.148936170212766e-05,
"loss": 1.1103,
"step": 159
},
{
"epoch": 0.8080808080808081,
"grad_norm": 1.7348624467849731,
"learning_rate": 4.0425531914893614e-05,
"loss": 0.785,
"step": 160
},
{
"epoch": 0.8080808080808081,
"eval_loss": 1.0044422149658203,
"eval_runtime": 35.7191,
"eval_samples_per_second": 2.8,
"eval_steps_per_second": 2.8,
"step": 160
},
{
"epoch": 0.8131313131313131,
"grad_norm": 2.3853325843811035,
"learning_rate": 3.936170212765958e-05,
"loss": 1.2953,
"step": 161
},
{
"epoch": 0.8181818181818182,
"grad_norm": 1.5539531707763672,
"learning_rate": 3.829787234042553e-05,
"loss": 0.7532,
"step": 162
},
{
"epoch": 0.8232323232323232,
"grad_norm": 2.0018396377563477,
"learning_rate": 3.723404255319149e-05,
"loss": 1.0277,
"step": 163
},
{
"epoch": 0.8282828282828283,
"grad_norm": 1.8975419998168945,
"learning_rate": 3.617021276595745e-05,
"loss": 0.84,
"step": 164
},
{
"epoch": 0.8333333333333334,
"grad_norm": 2.150519847869873,
"learning_rate": 3.5106382978723407e-05,
"loss": 0.9874,
"step": 165
},
{
"epoch": 0.8383838383838383,
"grad_norm": 2.172685384750366,
"learning_rate": 3.4042553191489365e-05,
"loss": 1.1271,
"step": 166
},
{
"epoch": 0.8434343434343434,
"grad_norm": 2.1147804260253906,
"learning_rate": 3.2978723404255317e-05,
"loss": 0.8757,
"step": 167
},
{
"epoch": 0.8484848484848485,
"grad_norm": 2.024099588394165,
"learning_rate": 3.191489361702128e-05,
"loss": 1.2143,
"step": 168
},
{
"epoch": 0.8535353535353535,
"grad_norm": 2.0407485961914062,
"learning_rate": 3.085106382978723e-05,
"loss": 1.0318,
"step": 169
},
{
"epoch": 0.8585858585858586,
"grad_norm": 2.056114435195923,
"learning_rate": 2.9787234042553192e-05,
"loss": 1.0286,
"step": 170
},
{
"epoch": 0.8636363636363636,
"grad_norm": 2.441153049468994,
"learning_rate": 2.8723404255319154e-05,
"loss": 1.3128,
"step": 171
},
{
"epoch": 0.8686868686868687,
"grad_norm": 2.1208486557006836,
"learning_rate": 2.765957446808511e-05,
"loss": 0.7569,
"step": 172
},
{
"epoch": 0.8737373737373737,
"grad_norm": 2.4818294048309326,
"learning_rate": 2.6595744680851064e-05,
"loss": 1.5236,
"step": 173
},
{
"epoch": 0.8787878787878788,
"grad_norm": 1.9016083478927612,
"learning_rate": 2.5531914893617022e-05,
"loss": 0.7904,
"step": 174
},
{
"epoch": 0.8838383838383839,
"grad_norm": 2.479365348815918,
"learning_rate": 2.446808510638298e-05,
"loss": 1.3812,
"step": 175
},
{
"epoch": 0.8888888888888888,
"grad_norm": 2.3161561489105225,
"learning_rate": 2.340425531914894e-05,
"loss": 1.1612,
"step": 176
},
{
"epoch": 0.8939393939393939,
"grad_norm": 1.7003121376037598,
"learning_rate": 2.2340425531914894e-05,
"loss": 0.7709,
"step": 177
},
{
"epoch": 0.898989898989899,
"grad_norm": 2.0750014781951904,
"learning_rate": 2.1276595744680852e-05,
"loss": 1.0438,
"step": 178
},
{
"epoch": 0.9040404040404041,
"grad_norm": 1.7369530200958252,
"learning_rate": 2.0212765957446807e-05,
"loss": 0.711,
"step": 179
},
{
"epoch": 0.9090909090909091,
"grad_norm": 1.7840030193328857,
"learning_rate": 1.9148936170212766e-05,
"loss": 0.7699,
"step": 180
},
{
"epoch": 0.9141414141414141,
"grad_norm": 1.9962245225906372,
"learning_rate": 1.8085106382978724e-05,
"loss": 1.0592,
"step": 181
},
{
"epoch": 0.9191919191919192,
"grad_norm": 1.862483024597168,
"learning_rate": 1.7021276595744682e-05,
"loss": 0.855,
"step": 182
},
{
"epoch": 0.9242424242424242,
"grad_norm": 1.5625897645950317,
"learning_rate": 1.595744680851064e-05,
"loss": 0.6401,
"step": 183
},
{
"epoch": 0.9292929292929293,
"grad_norm": 2.1147549152374268,
"learning_rate": 1.4893617021276596e-05,
"loss": 0.9002,
"step": 184
},
{
"epoch": 0.9343434343434344,
"grad_norm": 1.6960692405700684,
"learning_rate": 1.3829787234042554e-05,
"loss": 0.7592,
"step": 185
},
{
"epoch": 0.9393939393939394,
"grad_norm": 2.3449957370758057,
"learning_rate": 1.2765957446808511e-05,
"loss": 1.1498,
"step": 186
},
{
"epoch": 0.9444444444444444,
"grad_norm": 1.868044376373291,
"learning_rate": 1.170212765957447e-05,
"loss": 0.8678,
"step": 187
},
{
"epoch": 0.9494949494949495,
"grad_norm": 2.05068039894104,
"learning_rate": 1.0638297872340426e-05,
"loss": 0.7825,
"step": 188
},
{
"epoch": 0.9545454545454546,
"grad_norm": 1.92442786693573,
"learning_rate": 9.574468085106383e-06,
"loss": 0.7249,
"step": 189
},
{
"epoch": 0.9595959595959596,
"grad_norm": 2.1448285579681396,
"learning_rate": 8.510638297872341e-06,
"loss": 0.9544,
"step": 190
},
{
"epoch": 0.9646464646464646,
"grad_norm": 1.7718862295150757,
"learning_rate": 7.446808510638298e-06,
"loss": 0.5564,
"step": 191
},
{
"epoch": 0.9696969696969697,
"grad_norm": 2.5203495025634766,
"learning_rate": 6.3829787234042555e-06,
"loss": 0.9661,
"step": 192
},
{
"epoch": 0.9747474747474747,
"grad_norm": 2.2129266262054443,
"learning_rate": 5.319148936170213e-06,
"loss": 0.9277,
"step": 193
},
{
"epoch": 0.9797979797979798,
"grad_norm": 2.0742154121398926,
"learning_rate": 4.255319148936171e-06,
"loss": 0.8203,
"step": 194
},
{
"epoch": 0.9848484848484849,
"grad_norm": 1.7061251401901245,
"learning_rate": 3.1914893617021277e-06,
"loss": 0.4595,
"step": 195
},
{
"epoch": 0.98989898989899,
"grad_norm": 1.7450275421142578,
"learning_rate": 2.1276595744680853e-06,
"loss": 0.5028,
"step": 196
},
{
"epoch": 0.9949494949494949,
"grad_norm": 4.231220245361328,
"learning_rate": 1.0638297872340427e-06,
"loss": 1.0582,
"step": 197
},
{
"epoch": 1.0,
"grad_norm": 2.37825870513916,
"learning_rate": 0.0,
"loss": 0.8417,
"step": 198
}
],
"logging_steps": 1,
"max_steps": 198,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2341997358425088.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}