flame_waterfall_7b / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9961696306429548,
"eval_steps": 500,
"global_step": 228,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 2.200422763824463,
"learning_rate": 3.5714285714285714e-06,
"loss": 0.2391,
"step": 1
},
{
"epoch": 0.02,
"grad_norm": 2.270944118499756,
"learning_rate": 7.142857142857143e-06,
"loss": 0.2417,
"step": 2
},
{
"epoch": 0.03,
"grad_norm": 1.8967660665512085,
"learning_rate": 1.0714285714285714e-05,
"loss": 0.2323,
"step": 3
},
{
"epoch": 0.04,
"grad_norm": 0.6448235511779785,
"learning_rate": 1.4285714285714285e-05,
"loss": 0.2077,
"step": 4
},
{
"epoch": 0.04,
"grad_norm": 0.9924548864364624,
"learning_rate": 1.785714285714286e-05,
"loss": 0.2085,
"step": 5
},
{
"epoch": 0.05,
"grad_norm": 0.5292760729789734,
"learning_rate": 2.1428571428571428e-05,
"loss": 0.199,
"step": 6
},
{
"epoch": 0.06,
"grad_norm": 0.28642991185188293,
"learning_rate": 2.5e-05,
"loss": 0.1742,
"step": 7
},
{
"epoch": 0.07,
"grad_norm": 0.4373553693294525,
"learning_rate": 2.4998737044926244e-05,
"loss": 0.1898,
"step": 8
},
{
"epoch": 0.08,
"grad_norm": 0.3418498933315277,
"learning_rate": 2.4994948434913852e-05,
"loss": 0.1841,
"step": 9
},
{
"epoch": 0.09,
"grad_norm": 0.1848132610321045,
"learning_rate": 2.498863493553791e-05,
"loss": 0.1706,
"step": 10
},
{
"epoch": 0.1,
"grad_norm": 0.19469691812992096,
"learning_rate": 2.4979797822584984e-05,
"loss": 0.1722,
"step": 11
},
{
"epoch": 0.11,
"grad_norm": 0.1826794296503067,
"learning_rate": 2.4968438881795332e-05,
"loss": 0.1707,
"step": 12
},
{
"epoch": 0.11,
"grad_norm": 0.14234064519405365,
"learning_rate": 2.4954560408502066e-05,
"loss": 0.1552,
"step": 13
},
{
"epoch": 0.12,
"grad_norm": 0.1327579766511917,
"learning_rate": 2.4938165207167307e-05,
"loss": 0.1639,
"step": 14
},
{
"epoch": 0.13,
"grad_norm": 0.12732426822185516,
"learning_rate": 2.4919256590815486e-05,
"loss": 0.1719,
"step": 15
},
{
"epoch": 0.14,
"grad_norm": 0.10535147786140442,
"learning_rate": 2.489783838036388e-05,
"loss": 0.1626,
"step": 16
},
{
"epoch": 0.15,
"grad_norm": 0.08864012360572815,
"learning_rate": 2.48739149038505e-05,
"loss": 0.1557,
"step": 17
},
{
"epoch": 0.16,
"grad_norm": 0.08972543478012085,
"learning_rate": 2.4847490995559502e-05,
"loss": 0.153,
"step": 18
},
{
"epoch": 0.17,
"grad_norm": 0.08487411588430405,
"learning_rate": 2.4818571995044346e-05,
"loss": 0.1583,
"step": 19
},
{
"epoch": 0.18,
"grad_norm": 0.07328126579523087,
"learning_rate": 2.4787163746048776e-05,
"loss": 0.1483,
"step": 20
},
{
"epoch": 0.18,
"grad_norm": 0.07735934853553772,
"learning_rate": 2.4753272595325976e-05,
"loss": 0.1448,
"step": 21
},
{
"epoch": 0.19,
"grad_norm": 0.08453325182199478,
"learning_rate": 2.471690539135607e-05,
"loss": 0.154,
"step": 22
},
{
"epoch": 0.2,
"grad_norm": 0.06923778355121613,
"learning_rate": 2.467806948296223e-05,
"loss": 0.1441,
"step": 23
},
{
"epoch": 0.21,
"grad_norm": 0.07757125049829483,
"learning_rate": 2.463677271782565e-05,
"loss": 0.1399,
"step": 24
},
{
"epoch": 0.22,
"grad_norm": 0.06422542780637741,
"learning_rate": 2.459302344089979e-05,
"loss": 0.14,
"step": 25
},
{
"epoch": 0.23,
"grad_norm": 0.102714404463768,
"learning_rate": 2.4546830492724048e-05,
"loss": 0.14,
"step": 26
},
{
"epoch": 0.24,
"grad_norm": 0.07906186580657959,
"learning_rate": 2.449820320763735e-05,
"loss": 0.1421,
"step": 27
},
{
"epoch": 0.25,
"grad_norm": 0.07414320111274719,
"learning_rate": 2.4447151411891924e-05,
"loss": 0.1371,
"step": 28
},
{
"epoch": 0.25,
"grad_norm": 0.059447940438985825,
"learning_rate": 2.4393685421667677e-05,
"loss": 0.1365,
"step": 29
},
{
"epoch": 0.26,
"grad_norm": 0.07829699665307999,
"learning_rate": 2.43378160409876e-05,
"loss": 0.1397,
"step": 30
},
{
"epoch": 0.27,
"grad_norm": 0.061849720776081085,
"learning_rate": 2.427955455953453e-05,
"loss": 0.1276,
"step": 31
},
{
"epoch": 0.28,
"grad_norm": 0.06188429519534111,
"learning_rate": 2.4218912750369848e-05,
"loss": 0.131,
"step": 32
},
{
"epoch": 0.29,
"grad_norm": 0.06475334614515305,
"learning_rate": 2.4155902867554447e-05,
"loss": 0.1408,
"step": 33
},
{
"epoch": 0.3,
"grad_norm": 0.060506466776132584,
"learning_rate": 2.409053764367252e-05,
"loss": 0.1413,
"step": 34
},
{
"epoch": 0.31,
"grad_norm": 0.05846070498228073,
"learning_rate": 2.4022830287258646e-05,
"loss": 0.1367,
"step": 35
},
{
"epoch": 0.32,
"grad_norm": 0.10868549346923828,
"learning_rate": 2.3952794480128716e-05,
"loss": 0.1348,
"step": 36
},
{
"epoch": 0.32,
"grad_norm": 0.058679353445768356,
"learning_rate": 2.3880444374615208e-05,
"loss": 0.1349,
"step": 37
},
{
"epoch": 0.33,
"grad_norm": 0.06427464634180069,
"learning_rate": 2.380579459070737e-05,
"loss": 0.1331,
"step": 38
},
{
"epoch": 0.34,
"grad_norm": 0.05250772088766098,
"learning_rate": 2.372886021309694e-05,
"loss": 0.1219,
"step": 39
},
{
"epoch": 0.35,
"grad_norm": 0.05977998301386833,
"learning_rate": 2.3649656788129932e-05,
"loss": 0.1249,
"step": 40
},
{
"epoch": 0.36,
"grad_norm": 0.06137443706393242,
"learning_rate": 2.3568200320665124e-05,
"loss": 0.1283,
"step": 41
},
{
"epoch": 0.37,
"grad_norm": 0.06237027794122696,
"learning_rate": 2.348450727083994e-05,
"loss": 0.1319,
"step": 42
},
{
"epoch": 0.38,
"grad_norm": 0.06364572048187256,
"learning_rate": 2.3398594550744292e-05,
"loss": 0.1255,
"step": 43
},
{
"epoch": 0.39,
"grad_norm": 0.06878168880939484,
"learning_rate": 2.3310479521003087e-05,
"loss": 0.1266,
"step": 44
},
{
"epoch": 0.39,
"grad_norm": 0.05501376464962959,
"learning_rate": 2.3220179987268153e-05,
"loss": 0.1258,
"step": 45
},
{
"epoch": 0.4,
"grad_norm": 0.0640014111995697,
"learning_rate": 2.312771419662018e-05,
"loss": 0.1255,
"step": 46
},
{
"epoch": 0.41,
"grad_norm": 0.07001198083162308,
"learning_rate": 2.303310083388147e-05,
"loss": 0.1238,
"step": 47
},
{
"epoch": 0.42,
"grad_norm": 0.09296014904975891,
"learning_rate": 2.293635901784028e-05,
"loss": 0.1263,
"step": 48
},
{
"epoch": 0.43,
"grad_norm": 0.06383784860372543,
"learning_rate": 2.2837508297387384e-05,
"loss": 0.1307,
"step": 49
},
{
"epoch": 0.44,
"grad_norm": 0.0691072940826416,
"learning_rate": 2.2736568647565813e-05,
"loss": 0.1299,
"step": 50
},
{
"epoch": 0.45,
"grad_norm": 0.09465464949607849,
"learning_rate": 2.2633560465534438e-05,
"loss": 0.1266,
"step": 51
},
{
"epoch": 0.46,
"grad_norm": 0.07103528082370758,
"learning_rate": 2.252850456644623e-05,
"loss": 0.1238,
"step": 52
},
{
"epoch": 0.46,
"grad_norm": 0.07062829285860062,
"learning_rate": 2.2421422179242118e-05,
"loss": 0.1224,
"step": 53
},
{
"epoch": 0.47,
"grad_norm": 0.08078863471746445,
"learning_rate": 2.2312334942361186e-05,
"loss": 0.128,
"step": 54
},
{
"epoch": 0.48,
"grad_norm": 0.06471696496009827,
"learning_rate": 2.220126489936811e-05,
"loss": 0.1285,
"step": 55
},
{
"epoch": 0.49,
"grad_norm": 0.07739695906639099,
"learning_rate": 2.2088234494498793e-05,
"loss": 0.1258,
"step": 56
},
{
"epoch": 0.5,
"grad_norm": 0.07663910835981369,
"learning_rate": 2.1973266568124963e-05,
"loss": 0.1279,
"step": 57
},
{
"epoch": 0.51,
"grad_norm": 0.07442884147167206,
"learning_rate": 2.1856384352138765e-05,
"loss": 0.1212,
"step": 58
},
{
"epoch": 0.52,
"grad_norm": 0.0762372687458992,
"learning_rate": 2.173761146525824e-05,
"loss": 0.1246,
"step": 59
},
{
"epoch": 0.53,
"grad_norm": 0.0701020210981369,
"learning_rate": 2.1616971908254602e-05,
"loss": 0.1208,
"step": 60
},
{
"epoch": 0.53,
"grad_norm": 0.07476264983415604,
"learning_rate": 2.1494490059102352e-05,
"loss": 0.1267,
"step": 61
},
{
"epoch": 0.54,
"grad_norm": 0.08157288283109665,
"learning_rate": 2.137019066805314e-05,
"loss": 0.1214,
"step": 62
},
{
"epoch": 0.55,
"grad_norm": 0.08489346504211426,
"learning_rate": 2.1244098852634424e-05,
"loss": 0.1166,
"step": 63
},
{
"epoch": 0.56,
"grad_norm": 0.07014652341604233,
"learning_rate": 2.1116240092573887e-05,
"loss": 0.1219,
"step": 64
},
{
"epoch": 0.57,
"grad_norm": 0.07439316809177399,
"learning_rate": 2.0986640224650684e-05,
"loss": 0.1226,
"step": 65
},
{
"epoch": 0.58,
"grad_norm": 0.07922874391078949,
"learning_rate": 2.0855325437474545e-05,
"loss": 0.1176,
"step": 66
},
{
"epoch": 0.59,
"grad_norm": 0.07696756720542908,
"learning_rate": 2.072232226619374e-05,
"loss": 0.116,
"step": 67
},
{
"epoch": 0.6,
"grad_norm": 0.07184255868196487,
"learning_rate": 2.058765758713307e-05,
"loss": 0.1205,
"step": 68
},
{
"epoch": 0.6,
"grad_norm": 0.09004761278629303,
"learning_rate": 2.0451358612362883e-05,
"loss": 0.1233,
"step": 69
},
{
"epoch": 0.61,
"grad_norm": 0.08411566913127899,
"learning_rate": 2.0313452884200258e-05,
"loss": 0.1179,
"step": 70
},
{
"epoch": 0.62,
"grad_norm": 0.07372688502073288,
"learning_rate": 2.0173968269643438e-05,
"loss": 0.1156,
"step": 71
},
{
"epoch": 0.63,
"grad_norm": 0.09492815285921097,
"learning_rate": 2.0032932954740708e-05,
"loss": 0.1173,
"step": 72
},
{
"epoch": 0.64,
"grad_norm": 0.07901427894830704,
"learning_rate": 1.98903754388947e-05,
"loss": 0.1152,
"step": 73
},
{
"epoch": 0.65,
"grad_norm": 0.07680545747280121,
"learning_rate": 1.9746324529103484e-05,
"loss": 0.1249,
"step": 74
},
{
"epoch": 0.66,
"grad_norm": 0.07813062518835068,
"learning_rate": 1.9600809334139446e-05,
"loss": 0.1173,
"step": 75
},
{
"epoch": 0.67,
"grad_norm": 0.078568235039711,
"learning_rate": 1.94538592586672e-05,
"loss": 0.1228,
"step": 76
},
{
"epoch": 0.67,
"grad_norm": 0.07648710161447525,
"learning_rate": 1.9305503997301683e-05,
"loss": 0.1295,
"step": 77
},
{
"epoch": 0.68,
"grad_norm": 0.07838192582130432,
"learning_rate": 1.9155773528607706e-05,
"loss": 0.1244,
"step": 78
},
{
"epoch": 0.69,
"grad_norm": 0.07754414528608322,
"learning_rate": 1.9004698109042098e-05,
"loss": 0.1164,
"step": 79
},
{
"epoch": 0.7,
"grad_norm": 0.086074598133564,
"learning_rate": 1.885230826683967e-05,
"loss": 0.1169,
"step": 80
},
{
"epoch": 0.71,
"grad_norm": 0.08760019391775131,
"learning_rate": 1.8698634795844334e-05,
"loss": 0.1142,
"step": 81
},
{
"epoch": 0.72,
"grad_norm": 0.08039297163486481,
"learning_rate": 1.8543708749286472e-05,
"loss": 0.1225,
"step": 82
},
{
"epoch": 0.73,
"grad_norm": 0.08440306782722473,
"learning_rate": 1.8387561433507935e-05,
"loss": 0.1153,
"step": 83
},
{
"epoch": 0.74,
"grad_norm": 0.07035987824201584,
"learning_rate": 1.8230224401635877e-05,
"loss": 0.1154,
"step": 84
},
{
"epoch": 0.74,
"grad_norm": 0.07153236120939255,
"learning_rate": 1.8071729447206733e-05,
"loss": 0.1106,
"step": 85
},
{
"epoch": 0.75,
"grad_norm": 0.0721370279788971,
"learning_rate": 1.791210859774159e-05,
"loss": 0.1174,
"step": 86
},
{
"epoch": 0.76,
"grad_norm": 0.06717287003993988,
"learning_rate": 1.775139410827433e-05,
"loss": 0.1212,
"step": 87
},
{
"epoch": 0.77,
"grad_norm": 0.07079152017831802,
"learning_rate": 1.7589618454833732e-05,
"loss": 0.121,
"step": 88
},
{
"epoch": 0.78,
"grad_norm": 0.07359743118286133,
"learning_rate": 1.742681432788097e-05,
"loss": 0.1153,
"step": 89
},
{
"epoch": 0.79,
"grad_norm": 0.07773274928331375,
"learning_rate": 1.7263014625703755e-05,
"loss": 0.1149,
"step": 90
},
{
"epoch": 0.8,
"grad_norm": 0.11045992374420166,
"learning_rate": 1.7098252447768474e-05,
"loss": 0.1183,
"step": 91
},
{
"epoch": 0.81,
"grad_norm": 0.10004275292158127,
"learning_rate": 1.6932561088031695e-05,
"loss": 0.1097,
"step": 92
},
{
"epoch": 0.81,
"grad_norm": 0.07924434542655945,
"learning_rate": 1.6765974028212384e-05,
"loss": 0.1131,
"step": 93
},
{
"epoch": 0.82,
"grad_norm": 0.08435513079166412,
"learning_rate": 1.6598524931026116e-05,
"loss": 0.1089,
"step": 94
},
{
"epoch": 0.83,
"grad_norm": 0.07069804519414902,
"learning_rate": 1.64302476333828e-05,
"loss": 0.1134,
"step": 95
},
{
"epoch": 0.84,
"grad_norm": 0.09089860320091248,
"learning_rate": 1.626117613954913e-05,
"loss": 0.1087,
"step": 96
},
{
"epoch": 0.85,
"grad_norm": 0.08091191947460175,
"learning_rate": 1.6091344614277258e-05,
"loss": 0.1177,
"step": 97
},
{
"epoch": 0.86,
"grad_norm": 0.35365355014801025,
"learning_rate": 1.5920787375901038e-05,
"loss": 0.1172,
"step": 98
},
{
"epoch": 0.87,
"grad_norm": 0.08567700535058975,
"learning_rate": 1.5749538889401193e-05,
"loss": 0.1148,
"step": 99
},
{
"epoch": 0.88,
"grad_norm": 0.08759979903697968,
"learning_rate": 1.5577633759440906e-05,
"loss": 0.1089,
"step": 100
},
{
"epoch": 0.88,
"grad_norm": 0.08318813890218735,
"learning_rate": 1.540510672337316e-05,
"loss": 0.1196,
"step": 101
},
{
"epoch": 0.89,
"grad_norm": 0.07813245803117752,
"learning_rate": 1.5231992644221237e-05,
"loss": 0.1104,
"step": 102
},
{
"epoch": 0.9,
"grad_norm": 0.08284571766853333,
"learning_rate": 1.5058326503633877e-05,
"loss": 0.117,
"step": 103
},
{
"epoch": 0.91,
"grad_norm": 0.09160487353801727,
"learning_rate": 1.4884143394816422e-05,
"loss": 0.1139,
"step": 104
},
{
"epoch": 0.92,
"grad_norm": 0.22021886706352234,
"learning_rate": 1.4709478515439437e-05,
"loss": 0.1155,
"step": 105
},
{
"epoch": 0.93,
"grad_norm": 0.08781706541776657,
"learning_rate": 1.4534367160526222e-05,
"loss": 0.119,
"step": 106
},
{
"epoch": 0.94,
"grad_norm": 0.08312955498695374,
"learning_rate": 1.4358844715320646e-05,
"loss": 0.11,
"step": 107
},
{
"epoch": 0.95,
"grad_norm": 0.08433741331100464,
"learning_rate": 1.4182946648136743e-05,
"loss": 0.112,
"step": 108
},
{
"epoch": 0.95,
"grad_norm": 0.14074306190013885,
"learning_rate": 1.4006708503191538e-05,
"loss": 0.1179,
"step": 109
},
{
"epoch": 0.96,
"grad_norm": 0.0972323939204216,
"learning_rate": 1.383016589342253e-05,
"loss": 0.1119,
"step": 110
},
{
"epoch": 0.97,
"grad_norm": 0.11136642098426819,
"learning_rate": 1.3653354493291276e-05,
"loss": 0.12,
"step": 111
},
{
"epoch": 0.98,
"grad_norm": 0.0775907039642334,
"learning_rate": 1.347631003157456e-05,
"loss": 0.1106,
"step": 112
},
{
"epoch": 0.99,
"grad_norm": 0.08658675849437714,
"learning_rate": 1.3299068284144572e-05,
"loss": 0.1141,
"step": 113
},
{
"epoch": 1.0,
"grad_norm": 0.08315402269363403,
"learning_rate": 1.3121665066739585e-05,
"loss": 0.1104,
"step": 114
},
{
"epoch": 1.01,
"grad_norm": 0.09935840964317322,
"learning_rate": 1.2944136227726567e-05,
"loss": 0.1122,
"step": 115
},
{
"epoch": 1.02,
"grad_norm": 0.08042165637016296,
"learning_rate": 1.2766517640857185e-05,
"loss": 0.1089,
"step": 116
},
{
"epoch": 1.02,
"grad_norm": 0.08209645003080368,
"learning_rate": 1.258884519801872e-05,
"loss": 0.1091,
"step": 117
},
{
"epoch": 1.03,
"grad_norm": 0.07619890570640564,
"learning_rate": 1.241115480198128e-05,
"loss": 0.1066,
"step": 118
},
{
"epoch": 1.04,
"grad_norm": 0.1068926528096199,
"learning_rate": 1.2233482359142818e-05,
"loss": 0.1044,
"step": 119
},
{
"epoch": 1.05,
"grad_norm": 0.0765824168920517,
"learning_rate": 1.205586377227344e-05,
"loss": 0.1088,
"step": 120
},
{
"epoch": 1.06,
"grad_norm": 0.08393848687410355,
"learning_rate": 1.1878334933260416e-05,
"loss": 0.1092,
"step": 121
},
{
"epoch": 1.07,
"grad_norm": 0.0855385884642601,
"learning_rate": 1.1700931715855431e-05,
"loss": 0.1127,
"step": 122
},
{
"epoch": 1.08,
"grad_norm": 0.08480715751647949,
"learning_rate": 1.1523689968425443e-05,
"loss": 0.1103,
"step": 123
},
{
"epoch": 1.09,
"grad_norm": 0.1015097051858902,
"learning_rate": 1.1346645506708726e-05,
"loss": 0.1079,
"step": 124
},
{
"epoch": 1.09,
"grad_norm": 0.07940834015607834,
"learning_rate": 1.1169834106577472e-05,
"loss": 0.1113,
"step": 125
},
{
"epoch": 1.1,
"grad_norm": 0.0852329432964325,
"learning_rate": 1.0993291496808463e-05,
"loss": 0.1141,
"step": 126
},
{
"epoch": 1.11,
"grad_norm": 0.08691573143005371,
"learning_rate": 1.081705335186326e-05,
"loss": 0.1055,
"step": 127
},
{
"epoch": 1.12,
"grad_norm": 0.1047862097620964,
"learning_rate": 1.0641155284679352e-05,
"loss": 0.111,
"step": 128
},
{
"epoch": 1.13,
"grad_norm": 0.11156802624464035,
"learning_rate": 1.0465632839473777e-05,
"loss": 0.1102,
"step": 129
},
{
"epoch": 1.14,
"grad_norm": 0.09143494814634323,
"learning_rate": 1.0290521484560568e-05,
"loss": 0.1204,
"step": 130
},
{
"epoch": 1.15,
"grad_norm": 0.09311190247535706,
"learning_rate": 1.0115856605183581e-05,
"loss": 0.1064,
"step": 131
},
{
"epoch": 1.16,
"grad_norm": 0.09226440638303757,
"learning_rate": 9.941673496366128e-06,
"loss": 0.1031,
"step": 132
},
{
"epoch": 1.16,
"grad_norm": 0.10318831354379654,
"learning_rate": 9.768007355778764e-06,
"loss": 0.1108,
"step": 133
},
{
"epoch": 1.17,
"grad_norm": 0.07738009840250015,
"learning_rate": 9.594893276626845e-06,
"loss": 0.1065,
"step": 134
},
{
"epoch": 1.18,
"grad_norm": 0.098593570291996,
"learning_rate": 9.422366240559092e-06,
"loss": 0.1032,
"step": 135
},
{
"epoch": 1.19,
"grad_norm": 0.08724181354045868,
"learning_rate": 9.25046111059881e-06,
"loss": 0.1064,
"step": 136
},
{
"epoch": 1.2,
"grad_norm": 0.07838578522205353,
"learning_rate": 9.079212624098965e-06,
"loss": 0.1089,
"step": 137
},
{
"epoch": 1.21,
"grad_norm": 0.09138258546590805,
"learning_rate": 8.908655385722745e-06,
"loss": 0.1009,
"step": 138
},
{
"epoch": 1.22,
"grad_norm": 0.10464809089899063,
"learning_rate": 8.738823860450874e-06,
"loss": 0.1052,
"step": 139
},
{
"epoch": 1.23,
"grad_norm": 0.08973057568073273,
"learning_rate": 8.569752366617206e-06,
"loss": 0.107,
"step": 140
},
{
"epoch": 1.23,
"grad_norm": 0.09044581651687622,
"learning_rate": 8.40147506897389e-06,
"loss": 0.1081,
"step": 141
},
{
"epoch": 1.24,
"grad_norm": 0.10169443488121033,
"learning_rate": 8.23402597178762e-06,
"loss": 0.1079,
"step": 142
},
{
"epoch": 1.25,
"grad_norm": 0.09864594787359238,
"learning_rate": 8.067438911968305e-06,
"loss": 0.1168,
"step": 143
},
{
"epoch": 1.26,
"grad_norm": 0.10119356215000153,
"learning_rate": 7.90174755223153e-06,
"loss": 0.1008,
"step": 144
},
{
"epoch": 1.27,
"grad_norm": 0.07849200069904327,
"learning_rate": 7.736985374296246e-06,
"loss": 0.1098,
"step": 145
},
{
"epoch": 1.28,
"grad_norm": 0.07834988087415695,
"learning_rate": 7.5731856721190305e-06,
"loss": 0.1094,
"step": 146
},
{
"epoch": 1.29,
"grad_norm": 0.07924801856279373,
"learning_rate": 7.41038154516627e-06,
"loss": 0.1086,
"step": 147
},
{
"epoch": 1.3,
"grad_norm": 0.08968242257833481,
"learning_rate": 7.248605891725677e-06,
"loss": 0.1099,
"step": 148
},
{
"epoch": 1.3,
"grad_norm": 0.07991733402013779,
"learning_rate": 7.087891402258412e-06,
"loss": 0.1038,
"step": 149
},
{
"epoch": 1.31,
"grad_norm": 0.07541777938604355,
"learning_rate": 6.928270552793273e-06,
"loss": 0.1108,
"step": 150
},
{
"epoch": 1.32,
"grad_norm": 0.09483880549669266,
"learning_rate": 6.769775598364126e-06,
"loss": 0.1026,
"step": 151
},
{
"epoch": 1.33,
"grad_norm": 0.07294677942991257,
"learning_rate": 6.612438566492066e-06,
"loss": 0.1031,
"step": 152
},
{
"epoch": 1.34,
"grad_norm": 0.0786866620182991,
"learning_rate": 6.456291250713531e-06,
"loss": 0.1095,
"step": 153
},
{
"epoch": 1.35,
"grad_norm": 0.09206941723823547,
"learning_rate": 6.301365204155671e-06,
"loss": 0.1069,
"step": 154
},
{
"epoch": 1.36,
"grad_norm": 0.08430401235818863,
"learning_rate": 6.147691733160338e-06,
"loss": 0.1039,
"step": 155
},
{
"epoch": 1.37,
"grad_norm": 0.08027198910713196,
"learning_rate": 5.995301890957908e-06,
"loss": 0.1096,
"step": 156
},
{
"epoch": 1.37,
"grad_norm": 0.07375296205282211,
"learning_rate": 5.8442264713922956e-06,
"loss": 0.106,
"step": 157
},
{
"epoch": 1.38,
"grad_norm": 0.07248899340629578,
"learning_rate": 5.694496002698316e-06,
"loss": 0.0992,
"step": 158
},
{
"epoch": 1.39,
"grad_norm": 0.08779315650463104,
"learning_rate": 5.5461407413328005e-06,
"loss": 0.112,
"step": 159
},
{
"epoch": 1.4,
"grad_norm": 0.08409138768911362,
"learning_rate": 5.399190665860554e-06,
"loss": 0.1075,
"step": 160
},
{
"epoch": 1.41,
"grad_norm": 0.08023138344287872,
"learning_rate": 5.253675470896521e-06,
"loss": 0.1057,
"step": 161
},
{
"epoch": 1.42,
"grad_norm": 0.08220675587654114,
"learning_rate": 5.10962456110531e-06,
"loss": 0.1052,
"step": 162
},
{
"epoch": 1.43,
"grad_norm": 0.12142012268304825,
"learning_rate": 4.967067045259296e-06,
"loss": 0.1089,
"step": 163
},
{
"epoch": 1.44,
"grad_norm": 0.06706613302230835,
"learning_rate": 4.826031730356559e-06,
"loss": 0.1022,
"step": 164
},
{
"epoch": 1.44,
"grad_norm": 0.086565762758255,
"learning_rate": 4.6865471157997455e-06,
"loss": 0.1074,
"step": 165
},
{
"epoch": 1.45,
"grad_norm": 0.10480491816997528,
"learning_rate": 4.548641387637119e-06,
"loss": 0.1077,
"step": 166
},
{
"epoch": 1.46,
"grad_norm": 0.07782545685768127,
"learning_rate": 4.4123424128669325e-06,
"loss": 0.1081,
"step": 167
},
{
"epoch": 1.47,
"grad_norm": 0.06547481566667557,
"learning_rate": 4.277677733806264e-06,
"loss": 0.1041,
"step": 168
},
{
"epoch": 1.48,
"grad_norm": 0.07722113281488419,
"learning_rate": 4.144674562525456e-06,
"loss": 0.1074,
"step": 169
},
{
"epoch": 1.49,
"grad_norm": 0.0793066918849945,
"learning_rate": 4.013359775349314e-06,
"loss": 0.1015,
"step": 170
},
{
"epoch": 1.5,
"grad_norm": 0.07437407225370407,
"learning_rate": 3.883759907426117e-06,
"loss": 0.1125,
"step": 171
},
{
"epoch": 1.51,
"grad_norm": 0.08786901831626892,
"learning_rate": 3.75590114736558e-06,
"loss": 0.1096,
"step": 172
},
{
"epoch": 1.51,
"grad_norm": 0.07370700687170029,
"learning_rate": 3.629809331946862e-06,
"loss": 0.1063,
"step": 173
},
{
"epoch": 1.52,
"grad_norm": 0.07233896851539612,
"learning_rate": 3.5055099408976485e-06,
"loss": 0.1038,
"step": 174
},
{
"epoch": 1.53,
"grad_norm": 0.10337869077920914,
"learning_rate": 3.3830280917453996e-06,
"loss": 0.0977,
"step": 175
},
{
"epoch": 1.54,
"grad_norm": 0.07350175082683563,
"learning_rate": 3.262388534741763e-06,
"loss": 0.1113,
"step": 176
},
{
"epoch": 1.55,
"grad_norm": 0.07469906657934189,
"learning_rate": 3.143615647861235e-06,
"loss": 0.1051,
"step": 177
},
{
"epoch": 1.56,
"grad_norm": 0.08246980607509613,
"learning_rate": 3.0267334318750422e-06,
"loss": 0.1019,
"step": 178
},
{
"epoch": 1.57,
"grad_norm": 0.08083990961313248,
"learning_rate": 2.9117655055012113e-06,
"loss": 0.1126,
"step": 179
},
{
"epoch": 1.58,
"grad_norm": 0.09245346486568451,
"learning_rate": 2.798735100631891e-06,
"loss": 0.1034,
"step": 180
},
{
"epoch": 1.58,
"grad_norm": 0.09014884382486343,
"learning_rate": 2.6876650576388177e-06,
"loss": 0.1066,
"step": 181
},
{
"epoch": 1.59,
"grad_norm": 0.07952243089675903,
"learning_rate": 2.5785778207578825e-06,
"loss": 0.1104,
"step": 182
},
{
"epoch": 1.6,
"grad_norm": 0.07580631226301193,
"learning_rate": 2.4714954335537734e-06,
"loss": 0.1132,
"step": 183
},
{
"epoch": 1.61,
"grad_norm": 0.07747604697942734,
"learning_rate": 2.3664395344655657e-06,
"loss": 0.1063,
"step": 184
},
{
"epoch": 1.62,
"grad_norm": 0.09613858908414841,
"learning_rate": 2.263431352434188e-06,
"loss": 0.1064,
"step": 185
},
{
"epoch": 1.63,
"grad_norm": 0.07491160929203033,
"learning_rate": 2.162491702612618e-06,
"loss": 0.1114,
"step": 186
},
{
"epoch": 1.64,
"grad_norm": 0.08003370463848114,
"learning_rate": 2.063640982159723e-06,
"loss": 0.1074,
"step": 187
},
{
"epoch": 1.65,
"grad_norm": 0.06978148221969604,
"learning_rate": 1.96689916611853e-06,
"loss": 0.1044,
"step": 188
},
{
"epoch": 1.65,
"grad_norm": 0.07151058316230774,
"learning_rate": 1.8722858033798252e-06,
"loss": 0.1053,
"step": 189
},
{
"epoch": 1.66,
"grad_norm": 0.07286762446165085,
"learning_rate": 1.7798200127318518e-06,
"loss": 0.106,
"step": 190
},
{
"epoch": 1.67,
"grad_norm": 0.0802750289440155,
"learning_rate": 1.6895204789969172e-06,
"loss": 0.1035,
"step": 191
},
{
"epoch": 1.68,
"grad_norm": 0.07332754135131836,
"learning_rate": 1.601405449255712e-06,
"loss": 0.1013,
"step": 192
},
{
"epoch": 1.69,
"grad_norm": 0.07986527681350708,
"learning_rate": 1.515492729160059e-06,
"loss": 0.1055,
"step": 193
},
{
"epoch": 1.7,
"grad_norm": 0.10134772211313248,
"learning_rate": 1.4317996793348776e-06,
"loss": 0.1108,
"step": 194
},
{
"epoch": 1.71,
"grad_norm": 0.0696934163570404,
"learning_rate": 1.3503432118700711e-06,
"loss": 0.0981,
"step": 195
},
{
"epoch": 1.72,
"grad_norm": 0.06954754889011383,
"learning_rate": 1.2711397869030611e-06,
"loss": 0.1036,
"step": 196
},
{
"epoch": 1.72,
"grad_norm": 0.07122325897216797,
"learning_rate": 1.1942054092926314e-06,
"loss": 0.1079,
"step": 197
},
{
"epoch": 1.73,
"grad_norm": 0.07534755021333694,
"learning_rate": 1.1195556253847938e-06,
"loss": 0.1072,
"step": 198
},
{
"epoch": 1.74,
"grad_norm": 0.08263551443815231,
"learning_rate": 1.0472055198712847e-06,
"loss": 0.1054,
"step": 199
},
{
"epoch": 1.75,
"grad_norm": 0.07160968333482742,
"learning_rate": 9.771697127413562e-07,
"loss": 0.1072,
"step": 200
},
{
"epoch": 1.76,
"grad_norm": 0.09845629334449768,
"learning_rate": 9.094623563274826e-07,
"loss": 0.1167,
"step": 201
},
{
"epoch": 1.77,
"grad_norm": 0.0846390426158905,
"learning_rate": 8.440971324455538e-07,
"loss": 0.1083,
"step": 202
},
{
"epoch": 1.78,
"grad_norm": 0.06978083401918411,
"learning_rate": 7.810872496301524e-07,
"loss": 0.1038,
"step": 203
},
{
"epoch": 1.79,
"grad_norm": 0.07085734605789185,
"learning_rate": 7.204454404654712e-07,
"loss": 0.1058,
"step": 204
},
{
"epoch": 1.79,
"grad_norm": 0.14568722248077393,
"learning_rate": 6.621839590124038e-07,
"loss": 0.1065,
"step": 205
},
{
"epoch": 1.8,
"grad_norm": 0.08834976702928543,
"learning_rate": 6.063145783323232e-07,
"loss": 0.111,
"step": 206
},
{
"epoch": 1.81,
"grad_norm": 0.07381124049425125,
"learning_rate": 5.528485881080792e-07,
"loss": 0.1003,
"step": 207
},
{
"epoch": 1.82,
"grad_norm": 0.0735548734664917,
"learning_rate": 5.017967923626507e-07,
"loss": 0.104,
"step": 208
},
{
"epoch": 1.83,
"grad_norm": 0.08148917555809021,
"learning_rate": 4.5316950727595093e-07,
"loss": 0.1008,
"step": 209
},
{
"epoch": 1.84,
"grad_norm": 0.07719023525714874,
"learning_rate": 4.0697655910021116e-07,
"loss": 0.1036,
"step": 210
},
{
"epoch": 1.85,
"grad_norm": 0.07759005576372147,
"learning_rate": 3.6322728217434986e-07,
"loss": 0.1045,
"step": 211
},
{
"epoch": 1.86,
"grad_norm": 0.06520310044288635,
"learning_rate": 3.2193051703777316e-07,
"loss": 0.1024,
"step": 212
},
{
"epoch": 1.86,
"grad_norm": 0.10676794499158859,
"learning_rate": 2.8309460864393013e-07,
"loss": 0.1035,
"step": 213
},
{
"epoch": 1.87,
"grad_norm": 0.09270460903644562,
"learning_rate": 2.467274046740256e-07,
"loss": 0.1053,
"step": 214
},
{
"epoch": 1.88,
"grad_norm": 0.07552586495876312,
"learning_rate": 2.128362539512277e-07,
"loss": 0.1009,
"step": 215
},
{
"epoch": 1.89,
"grad_norm": 0.09857797622680664,
"learning_rate": 1.814280049556555e-07,
"loss": 0.1067,
"step": 216
},
{
"epoch": 1.9,
"grad_norm": 0.07935313880443573,
"learning_rate": 1.5250900444049866e-07,
"loss": 0.1059,
"step": 217
},
{
"epoch": 1.91,
"grad_norm": 0.07399848848581314,
"learning_rate": 1.2608509614950305e-07,
"loss": 0.105,
"step": 218
},
{
"epoch": 1.92,
"grad_norm": 0.07307755947113037,
"learning_rate": 1.0216161963611942e-07,
"loss": 0.1027,
"step": 219
},
{
"epoch": 1.93,
"grad_norm": 0.08035743981599808,
"learning_rate": 8.074340918451245e-08,
"loss": 0.1113,
"step": 220
},
{
"epoch": 1.93,
"grad_norm": 0.08408898860216141,
"learning_rate": 6.183479283269344e-08,
"loss": 0.1037,
"step": 221
},
{
"epoch": 1.94,
"grad_norm": 0.07623443007469177,
"learning_rate": 4.5439591497933864e-08,
"loss": 0.106,
"step": 222
},
{
"epoch": 1.95,
"grad_norm": 0.07881855964660645,
"learning_rate": 3.156111820466906e-08,
"loss": 0.105,
"step": 223
},
{
"epoch": 1.96,
"grad_norm": 0.08038265258073807,
"learning_rate": 2.0202177415021284e-08,
"loss": 0.0999,
"step": 224
},
{
"epoch": 1.97,
"grad_norm": 0.09288769960403442,
"learning_rate": 1.1365064462093289e-08,
"loss": 0.1055,
"step": 225
},
{
"epoch": 1.98,
"grad_norm": 0.1022091954946518,
"learning_rate": 5.05156508614768e-09,
"loss": 0.1107,
"step": 226
},
{
"epoch": 1.99,
"grad_norm": 0.07995381206274033,
"learning_rate": 1.2629550737580653e-09,
"loss": 0.1045,
"step": 227
},
{
"epoch": 2.0,
"grad_norm": 0.06946219503879547,
"learning_rate": 0.0,
"loss": 0.1051,
"step": 228
},
{
"epoch": 2.0,
"step": 228,
"total_flos": 3.0718940538740933e+19,
"train_loss": 0.12017694063353956,
"train_runtime": 82043.4797,
"train_samples_per_second": 4.278,
"train_steps_per_second": 0.003
}
],
"logging_steps": 1.0,
"max_steps": 228,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 1000,
"total_flos": 3.0718940538740933e+19,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
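
The `log_history` above records one entry per optimizer step: the learning rate warms up to 2.5e-5 over the first 7 steps and then decays, apparently on a cosine schedule, to 0 at step 228, while the training loss falls from about 0.24 to about 0.10. A minimal sketch of how one might summarize this file, assuming it is saved locally as `trainer_state.json` and that matplotlib is installed (both are assumptions, not part of the original log):

```python
# Minimal sketch: load trainer_state.json and plot the logged training
# loss and learning-rate schedule. Assumes the file sits in the current
# directory and that matplotlib is available; adjust the path as needed.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only per-step log entries; the final summary record reports
# train_loss / train_runtime instead of per-step "loss" and "learning_rate".
logs = [e for e in state["log_history"] if "loss" in e and "learning_rate" in e]
steps = [e["step"] for e in logs]
losses = [e["loss"] for e in logs]
lrs = [e["learning_rate"] for e in logs]

fig, (ax_loss, ax_lr) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(steps, losses)
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("training loss")
ax_lr.plot(steps, lrs)
ax_lr.set_xlabel("step")
ax_lr.set_ylabel("learning rate")
fig.tight_layout()
plt.show()
```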