{
"best_metric": 0.7140286564826965,
"best_model_checkpoint": "data/Llama-31-8B_task-2_180-samples_config-3/checkpoint-323",
"epoch": 26.0,
"eval_steps": 500,
"global_step": 442,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.058823529411764705,
"grad_norm": 0.7734732031822205,
"learning_rate": 3.9215686274509804e-08,
"loss": 1.1412,
"step": 1
},
{
"epoch": 0.11764705882352941,
"grad_norm": 0.6782335638999939,
"learning_rate": 7.843137254901961e-08,
"loss": 1.1733,
"step": 2
},
{
"epoch": 0.23529411764705882,
"grad_norm": 0.8408952951431274,
"learning_rate": 1.5686274509803921e-07,
"loss": 1.1666,
"step": 4
},
{
"epoch": 0.35294117647058826,
"grad_norm": 0.7287867069244385,
"learning_rate": 2.3529411764705883e-07,
"loss": 1.1801,
"step": 6
},
{
"epoch": 0.47058823529411764,
"grad_norm": 0.7565770745277405,
"learning_rate": 3.1372549019607843e-07,
"loss": 1.1116,
"step": 8
},
{
"epoch": 0.5882352941176471,
"grad_norm": 0.6836811304092407,
"learning_rate": 3.921568627450981e-07,
"loss": 1.1475,
"step": 10
},
{
"epoch": 0.7058823529411765,
"grad_norm": 0.6788910627365112,
"learning_rate": 4.7058823529411767e-07,
"loss": 1.0752,
"step": 12
},
{
"epoch": 0.8235294117647058,
"grad_norm": 0.6463932991027832,
"learning_rate": 5.490196078431373e-07,
"loss": 1.1428,
"step": 14
},
{
"epoch": 0.9411764705882353,
"grad_norm": 0.7503110766410828,
"learning_rate": 6.274509803921569e-07,
"loss": 1.0365,
"step": 16
},
{
"epoch": 1.0,
"eval_loss": 1.131649374961853,
"eval_runtime": 37.0173,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 17
},
{
"epoch": 1.0588235294117647,
"grad_norm": 0.7130016088485718,
"learning_rate": 7.058823529411766e-07,
"loss": 1.1439,
"step": 18
},
{
"epoch": 1.1764705882352942,
"grad_norm": 0.69338458776474,
"learning_rate": 7.843137254901962e-07,
"loss": 1.1374,
"step": 20
},
{
"epoch": 1.2941176470588236,
"grad_norm": 0.6517343521118164,
"learning_rate": 8.627450980392157e-07,
"loss": 1.1213,
"step": 22
},
{
"epoch": 1.4117647058823528,
"grad_norm": 0.685614287853241,
"learning_rate": 9.411764705882353e-07,
"loss": 1.1182,
"step": 24
},
{
"epoch": 1.5294117647058822,
"grad_norm": 0.6998293995857239,
"learning_rate": 1.019607843137255e-06,
"loss": 1.0508,
"step": 26
},
{
"epoch": 1.6470588235294117,
"grad_norm": 0.6757376194000244,
"learning_rate": 1.0980392156862745e-06,
"loss": 1.0773,
"step": 28
},
{
"epoch": 1.7647058823529411,
"grad_norm": 0.7003068327903748,
"learning_rate": 1.1764705882352942e-06,
"loss": 1.1765,
"step": 30
},
{
"epoch": 1.8823529411764706,
"grad_norm": 0.75652676820755,
"learning_rate": 1.2549019607843137e-06,
"loss": 1.0897,
"step": 32
},
{
"epoch": 2.0,
"grad_norm": 0.8910195231437683,
"learning_rate": 1.3333333333333334e-06,
"loss": 1.1746,
"step": 34
},
{
"epoch": 2.0,
"eval_loss": 1.119631290435791,
"eval_runtime": 37.0058,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 34
},
{
"epoch": 2.1176470588235294,
"grad_norm": 0.6963354349136353,
"learning_rate": 1.4117647058823531e-06,
"loss": 1.0914,
"step": 36
},
{
"epoch": 2.235294117647059,
"grad_norm": 0.6639174818992615,
"learning_rate": 1.4901960784313726e-06,
"loss": 1.1038,
"step": 38
},
{
"epoch": 2.3529411764705883,
"grad_norm": 0.7396222949028015,
"learning_rate": 1.5686274509803923e-06,
"loss": 1.0596,
"step": 40
},
{
"epoch": 2.4705882352941178,
"grad_norm": 0.7213342189788818,
"learning_rate": 1.6470588235294118e-06,
"loss": 1.0962,
"step": 42
},
{
"epoch": 2.588235294117647,
"grad_norm": 0.7507779002189636,
"learning_rate": 1.7254901960784315e-06,
"loss": 1.0945,
"step": 44
},
{
"epoch": 2.7058823529411766,
"grad_norm": 0.803626298904419,
"learning_rate": 1.8039215686274512e-06,
"loss": 1.1417,
"step": 46
},
{
"epoch": 2.8235294117647056,
"grad_norm": 0.8166558742523193,
"learning_rate": 1.8823529411764707e-06,
"loss": 1.1391,
"step": 48
},
{
"epoch": 2.9411764705882355,
"grad_norm": 0.7504469752311707,
"learning_rate": 1.96078431372549e-06,
"loss": 1.0933,
"step": 50
},
{
"epoch": 3.0,
"eval_loss": 1.0957257747650146,
"eval_runtime": 37.0081,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 51
},
{
"epoch": 3.0588235294117645,
"grad_norm": 0.7341092228889465,
"learning_rate": 2.03921568627451e-06,
"loss": 1.0645,
"step": 52
},
{
"epoch": 3.176470588235294,
"grad_norm": 0.7262235283851624,
"learning_rate": 2.1176470588235296e-06,
"loss": 1.1047,
"step": 54
},
{
"epoch": 3.2941176470588234,
"grad_norm": 0.6933767795562744,
"learning_rate": 2.196078431372549e-06,
"loss": 1.0305,
"step": 56
},
{
"epoch": 3.411764705882353,
"grad_norm": 0.8481059670448303,
"learning_rate": 2.274509803921569e-06,
"loss": 1.1403,
"step": 58
},
{
"epoch": 3.5294117647058822,
"grad_norm": 0.7597863078117371,
"learning_rate": 2.3529411764705885e-06,
"loss": 1.0537,
"step": 60
},
{
"epoch": 3.6470588235294117,
"grad_norm": 0.7542691826820374,
"learning_rate": 2.431372549019608e-06,
"loss": 1.0815,
"step": 62
},
{
"epoch": 3.764705882352941,
"grad_norm": 0.7457835674285889,
"learning_rate": 2.5098039215686274e-06,
"loss": 1.0747,
"step": 64
},
{
"epoch": 3.8823529411764706,
"grad_norm": 0.6987655758857727,
"learning_rate": 2.5882352941176473e-06,
"loss": 1.1015,
"step": 66
},
{
"epoch": 4.0,
"grad_norm": 0.7653221487998962,
"learning_rate": 2.666666666666667e-06,
"loss": 0.985,
"step": 68
},
{
"epoch": 4.0,
"eval_loss": 1.0540263652801514,
"eval_runtime": 37.0036,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 68
},
{
"epoch": 4.117647058823529,
"grad_norm": 0.6138588190078735,
"learning_rate": 2.7450980392156867e-06,
"loss": 1.0818,
"step": 70
},
{
"epoch": 4.235294117647059,
"grad_norm": 0.7161767482757568,
"learning_rate": 2.8235294117647062e-06,
"loss": 1.0233,
"step": 72
},
{
"epoch": 4.352941176470588,
"grad_norm": 0.5691388845443726,
"learning_rate": 2.901960784313726e-06,
"loss": 1.0152,
"step": 74
},
{
"epoch": 4.470588235294118,
"grad_norm": 0.6562293171882629,
"learning_rate": 2.980392156862745e-06,
"loss": 1.0299,
"step": 76
},
{
"epoch": 4.588235294117647,
"grad_norm": 0.6522495150566101,
"learning_rate": 3.058823529411765e-06,
"loss": 1.0341,
"step": 78
},
{
"epoch": 4.705882352941177,
"grad_norm": 0.6249567270278931,
"learning_rate": 3.1372549019607846e-06,
"loss": 0.9573,
"step": 80
},
{
"epoch": 4.823529411764706,
"grad_norm": 0.6513485312461853,
"learning_rate": 3.2156862745098045e-06,
"loss": 1.0373,
"step": 82
},
{
"epoch": 4.9411764705882355,
"grad_norm": 0.6425153613090515,
"learning_rate": 3.2941176470588236e-06,
"loss": 0.9741,
"step": 84
},
{
"epoch": 5.0,
"eval_loss": 0.99503093957901,
"eval_runtime": 37.0007,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 85
},
{
"epoch": 5.0588235294117645,
"grad_norm": 0.645844042301178,
"learning_rate": 3.3725490196078435e-06,
"loss": 0.9862,
"step": 86
},
{
"epoch": 5.176470588235294,
"grad_norm": 0.6497894525527954,
"learning_rate": 3.450980392156863e-06,
"loss": 0.9873,
"step": 88
},
{
"epoch": 5.294117647058823,
"grad_norm": 0.5283099412918091,
"learning_rate": 3.529411764705883e-06,
"loss": 0.9488,
"step": 90
},
{
"epoch": 5.411764705882353,
"grad_norm": 0.4905494451522827,
"learning_rate": 3.6078431372549024e-06,
"loss": 0.9606,
"step": 92
},
{
"epoch": 5.529411764705882,
"grad_norm": 0.5407238602638245,
"learning_rate": 3.6862745098039223e-06,
"loss": 0.888,
"step": 94
},
{
"epoch": 5.647058823529412,
"grad_norm": 0.5735270977020264,
"learning_rate": 3.7647058823529414e-06,
"loss": 0.916,
"step": 96
},
{
"epoch": 5.764705882352941,
"grad_norm": 0.5201605558395386,
"learning_rate": 3.843137254901962e-06,
"loss": 0.982,
"step": 98
},
{
"epoch": 5.882352941176471,
"grad_norm": 0.4470115303993225,
"learning_rate": 3.92156862745098e-06,
"loss": 0.9389,
"step": 100
},
{
"epoch": 6.0,
"grad_norm": 0.3844221532344818,
"learning_rate": 4.000000000000001e-06,
"loss": 1.0008,
"step": 102
},
{
"epoch": 6.0,
"eval_loss": 0.937701940536499,
"eval_runtime": 37.0036,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 102
},
{
"epoch": 6.117647058823529,
"grad_norm": 0.4652542173862457,
"learning_rate": 4.07843137254902e-06,
"loss": 0.915,
"step": 104
},
{
"epoch": 6.235294117647059,
"grad_norm": 0.4144337773323059,
"learning_rate": 4.15686274509804e-06,
"loss": 0.8833,
"step": 106
},
{
"epoch": 6.352941176470588,
"grad_norm": 0.4218851327896118,
"learning_rate": 4.235294117647059e-06,
"loss": 0.8672,
"step": 108
},
{
"epoch": 6.470588235294118,
"grad_norm": 0.4070567786693573,
"learning_rate": 4.313725490196079e-06,
"loss": 0.9036,
"step": 110
},
{
"epoch": 6.588235294117647,
"grad_norm": 0.36765947937965393,
"learning_rate": 4.392156862745098e-06,
"loss": 0.9384,
"step": 112
},
{
"epoch": 6.705882352941177,
"grad_norm": 0.3613109886646271,
"learning_rate": 4.4705882352941184e-06,
"loss": 0.904,
"step": 114
},
{
"epoch": 6.823529411764706,
"grad_norm": 0.36222243309020996,
"learning_rate": 4.549019607843138e-06,
"loss": 0.8641,
"step": 116
},
{
"epoch": 6.9411764705882355,
"grad_norm": 0.42111915349960327,
"learning_rate": 4.627450980392157e-06,
"loss": 0.8935,
"step": 118
},
{
"epoch": 7.0,
"eval_loss": 0.8938563466072083,
"eval_runtime": 37.0541,
"eval_samples_per_second": 0.972,
"eval_steps_per_second": 0.972,
"step": 119
},
{
"epoch": 7.0588235294117645,
"grad_norm": 0.35050034523010254,
"learning_rate": 4.705882352941177e-06,
"loss": 0.9626,
"step": 120
},
{
"epoch": 7.176470588235294,
"grad_norm": 0.37680429220199585,
"learning_rate": 4.784313725490196e-06,
"loss": 0.8818,
"step": 122
},
{
"epoch": 7.294117647058823,
"grad_norm": 0.3310965895652771,
"learning_rate": 4.862745098039216e-06,
"loss": 0.8631,
"step": 124
},
{
"epoch": 7.411764705882353,
"grad_norm": 0.3843957781791687,
"learning_rate": 4.941176470588236e-06,
"loss": 0.8506,
"step": 126
},
{
"epoch": 7.529411764705882,
"grad_norm": 0.3577839732170105,
"learning_rate": 5.019607843137255e-06,
"loss": 0.841,
"step": 128
},
{
"epoch": 7.647058823529412,
"grad_norm": 0.3117690682411194,
"learning_rate": 5.098039215686274e-06,
"loss": 0.8576,
"step": 130
},
{
"epoch": 7.764705882352941,
"grad_norm": 0.37441667914390564,
"learning_rate": 5.176470588235295e-06,
"loss": 0.8127,
"step": 132
},
{
"epoch": 7.882352941176471,
"grad_norm": 0.3178256154060364,
"learning_rate": 5.254901960784314e-06,
"loss": 0.8554,
"step": 134
},
{
"epoch": 8.0,
"grad_norm": 0.3216207027435303,
"learning_rate": 5.333333333333334e-06,
"loss": 0.8862,
"step": 136
},
{
"epoch": 8.0,
"eval_loss": 0.8579282760620117,
"eval_runtime": 37.0044,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 136
},
{
"epoch": 8.117647058823529,
"grad_norm": 0.341949462890625,
"learning_rate": 5.411764705882353e-06,
"loss": 0.8519,
"step": 138
},
{
"epoch": 8.235294117647058,
"grad_norm": 0.3150944113731384,
"learning_rate": 5.4901960784313735e-06,
"loss": 0.8338,
"step": 140
},
{
"epoch": 8.352941176470589,
"grad_norm": 0.3646303713321686,
"learning_rate": 5.568627450980393e-06,
"loss": 0.8381,
"step": 142
},
{
"epoch": 8.470588235294118,
"grad_norm": 0.334943950176239,
"learning_rate": 5.6470588235294125e-06,
"loss": 0.879,
"step": 144
},
{
"epoch": 8.588235294117647,
"grad_norm": 0.3029642403125763,
"learning_rate": 5.725490196078431e-06,
"loss": 0.7807,
"step": 146
},
{
"epoch": 8.705882352941176,
"grad_norm": 0.30906420946121216,
"learning_rate": 5.803921568627452e-06,
"loss": 0.7946,
"step": 148
},
{
"epoch": 8.823529411764707,
"grad_norm": 0.34527918696403503,
"learning_rate": 5.882352941176471e-06,
"loss": 0.8397,
"step": 150
},
{
"epoch": 8.941176470588236,
"grad_norm": 0.3913155794143677,
"learning_rate": 5.96078431372549e-06,
"loss": 0.8266,
"step": 152
},
{
"epoch": 9.0,
"eval_loss": 0.8293797969818115,
"eval_runtime": 37.0023,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 153
},
{
"epoch": 9.058823529411764,
"grad_norm": 0.3411412835121155,
"learning_rate": 6.03921568627451e-06,
"loss": 0.8249,
"step": 154
},
{
"epoch": 9.176470588235293,
"grad_norm": 0.28964897990226746,
"learning_rate": 6.11764705882353e-06,
"loss": 0.7887,
"step": 156
},
{
"epoch": 9.294117647058824,
"grad_norm": 0.36540400981903076,
"learning_rate": 6.19607843137255e-06,
"loss": 0.7434,
"step": 158
},
{
"epoch": 9.411764705882353,
"grad_norm": 0.3152979016304016,
"learning_rate": 6.274509803921569e-06,
"loss": 0.7864,
"step": 160
},
{
"epoch": 9.529411764705882,
"grad_norm": 0.28152012825012207,
"learning_rate": 6.352941176470589e-06,
"loss": 0.777,
"step": 162
},
{
"epoch": 9.647058823529411,
"grad_norm": 0.37814223766326904,
"learning_rate": 6.431372549019609e-06,
"loss": 0.8319,
"step": 164
},
{
"epoch": 9.764705882352942,
"grad_norm": 0.3085719645023346,
"learning_rate": 6.5098039215686285e-06,
"loss": 0.7986,
"step": 166
},
{
"epoch": 9.882352941176471,
"grad_norm": 0.29185226559638977,
"learning_rate": 6.588235294117647e-06,
"loss": 0.8292,
"step": 168
},
{
"epoch": 10.0,
"grad_norm": 0.30540931224823,
"learning_rate": 6.666666666666667e-06,
"loss": 0.7797,
"step": 170
},
{
"epoch": 10.0,
"eval_loss": 0.807489275932312,
"eval_runtime": 37.0049,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 170
},
{
"epoch": 10.117647058823529,
"grad_norm": 0.3091186285018921,
"learning_rate": 6.745098039215687e-06,
"loss": 0.7765,
"step": 172
},
{
"epoch": 10.235294117647058,
"grad_norm": 0.30727335810661316,
"learning_rate": 6.8235294117647065e-06,
"loss": 0.7505,
"step": 174
},
{
"epoch": 10.352941176470589,
"grad_norm": 0.2999902665615082,
"learning_rate": 6.901960784313726e-06,
"loss": 0.7233,
"step": 176
},
{
"epoch": 10.470588235294118,
"grad_norm": 0.2925303876399994,
"learning_rate": 6.9803921568627454e-06,
"loss": 0.7613,
"step": 178
},
{
"epoch": 10.588235294117647,
"grad_norm": 0.28137609362602234,
"learning_rate": 7.058823529411766e-06,
"loss": 0.7336,
"step": 180
},
{
"epoch": 10.705882352941176,
"grad_norm": 0.3438097834587097,
"learning_rate": 7.137254901960785e-06,
"loss": 0.822,
"step": 182
},
{
"epoch": 10.823529411764707,
"grad_norm": 0.3309740722179413,
"learning_rate": 7.215686274509805e-06,
"loss": 0.8488,
"step": 184
},
{
"epoch": 10.941176470588236,
"grad_norm": 0.33822691440582275,
"learning_rate": 7.294117647058823e-06,
"loss": 0.8158,
"step": 186
},
{
"epoch": 11.0,
"eval_loss": 0.790329098701477,
"eval_runtime": 37.0086,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 187
},
{
"epoch": 11.058823529411764,
"grad_norm": 0.3196800649166107,
"learning_rate": 7.372549019607845e-06,
"loss": 0.7996,
"step": 188
},
{
"epoch": 11.176470588235293,
"grad_norm": 0.2967463731765747,
"learning_rate": 7.450980392156863e-06,
"loss": 0.7102,
"step": 190
},
{
"epoch": 11.294117647058824,
"grad_norm": 0.32051464915275574,
"learning_rate": 7.529411764705883e-06,
"loss": 0.8008,
"step": 192
},
{
"epoch": 11.411764705882353,
"grad_norm": 0.399475634098053,
"learning_rate": 7.607843137254902e-06,
"loss": 0.8271,
"step": 194
},
{
"epoch": 11.529411764705882,
"grad_norm": 0.3100043535232544,
"learning_rate": 7.686274509803923e-06,
"loss": 0.7954,
"step": 196
},
{
"epoch": 11.647058823529411,
"grad_norm": 0.31522953510284424,
"learning_rate": 7.764705882352941e-06,
"loss": 0.712,
"step": 198
},
{
"epoch": 11.764705882352942,
"grad_norm": 0.3450244665145874,
"learning_rate": 7.84313725490196e-06,
"loss": 0.7329,
"step": 200
},
{
"epoch": 11.882352941176471,
"grad_norm": 0.3267311453819275,
"learning_rate": 7.92156862745098e-06,
"loss": 0.7275,
"step": 202
},
{
"epoch": 12.0,
"grad_norm": 0.42111936211586,
"learning_rate": 8.000000000000001e-06,
"loss": 0.6845,
"step": 204
},
{
"epoch": 12.0,
"eval_loss": 0.7741987705230713,
"eval_runtime": 37.0028,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 204
},
{
"epoch": 12.117647058823529,
"grad_norm": 0.33904674649238586,
"learning_rate": 8.07843137254902e-06,
"loss": 0.7635,
"step": 206
},
{
"epoch": 12.235294117647058,
"grad_norm": 0.33812686800956726,
"learning_rate": 8.15686274509804e-06,
"loss": 0.8239,
"step": 208
},
{
"epoch": 12.352941176470589,
"grad_norm": 0.34535664319992065,
"learning_rate": 8.23529411764706e-06,
"loss": 0.7084,
"step": 210
},
{
"epoch": 12.470588235294118,
"grad_norm": 0.330837219953537,
"learning_rate": 8.31372549019608e-06,
"loss": 0.6769,
"step": 212
},
{
"epoch": 12.588235294117647,
"grad_norm": 0.31368908286094666,
"learning_rate": 8.392156862745099e-06,
"loss": 0.7651,
"step": 214
},
{
"epoch": 12.705882352941176,
"grad_norm": 0.35061123967170715,
"learning_rate": 8.470588235294118e-06,
"loss": 0.7452,
"step": 216
},
{
"epoch": 12.823529411764707,
"grad_norm": 0.32111451029777527,
"learning_rate": 8.549019607843138e-06,
"loss": 0.718,
"step": 218
},
{
"epoch": 12.941176470588236,
"grad_norm": 0.3150181174278259,
"learning_rate": 8.627450980392157e-06,
"loss": 0.6819,
"step": 220
},
{
"epoch": 13.0,
"eval_loss": 0.7598351240158081,
"eval_runtime": 37.0068,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 221
},
{
"epoch": 13.058823529411764,
"grad_norm": 0.3078894019126892,
"learning_rate": 8.705882352941177e-06,
"loss": 0.7151,
"step": 222
},
{
"epoch": 13.176470588235293,
"grad_norm": 0.3273066282272339,
"learning_rate": 8.784313725490196e-06,
"loss": 0.6766,
"step": 224
},
{
"epoch": 13.294117647058824,
"grad_norm": 0.30240604281425476,
"learning_rate": 8.862745098039216e-06,
"loss": 0.665,
"step": 226
},
{
"epoch": 13.411764705882353,
"grad_norm": 0.3365361988544464,
"learning_rate": 8.941176470588237e-06,
"loss": 0.73,
"step": 228
},
{
"epoch": 13.529411764705882,
"grad_norm": 0.3776240646839142,
"learning_rate": 9.019607843137256e-06,
"loss": 0.7319,
"step": 230
},
{
"epoch": 13.647058823529411,
"grad_norm": 0.32826074957847595,
"learning_rate": 9.098039215686276e-06,
"loss": 0.7021,
"step": 232
},
{
"epoch": 13.764705882352942,
"grad_norm": 0.3192596435546875,
"learning_rate": 9.176470588235294e-06,
"loss": 0.7333,
"step": 234
},
{
"epoch": 13.882352941176471,
"grad_norm": 0.36111271381378174,
"learning_rate": 9.254901960784315e-06,
"loss": 0.7633,
"step": 236
},
{
"epoch": 14.0,
"grad_norm": 0.3694405257701874,
"learning_rate": 9.333333333333334e-06,
"loss": 0.7241,
"step": 238
},
{
"epoch": 14.0,
"eval_loss": 0.7471711039543152,
"eval_runtime": 37.0047,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 238
},
{
"epoch": 14.117647058823529,
"grad_norm": 0.3688935339450836,
"learning_rate": 9.411764705882354e-06,
"loss": 0.742,
"step": 240
},
{
"epoch": 14.235294117647058,
"grad_norm": 0.41638877987861633,
"learning_rate": 9.490196078431373e-06,
"loss": 0.7049,
"step": 242
},
{
"epoch": 14.352941176470589,
"grad_norm": 0.3489099442958832,
"learning_rate": 9.568627450980393e-06,
"loss": 0.6517,
"step": 244
},
{
"epoch": 14.470588235294118,
"grad_norm": 0.3671415448188782,
"learning_rate": 9.647058823529412e-06,
"loss": 0.7233,
"step": 246
},
{
"epoch": 14.588235294117647,
"grad_norm": 0.37552204728126526,
"learning_rate": 9.725490196078432e-06,
"loss": 0.701,
"step": 248
},
{
"epoch": 14.705882352941176,
"grad_norm": 0.43261757493019104,
"learning_rate": 9.803921568627451e-06,
"loss": 0.6979,
"step": 250
},
{
"epoch": 14.823529411764707,
"grad_norm": 0.37599849700927734,
"learning_rate": 9.882352941176472e-06,
"loss": 0.6462,
"step": 252
},
{
"epoch": 14.941176470588236,
"grad_norm": 0.3583243191242218,
"learning_rate": 9.960784313725492e-06,
"loss": 0.695,
"step": 254
},
{
"epoch": 15.0,
"eval_loss": 0.7365108132362366,
"eval_runtime": 37.003,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 255
},
{
"epoch": 15.058823529411764,
"grad_norm": 0.4206070005893707,
"learning_rate": 9.999995315380667e-06,
"loss": 0.6862,
"step": 256
},
{
"epoch": 15.176470588235293,
"grad_norm": 0.47259286046028137,
"learning_rate": 9.99995783847866e-06,
"loss": 0.6933,
"step": 258
},
{
"epoch": 15.294117647058824,
"grad_norm": 0.3962372839450836,
"learning_rate": 9.999882884955554e-06,
"loss": 0.6643,
"step": 260
},
{
"epoch": 15.411764705882353,
"grad_norm": 0.37228551506996155,
"learning_rate": 9.99977045537315e-06,
"loss": 0.6676,
"step": 262
},
{
"epoch": 15.529411764705882,
"grad_norm": 0.4293304979801178,
"learning_rate": 9.999620550574155e-06,
"loss": 0.6497,
"step": 264
},
{
"epoch": 15.647058823529411,
"grad_norm": 0.4316819906234741,
"learning_rate": 9.999433171682158e-06,
"loss": 0.6687,
"step": 266
},
{
"epoch": 15.764705882352942,
"grad_norm": 0.4474015533924103,
"learning_rate": 9.999208320101643e-06,
"loss": 0.6935,
"step": 268
},
{
"epoch": 15.882352941176471,
"grad_norm": 0.4514879584312439,
"learning_rate": 9.998945997517957e-06,
"loss": 0.6542,
"step": 270
},
{
"epoch": 16.0,
"grad_norm": 0.5015867948532104,
"learning_rate": 9.99864620589731e-06,
"loss": 0.6982,
"step": 272
},
{
"epoch": 16.0,
"eval_loss": 0.7271888256072998,
"eval_runtime": 37.0158,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 272
},
{
"epoch": 16.11764705882353,
"grad_norm": 0.5128858685493469,
"learning_rate": 9.998308947486753e-06,
"loss": 0.6414,
"step": 274
},
{
"epoch": 16.235294117647058,
"grad_norm": 0.40119990706443787,
"learning_rate": 9.997934224814173e-06,
"loss": 0.628,
"step": 276
},
{
"epoch": 16.352941176470587,
"grad_norm": 0.4133683145046234,
"learning_rate": 9.997522040688258e-06,
"loss": 0.7072,
"step": 278
},
{
"epoch": 16.470588235294116,
"grad_norm": 0.41614237427711487,
"learning_rate": 9.997072398198492e-06,
"loss": 0.6526,
"step": 280
},
{
"epoch": 16.58823529411765,
"grad_norm": 0.4795832931995392,
"learning_rate": 9.996585300715117e-06,
"loss": 0.6583,
"step": 282
},
{
"epoch": 16.705882352941178,
"grad_norm": 0.4348108470439911,
"learning_rate": 9.996060751889114e-06,
"loss": 0.6728,
"step": 284
},
{
"epoch": 16.823529411764707,
"grad_norm": 0.4476607143878937,
"learning_rate": 9.995498755652186e-06,
"loss": 0.626,
"step": 286
},
{
"epoch": 16.941176470588236,
"grad_norm": 0.4528913199901581,
"learning_rate": 9.994899316216709e-06,
"loss": 0.622,
"step": 288
},
{
"epoch": 17.0,
"eval_loss": 0.7215262055397034,
"eval_runtime": 36.9993,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 289
},
{
"epoch": 17.058823529411764,
"grad_norm": 0.5009466409683228,
"learning_rate": 9.994262438075713e-06,
"loss": 0.7081,
"step": 290
},
{
"epoch": 17.176470588235293,
"grad_norm": 0.475374698638916,
"learning_rate": 9.993588126002848e-06,
"loss": 0.6229,
"step": 292
},
{
"epoch": 17.294117647058822,
"grad_norm": 0.4642236828804016,
"learning_rate": 9.992876385052346e-06,
"loss": 0.5844,
"step": 294
},
{
"epoch": 17.41176470588235,
"grad_norm": 0.4536580741405487,
"learning_rate": 9.992127220558976e-06,
"loss": 0.6135,
"step": 296
},
{
"epoch": 17.529411764705884,
"grad_norm": 0.45987674593925476,
"learning_rate": 9.991340638138022e-06,
"loss": 0.6511,
"step": 298
},
{
"epoch": 17.647058823529413,
"grad_norm": 0.4978199899196625,
"learning_rate": 9.990516643685222e-06,
"loss": 0.6424,
"step": 300
},
{
"epoch": 17.764705882352942,
"grad_norm": 0.49036839604377747,
"learning_rate": 9.98965524337673e-06,
"loss": 0.6773,
"step": 302
},
{
"epoch": 17.88235294117647,
"grad_norm": 0.5300158858299255,
"learning_rate": 9.988756443669081e-06,
"loss": 0.6364,
"step": 304
},
{
"epoch": 18.0,
"grad_norm": 0.4950679540634155,
"learning_rate": 9.987820251299121e-06,
"loss": 0.5905,
"step": 306
},
{
"epoch": 18.0,
"eval_loss": 0.7155699133872986,
"eval_runtime": 37.0042,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 306
},
{
"epoch": 18.11764705882353,
"grad_norm": 0.5069499611854553,
"learning_rate": 9.98684667328398e-06,
"loss": 0.6123,
"step": 308
},
{
"epoch": 18.235294117647058,
"grad_norm": 0.46550482511520386,
"learning_rate": 9.985835716921e-06,
"loss": 0.6102,
"step": 310
},
{
"epoch": 18.352941176470587,
"grad_norm": 0.7860422730445862,
"learning_rate": 9.984787389787689e-06,
"loss": 0.6273,
"step": 312
},
{
"epoch": 18.470588235294116,
"grad_norm": 0.49825742840766907,
"learning_rate": 9.983701699741668e-06,
"loss": 0.6297,
"step": 314
},
{
"epoch": 18.58823529411765,
"grad_norm": 0.5290573239326477,
"learning_rate": 9.982578654920601e-06,
"loss": 0.6238,
"step": 316
},
{
"epoch": 18.705882352941178,
"grad_norm": 0.5185476541519165,
"learning_rate": 9.981418263742148e-06,
"loss": 0.5702,
"step": 318
},
{
"epoch": 18.823529411764707,
"grad_norm": 0.5360794067382812,
"learning_rate": 9.980220534903889e-06,
"loss": 0.6082,
"step": 320
},
{
"epoch": 18.941176470588236,
"grad_norm": 0.5598728656768799,
"learning_rate": 9.978985477383264e-06,
"loss": 0.6121,
"step": 322
},
{
"epoch": 19.0,
"eval_loss": 0.7140286564826965,
"eval_runtime": 36.9943,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 323
},
{
"epoch": 19.058823529411764,
"grad_norm": 0.5005812048912048,
"learning_rate": 9.97771310043751e-06,
"loss": 0.5964,
"step": 324
},
{
"epoch": 19.176470588235293,
"grad_norm": 0.5243679881095886,
"learning_rate": 9.97640341360358e-06,
"loss": 0.643,
"step": 326
},
{
"epoch": 19.294117647058822,
"grad_norm": 0.5746588706970215,
"learning_rate": 9.975056426698094e-06,
"loss": 0.587,
"step": 328
},
{
"epoch": 19.41176470588235,
"grad_norm": 0.5213414430618286,
"learning_rate": 9.973672149817232e-06,
"loss": 0.5835,
"step": 330
},
{
"epoch": 19.529411764705884,
"grad_norm": 0.6261019706726074,
"learning_rate": 9.972250593336689e-06,
"loss": 0.5884,
"step": 332
},
{
"epoch": 19.647058823529413,
"grad_norm": 0.5905741453170776,
"learning_rate": 9.970791767911581e-06,
"loss": 0.588,
"step": 334
},
{
"epoch": 19.764705882352942,
"grad_norm": 0.5681843757629395,
"learning_rate": 9.96929568447637e-06,
"loss": 0.554,
"step": 336
},
{
"epoch": 19.88235294117647,
"grad_norm": 0.6130782961845398,
"learning_rate": 9.967762354244778e-06,
"loss": 0.6003,
"step": 338
},
{
"epoch": 20.0,
"grad_norm": 0.6039074659347534,
"learning_rate": 9.966191788709716e-06,
"loss": 0.567,
"step": 340
},
{
"epoch": 20.0,
"eval_loss": 0.7166150808334351,
"eval_runtime": 36.9981,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 340
},
{
"epoch": 20.11764705882353,
"grad_norm": 0.661340594291687,
"learning_rate": 9.964583999643174e-06,
"loss": 0.5771,
"step": 342
},
{
"epoch": 20.235294117647058,
"grad_norm": 0.6773268580436707,
"learning_rate": 9.962938999096159e-06,
"loss": 0.5611,
"step": 344
},
{
"epoch": 20.352941176470587,
"grad_norm": 0.6907221674919128,
"learning_rate": 9.961256799398584e-06,
"loss": 0.5961,
"step": 346
},
{
"epoch": 20.470588235294116,
"grad_norm": 0.6799242496490479,
"learning_rate": 9.95953741315919e-06,
"loss": 0.605,
"step": 348
},
{
"epoch": 20.58823529411765,
"grad_norm": 0.6507272720336914,
"learning_rate": 9.957780853265441e-06,
"loss": 0.5564,
"step": 350
},
{
"epoch": 20.705882352941178,
"grad_norm": 0.7043225765228271,
"learning_rate": 9.955987132883435e-06,
"loss": 0.5069,
"step": 352
},
{
"epoch": 20.823529411764707,
"grad_norm": 0.7170704007148743,
"learning_rate": 9.954156265457801e-06,
"loss": 0.5671,
"step": 354
},
{
"epoch": 20.941176470588236,
"grad_norm": 0.6098182797431946,
"learning_rate": 9.952288264711601e-06,
"loss": 0.5471,
"step": 356
},
{
"epoch": 21.0,
"eval_loss": 0.7171775698661804,
"eval_runtime": 36.9995,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 357
},
{
"epoch": 21.058823529411764,
"grad_norm": 0.7199786305427551,
"learning_rate": 9.950383144646221e-06,
"loss": 0.5856,
"step": 358
},
{
"epoch": 21.176470588235293,
"grad_norm": 0.748616635799408,
"learning_rate": 9.948440919541277e-06,
"loss": 0.5347,
"step": 360
},
{
"epoch": 21.294117647058822,
"grad_norm": 0.7035663723945618,
"learning_rate": 9.946461603954499e-06,
"loss": 0.5433,
"step": 362
},
{
"epoch": 21.41176470588235,
"grad_norm": 0.7293503880500793,
"learning_rate": 9.944445212721619e-06,
"loss": 0.5473,
"step": 364
},
{
"epoch": 21.529411764705884,
"grad_norm": 0.7407766580581665,
"learning_rate": 9.942391760956277e-06,
"loss": 0.527,
"step": 366
},
{
"epoch": 21.647058823529413,
"grad_norm": 0.7856358289718628,
"learning_rate": 9.940301264049885e-06,
"loss": 0.5372,
"step": 368
},
{
"epoch": 21.764705882352942,
"grad_norm": 0.7008533477783203,
"learning_rate": 9.938173737671531e-06,
"loss": 0.5375,
"step": 370
},
{
"epoch": 21.88235294117647,
"grad_norm": 0.7586143612861633,
"learning_rate": 9.936009197767847e-06,
"loss": 0.5467,
"step": 372
},
{
"epoch": 22.0,
"grad_norm": 0.8423267602920532,
"learning_rate": 9.933807660562898e-06,
"loss": 0.4761,
"step": 374
},
{
"epoch": 22.0,
"eval_loss": 0.7233606576919556,
"eval_runtime": 37.0054,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 374
},
{
"epoch": 22.11764705882353,
"grad_norm": 0.882168710231781,
"learning_rate": 9.931569142558057e-06,
"loss": 0.562,
"step": 376
},
{
"epoch": 22.235294117647058,
"grad_norm": 0.8616783618927002,
"learning_rate": 9.929293660531889e-06,
"loss": 0.5039,
"step": 378
},
{
"epoch": 22.352941176470587,
"grad_norm": 0.9095832705497742,
"learning_rate": 9.926981231540007e-06,
"loss": 0.4744,
"step": 380
},
{
"epoch": 22.470588235294116,
"grad_norm": 0.8473853468894958,
"learning_rate": 9.924631872914967e-06,
"loss": 0.4976,
"step": 382
},
{
"epoch": 22.58823529411765,
"grad_norm": 0.844835102558136,
"learning_rate": 9.922245602266119e-06,
"loss": 0.4721,
"step": 384
},
{
"epoch": 22.705882352941178,
"grad_norm": 0.820750892162323,
"learning_rate": 9.919822437479488e-06,
"loss": 0.4995,
"step": 386
},
{
"epoch": 22.823529411764707,
"grad_norm": 0.823648989200592,
"learning_rate": 9.91736239671763e-06,
"loss": 0.5061,
"step": 388
},
{
"epoch": 22.941176470588236,
"grad_norm": 0.8103899359703064,
"learning_rate": 9.91486549841951e-06,
"loss": 0.4967,
"step": 390
},
{
"epoch": 23.0,
"eval_loss": 0.7357540726661682,
"eval_runtime": 37.0101,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 391
},
{
"epoch": 23.058823529411764,
"grad_norm": 0.7696477174758911,
"learning_rate": 9.912331761300341e-06,
"loss": 0.5004,
"step": 392
},
{
"epoch": 23.176470588235293,
"grad_norm": 0.7803938984870911,
"learning_rate": 9.909761204351469e-06,
"loss": 0.4903,
"step": 394
},
{
"epoch": 23.294117647058822,
"grad_norm": 0.9065766930580139,
"learning_rate": 9.90715384684021e-06,
"loss": 0.505,
"step": 396
},
{
"epoch": 23.41176470588235,
"grad_norm": 1.1152830123901367,
"learning_rate": 9.904509708309723e-06,
"loss": 0.4312,
"step": 398
},
{
"epoch": 23.529411764705884,
"grad_norm": 1.0776093006134033,
"learning_rate": 9.901828808578846e-06,
"loss": 0.4503,
"step": 400
},
{
"epoch": 23.647058823529413,
"grad_norm": 1.0322400331497192,
"learning_rate": 9.899111167741966e-06,
"loss": 0.4685,
"step": 402
},
{
"epoch": 23.764705882352942,
"grad_norm": 0.9414217472076416,
"learning_rate": 9.896356806168851e-06,
"loss": 0.4757,
"step": 404
},
{
"epoch": 23.88235294117647,
"grad_norm": 1.1830779314041138,
"learning_rate": 9.89356574450451e-06,
"loss": 0.4408,
"step": 406
},
{
"epoch": 24.0,
"grad_norm": 0.9795882105827332,
"learning_rate": 9.890738003669029e-06,
"loss": 0.4833,
"step": 408
},
{
"epoch": 24.0,
"eval_loss": 0.7643568515777588,
"eval_runtime": 37.0004,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 408
},
{
"epoch": 24.11764705882353,
"grad_norm": 1.2952356338500977,
"learning_rate": 9.887873604857424e-06,
"loss": 0.4299,
"step": 410
},
{
"epoch": 24.235294117647058,
"grad_norm": 1.2539509534835815,
"learning_rate": 9.884972569539471e-06,
"loss": 0.4488,
"step": 412
},
{
"epoch": 24.352941176470587,
"grad_norm": 1.0663094520568848,
"learning_rate": 9.882034919459556e-06,
"loss": 0.4152,
"step": 414
},
{
"epoch": 24.470588235294116,
"grad_norm": 1.03883957862854,
"learning_rate": 9.879060676636502e-06,
"loss": 0.4435,
"step": 416
},
{
"epoch": 24.58823529411765,
"grad_norm": 0.9475598931312561,
"learning_rate": 9.876049863363415e-06,
"loss": 0.4408,
"step": 418
},
{
"epoch": 24.705882352941178,
"grad_norm": 0.9946249723434448,
"learning_rate": 9.873002502207502e-06,
"loss": 0.4292,
"step": 420
},
{
"epoch": 24.823529411764707,
"grad_norm": 1.135332465171814,
"learning_rate": 9.86991861600992e-06,
"loss": 0.4057,
"step": 422
},
{
"epoch": 24.941176470588236,
"grad_norm": 1.7003220319747925,
"learning_rate": 9.866798227885588e-06,
"loss": 0.4071,
"step": 424
},
{
"epoch": 25.0,
"eval_loss": 0.8012258410453796,
"eval_runtime": 37.0059,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 425
},
{
"epoch": 25.058823529411764,
"grad_norm": 1.124939203262329,
"learning_rate": 9.863641361223025e-06,
"loss": 0.3795,
"step": 426
},
{
"epoch": 25.176470588235293,
"grad_norm": 1.2946455478668213,
"learning_rate": 9.860448039684169e-06,
"loss": 0.3631,
"step": 428
},
{
"epoch": 25.294117647058822,
"grad_norm": 1.2063255310058594,
"learning_rate": 9.857218287204204e-06,
"loss": 0.3786,
"step": 430
},
{
"epoch": 25.41176470588235,
"grad_norm": 1.1833475828170776,
"learning_rate": 9.853952127991374e-06,
"loss": 0.403,
"step": 432
},
{
"epoch": 25.529411764705884,
"grad_norm": 1.2458750009536743,
"learning_rate": 9.850649586526808e-06,
"loss": 0.3626,
"step": 434
},
{
"epoch": 25.647058823529413,
"grad_norm": 1.2201582193374634,
"learning_rate": 9.847310687564335e-06,
"loss": 0.3774,
"step": 436
},
{
"epoch": 25.764705882352942,
"grad_norm": 1.2703592777252197,
"learning_rate": 9.843935456130295e-06,
"loss": 0.4078,
"step": 438
},
{
"epoch": 25.88235294117647,
"grad_norm": 1.8023614883422852,
"learning_rate": 9.840523917523354e-06,
"loss": 0.406,
"step": 440
},
{
"epoch": 26.0,
"grad_norm": 1.3355423212051392,
"learning_rate": 9.83707609731432e-06,
"loss": 0.3567,
"step": 442
},
{
"epoch": 26.0,
"eval_loss": 0.8288628458976746,
"eval_runtime": 37.0058,
"eval_samples_per_second": 0.973,
"eval_steps_per_second": 0.973,
"step": 442
},
{
"epoch": 26.0,
"step": 442,
"total_flos": 3.38458179962667e+17,
"train_loss": 0.7497456199173475,
"train_runtime": 12081.7026,
"train_samples_per_second": 1.689,
"train_steps_per_second": 0.211
}
],
"logging_steps": 2,
"max_steps": 2550,
"num_input_tokens_seen": 0,
"num_train_epochs": 150,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 7,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.38458179962667e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}