riskmanagementv1/checkpoint-155/trainer_state.json
{
"best_metric": 0.10599859803915024,
"best_model_checkpoint": "riskmanagementv1/checkpoint-155",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 155,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03225806451612903,
"grad_norm": 1.648207187652588,
"learning_rate": 6.25e-07,
"loss": 1.0951,
"step": 1
},
{
"epoch": 0.06451612903225806,
"grad_norm": 1.5398106575012207,
"learning_rate": 1.25e-06,
"loss": 1.116,
"step": 2
},
{
"epoch": 0.0967741935483871,
"grad_norm": 4.653311729431152,
"learning_rate": 1.8750000000000003e-06,
"loss": 1.0443,
"step": 3
},
{
"epoch": 0.12903225806451613,
"grad_norm": 3.8882646560668945,
"learning_rate": 2.5e-06,
"loss": 1.113,
"step": 4
},
{
"epoch": 0.16129032258064516,
"grad_norm": 3.754126787185669,
"learning_rate": 3.125e-06,
"loss": 1.1012,
"step": 5
},
{
"epoch": 0.1935483870967742,
"grad_norm": 1.5717527866363525,
"learning_rate": 3.7500000000000005e-06,
"loss": 1.0946,
"step": 6
},
{
"epoch": 0.22580645161290322,
"grad_norm": 2.106679677963257,
"learning_rate": 4.3750000000000005e-06,
"loss": 1.0793,
"step": 7
},
{
"epoch": 0.25806451612903225,
"grad_norm": 2.2854700088500977,
"learning_rate": 5e-06,
"loss": 1.1227,
"step": 8
},
{
"epoch": 0.2903225806451613,
"grad_norm": 2.0785741806030273,
"learning_rate": 5.625e-06,
"loss": 1.0784,
"step": 9
},
{
"epoch": 0.3225806451612903,
"grad_norm": 3.856192111968994,
"learning_rate": 6.25e-06,
"loss": 1.1229,
"step": 10
},
{
"epoch": 0.3548387096774194,
"grad_norm": 2.9804818630218506,
"learning_rate": 6.875e-06,
"loss": 1.0941,
"step": 11
},
{
"epoch": 0.3870967741935484,
"grad_norm": 2.701162099838257,
"learning_rate": 7.500000000000001e-06,
"loss": 1.0792,
"step": 12
},
{
"epoch": 0.41935483870967744,
"grad_norm": 5.755159854888916,
"learning_rate": 8.125000000000001e-06,
"loss": 1.08,
"step": 13
},
{
"epoch": 0.45161290322580644,
"grad_norm": 4.275572299957275,
"learning_rate": 8.750000000000001e-06,
"loss": 1.0991,
"step": 14
},
{
"epoch": 0.4838709677419355,
"grad_norm": 1.5670201778411865,
"learning_rate": 9.375000000000001e-06,
"loss": 1.0728,
"step": 15
},
{
"epoch": 0.5161290322580645,
"grad_norm": 2.7937171459198,
"learning_rate": 1e-05,
"loss": 1.1006,
"step": 16
},
{
"epoch": 0.5483870967741935,
"grad_norm": 2.048818349838257,
"learning_rate": 9.928057553956835e-06,
"loss": 1.0779,
"step": 17
},
{
"epoch": 0.5806451612903226,
"grad_norm": 4.063783168792725,
"learning_rate": 9.85611510791367e-06,
"loss": 1.0858,
"step": 18
},
{
"epoch": 0.6129032258064516,
"grad_norm": 2.6299703121185303,
"learning_rate": 9.784172661870505e-06,
"loss": 1.0715,
"step": 19
},
{
"epoch": 0.6451612903225806,
"grad_norm": 2.324522018432617,
"learning_rate": 9.712230215827338e-06,
"loss": 1.0964,
"step": 20
},
{
"epoch": 0.6774193548387096,
"grad_norm": 2.1976382732391357,
"learning_rate": 9.640287769784174e-06,
"loss": 1.032,
"step": 21
},
{
"epoch": 0.7096774193548387,
"grad_norm": 2.2792370319366455,
"learning_rate": 9.568345323741008e-06,
"loss": 1.0902,
"step": 22
},
{
"epoch": 0.7419354838709677,
"grad_norm": 1.996429204940796,
"learning_rate": 9.496402877697842e-06,
"loss": 1.0665,
"step": 23
},
{
"epoch": 0.7741935483870968,
"grad_norm": 4.1794891357421875,
"learning_rate": 9.424460431654678e-06,
"loss": 1.0737,
"step": 24
},
{
"epoch": 0.8064516129032258,
"grad_norm": 4.505804061889648,
"learning_rate": 9.35251798561151e-06,
"loss": 1.0948,
"step": 25
},
{
"epoch": 0.8387096774193549,
"grad_norm": 3.4325923919677734,
"learning_rate": 9.280575539568346e-06,
"loss": 1.0494,
"step": 26
},
{
"epoch": 0.8709677419354839,
"grad_norm": 2.4758219718933105,
"learning_rate": 9.20863309352518e-06,
"loss": 1.052,
"step": 27
},
{
"epoch": 0.9032258064516129,
"grad_norm": 3.858365297317505,
"learning_rate": 9.136690647482015e-06,
"loss": 1.051,
"step": 28
},
{
"epoch": 0.9354838709677419,
"grad_norm": 2.598480701446533,
"learning_rate": 9.064748201438849e-06,
"loss": 1.0568,
"step": 29
},
{
"epoch": 0.967741935483871,
"grad_norm": 1.8019946813583374,
"learning_rate": 8.992805755395683e-06,
"loss": 1.0231,
"step": 30
},
{
"epoch": 1.0,
"grad_norm": 3.940290927886963,
"learning_rate": 8.92086330935252e-06,
"loss": 1.0415,
"step": 31
},
{
"epoch": 1.0,
"eval_accuracy": 0.9032258064516129,
"eval_f1_macro": 0.9035612535612536,
"eval_f1_micro": 0.9032258064516129,
"eval_f1_weighted": 0.9040736145574856,
"eval_loss": 1.0341954231262207,
"eval_precision_macro": 0.9093567251461989,
"eval_precision_micro": 0.9032258064516129,
"eval_precision_weighted": 0.9095925297113752,
"eval_recall_macro": 0.9023809523809523,
"eval_recall_micro": 0.9032258064516129,
"eval_recall_weighted": 0.9032258064516129,
"eval_runtime": 0.3613,
"eval_samples_per_second": 171.597,
"eval_steps_per_second": 11.071,
"step": 31
},
{
"epoch": 1.032258064516129,
"grad_norm": 4.133128643035889,
"learning_rate": 8.848920863309353e-06,
"loss": 1.0548,
"step": 32
},
{
"epoch": 1.064516129032258,
"grad_norm": 3.782399892807007,
"learning_rate": 8.776978417266188e-06,
"loss": 1.0703,
"step": 33
},
{
"epoch": 1.096774193548387,
"grad_norm": 4.876072883605957,
"learning_rate": 8.705035971223022e-06,
"loss": 1.0334,
"step": 34
},
{
"epoch": 1.129032258064516,
"grad_norm": 4.667634963989258,
"learning_rate": 8.633093525179856e-06,
"loss": 0.9948,
"step": 35
},
{
"epoch": 1.1612903225806452,
"grad_norm": 2.1760268211364746,
"learning_rate": 8.561151079136692e-06,
"loss": 0.972,
"step": 36
},
{
"epoch": 1.1935483870967742,
"grad_norm": 4.767606735229492,
"learning_rate": 8.489208633093526e-06,
"loss": 0.9336,
"step": 37
},
{
"epoch": 1.2258064516129032,
"grad_norm": 7.815669536590576,
"learning_rate": 8.41726618705036e-06,
"loss": 0.9686,
"step": 38
},
{
"epoch": 1.2580645161290323,
"grad_norm": 4.481734752655029,
"learning_rate": 8.345323741007195e-06,
"loss": 0.9709,
"step": 39
},
{
"epoch": 1.2903225806451613,
"grad_norm": 2.848688840866089,
"learning_rate": 8.273381294964029e-06,
"loss": 0.962,
"step": 40
},
{
"epoch": 1.3225806451612903,
"grad_norm": 4.002137184143066,
"learning_rate": 8.201438848920865e-06,
"loss": 0.937,
"step": 41
},
{
"epoch": 1.3548387096774195,
"grad_norm": 4.138007640838623,
"learning_rate": 8.129496402877699e-06,
"loss": 0.9601,
"step": 42
},
{
"epoch": 1.3870967741935485,
"grad_norm": 4.144357681274414,
"learning_rate": 8.057553956834533e-06,
"loss": 0.9457,
"step": 43
},
{
"epoch": 1.4193548387096775,
"grad_norm": 5.553256034851074,
"learning_rate": 7.985611510791367e-06,
"loss": 0.9491,
"step": 44
},
{
"epoch": 1.4516129032258065,
"grad_norm": 3.699176549911499,
"learning_rate": 7.913669064748202e-06,
"loss": 0.9378,
"step": 45
},
{
"epoch": 1.4838709677419355,
"grad_norm": 4.687703609466553,
"learning_rate": 7.841726618705036e-06,
"loss": 0.8746,
"step": 46
},
{
"epoch": 1.5161290322580645,
"grad_norm": 4.312544822692871,
"learning_rate": 7.769784172661872e-06,
"loss": 0.9033,
"step": 47
},
{
"epoch": 1.5483870967741935,
"grad_norm": 7.0601067543029785,
"learning_rate": 7.697841726618706e-06,
"loss": 0.7922,
"step": 48
},
{
"epoch": 1.5806451612903225,
"grad_norm": 5.145197868347168,
"learning_rate": 7.62589928057554e-06,
"loss": 0.7994,
"step": 49
},
{
"epoch": 1.6129032258064515,
"grad_norm": 7.315432071685791,
"learning_rate": 7.5539568345323745e-06,
"loss": 0.9052,
"step": 50
},
{
"epoch": 1.6451612903225805,
"grad_norm": 5.903252601623535,
"learning_rate": 7.48201438848921e-06,
"loss": 0.7659,
"step": 51
},
{
"epoch": 1.6774193548387095,
"grad_norm": 6.903425216674805,
"learning_rate": 7.410071942446043e-06,
"loss": 0.6835,
"step": 52
},
{
"epoch": 1.7096774193548387,
"grad_norm": 5.649851322174072,
"learning_rate": 7.338129496402878e-06,
"loss": 0.7886,
"step": 53
},
{
"epoch": 1.7419354838709677,
"grad_norm": 7.038477420806885,
"learning_rate": 7.266187050359713e-06,
"loss": 0.8199,
"step": 54
},
{
"epoch": 1.7741935483870968,
"grad_norm": 5.405828475952148,
"learning_rate": 7.194244604316547e-06,
"loss": 0.7793,
"step": 55
},
{
"epoch": 1.8064516129032258,
"grad_norm": 4.914242744445801,
"learning_rate": 7.122302158273382e-06,
"loss": 0.6027,
"step": 56
},
{
"epoch": 1.838709677419355,
"grad_norm": 3.8369204998016357,
"learning_rate": 7.050359712230216e-06,
"loss": 0.6166,
"step": 57
},
{
"epoch": 1.870967741935484,
"grad_norm": 5.300614356994629,
"learning_rate": 6.978417266187051e-06,
"loss": 0.5933,
"step": 58
},
{
"epoch": 1.903225806451613,
"grad_norm": 6.020833969116211,
"learning_rate": 6.906474820143886e-06,
"loss": 0.6644,
"step": 59
},
{
"epoch": 1.935483870967742,
"grad_norm": 7.810927867889404,
"learning_rate": 6.834532374100719e-06,
"loss": 0.6088,
"step": 60
},
{
"epoch": 1.967741935483871,
"grad_norm": 4.888729572296143,
"learning_rate": 6.762589928057554e-06,
"loss": 0.601,
"step": 61
},
{
"epoch": 2.0,
"grad_norm": 9.534357070922852,
"learning_rate": 6.6906474820143886e-06,
"loss": 0.5048,
"step": 62
},
{
"epoch": 2.0,
"eval_accuracy": 0.9032258064516129,
"eval_f1_macro": 0.9009661835748792,
"eval_f1_micro": 0.9032258064516129,
"eval_f1_weighted": 0.90205703599813,
"eval_loss": 0.5168083310127258,
"eval_precision_macro": 0.9125,
"eval_precision_micro": 0.9032258064516129,
"eval_precision_weighted": 0.9120967741935483,
"eval_recall_macro": 0.9007936507936508,
"eval_recall_micro": 0.9032258064516129,
"eval_recall_weighted": 0.9032258064516129,
"eval_runtime": 0.362,
"eval_samples_per_second": 171.293,
"eval_steps_per_second": 11.051,
"step": 62
},
{
"epoch": 2.032258064516129,
"grad_norm": 5.264218807220459,
"learning_rate": 6.618705035971224e-06,
"loss": 0.5298,
"step": 63
},
{
"epoch": 2.064516129032258,
"grad_norm": 4.860868453979492,
"learning_rate": 6.546762589928059e-06,
"loss": 0.4793,
"step": 64
},
{
"epoch": 2.096774193548387,
"grad_norm": 4.750568389892578,
"learning_rate": 6.474820143884892e-06,
"loss": 0.6173,
"step": 65
},
{
"epoch": 2.129032258064516,
"grad_norm": 6.425321102142334,
"learning_rate": 6.402877697841727e-06,
"loss": 0.6019,
"step": 66
},
{
"epoch": 2.161290322580645,
"grad_norm": 9.132750511169434,
"learning_rate": 6.330935251798561e-06,
"loss": 0.5513,
"step": 67
},
{
"epoch": 2.193548387096774,
"grad_norm": 3.2714285850524902,
"learning_rate": 6.2589928057553964e-06,
"loss": 0.5036,
"step": 68
},
{
"epoch": 2.225806451612903,
"grad_norm": 6.814634323120117,
"learning_rate": 6.1870503597122315e-06,
"loss": 0.537,
"step": 69
},
{
"epoch": 2.258064516129032,
"grad_norm": 3.728926420211792,
"learning_rate": 6.115107913669065e-06,
"loss": 0.4946,
"step": 70
},
{
"epoch": 2.2903225806451615,
"grad_norm": 6.5363545417785645,
"learning_rate": 6.0431654676259e-06,
"loss": 0.4285,
"step": 71
},
{
"epoch": 2.3225806451612905,
"grad_norm": 3.544241428375244,
"learning_rate": 5.971223021582734e-06,
"loss": 0.5499,
"step": 72
},
{
"epoch": 2.3548387096774195,
"grad_norm": 4.439380168914795,
"learning_rate": 5.899280575539568e-06,
"loss": 0.4158,
"step": 73
},
{
"epoch": 2.3870967741935485,
"grad_norm": 3.382199287414551,
"learning_rate": 5.8273381294964035e-06,
"loss": 0.5042,
"step": 74
},
{
"epoch": 2.4193548387096775,
"grad_norm": 3.844313144683838,
"learning_rate": 5.755395683453238e-06,
"loss": 0.4456,
"step": 75
},
{
"epoch": 2.4516129032258065,
"grad_norm": 6.994678497314453,
"learning_rate": 5.683453237410073e-06,
"loss": 0.3322,
"step": 76
},
{
"epoch": 2.4838709677419355,
"grad_norm": 3.745391845703125,
"learning_rate": 5.611510791366906e-06,
"loss": 0.4974,
"step": 77
},
{
"epoch": 2.5161290322580645,
"grad_norm": 3.9794974327087402,
"learning_rate": 5.539568345323741e-06,
"loss": 0.6066,
"step": 78
},
{
"epoch": 2.5483870967741935,
"grad_norm": 4.3371901512146,
"learning_rate": 5.467625899280576e-06,
"loss": 0.3384,
"step": 79
},
{
"epoch": 2.5806451612903225,
"grad_norm": 3.778343915939331,
"learning_rate": 5.3956834532374105e-06,
"loss": 0.3747,
"step": 80
},
{
"epoch": 2.6129032258064515,
"grad_norm": 5.110599994659424,
"learning_rate": 5.3237410071942456e-06,
"loss": 0.3761,
"step": 81
},
{
"epoch": 2.6451612903225805,
"grad_norm": 4.0779128074646,
"learning_rate": 5.251798561151079e-06,
"loss": 0.3718,
"step": 82
},
{
"epoch": 2.6774193548387095,
"grad_norm": 3.576948881149292,
"learning_rate": 5.179856115107914e-06,
"loss": 0.3232,
"step": 83
},
{
"epoch": 2.709677419354839,
"grad_norm": 4.824243068695068,
"learning_rate": 5.107913669064749e-06,
"loss": 0.3021,
"step": 84
},
{
"epoch": 2.741935483870968,
"grad_norm": 7.285948276519775,
"learning_rate": 5.035971223021583e-06,
"loss": 0.3227,
"step": 85
},
{
"epoch": 2.774193548387097,
"grad_norm": 4.5859246253967285,
"learning_rate": 4.9640287769784175e-06,
"loss": 0.2029,
"step": 86
},
{
"epoch": 2.806451612903226,
"grad_norm": 3.3746910095214844,
"learning_rate": 4.892086330935253e-06,
"loss": 0.3087,
"step": 87
},
{
"epoch": 2.838709677419355,
"grad_norm": 5.0139875411987305,
"learning_rate": 4.820143884892087e-06,
"loss": 0.3919,
"step": 88
},
{
"epoch": 2.870967741935484,
"grad_norm": 3.168724775314331,
"learning_rate": 4.748201438848921e-06,
"loss": 0.2469,
"step": 89
},
{
"epoch": 2.903225806451613,
"grad_norm": 6.2994914054870605,
"learning_rate": 4.676258992805755e-06,
"loss": 0.2812,
"step": 90
},
{
"epoch": 2.935483870967742,
"grad_norm": 1.8329119682312012,
"learning_rate": 4.60431654676259e-06,
"loss": 0.148,
"step": 91
},
{
"epoch": 2.967741935483871,
"grad_norm": 3.138279914855957,
"learning_rate": 4.5323741007194245e-06,
"loss": 0.1359,
"step": 92
},
{
"epoch": 3.0,
"grad_norm": 4.396747589111328,
"learning_rate": 4.46043165467626e-06,
"loss": 0.1879,
"step": 93
},
{
"epoch": 3.0,
"eval_accuracy": 0.967741935483871,
"eval_f1_macro": 0.9674603174603175,
"eval_f1_micro": 0.967741935483871,
"eval_f1_weighted": 0.967741935483871,
"eval_loss": 0.22156503796577454,
"eval_precision_macro": 0.9674603174603175,
"eval_precision_micro": 0.967741935483871,
"eval_precision_weighted": 0.967741935483871,
"eval_recall_macro": 0.9674603174603175,
"eval_recall_micro": 0.967741935483871,
"eval_recall_weighted": 0.967741935483871,
"eval_runtime": 0.3612,
"eval_samples_per_second": 171.656,
"eval_steps_per_second": 11.075,
"step": 93
},
{
"epoch": 3.032258064516129,
"grad_norm": 2.482665538787842,
"learning_rate": 4.388489208633094e-06,
"loss": 0.203,
"step": 94
},
{
"epoch": 3.064516129032258,
"grad_norm": 3.7393405437469482,
"learning_rate": 4.316546762589928e-06,
"loss": 0.4162,
"step": 95
},
{
"epoch": 3.096774193548387,
"grad_norm": 4.992405414581299,
"learning_rate": 4.244604316546763e-06,
"loss": 0.272,
"step": 96
},
{
"epoch": 3.129032258064516,
"grad_norm": 5.053823471069336,
"learning_rate": 4.172661870503597e-06,
"loss": 0.3839,
"step": 97
},
{
"epoch": 3.161290322580645,
"grad_norm": 9.411662101745605,
"learning_rate": 4.100719424460432e-06,
"loss": 0.2929,
"step": 98
},
{
"epoch": 3.193548387096774,
"grad_norm": 3.2983734607696533,
"learning_rate": 4.028776978417267e-06,
"loss": 0.1844,
"step": 99
},
{
"epoch": 3.225806451612903,
"grad_norm": 9.005085945129395,
"learning_rate": 3.956834532374101e-06,
"loss": 0.2335,
"step": 100
},
{
"epoch": 3.258064516129032,
"grad_norm": 2.7752511501312256,
"learning_rate": 3.884892086330936e-06,
"loss": 0.1636,
"step": 101
},
{
"epoch": 3.2903225806451615,
"grad_norm": 2.129927635192871,
"learning_rate": 3.81294964028777e-06,
"loss": 0.1674,
"step": 102
},
{
"epoch": 3.3225806451612905,
"grad_norm": 3.201385736465454,
"learning_rate": 3.741007194244605e-06,
"loss": 0.2321,
"step": 103
},
{
"epoch": 3.3548387096774195,
"grad_norm": 6.4212775230407715,
"learning_rate": 3.669064748201439e-06,
"loss": 0.3104,
"step": 104
},
{
"epoch": 3.3870967741935485,
"grad_norm": 9.587445259094238,
"learning_rate": 3.5971223021582737e-06,
"loss": 0.3252,
"step": 105
},
{
"epoch": 3.4193548387096775,
"grad_norm": 4.780144214630127,
"learning_rate": 3.525179856115108e-06,
"loss": 0.2342,
"step": 106
},
{
"epoch": 3.4516129032258065,
"grad_norm": 6.432891845703125,
"learning_rate": 3.453237410071943e-06,
"loss": 0.3754,
"step": 107
},
{
"epoch": 3.4838709677419355,
"grad_norm": 1.6028318405151367,
"learning_rate": 3.381294964028777e-06,
"loss": 0.0951,
"step": 108
},
{
"epoch": 3.5161290322580645,
"grad_norm": 3.9140098094940186,
"learning_rate": 3.309352517985612e-06,
"loss": 0.1739,
"step": 109
},
{
"epoch": 3.5483870967741935,
"grad_norm": 5.9437785148620605,
"learning_rate": 3.237410071942446e-06,
"loss": 0.3609,
"step": 110
},
{
"epoch": 3.5806451612903225,
"grad_norm": 2.725947380065918,
"learning_rate": 3.1654676258992807e-06,
"loss": 0.2015,
"step": 111
},
{
"epoch": 3.6129032258064515,
"grad_norm": 5.769122123718262,
"learning_rate": 3.0935251798561158e-06,
"loss": 0.2124,
"step": 112
},
{
"epoch": 3.6451612903225805,
"grad_norm": 3.645589590072632,
"learning_rate": 3.02158273381295e-06,
"loss": 0.157,
"step": 113
},
{
"epoch": 3.6774193548387095,
"grad_norm": 2.2191619873046875,
"learning_rate": 2.949640287769784e-06,
"loss": 0.1301,
"step": 114
},
{
"epoch": 3.709677419354839,
"grad_norm": 2.9101061820983887,
"learning_rate": 2.877697841726619e-06,
"loss": 0.1281,
"step": 115
},
{
"epoch": 3.741935483870968,
"grad_norm": 8.459104537963867,
"learning_rate": 2.805755395683453e-06,
"loss": 0.2189,
"step": 116
},
{
"epoch": 3.774193548387097,
"grad_norm": 4.622738361358643,
"learning_rate": 2.733812949640288e-06,
"loss": 0.1275,
"step": 117
},
{
"epoch": 3.806451612903226,
"grad_norm": 5.437811374664307,
"learning_rate": 2.6618705035971228e-06,
"loss": 0.2634,
"step": 118
},
{
"epoch": 3.838709677419355,
"grad_norm": 1.6734240055084229,
"learning_rate": 2.589928057553957e-06,
"loss": 0.1186,
"step": 119
},
{
"epoch": 3.870967741935484,
"grad_norm": 1.7176499366760254,
"learning_rate": 2.5179856115107916e-06,
"loss": 0.1065,
"step": 120
},
{
"epoch": 3.903225806451613,
"grad_norm": Infinity,
"learning_rate": 2.5179856115107916e-06,
"loss": 0.2718,
"step": 121
},
{
"epoch": 3.935483870967742,
"grad_norm": 5.398061752319336,
"learning_rate": 2.4460431654676263e-06,
"loss": 0.1756,
"step": 122
},
{
"epoch": 3.967741935483871,
"grad_norm": 5.624890327453613,
"learning_rate": 2.3741007194244605e-06,
"loss": 0.177,
"step": 123
},
{
"epoch": 4.0,
"grad_norm": 2.5228195190429688,
"learning_rate": 2.302158273381295e-06,
"loss": 0.1125,
"step": 124
},
{
"epoch": 4.0,
"eval_accuracy": 0.9838709677419355,
"eval_f1_macro": 0.983739837398374,
"eval_f1_micro": 0.9838709677419355,
"eval_f1_weighted": 0.9838709677419355,
"eval_loss": 0.12518411874771118,
"eval_precision_macro": 0.9841269841269842,
"eval_precision_micro": 0.9838709677419355,
"eval_precision_weighted": 0.9846390168970814,
"eval_recall_macro": 0.9841269841269842,
"eval_recall_micro": 0.9838709677419355,
"eval_recall_weighted": 0.9838709677419355,
"eval_runtime": 0.361,
"eval_samples_per_second": 171.764,
"eval_steps_per_second": 11.082,
"step": 124
},
{
"epoch": 4.032258064516129,
"grad_norm": 7.587864398956299,
"learning_rate": 2.23021582733813e-06,
"loss": 0.3132,
"step": 125
},
{
"epoch": 4.064516129032258,
"grad_norm": 9.148059844970703,
"learning_rate": 2.158273381294964e-06,
"loss": 0.3178,
"step": 126
},
{
"epoch": 4.096774193548387,
"grad_norm": 1.7947614192962646,
"learning_rate": 2.0863309352517987e-06,
"loss": 0.112,
"step": 127
},
{
"epoch": 4.129032258064516,
"grad_norm": 1.4539235830307007,
"learning_rate": 2.0143884892086333e-06,
"loss": 0.0894,
"step": 128
},
{
"epoch": 4.161290322580645,
"grad_norm": 3.562119245529175,
"learning_rate": 1.942446043165468e-06,
"loss": 0.1412,
"step": 129
},
{
"epoch": 4.193548387096774,
"grad_norm": 3.7732717990875244,
"learning_rate": 1.8705035971223024e-06,
"loss": 0.1285,
"step": 130
},
{
"epoch": 4.225806451612903,
"grad_norm": 8.253504753112793,
"learning_rate": 1.7985611510791368e-06,
"loss": 0.1619,
"step": 131
},
{
"epoch": 4.258064516129032,
"grad_norm": 1.0998966693878174,
"learning_rate": 1.7266187050359715e-06,
"loss": 0.0796,
"step": 132
},
{
"epoch": 4.290322580645161,
"grad_norm": 1.808113694190979,
"learning_rate": 1.654676258992806e-06,
"loss": 0.0901,
"step": 133
},
{
"epoch": 4.32258064516129,
"grad_norm": 1.456236720085144,
"learning_rate": 1.5827338129496403e-06,
"loss": 0.0853,
"step": 134
},
{
"epoch": 4.354838709677419,
"grad_norm": 8.323379516601562,
"learning_rate": 1.510791366906475e-06,
"loss": 0.2426,
"step": 135
},
{
"epoch": 4.387096774193548,
"grad_norm": 6.862564563751221,
"learning_rate": 1.4388489208633094e-06,
"loss": 0.179,
"step": 136
},
{
"epoch": 4.419354838709677,
"grad_norm": 8.144110679626465,
"learning_rate": 1.366906474820144e-06,
"loss": 0.2786,
"step": 137
},
{
"epoch": 4.451612903225806,
"grad_norm": 1.4601564407348633,
"learning_rate": 1.2949640287769785e-06,
"loss": 0.0765,
"step": 138
},
{
"epoch": 4.483870967741936,
"grad_norm": 8.350536346435547,
"learning_rate": 1.2230215827338131e-06,
"loss": 0.1893,
"step": 139
},
{
"epoch": 4.516129032258064,
"grad_norm": 6.004292011260986,
"learning_rate": 1.1510791366906476e-06,
"loss": 0.1283,
"step": 140
},
{
"epoch": 4.548387096774194,
"grad_norm": 4.625925540924072,
"learning_rate": 1.079136690647482e-06,
"loss": 0.1,
"step": 141
},
{
"epoch": 4.580645161290323,
"grad_norm": 4.438725471496582,
"learning_rate": 1.0071942446043167e-06,
"loss": 0.1802,
"step": 142
},
{
"epoch": 4.612903225806452,
"grad_norm": 15.338831901550293,
"learning_rate": 9.352517985611512e-07,
"loss": 0.2354,
"step": 143
},
{
"epoch": 4.645161290322581,
"grad_norm": 4.097437381744385,
"learning_rate": 8.633093525179857e-07,
"loss": 0.1042,
"step": 144
},
{
"epoch": 4.67741935483871,
"grad_norm": 4.476722240447998,
"learning_rate": 7.913669064748202e-07,
"loss": 0.1137,
"step": 145
},
{
"epoch": 4.709677419354839,
"grad_norm": 8.289087295532227,
"learning_rate": 7.194244604316547e-07,
"loss": 0.273,
"step": 146
},
{
"epoch": 4.741935483870968,
"grad_norm": 1.9783574342727661,
"learning_rate": 6.474820143884893e-07,
"loss": 0.1018,
"step": 147
},
{
"epoch": 4.774193548387097,
"grad_norm": 4.377150535583496,
"learning_rate": 5.755395683453238e-07,
"loss": 0.1195,
"step": 148
},
{
"epoch": 4.806451612903226,
"grad_norm": 3.9018876552581787,
"learning_rate": 5.035971223021583e-07,
"loss": 0.1093,
"step": 149
},
{
"epoch": 4.838709677419355,
"grad_norm": 13.00845718383789,
"learning_rate": 4.3165467625899287e-07,
"loss": 0.3638,
"step": 150
},
{
"epoch": 4.870967741935484,
"grad_norm": 4.976847171783447,
"learning_rate": 3.5971223021582736e-07,
"loss": 0.0991,
"step": 151
},
{
"epoch": 4.903225806451613,
"grad_norm": 10.16610336303711,
"learning_rate": 2.877697841726619e-07,
"loss": 0.1559,
"step": 152
},
{
"epoch": 4.935483870967742,
"grad_norm": 1.9939817190170288,
"learning_rate": 2.1582733812949643e-07,
"loss": 0.1162,
"step": 153
},
{
"epoch": 4.967741935483871,
"grad_norm": 5.235056400299072,
"learning_rate": 1.4388489208633095e-07,
"loss": 0.133,
"step": 154
},
{
"epoch": 5.0,
"grad_norm": 2.054269790649414,
"learning_rate": 7.194244604316547e-08,
"loss": 0.0845,
"step": 155
},
{
"epoch": 5.0,
"eval_accuracy": 0.9838709677419355,
"eval_f1_macro": 0.983739837398374,
"eval_f1_micro": 0.9838709677419355,
"eval_f1_weighted": 0.9838709677419355,
"eval_loss": 0.10599859803915024,
"eval_precision_macro": 0.9841269841269842,
"eval_precision_micro": 0.9838709677419355,
"eval_precision_weighted": 0.9846390168970814,
"eval_recall_macro": 0.9841269841269842,
"eval_recall_micro": 0.9838709677419355,
"eval_recall_weighted": 0.9838709677419355,
"eval_runtime": 0.3592,
"eval_samples_per_second": 172.607,
"eval_steps_per_second": 11.136,
"step": 155
}
],
"logging_steps": 1,
"max_steps": 155,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 161613108449280.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}