Vistral-7B-ChatML / checkpoint-300 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.5,
"eval_steps": 500,
"global_step": 300,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"learning_rate": 2.5e-05,
"loss": 2.6897,
"step": 1
},
{
"epoch": 0.01,
"learning_rate": 2.4968710888610763e-05,
"loss": 2.6671,
"step": 2
},
{
"epoch": 0.01,
"learning_rate": 2.493742177722153e-05,
"loss": 2.5698,
"step": 3
},
{
"epoch": 0.02,
"learning_rate": 2.490613266583229e-05,
"loss": 2.0954,
"step": 4
},
{
"epoch": 0.03,
"learning_rate": 2.4874843554443056e-05,
"loss": 1.8796,
"step": 5
},
{
"epoch": 0.03,
"learning_rate": 2.484355444305382e-05,
"loss": 1.7986,
"step": 6
},
{
"epoch": 0.04,
"learning_rate": 2.4812265331664584e-05,
"loss": 1.7025,
"step": 7
},
{
"epoch": 0.04,
"learning_rate": 2.4780976220275346e-05,
"loss": 1.6202,
"step": 8
},
{
"epoch": 0.04,
"learning_rate": 2.4749687108886108e-05,
"loss": 1.4594,
"step": 9
},
{
"epoch": 0.05,
"learning_rate": 2.4718397997496874e-05,
"loss": 1.1673,
"step": 10
},
{
"epoch": 0.06,
"learning_rate": 2.4687108886107636e-05,
"loss": 1.2507,
"step": 11
},
{
"epoch": 0.06,
"learning_rate": 2.46558197747184e-05,
"loss": 1.2693,
"step": 12
},
{
"epoch": 0.07,
"learning_rate": 2.4624530663329163e-05,
"loss": 1.3959,
"step": 13
},
{
"epoch": 0.07,
"learning_rate": 2.459324155193993e-05,
"loss": 1.2798,
"step": 14
},
{
"epoch": 0.07,
"learning_rate": 2.456195244055069e-05,
"loss": 1.1193,
"step": 15
},
{
"epoch": 0.08,
"learning_rate": 2.4530663329161453e-05,
"loss": 1.1838,
"step": 16
},
{
"epoch": 0.09,
"learning_rate": 2.449937421777222e-05,
"loss": 1.0905,
"step": 17
},
{
"epoch": 0.09,
"learning_rate": 2.446808510638298e-05,
"loss": 1.0863,
"step": 18
},
{
"epoch": 0.1,
"learning_rate": 2.4436795994993742e-05,
"loss": 1.1153,
"step": 19
},
{
"epoch": 0.1,
"learning_rate": 2.4405506883604508e-05,
"loss": 1.0519,
"step": 20
},
{
"epoch": 0.1,
"learning_rate": 2.437421777221527e-05,
"loss": 1.0254,
"step": 21
},
{
"epoch": 0.11,
"learning_rate": 2.4342928660826032e-05,
"loss": 1.15,
"step": 22
},
{
"epoch": 0.12,
"learning_rate": 2.4311639549436798e-05,
"loss": 0.999,
"step": 23
},
{
"epoch": 0.12,
"learning_rate": 2.428035043804756e-05,
"loss": 0.9753,
"step": 24
},
{
"epoch": 0.12,
"learning_rate": 2.4249061326658322e-05,
"loss": 0.9843,
"step": 25
},
{
"epoch": 0.13,
"learning_rate": 2.4217772215269087e-05,
"loss": 0.8658,
"step": 26
},
{
"epoch": 0.14,
"learning_rate": 2.418648310387985e-05,
"loss": 0.8473,
"step": 27
},
{
"epoch": 0.14,
"learning_rate": 2.4155193992490615e-05,
"loss": 0.9926,
"step": 28
},
{
"epoch": 0.14,
"learning_rate": 2.4123904881101377e-05,
"loss": 1.0976,
"step": 29
},
{
"epoch": 0.15,
"learning_rate": 2.409261576971214e-05,
"loss": 1.0307,
"step": 30
},
{
"epoch": 0.15,
"learning_rate": 2.4061326658322904e-05,
"loss": 0.9448,
"step": 31
},
{
"epoch": 0.16,
"learning_rate": 2.4030037546933667e-05,
"loss": 0.9706,
"step": 32
},
{
"epoch": 0.17,
"learning_rate": 2.3998748435544432e-05,
"loss": 1.0063,
"step": 33
},
{
"epoch": 0.17,
"learning_rate": 2.3967459324155194e-05,
"loss": 0.9862,
"step": 34
},
{
"epoch": 0.17,
"learning_rate": 2.393617021276596e-05,
"loss": 0.9794,
"step": 35
},
{
"epoch": 0.18,
"learning_rate": 2.390488110137672e-05,
"loss": 1.0715,
"step": 36
},
{
"epoch": 0.18,
"learning_rate": 2.3873591989987484e-05,
"loss": 0.8954,
"step": 37
},
{
"epoch": 0.19,
"learning_rate": 2.384230287859825e-05,
"loss": 0.9088,
"step": 38
},
{
"epoch": 0.2,
"learning_rate": 2.381101376720901e-05,
"loss": 0.9877,
"step": 39
},
{
"epoch": 0.2,
"learning_rate": 2.3779724655819777e-05,
"loss": 0.9352,
"step": 40
},
{
"epoch": 0.2,
"learning_rate": 2.374843554443054e-05,
"loss": 1.0049,
"step": 41
},
{
"epoch": 0.21,
"learning_rate": 2.3717146433041304e-05,
"loss": 1.095,
"step": 42
},
{
"epoch": 0.21,
"learning_rate": 2.3685857321652066e-05,
"loss": 0.8134,
"step": 43
},
{
"epoch": 0.22,
"learning_rate": 2.365456821026283e-05,
"loss": 0.8944,
"step": 44
},
{
"epoch": 0.23,
"learning_rate": 2.3623279098873594e-05,
"loss": 0.9799,
"step": 45
},
{
"epoch": 0.23,
"learning_rate": 2.3591989987484356e-05,
"loss": 0.8573,
"step": 46
},
{
"epoch": 0.23,
"learning_rate": 2.356070087609512e-05,
"loss": 0.9628,
"step": 47
},
{
"epoch": 0.24,
"learning_rate": 2.3529411764705884e-05,
"loss": 0.847,
"step": 48
},
{
"epoch": 0.24,
"learning_rate": 2.349812265331665e-05,
"loss": 0.8553,
"step": 49
},
{
"epoch": 0.25,
"learning_rate": 2.346683354192741e-05,
"loss": 0.9287,
"step": 50
},
{
"epoch": 0.26,
"learning_rate": 2.3435544430538173e-05,
"loss": 0.8922,
"step": 51
},
{
"epoch": 0.26,
"learning_rate": 2.340425531914894e-05,
"loss": 0.8925,
"step": 52
},
{
"epoch": 0.27,
"learning_rate": 2.33729662077597e-05,
"loss": 0.9052,
"step": 53
},
{
"epoch": 0.27,
"learning_rate": 2.3341677096370466e-05,
"loss": 0.9616,
"step": 54
},
{
"epoch": 0.28,
"learning_rate": 2.331038798498123e-05,
"loss": 0.8965,
"step": 55
},
{
"epoch": 0.28,
"learning_rate": 2.3279098873591994e-05,
"loss": 0.8449,
"step": 56
},
{
"epoch": 0.28,
"learning_rate": 2.3247809762202756e-05,
"loss": 0.9513,
"step": 57
},
{
"epoch": 0.29,
"learning_rate": 2.3216520650813518e-05,
"loss": 0.9176,
"step": 58
},
{
"epoch": 0.29,
"learning_rate": 2.3185231539424284e-05,
"loss": 0.8795,
"step": 59
},
{
"epoch": 0.3,
"learning_rate": 2.3153942428035046e-05,
"loss": 0.9245,
"step": 60
},
{
"epoch": 0.3,
"learning_rate": 2.3122653316645808e-05,
"loss": 0.8279,
"step": 61
},
{
"epoch": 0.31,
"learning_rate": 2.309136420525657e-05,
"loss": 0.89,
"step": 62
},
{
"epoch": 0.32,
"learning_rate": 2.3060075093867335e-05,
"loss": 0.916,
"step": 63
},
{
"epoch": 0.32,
"learning_rate": 2.3028785982478097e-05,
"loss": 0.9223,
"step": 64
},
{
"epoch": 0.33,
"learning_rate": 2.299749687108886e-05,
"loss": 1.0349,
"step": 65
},
{
"epoch": 0.33,
"learning_rate": 2.2966207759699625e-05,
"loss": 0.8693,
"step": 66
},
{
"epoch": 0.34,
"learning_rate": 2.2934918648310387e-05,
"loss": 0.8737,
"step": 67
},
{
"epoch": 0.34,
"learning_rate": 2.2903629536921153e-05,
"loss": 0.9092,
"step": 68
},
{
"epoch": 0.34,
"learning_rate": 2.2872340425531915e-05,
"loss": 0.8561,
"step": 69
},
{
"epoch": 0.35,
"learning_rate": 2.284105131414268e-05,
"loss": 1.0239,
"step": 70
},
{
"epoch": 0.35,
"learning_rate": 2.2809762202753442e-05,
"loss": 0.7588,
"step": 71
},
{
"epoch": 0.36,
"learning_rate": 2.2778473091364204e-05,
"loss": 0.9493,
"step": 72
},
{
"epoch": 0.36,
"learning_rate": 2.274718397997497e-05,
"loss": 0.9049,
"step": 73
},
{
"epoch": 0.37,
"learning_rate": 2.2715894868585732e-05,
"loss": 0.8972,
"step": 74
},
{
"epoch": 0.38,
"learning_rate": 2.2684605757196497e-05,
"loss": 0.8886,
"step": 75
},
{
"epoch": 0.38,
"learning_rate": 2.265331664580726e-05,
"loss": 0.7319,
"step": 76
},
{
"epoch": 0.39,
"learning_rate": 2.2622027534418025e-05,
"loss": 0.842,
"step": 77
},
{
"epoch": 0.39,
"learning_rate": 2.2590738423028787e-05,
"loss": 0.788,
"step": 78
},
{
"epoch": 0.4,
"learning_rate": 2.255944931163955e-05,
"loss": 0.9804,
"step": 79
},
{
"epoch": 0.4,
"learning_rate": 2.2528160200250315e-05,
"loss": 0.7391,
"step": 80
},
{
"epoch": 0.41,
"learning_rate": 2.2496871088861077e-05,
"loss": 0.7924,
"step": 81
},
{
"epoch": 0.41,
"learning_rate": 2.2465581977471842e-05,
"loss": 0.812,
"step": 82
},
{
"epoch": 0.41,
"learning_rate": 2.2434292866082604e-05,
"loss": 0.8704,
"step": 83
},
{
"epoch": 0.42,
"learning_rate": 2.240300375469337e-05,
"loss": 0.9598,
"step": 84
},
{
"epoch": 0.42,
"learning_rate": 2.2371714643304132e-05,
"loss": 0.8249,
"step": 85
},
{
"epoch": 0.43,
"learning_rate": 2.2340425531914894e-05,
"loss": 0.9608,
"step": 86
},
{
"epoch": 0.43,
"learning_rate": 2.230913642052566e-05,
"loss": 0.9153,
"step": 87
},
{
"epoch": 0.44,
"learning_rate": 2.227784730913642e-05,
"loss": 0.9486,
"step": 88
},
{
"epoch": 0.45,
"learning_rate": 2.2246558197747187e-05,
"loss": 0.9806,
"step": 89
},
{
"epoch": 0.45,
"learning_rate": 2.221526908635795e-05,
"loss": 0.8544,
"step": 90
},
{
"epoch": 0.46,
"learning_rate": 2.2183979974968714e-05,
"loss": 0.8845,
"step": 91
},
{
"epoch": 0.46,
"learning_rate": 2.2152690863579477e-05,
"loss": 0.926,
"step": 92
},
{
"epoch": 0.47,
"learning_rate": 2.212140175219024e-05,
"loss": 0.8515,
"step": 93
},
{
"epoch": 0.47,
"learning_rate": 2.2090112640801004e-05,
"loss": 0.7667,
"step": 94
},
{
"epoch": 0.47,
"learning_rate": 2.2058823529411766e-05,
"loss": 0.813,
"step": 95
},
{
"epoch": 0.48,
"learning_rate": 2.202753441802253e-05,
"loss": 0.785,
"step": 96
},
{
"epoch": 0.48,
"learning_rate": 2.1996245306633294e-05,
"loss": 0.87,
"step": 97
},
{
"epoch": 0.49,
"learning_rate": 2.1964956195244056e-05,
"loss": 0.8281,
"step": 98
},
{
"epoch": 0.49,
"learning_rate": 2.193366708385482e-05,
"loss": 0.9875,
"step": 99
},
{
"epoch": 0.5,
"learning_rate": 2.1902377972465583e-05,
"loss": 0.8709,
"step": 100
},
{
"epoch": 0.51,
"learning_rate": 2.1871088861076345e-05,
"loss": 0.9377,
"step": 101
},
{
"epoch": 0.51,
"learning_rate": 2.183979974968711e-05,
"loss": 0.7962,
"step": 102
},
{
"epoch": 0.52,
"learning_rate": 2.1808510638297873e-05,
"loss": 0.7485,
"step": 103
},
{
"epoch": 0.52,
"learning_rate": 2.1777221526908635e-05,
"loss": 0.7911,
"step": 104
},
{
"epoch": 0.53,
"learning_rate": 2.17459324155194e-05,
"loss": 0.8777,
"step": 105
},
{
"epoch": 0.53,
"learning_rate": 2.1714643304130163e-05,
"loss": 0.8287,
"step": 106
},
{
"epoch": 0.54,
"learning_rate": 2.1683354192740925e-05,
"loss": 0.8716,
"step": 107
},
{
"epoch": 0.54,
"learning_rate": 2.165206508135169e-05,
"loss": 0.8574,
"step": 108
},
{
"epoch": 0.55,
"learning_rate": 2.1620775969962452e-05,
"loss": 0.9586,
"step": 109
},
{
"epoch": 0.55,
"learning_rate": 2.1589486858573218e-05,
"loss": 0.867,
"step": 110
},
{
"epoch": 0.56,
"learning_rate": 2.155819774718398e-05,
"loss": 0.852,
"step": 111
},
{
"epoch": 0.56,
"learning_rate": 2.1526908635794745e-05,
"loss": 0.9795,
"step": 112
},
{
"epoch": 0.56,
"learning_rate": 2.1495619524405507e-05,
"loss": 0.8867,
"step": 113
},
{
"epoch": 0.57,
"learning_rate": 2.146433041301627e-05,
"loss": 0.819,
"step": 114
},
{
"epoch": 0.57,
"learning_rate": 2.1433041301627035e-05,
"loss": 0.9421,
"step": 115
},
{
"epoch": 0.58,
"learning_rate": 2.1401752190237797e-05,
"loss": 1.0068,
"step": 116
},
{
"epoch": 0.58,
"learning_rate": 2.1370463078848563e-05,
"loss": 0.8312,
"step": 117
},
{
"epoch": 0.59,
"learning_rate": 2.1339173967459325e-05,
"loss": 0.9313,
"step": 118
},
{
"epoch": 0.59,
"learning_rate": 2.130788485607009e-05,
"loss": 0.858,
"step": 119
},
{
"epoch": 0.6,
"learning_rate": 2.1276595744680852e-05,
"loss": 0.8108,
"step": 120
},
{
"epoch": 0.6,
"learning_rate": 2.1245306633291614e-05,
"loss": 0.8538,
"step": 121
},
{
"epoch": 0.61,
"learning_rate": 2.121401752190238e-05,
"loss": 0.8773,
"step": 122
},
{
"epoch": 0.61,
"learning_rate": 2.1182728410513142e-05,
"loss": 0.9329,
"step": 123
},
{
"epoch": 0.62,
"learning_rate": 2.1151439299123907e-05,
"loss": 1.0055,
"step": 124
},
{
"epoch": 0.62,
"learning_rate": 2.112015018773467e-05,
"loss": 0.859,
"step": 125
},
{
"epoch": 0.63,
"learning_rate": 2.1088861076345435e-05,
"loss": 0.6981,
"step": 126
},
{
"epoch": 0.64,
"learning_rate": 2.1057571964956197e-05,
"loss": 0.7186,
"step": 127
},
{
"epoch": 0.64,
"learning_rate": 2.102628285356696e-05,
"loss": 0.8779,
"step": 128
},
{
"epoch": 0.65,
"learning_rate": 2.0994993742177725e-05,
"loss": 0.8209,
"step": 129
},
{
"epoch": 0.65,
"learning_rate": 2.0963704630788487e-05,
"loss": 0.8902,
"step": 130
},
{
"epoch": 0.66,
"learning_rate": 2.0932415519399252e-05,
"loss": 0.8439,
"step": 131
},
{
"epoch": 0.66,
"learning_rate": 2.0901126408010014e-05,
"loss": 0.8764,
"step": 132
},
{
"epoch": 0.67,
"learning_rate": 2.086983729662078e-05,
"loss": 0.8853,
"step": 133
},
{
"epoch": 0.67,
"learning_rate": 2.0838548185231542e-05,
"loss": 0.9366,
"step": 134
},
{
"epoch": 0.68,
"learning_rate": 2.0807259073842304e-05,
"loss": 0.8731,
"step": 135
},
{
"epoch": 0.68,
"learning_rate": 2.077596996245307e-05,
"loss": 0.9189,
"step": 136
},
{
"epoch": 0.69,
"learning_rate": 2.074468085106383e-05,
"loss": 0.9024,
"step": 137
},
{
"epoch": 0.69,
"learning_rate": 2.0713391739674597e-05,
"loss": 0.7801,
"step": 138
},
{
"epoch": 0.69,
"learning_rate": 2.068210262828536e-05,
"loss": 0.7881,
"step": 139
},
{
"epoch": 0.7,
"learning_rate": 2.065081351689612e-05,
"loss": 0.9681,
"step": 140
},
{
"epoch": 0.7,
"learning_rate": 2.0619524405506883e-05,
"loss": 0.8103,
"step": 141
},
{
"epoch": 0.71,
"learning_rate": 2.058823529411765e-05,
"loss": 0.755,
"step": 142
},
{
"epoch": 0.71,
"learning_rate": 2.055694618272841e-05,
"loss": 0.9589,
"step": 143
},
{
"epoch": 0.72,
"learning_rate": 2.0525657071339173e-05,
"loss": 1.0164,
"step": 144
},
{
"epoch": 0.72,
"learning_rate": 2.0494367959949938e-05,
"loss": 0.8323,
"step": 145
},
{
"epoch": 0.73,
"learning_rate": 2.04630788485607e-05,
"loss": 0.7985,
"step": 146
},
{
"epoch": 0.73,
"learning_rate": 2.0431789737171462e-05,
"loss": 1.0317,
"step": 147
},
{
"epoch": 0.74,
"learning_rate": 2.0400500625782228e-05,
"loss": 0.7405,
"step": 148
},
{
"epoch": 0.74,
"learning_rate": 2.036921151439299e-05,
"loss": 0.8311,
"step": 149
},
{
"epoch": 0.75,
"learning_rate": 2.0337922403003756e-05,
"loss": 0.79,
"step": 150
},
{
"epoch": 0.76,
"learning_rate": 2.0306633291614518e-05,
"loss": 0.8272,
"step": 151
},
{
"epoch": 0.76,
"learning_rate": 2.0275344180225283e-05,
"loss": 0.777,
"step": 152
},
{
"epoch": 0.77,
"learning_rate": 2.0244055068836045e-05,
"loss": 0.7773,
"step": 153
},
{
"epoch": 0.77,
"learning_rate": 2.0212765957446807e-05,
"loss": 0.7078,
"step": 154
},
{
"epoch": 0.78,
"learning_rate": 2.0181476846057573e-05,
"loss": 0.9022,
"step": 155
},
{
"epoch": 0.78,
"learning_rate": 2.0150187734668335e-05,
"loss": 0.8121,
"step": 156
},
{
"epoch": 0.79,
"learning_rate": 2.01188986232791e-05,
"loss": 0.8438,
"step": 157
},
{
"epoch": 0.79,
"learning_rate": 2.0087609511889862e-05,
"loss": 0.8567,
"step": 158
},
{
"epoch": 0.8,
"learning_rate": 2.0056320400500628e-05,
"loss": 0.7968,
"step": 159
},
{
"epoch": 0.8,
"learning_rate": 2.002503128911139e-05,
"loss": 0.8846,
"step": 160
},
{
"epoch": 0.81,
"learning_rate": 1.9993742177722152e-05,
"loss": 0.7853,
"step": 161
},
{
"epoch": 0.81,
"learning_rate": 1.9962453066332917e-05,
"loss": 0.8335,
"step": 162
},
{
"epoch": 0.81,
"learning_rate": 1.993116395494368e-05,
"loss": 0.9056,
"step": 163
},
{
"epoch": 0.82,
"learning_rate": 1.9899874843554445e-05,
"loss": 0.826,
"step": 164
},
{
"epoch": 0.82,
"learning_rate": 1.9868585732165207e-05,
"loss": 0.7589,
"step": 165
},
{
"epoch": 0.83,
"learning_rate": 1.9837296620775973e-05,
"loss": 0.8544,
"step": 166
},
{
"epoch": 0.83,
"learning_rate": 1.9806007509386735e-05,
"loss": 0.7743,
"step": 167
},
{
"epoch": 0.84,
"learning_rate": 1.9774718397997497e-05,
"loss": 0.7362,
"step": 168
},
{
"epoch": 0.84,
"learning_rate": 1.9743429286608262e-05,
"loss": 0.7873,
"step": 169
},
{
"epoch": 0.85,
"learning_rate": 1.9712140175219024e-05,
"loss": 0.861,
"step": 170
},
{
"epoch": 0.85,
"learning_rate": 1.968085106382979e-05,
"loss": 1.001,
"step": 171
},
{
"epoch": 0.86,
"learning_rate": 1.9649561952440552e-05,
"loss": 0.9549,
"step": 172
},
{
"epoch": 0.86,
"learning_rate": 1.9618272841051317e-05,
"loss": 0.7536,
"step": 173
},
{
"epoch": 0.87,
"learning_rate": 1.958698372966208e-05,
"loss": 0.7567,
"step": 174
},
{
"epoch": 0.88,
"learning_rate": 1.955569461827284e-05,
"loss": 0.8637,
"step": 175
},
{
"epoch": 0.88,
"learning_rate": 1.9524405506883607e-05,
"loss": 0.8629,
"step": 176
},
{
"epoch": 0.89,
"learning_rate": 1.949311639549437e-05,
"loss": 0.8645,
"step": 177
},
{
"epoch": 0.89,
"learning_rate": 1.9461827284105135e-05,
"loss": 0.7941,
"step": 178
},
{
"epoch": 0.9,
"learning_rate": 1.9430538172715897e-05,
"loss": 0.9089,
"step": 179
},
{
"epoch": 0.9,
"learning_rate": 1.939924906132666e-05,
"loss": 0.8458,
"step": 180
},
{
"epoch": 0.91,
"learning_rate": 1.9367959949937424e-05,
"loss": 0.7946,
"step": 181
},
{
"epoch": 0.91,
"learning_rate": 1.9336670838548186e-05,
"loss": 0.7927,
"step": 182
},
{
"epoch": 0.92,
"learning_rate": 1.930538172715895e-05,
"loss": 0.9227,
"step": 183
},
{
"epoch": 0.92,
"learning_rate": 1.927409261576971e-05,
"loss": 0.7854,
"step": 184
},
{
"epoch": 0.93,
"learning_rate": 1.9242803504380476e-05,
"loss": 0.7407,
"step": 185
},
{
"epoch": 0.93,
"learning_rate": 1.9211514392991238e-05,
"loss": 0.8731,
"step": 186
},
{
"epoch": 0.94,
"learning_rate": 1.9180225281602004e-05,
"loss": 0.817,
"step": 187
},
{
"epoch": 0.94,
"learning_rate": 1.9148936170212766e-05,
"loss": 0.7726,
"step": 188
},
{
"epoch": 0.94,
"learning_rate": 1.9117647058823528e-05,
"loss": 0.8113,
"step": 189
},
{
"epoch": 0.95,
"learning_rate": 1.9086357947434293e-05,
"loss": 0.7673,
"step": 190
},
{
"epoch": 0.95,
"learning_rate": 1.9055068836045055e-05,
"loss": 0.9078,
"step": 191
},
{
"epoch": 0.96,
"learning_rate": 1.902377972465582e-05,
"loss": 0.7135,
"step": 192
},
{
"epoch": 0.96,
"learning_rate": 1.8992490613266583e-05,
"loss": 0.8699,
"step": 193
},
{
"epoch": 0.97,
"learning_rate": 1.896120150187735e-05,
"loss": 0.7306,
"step": 194
},
{
"epoch": 0.97,
"learning_rate": 1.892991239048811e-05,
"loss": 0.9502,
"step": 195
},
{
"epoch": 0.98,
"learning_rate": 1.8898623279098873e-05,
"loss": 0.8298,
"step": 196
},
{
"epoch": 0.98,
"learning_rate": 1.8867334167709638e-05,
"loss": 0.8624,
"step": 197
},
{
"epoch": 0.99,
"learning_rate": 1.88360450563204e-05,
"loss": 0.9778,
"step": 198
},
{
"epoch": 0.99,
"learning_rate": 1.8804755944931166e-05,
"loss": 0.7804,
"step": 199
},
{
"epoch": 1.0,
"learning_rate": 1.8773466833541928e-05,
"loss": 0.8165,
"step": 200
},
{
"epoch": 1.0,
"learning_rate": 1.8742177722152693e-05,
"loss": 0.8972,
"step": 201
},
{
"epoch": 1.01,
"learning_rate": 1.8710888610763455e-05,
"loss": 0.8584,
"step": 202
},
{
"epoch": 1.01,
"learning_rate": 1.8679599499374217e-05,
"loss": 0.8229,
"step": 203
},
{
"epoch": 1.02,
"learning_rate": 1.8648310387984983e-05,
"loss": 0.8804,
"step": 204
},
{
"epoch": 1.02,
"learning_rate": 1.8617021276595745e-05,
"loss": 0.7828,
"step": 205
},
{
"epoch": 1.03,
"learning_rate": 1.858573216520651e-05,
"loss": 0.6817,
"step": 206
},
{
"epoch": 1.03,
"learning_rate": 1.8554443053817272e-05,
"loss": 0.7627,
"step": 207
},
{
"epoch": 1.04,
"learning_rate": 1.8523153942428038e-05,
"loss": 0.707,
"step": 208
},
{
"epoch": 1.04,
"learning_rate": 1.84918648310388e-05,
"loss": 0.8976,
"step": 209
},
{
"epoch": 1.05,
"learning_rate": 1.8460575719649562e-05,
"loss": 0.8772,
"step": 210
},
{
"epoch": 1.05,
"learning_rate": 1.8429286608260328e-05,
"loss": 0.9126,
"step": 211
},
{
"epoch": 1.06,
"learning_rate": 1.839799749687109e-05,
"loss": 0.7939,
"step": 212
},
{
"epoch": 1.06,
"learning_rate": 1.8366708385481855e-05,
"loss": 0.7075,
"step": 213
},
{
"epoch": 1.07,
"learning_rate": 1.8335419274092617e-05,
"loss": 0.7325,
"step": 214
},
{
"epoch": 1.07,
"learning_rate": 1.8304130162703383e-05,
"loss": 0.7835,
"step": 215
},
{
"epoch": 1.08,
"learning_rate": 1.8272841051314145e-05,
"loss": 0.8144,
"step": 216
},
{
"epoch": 1.08,
"learning_rate": 1.8241551939924907e-05,
"loss": 0.7313,
"step": 217
},
{
"epoch": 1.09,
"learning_rate": 1.8210262828535672e-05,
"loss": 0.885,
"step": 218
},
{
"epoch": 1.09,
"learning_rate": 1.8178973717146434e-05,
"loss": 0.8518,
"step": 219
},
{
"epoch": 1.1,
"learning_rate": 1.8147684605757196e-05,
"loss": 0.7664,
"step": 220
},
{
"epoch": 1.1,
"learning_rate": 1.8116395494367962e-05,
"loss": 0.8554,
"step": 221
},
{
"epoch": 1.11,
"learning_rate": 1.8085106382978724e-05,
"loss": 0.7364,
"step": 222
},
{
"epoch": 1.11,
"learning_rate": 1.8053817271589486e-05,
"loss": 0.7854,
"step": 223
},
{
"epoch": 1.12,
"learning_rate": 1.8022528160200248e-05,
"loss": 0.7999,
"step": 224
},
{
"epoch": 1.12,
"learning_rate": 1.7991239048811014e-05,
"loss": 0.8567,
"step": 225
},
{
"epoch": 1.13,
"learning_rate": 1.7959949937421776e-05,
"loss": 0.8642,
"step": 226
},
{
"epoch": 1.14,
"learning_rate": 1.792866082603254e-05,
"loss": 0.7586,
"step": 227
},
{
"epoch": 1.14,
"learning_rate": 1.7897371714643303e-05,
"loss": 0.7328,
"step": 228
},
{
"epoch": 1.15,
"learning_rate": 1.786608260325407e-05,
"loss": 0.8685,
"step": 229
},
{
"epoch": 1.15,
"learning_rate": 1.783479349186483e-05,
"loss": 0.6846,
"step": 230
},
{
"epoch": 1.16,
"learning_rate": 1.7803504380475593e-05,
"loss": 0.7963,
"step": 231
},
{
"epoch": 1.16,
"learning_rate": 1.777221526908636e-05,
"loss": 0.762,
"step": 232
},
{
"epoch": 1.17,
"learning_rate": 1.774092615769712e-05,
"loss": 0.8083,
"step": 233
},
{
"epoch": 1.17,
"learning_rate": 1.7709637046307886e-05,
"loss": 0.8685,
"step": 234
},
{
"epoch": 1.18,
"learning_rate": 1.7678347934918648e-05,
"loss": 0.6171,
"step": 235
},
{
"epoch": 1.18,
"learning_rate": 1.7647058823529414e-05,
"loss": 0.7341,
"step": 236
},
{
"epoch": 1.19,
"learning_rate": 1.7615769712140176e-05,
"loss": 0.7801,
"step": 237
},
{
"epoch": 1.19,
"learning_rate": 1.7584480600750938e-05,
"loss": 0.8321,
"step": 238
},
{
"epoch": 1.2,
"learning_rate": 1.7553191489361703e-05,
"loss": 0.7637,
"step": 239
},
{
"epoch": 1.2,
"learning_rate": 1.7521902377972465e-05,
"loss": 0.7872,
"step": 240
},
{
"epoch": 1.21,
"learning_rate": 1.749061326658323e-05,
"loss": 0.8981,
"step": 241
},
{
"epoch": 1.21,
"learning_rate": 1.7459324155193993e-05,
"loss": 0.8244,
"step": 242
},
{
"epoch": 1.22,
"learning_rate": 1.742803504380476e-05,
"loss": 0.7284,
"step": 243
},
{
"epoch": 1.22,
"learning_rate": 1.739674593241552e-05,
"loss": 0.8948,
"step": 244
},
{
"epoch": 1.23,
"learning_rate": 1.7365456821026283e-05,
"loss": 0.7505,
"step": 245
},
{
"epoch": 1.23,
"learning_rate": 1.7334167709637048e-05,
"loss": 0.6836,
"step": 246
},
{
"epoch": 1.23,
"learning_rate": 1.730287859824781e-05,
"loss": 0.825,
"step": 247
},
{
"epoch": 1.24,
"learning_rate": 1.7271589486858576e-05,
"loss": 0.954,
"step": 248
},
{
"epoch": 1.25,
"learning_rate": 1.7240300375469338e-05,
"loss": 0.7165,
"step": 249
},
{
"epoch": 1.25,
"learning_rate": 1.7209011264080103e-05,
"loss": 0.8173,
"step": 250
},
{
"epoch": 1.25,
"learning_rate": 1.7177722152690865e-05,
"loss": 0.8108,
"step": 251
},
{
"epoch": 1.26,
"learning_rate": 1.7146433041301627e-05,
"loss": 0.8313,
"step": 252
},
{
"epoch": 1.27,
"learning_rate": 1.7115143929912393e-05,
"loss": 0.9048,
"step": 253
},
{
"epoch": 1.27,
"learning_rate": 1.7083854818523155e-05,
"loss": 0.8831,
"step": 254
},
{
"epoch": 1.27,
"learning_rate": 1.705256570713392e-05,
"loss": 0.717,
"step": 255
},
{
"epoch": 1.28,
"learning_rate": 1.7021276595744682e-05,
"loss": 0.838,
"step": 256
},
{
"epoch": 1.28,
"learning_rate": 1.6989987484355448e-05,
"loss": 0.8352,
"step": 257
},
{
"epoch": 1.29,
"learning_rate": 1.695869837296621e-05,
"loss": 0.6925,
"step": 258
},
{
"epoch": 1.29,
"learning_rate": 1.6927409261576972e-05,
"loss": 0.6983,
"step": 259
},
{
"epoch": 1.3,
"learning_rate": 1.6896120150187734e-05,
"loss": 0.7399,
"step": 260
},
{
"epoch": 1.3,
"learning_rate": 1.68648310387985e-05,
"loss": 0.9373,
"step": 261
},
{
"epoch": 1.31,
"learning_rate": 1.6833541927409262e-05,
"loss": 0.807,
"step": 262
},
{
"epoch": 1.31,
"learning_rate": 1.6802252816020024e-05,
"loss": 1.0353,
"step": 263
},
{
"epoch": 1.32,
"learning_rate": 1.677096370463079e-05,
"loss": 0.9005,
"step": 264
},
{
"epoch": 1.32,
"learning_rate": 1.673967459324155e-05,
"loss": 0.7737,
"step": 265
},
{
"epoch": 1.33,
"learning_rate": 1.6708385481852313e-05,
"loss": 0.8731,
"step": 266
},
{
"epoch": 1.33,
"learning_rate": 1.667709637046308e-05,
"loss": 0.9601,
"step": 267
},
{
"epoch": 1.34,
"learning_rate": 1.664580725907384e-05,
"loss": 0.7676,
"step": 268
},
{
"epoch": 1.34,
"learning_rate": 1.6614518147684607e-05,
"loss": 0.7407,
"step": 269
},
{
"epoch": 1.35,
"learning_rate": 1.658322903629537e-05,
"loss": 0.7421,
"step": 270
},
{
"epoch": 1.35,
"learning_rate": 1.6551939924906134e-05,
"loss": 0.7523,
"step": 271
},
{
"epoch": 1.36,
"learning_rate": 1.6520650813516896e-05,
"loss": 0.7475,
"step": 272
},
{
"epoch": 1.36,
"learning_rate": 1.6489361702127658e-05,
"loss": 0.7144,
"step": 273
},
{
"epoch": 1.37,
"learning_rate": 1.6458072590738424e-05,
"loss": 0.9052,
"step": 274
},
{
"epoch": 1.38,
"learning_rate": 1.6426783479349186e-05,
"loss": 0.7974,
"step": 275
},
{
"epoch": 1.38,
"learning_rate": 1.639549436795995e-05,
"loss": 0.8961,
"step": 276
},
{
"epoch": 1.39,
"learning_rate": 1.6364205256570713e-05,
"loss": 0.7951,
"step": 277
},
{
"epoch": 1.39,
"learning_rate": 1.633291614518148e-05,
"loss": 0.7915,
"step": 278
},
{
"epoch": 1.4,
"learning_rate": 1.630162703379224e-05,
"loss": 0.7114,
"step": 279
},
{
"epoch": 1.4,
"learning_rate": 1.6270337922403003e-05,
"loss": 0.766,
"step": 280
},
{
"epoch": 1.41,
"learning_rate": 1.623904881101377e-05,
"loss": 0.7336,
"step": 281
},
{
"epoch": 1.41,
"learning_rate": 1.620775969962453e-05,
"loss": 0.8702,
"step": 282
},
{
"epoch": 1.42,
"learning_rate": 1.6176470588235296e-05,
"loss": 0.8667,
"step": 283
},
{
"epoch": 1.42,
"learning_rate": 1.6145181476846058e-05,
"loss": 0.7561,
"step": 284
},
{
"epoch": 1.43,
"learning_rate": 1.6113892365456824e-05,
"loss": 0.6965,
"step": 285
},
{
"epoch": 1.43,
"learning_rate": 1.6082603254067586e-05,
"loss": 0.7825,
"step": 286
},
{
"epoch": 1.44,
"learning_rate": 1.6051314142678348e-05,
"loss": 0.7628,
"step": 287
},
{
"epoch": 1.44,
"learning_rate": 1.6020025031289113e-05,
"loss": 0.7527,
"step": 288
},
{
"epoch": 1.45,
"learning_rate": 1.5988735919899875e-05,
"loss": 0.7336,
"step": 289
},
{
"epoch": 1.45,
"learning_rate": 1.595744680851064e-05,
"loss": 0.9209,
"step": 290
},
{
"epoch": 1.46,
"learning_rate": 1.5926157697121403e-05,
"loss": 0.8466,
"step": 291
},
{
"epoch": 1.46,
"learning_rate": 1.589486858573217e-05,
"loss": 0.8237,
"step": 292
},
{
"epoch": 1.47,
"learning_rate": 1.586357947434293e-05,
"loss": 0.7912,
"step": 293
},
{
"epoch": 1.47,
"learning_rate": 1.5832290362953693e-05,
"loss": 0.74,
"step": 294
},
{
"epoch": 1.48,
"learning_rate": 1.5801001251564458e-05,
"loss": 0.7227,
"step": 295
},
{
"epoch": 1.48,
"learning_rate": 1.576971214017522e-05,
"loss": 0.8125,
"step": 296
},
{
"epoch": 1.48,
"learning_rate": 1.5738423028785986e-05,
"loss": 0.7384,
"step": 297
},
{
"epoch": 1.49,
"learning_rate": 1.5707133917396748e-05,
"loss": 0.852,
"step": 298
},
{
"epoch": 1.5,
"learning_rate": 1.567584480600751e-05,
"loss": 0.8198,
"step": 299
},
{
"epoch": 1.5,
"learning_rate": 1.5644555694618275e-05,
"loss": 0.8817,
"step": 300
}
],
"logging_steps": 1,
"max_steps": 800,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 10,
"total_flos": 3.140916098758944e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
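
The JSON above is the standard state file the Hugging Face Trainer writes alongside a checkpoint; each entry in log_history records the step, learning rate, and training loss. As a minimal sketch (not part of the original file), the Python snippet below assumes a local copy of trainer_state.json and that matplotlib is installed, and plots the loss curve from log_history.

import json
import matplotlib.pyplot as plt

# Load the trainer state written next to the checkpoint.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only log entries that carry a training loss.
logged = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in logged]
losses = [e["loss"] for e in logged]

plt.plot(steps, losses)
plt.xlabel("global step")
plt.ylabel("training loss")
plt.title("Vistral-7B-ChatML checkpoint-300")
plt.show()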