{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9995796553173603,
"global_step": 1189,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 0.0,
"loss": 9.55,
"step": 1
},
{
"epoch": 0.0,
"learning_rate": 0.0,
"loss": 8.9419,
"step": 2
},
{
"epoch": 0.0,
"learning_rate": 0.0,
"loss": 9.4113,
"step": 3
},
{
"epoch": 0.0,
"learning_rate": 0.0,
"loss": 9.3807,
"step": 4
},
{
"epoch": 0.0,
"learning_rate": 6e-07,
"loss": 9.162,
"step": 5
},
{
"epoch": 0.01,
"learning_rate": 1.2e-06,
"loss": 9.1601,
"step": 6
},
{
"epoch": 0.01,
"learning_rate": 1.8e-06,
"loss": 8.859,
"step": 7
},
{
"epoch": 0.01,
"learning_rate": 2.4e-06,
"loss": 8.501,
"step": 8
},
{
"epoch": 0.01,
"learning_rate": 2.9999999999999997e-06,
"loss": 8.9694,
"step": 9
},
{
"epoch": 0.01,
"learning_rate": 3.6e-06,
"loss": 8.4321,
"step": 10
},
{
"epoch": 0.01,
"learning_rate": 4.2e-06,
"loss": 8.1065,
"step": 11
},
{
"epoch": 0.01,
"learning_rate": 4.8e-06,
"loss": 8.1458,
"step": 12
},
{
"epoch": 0.01,
"learning_rate": 5.399999999999999e-06,
"loss": 8.0895,
"step": 13
},
{
"epoch": 0.01,
"learning_rate": 5.999999999999999e-06,
"loss": 7.9342,
"step": 14
},
{
"epoch": 0.01,
"learning_rate": 6.599999999999999e-06,
"loss": 7.7382,
"step": 15
},
{
"epoch": 0.01,
"learning_rate": 7.2e-06,
"loss": 7.67,
"step": 16
},
{
"epoch": 0.01,
"learning_rate": 7.799999999999998e-06,
"loss": 7.7227,
"step": 17
},
{
"epoch": 0.02,
"learning_rate": 8.4e-06,
"loss": 7.624,
"step": 18
},
{
"epoch": 0.02,
"learning_rate": 8.999999999999999e-06,
"loss": 7.4774,
"step": 19
},
{
"epoch": 0.02,
"learning_rate": 9.6e-06,
"loss": 7.5997,
"step": 20
},
{
"epoch": 0.02,
"learning_rate": 1.02e-05,
"loss": 7.4215,
"step": 21
},
{
"epoch": 0.02,
"learning_rate": 1.0799999999999998e-05,
"loss": 7.5155,
"step": 22
},
{
"epoch": 0.02,
"learning_rate": 1.14e-05,
"loss": 7.4231,
"step": 23
},
{
"epoch": 0.02,
"learning_rate": 1.1999999999999999e-05,
"loss": 7.3793,
"step": 24
},
{
"epoch": 0.02,
"learning_rate": 1.26e-05,
"loss": 7.2675,
"step": 25
},
{
"epoch": 0.02,
"learning_rate": 1.3199999999999997e-05,
"loss": 7.4587,
"step": 26
},
{
"epoch": 0.02,
"learning_rate": 1.3799999999999998e-05,
"loss": 7.2464,
"step": 27
},
{
"epoch": 0.02,
"learning_rate": 1.44e-05,
"loss": 7.1883,
"step": 28
},
{
"epoch": 0.02,
"learning_rate": 1.4999999999999999e-05,
"loss": 7.3409,
"step": 29
},
{
"epoch": 0.03,
"learning_rate": 1.5599999999999996e-05,
"loss": 7.3998,
"step": 30
},
{
"epoch": 0.03,
"learning_rate": 1.6199999999999997e-05,
"loss": 7.2939,
"step": 31
},
{
"epoch": 0.03,
"learning_rate": 1.68e-05,
"loss": 7.2231,
"step": 32
},
{
"epoch": 0.03,
"learning_rate": 1.74e-05,
"loss": 7.4439,
"step": 33
},
{
"epoch": 0.03,
"learning_rate": 1.7999999999999997e-05,
"loss": 7.084,
"step": 34
},
{
"epoch": 0.03,
"learning_rate": 1.8599999999999998e-05,
"loss": 7.1616,
"step": 35
},
{
"epoch": 0.03,
"learning_rate": 1.92e-05,
"loss": 7.3887,
"step": 36
},
{
"epoch": 0.03,
"learning_rate": 1.98e-05,
"loss": 7.2686,
"step": 37
},
{
"epoch": 0.03,
"learning_rate": 2.04e-05,
"loss": 7.2506,
"step": 38
},
{
"epoch": 0.03,
"learning_rate": 2.1e-05,
"loss": 7.3682,
"step": 39
},
{
"epoch": 0.03,
"learning_rate": 2.1599999999999996e-05,
"loss": 7.1921,
"step": 40
},
{
"epoch": 0.03,
"learning_rate": 2.2199999999999998e-05,
"loss": 7.1297,
"step": 41
},
{
"epoch": 0.04,
"learning_rate": 2.28e-05,
"loss": 7.2948,
"step": 42
},
{
"epoch": 0.04,
"learning_rate": 2.34e-05,
"loss": 7.1493,
"step": 43
},
{
"epoch": 0.04,
"learning_rate": 2.3999999999999997e-05,
"loss": 7.2048,
"step": 44
},
{
"epoch": 0.04,
"learning_rate": 2.4599999999999998e-05,
"loss": 7.3067,
"step": 45
},
{
"epoch": 0.04,
"learning_rate": 2.52e-05,
"loss": 7.3014,
"step": 46
},
{
"epoch": 0.04,
"learning_rate": 2.5799999999999997e-05,
"loss": 7.1704,
"step": 47
},
{
"epoch": 0.04,
"learning_rate": 2.6399999999999995e-05,
"loss": 7.2711,
"step": 48
},
{
"epoch": 0.04,
"learning_rate": 2.6999999999999996e-05,
"loss": 7.0167,
"step": 49
},
{
"epoch": 0.04,
"learning_rate": 2.7599999999999997e-05,
"loss": 6.8768,
"step": 50
},
{
"epoch": 0.04,
"learning_rate": 2.8199999999999998e-05,
"loss": 7.4694,
"step": 51
},
{
"epoch": 0.04,
"learning_rate": 2.88e-05,
"loss": 7.231,
"step": 52
},
{
"epoch": 0.04,
"learning_rate": 2.94e-05,
"loss": 7.1152,
"step": 53
},
{
"epoch": 0.05,
"learning_rate": 2.9999999999999997e-05,
"loss": 7.1691,
"step": 54
},
{
"epoch": 0.05,
"learning_rate": 3.06e-05,
"loss": 7.2726,
"step": 55
},
{
"epoch": 0.05,
"learning_rate": 3.119999999999999e-05,
"loss": 6.9637,
"step": 56
},
{
"epoch": 0.05,
"learning_rate": 3.1799999999999994e-05,
"loss": 7.391,
"step": 57
},
{
"epoch": 0.05,
"learning_rate": 3.2399999999999995e-05,
"loss": 7.036,
"step": 58
},
{
"epoch": 0.05,
"learning_rate": 3.2999999999999996e-05,
"loss": 7.0183,
"step": 59
},
{
"epoch": 0.05,
"learning_rate": 3.36e-05,
"loss": 7.1747,
"step": 60
},
{
"epoch": 0.05,
"learning_rate": 3.42e-05,
"loss": 6.999,
"step": 61
},
{
"epoch": 0.05,
"learning_rate": 3.48e-05,
"loss": 7.0586,
"step": 62
},
{
"epoch": 0.05,
"learning_rate": 3.539999999999999e-05,
"loss": 7.1794,
"step": 63
},
{
"epoch": 0.05,
"learning_rate": 3.5999999999999994e-05,
"loss": 7.0978,
"step": 64
},
{
"epoch": 0.05,
"learning_rate": 3.6599999999999995e-05,
"loss": 7.043,
"step": 65
},
{
"epoch": 0.06,
"learning_rate": 3.7199999999999996e-05,
"loss": 7.1155,
"step": 66
},
{
"epoch": 0.06,
"learning_rate": 3.78e-05,
"loss": 7.0924,
"step": 67
},
{
"epoch": 0.06,
"learning_rate": 3.84e-05,
"loss": 7.0018,
"step": 68
},
{
"epoch": 0.06,
"learning_rate": 3.9e-05,
"loss": 6.9802,
"step": 69
},
{
"epoch": 0.06,
"learning_rate": 3.96e-05,
"loss": 7.0259,
"step": 70
},
{
"epoch": 0.06,
"learning_rate": 4.02e-05,
"loss": 7.1772,
"step": 71
},
{
"epoch": 0.06,
"learning_rate": 4.08e-05,
"loss": 7.2527,
"step": 72
},
{
"epoch": 0.06,
"learning_rate": 4.14e-05,
"loss": 7.086,
"step": 73
},
{
"epoch": 0.06,
"learning_rate": 4.2e-05,
"loss": 7.131,
"step": 74
},
{
"epoch": 0.06,
"learning_rate": 4.259999999999999e-05,
"loss": 6.9208,
"step": 75
},
{
"epoch": 0.06,
"learning_rate": 4.319999999999999e-05,
"loss": 7.2732,
"step": 76
},
{
"epoch": 0.06,
"learning_rate": 4.3799999999999994e-05,
"loss": 7.0671,
"step": 77
},
{
"epoch": 0.07,
"learning_rate": 4.4399999999999995e-05,
"loss": 6.8986,
"step": 78
},
{
"epoch": 0.07,
"learning_rate": 4.4999999999999996e-05,
"loss": 6.9168,
"step": 79
},
{
"epoch": 0.07,
"learning_rate": 4.56e-05,
"loss": 7.0245,
"step": 80
},
{
"epoch": 0.07,
"learning_rate": 4.62e-05,
"loss": 7.1175,
"step": 81
},
{
"epoch": 0.07,
"learning_rate": 4.68e-05,
"loss": 7.1879,
"step": 82
},
{
"epoch": 0.07,
"learning_rate": 4.7399999999999993e-05,
"loss": 6.9614,
"step": 83
},
{
"epoch": 0.07,
"learning_rate": 4.7999999999999994e-05,
"loss": 7.0797,
"step": 84
},
{
"epoch": 0.07,
"learning_rate": 4.8599999999999995e-05,
"loss": 7.0003,
"step": 85
},
{
"epoch": 0.07,
"learning_rate": 4.9199999999999997e-05,
"loss": 7.0737,
"step": 86
},
{
"epoch": 0.07,
"learning_rate": 4.98e-05,
"loss": 6.982,
"step": 87
},
{
"epoch": 0.07,
"learning_rate": 5.04e-05,
"loss": 7.0724,
"step": 88
},
{
"epoch": 0.07,
"learning_rate": 5.1e-05,
"loss": 7.1836,
"step": 89
},
{
"epoch": 0.08,
"learning_rate": 5.1599999999999994e-05,
"loss": 7.006,
"step": 90
},
{
"epoch": 0.08,
"learning_rate": 5.2199999999999995e-05,
"loss": 7.2244,
"step": 91
},
{
"epoch": 0.08,
"learning_rate": 5.279999999999999e-05,
"loss": 7.0901,
"step": 92
},
{
"epoch": 0.08,
"learning_rate": 5.339999999999999e-05,
"loss": 6.7901,
"step": 93
},
{
"epoch": 0.08,
"learning_rate": 5.399999999999999e-05,
"loss": 7.0759,
"step": 94
},
{
"epoch": 0.08,
"learning_rate": 5.459999999999999e-05,
"loss": 7.218,
"step": 95
},
{
"epoch": 0.08,
"learning_rate": 5.519999999999999e-05,
"loss": 6.856,
"step": 96
},
{
"epoch": 0.08,
"learning_rate": 5.5799999999999994e-05,
"loss": 7.0121,
"step": 97
},
{
"epoch": 0.08,
"learning_rate": 5.6399999999999995e-05,
"loss": 6.9158,
"step": 98
},
{
"epoch": 0.08,
"learning_rate": 5.6999999999999996e-05,
"loss": 6.587,
"step": 99
},
{
"epoch": 0.08,
"learning_rate": 5.76e-05,
"loss": 6.3765,
"step": 100
},
{
"epoch": 0.08,
"learning_rate": 5.82e-05,
"loss": 7.1251,
"step": 101
},
{
"epoch": 0.09,
"learning_rate": 5.88e-05,
"loss": 7.0595,
"step": 102
},
{
"epoch": 0.09,
"learning_rate": 5.94e-05,
"loss": 7.1093,
"step": 103
},
{
"epoch": 0.09,
"learning_rate": 5.9999999999999995e-05,
"loss": 7.0153,
"step": 104
},
{
"epoch": 0.09,
"learning_rate": 6.0599999999999996e-05,
"loss": 6.9467,
"step": 105
},
{
"epoch": 0.09,
"learning_rate": 6.12e-05,
"loss": 7.0941,
"step": 106
},
{
"epoch": 0.09,
"learning_rate": 6.18e-05,
"loss": 6.9961,
"step": 107
},
{
"epoch": 0.09,
"learning_rate": 6.239999999999999e-05,
"loss": 7.0825,
"step": 108
},
{
"epoch": 0.09,
"learning_rate": 6.299999999999999e-05,
"loss": 7.1876,
"step": 109
},
{
"epoch": 0.09,
"learning_rate": 6.359999999999999e-05,
"loss": 7.0441,
"step": 110
},
{
"epoch": 0.09,
"learning_rate": 6.419999999999999e-05,
"loss": 7.1209,
"step": 111
},
{
"epoch": 0.09,
"learning_rate": 6.479999999999999e-05,
"loss": 7.0938,
"step": 112
},
{
"epoch": 0.09,
"learning_rate": 6.539999999999999e-05,
"loss": 6.98,
"step": 113
},
{
"epoch": 0.1,
"learning_rate": 6.599999999999999e-05,
"loss": 6.9344,
"step": 114
},
{
"epoch": 0.1,
"learning_rate": 6.659999999999999e-05,
"loss": 7.0758,
"step": 115
},
{
"epoch": 0.1,
"learning_rate": 6.72e-05,
"loss": 7.1522,
"step": 116
},
{
"epoch": 0.1,
"learning_rate": 6.78e-05,
"loss": 7.0122,
"step": 117
},
{
"epoch": 0.1,
"learning_rate": 6.84e-05,
"loss": 6.8646,
"step": 118
},
{
"epoch": 0.1,
"learning_rate": 6.9e-05,
"loss": 6.7966,
"step": 119
},
{
"epoch": 0.1,
"learning_rate": 6.96e-05,
"loss": 6.9892,
"step": 120
},
{
"epoch": 0.1,
"learning_rate": 7.02e-05,
"loss": 7.0826,
"step": 121
},
{
"epoch": 0.1,
"learning_rate": 7.079999999999999e-05,
"loss": 6.9133,
"step": 122
},
{
"epoch": 0.1,
"learning_rate": 7.139999999999999e-05,
"loss": 7.0459,
"step": 123
},
{
"epoch": 0.1,
"learning_rate": 7.199999999999999e-05,
"loss": 6.9899,
"step": 124
},
{
"epoch": 0.11,
"learning_rate": 7.259999999999999e-05,
"loss": 6.9376,
"step": 125
},
{
"epoch": 0.11,
"learning_rate": 7.319999999999999e-05,
"loss": 6.8284,
"step": 126
},
{
"epoch": 0.11,
"learning_rate": 7.379999999999999e-05,
"loss": 7.1698,
"step": 127
},
{
"epoch": 0.11,
"learning_rate": 7.439999999999999e-05,
"loss": 6.9294,
"step": 128
},
{
"epoch": 0.11,
"learning_rate": 7.5e-05,
"loss": 6.9303,
"step": 129
},
{
"epoch": 0.11,
"learning_rate": 7.56e-05,
"loss": 6.8968,
"step": 130
},
{
"epoch": 0.11,
"learning_rate": 7.62e-05,
"loss": 6.7854,
"step": 131
},
{
"epoch": 0.11,
"learning_rate": 7.68e-05,
"loss": 6.9444,
"step": 132
},
{
"epoch": 0.11,
"learning_rate": 7.74e-05,
"loss": 7.1272,
"step": 133
},
{
"epoch": 0.11,
"learning_rate": 7.8e-05,
"loss": 6.9042,
"step": 134
},
{
"epoch": 0.11,
"learning_rate": 7.86e-05,
"loss": 6.9895,
"step": 135
},
{
"epoch": 0.11,
"learning_rate": 7.92e-05,
"loss": 6.9246,
"step": 136
},
{
"epoch": 0.12,
"learning_rate": 7.98e-05,
"loss": 6.9404,
"step": 137
},
{
"epoch": 0.12,
"learning_rate": 8.04e-05,
"loss": 6.9605,
"step": 138
},
{
"epoch": 0.12,
"learning_rate": 8.1e-05,
"loss": 6.9209,
"step": 139
},
{
"epoch": 0.12,
"learning_rate": 8.16e-05,
"loss": 7.0834,
"step": 140
},
{
"epoch": 0.12,
"learning_rate": 8.22e-05,
"loss": 6.9762,
"step": 141
},
{
"epoch": 0.12,
"learning_rate": 8.28e-05,
"loss": 6.8668,
"step": 142
},
{
"epoch": 0.12,
"learning_rate": 8.34e-05,
"loss": 6.8323,
"step": 143
},
{
"epoch": 0.12,
"learning_rate": 8.4e-05,
"loss": 6.9498,
"step": 144
},
{
"epoch": 0.12,
"learning_rate": 8.459999999999998e-05,
"loss": 6.7789,
"step": 145
},
{
"epoch": 0.12,
"learning_rate": 8.519999999999998e-05,
"loss": 6.9855,
"step": 146
},
{
"epoch": 0.12,
"learning_rate": 8.579999999999998e-05,
"loss": 6.7275,
"step": 147
},
{
"epoch": 0.12,
"learning_rate": 8.639999999999999e-05,
"loss": 6.418,
"step": 148
},
{
"epoch": 0.13,
"learning_rate": 8.699999999999999e-05,
"loss": 9.7764,
"step": 149
},
{
"epoch": 0.13,
"learning_rate": 8.759999999999999e-05,
"loss": 6.4339,
"step": 150
},
{
"epoch": 0.13,
"learning_rate": 8.819999999999999e-05,
"loss": 7.2143,
"step": 151
},
{
"epoch": 0.13,
"learning_rate": 8.879999999999999e-05,
"loss": 7.2603,
"step": 152
},
{
"epoch": 0.13,
"learning_rate": 8.939999999999999e-05,
"loss": 7.0635,
"step": 153
},
{
"epoch": 0.13,
"learning_rate": 8.999999999999999e-05,
"loss": 7.1448,
"step": 154
},
{
"epoch": 0.13,
"learning_rate": 9.059999999999999e-05,
"loss": 6.9462,
"step": 155
},
{
"epoch": 0.13,
"learning_rate": 9.12e-05,
"loss": 7.0349,
"step": 156
},
{
"epoch": 0.13,
"learning_rate": 9.18e-05,
"loss": 7.0018,
"step": 157
},
{
"epoch": 0.13,
"learning_rate": 9.24e-05,
"loss": 7.1447,
"step": 158
},
{
"epoch": 0.13,
"learning_rate": 9.3e-05,
"loss": 6.9218,
"step": 159
},
{
"epoch": 0.13,
"learning_rate": 9.36e-05,
"loss": 6.8224,
"step": 160
},
{
"epoch": 0.14,
"learning_rate": 9.419999999999999e-05,
"loss": 6.9101,
"step": 161
},
{
"epoch": 0.14,
"learning_rate": 9.479999999999999e-05,
"loss": 7.0359,
"step": 162
},
{
"epoch": 0.14,
"learning_rate": 9.539999999999999e-05,
"loss": 7.0907,
"step": 163
},
{
"epoch": 0.14,
"learning_rate": 9.599999999999999e-05,
"loss": 6.9293,
"step": 164
},
{
"epoch": 0.14,
"learning_rate": 9.659999999999999e-05,
"loss": 6.9584,
"step": 165
},
{
"epoch": 0.14,
"learning_rate": 9.719999999999999e-05,
"loss": 6.7897,
"step": 166
},
{
"epoch": 0.14,
"learning_rate": 9.779999999999999e-05,
"loss": 6.9411,
"step": 167
},
{
"epoch": 0.14,
"learning_rate": 9.839999999999999e-05,
"loss": 6.9745,
"step": 168
},
{
"epoch": 0.14,
"learning_rate": 9.9e-05,
"loss": 6.9907,
"step": 169
},
{
"epoch": 0.14,
"learning_rate": 9.96e-05,
"loss": 6.8991,
"step": 170
},
{
"epoch": 0.14,
"learning_rate": 0.0001002,
"loss": 6.9265,
"step": 171
},
{
"epoch": 0.14,
"learning_rate": 0.0001008,
"loss": 6.9174,
"step": 172
},
{
"epoch": 0.15,
"learning_rate": 0.0001014,
"loss": 6.7028,
"step": 173
},
{
"epoch": 0.15,
"learning_rate": 0.000102,
"loss": 6.976,
"step": 174
},
{
"epoch": 0.15,
"learning_rate": 0.0001026,
"loss": 6.9988,
"step": 175
},
{
"epoch": 0.15,
"learning_rate": 0.00010319999999999999,
"loss": 7.1287,
"step": 176
},
{
"epoch": 0.15,
"learning_rate": 0.00010379999999999999,
"loss": 7.0902,
"step": 177
},
{
"epoch": 0.15,
"learning_rate": 0.00010439999999999999,
"loss": 7.0567,
"step": 178
},
{
"epoch": 0.15,
"learning_rate": 0.00010499999999999999,
"loss": 6.8533,
"step": 179
},
{
"epoch": 0.15,
"learning_rate": 0.00010559999999999998,
"loss": 6.9569,
"step": 180
},
{
"epoch": 0.15,
"learning_rate": 0.00010619999999999998,
"loss": 7.0492,
"step": 181
},
{
"epoch": 0.15,
"learning_rate": 0.00010679999999999998,
"loss": 7.1597,
"step": 182
},
{
"epoch": 0.15,
"learning_rate": 0.00010739999999999998,
"loss": 6.8855,
"step": 183
},
{
"epoch": 0.15,
"learning_rate": 0.00010799999999999998,
"loss": 6.9183,
"step": 184
},
{
"epoch": 0.16,
"learning_rate": 0.00010859999999999998,
"loss": 6.7531,
"step": 185
},
{
"epoch": 0.16,
"learning_rate": 0.00010919999999999998,
"loss": 6.8515,
"step": 186
},
{
"epoch": 0.16,
"learning_rate": 0.00010979999999999999,
"loss": 7.0631,
"step": 187
},
{
"epoch": 0.16,
"learning_rate": 0.00011039999999999999,
"loss": 6.8417,
"step": 188
},
{
"epoch": 0.16,
"learning_rate": 0.00011099999999999999,
"loss": 6.8889,
"step": 189
},
{
"epoch": 0.16,
"learning_rate": 0.00011159999999999999,
"loss": 6.859,
"step": 190
},
{
"epoch": 0.16,
"learning_rate": 0.00011219999999999999,
"loss": 7.0676,
"step": 191
},
{
"epoch": 0.16,
"learning_rate": 0.00011279999999999999,
"loss": 7.0038,
"step": 192
},
{
"epoch": 0.16,
"learning_rate": 0.00011339999999999999,
"loss": 6.8311,
"step": 193
},
{
"epoch": 0.16,
"learning_rate": 0.00011399999999999999,
"loss": 6.8485,
"step": 194
},
{
"epoch": 0.16,
"learning_rate": 0.0001146,
"loss": 6.7633,
"step": 195
},
{
"epoch": 0.16,
"learning_rate": 0.0001152,
"loss": 6.6829,
"step": 196
},
{
"epoch": 0.17,
"learning_rate": 0.0001158,
"loss": 6.9826,
"step": 197
},
{
"epoch": 0.17,
"learning_rate": 0.0001164,
"loss": 6.747,
"step": 198
},
{
"epoch": 0.17,
"learning_rate": 0.000117,
"loss": 6.6231,
"step": 199
},
{
"epoch": 0.17,
"learning_rate": 0.0001176,
"loss": 6.2205,
"step": 200
},
{
"epoch": 0.17,
"learning_rate": 0.0001182,
"loss": 7.0516,
"step": 201
},
{
"epoch": 0.17,
"learning_rate": 0.0001188,
"loss": 6.9791,
"step": 202
},
{
"epoch": 0.17,
"learning_rate": 0.0001194,
"loss": 7.0014,
"step": 203
},
{
"epoch": 0.17,
"learning_rate": 0.00011999999999999999,
"loss": 6.8763,
"step": 204
},
{
"epoch": 0.17,
"learning_rate": 0.00012059999999999999,
"loss": 6.8784,
"step": 205
},
{
"epoch": 0.17,
"learning_rate": 0.00012119999999999999,
"loss": 6.9652,
"step": 206
},
{
"epoch": 0.17,
"learning_rate": 0.00012179999999999999,
"loss": 7.0076,
"step": 207
},
{
"epoch": 0.17,
"learning_rate": 0.0001224,
"loss": 7.1589,
"step": 208
},
{
"epoch": 0.18,
"learning_rate": 0.00012299999999999998,
"loss": 6.9716,
"step": 209
},
{
"epoch": 0.18,
"learning_rate": 0.0001236,
"loss": 7.1503,
"step": 210
},
{
"epoch": 0.18,
"learning_rate": 0.00012419999999999998,
"loss": 6.8821,
"step": 211
},
{
"epoch": 0.18,
"learning_rate": 0.00012479999999999997,
"loss": 6.8873,
"step": 212
},
{
"epoch": 0.18,
"learning_rate": 0.00012539999999999999,
"loss": 6.9434,
"step": 213
},
{
"epoch": 0.18,
"learning_rate": 0.00012599999999999997,
"loss": 6.937,
"step": 214
},
{
"epoch": 0.18,
"learning_rate": 0.0001266,
"loss": 6.9549,
"step": 215
},
{
"epoch": 0.18,
"learning_rate": 0.00012719999999999997,
"loss": 6.9406,
"step": 216
},
{
"epoch": 0.18,
"learning_rate": 0.0001278,
"loss": 6.9666,
"step": 217
},
{
"epoch": 0.18,
"learning_rate": 0.00012839999999999998,
"loss": 6.9652,
"step": 218
},
{
"epoch": 0.18,
"learning_rate": 0.000129,
"loss": 6.8692,
"step": 219
},
{
"epoch": 0.18,
"learning_rate": 0.00012959999999999998,
"loss": 7.0758,
"step": 220
},
{
"epoch": 0.19,
"learning_rate": 0.0001302,
"loss": 6.8795,
"step": 221
},
{
"epoch": 0.19,
"learning_rate": 0.00013079999999999998,
"loss": 7.2127,
"step": 222
},
{
"epoch": 0.19,
"learning_rate": 0.0001314,
"loss": 7.0779,
"step": 223
},
{
"epoch": 0.19,
"learning_rate": 0.00013199999999999998,
"loss": 7.0581,
"step": 224
},
{
"epoch": 0.19,
"learning_rate": 0.0001326,
"loss": 6.8529,
"step": 225
},
{
"epoch": 0.19,
"learning_rate": 0.00013319999999999999,
"loss": 6.7646,
"step": 226
},
{
"epoch": 0.19,
"learning_rate": 0.0001338,
"loss": 7.0354,
"step": 227
},
{
"epoch": 0.19,
"learning_rate": 0.0001344,
"loss": 7.023,
"step": 228
},
{
"epoch": 0.19,
"learning_rate": 0.000135,
"loss": 6.8544,
"step": 229
},
{
"epoch": 0.19,
"learning_rate": 0.0001356,
"loss": 6.9987,
"step": 230
},
{
"epoch": 0.19,
"learning_rate": 0.0001362,
"loss": 7.0755,
"step": 231
},
{
"epoch": 0.2,
"learning_rate": 0.0001368,
"loss": 6.975,
"step": 232
},
{
"epoch": 0.2,
"learning_rate": 0.0001374,
"loss": 6.9598,
"step": 233
},
{
"epoch": 0.2,
"learning_rate": 0.000138,
"loss": 6.8081,
"step": 234
},
{
"epoch": 0.2,
"learning_rate": 0.0001386,
"loss": 7.0254,
"step": 235
},
{
"epoch": 0.2,
"learning_rate": 0.0001392,
"loss": 6.9681,
"step": 236
},
{
"epoch": 0.2,
"learning_rate": 0.00013979999999999998,
"loss": 6.996,
"step": 237
},
{
"epoch": 0.2,
"learning_rate": 0.0001404,
"loss": 6.9012,
"step": 238
},
{
"epoch": 0.2,
"learning_rate": 0.00014099999999999998,
"loss": 7.0205,
"step": 239
},
{
"epoch": 0.2,
"learning_rate": 0.00014159999999999997,
"loss": 6.7686,
"step": 240
},
{
"epoch": 0.2,
"learning_rate": 0.0001422,
"loss": 6.9025,
"step": 241
},
{
"epoch": 0.2,
"learning_rate": 0.00014279999999999997,
"loss": 6.8842,
"step": 242
},
{
"epoch": 0.2,
"learning_rate": 0.0001434,
"loss": 7.0146,
"step": 243
},
{
"epoch": 0.21,
"learning_rate": 0.00014399999999999998,
"loss": 7.0304,
"step": 244
},
{
"epoch": 0.21,
"learning_rate": 0.0001446,
"loss": 6.763,
"step": 245
},
{
"epoch": 0.21,
"learning_rate": 0.00014519999999999998,
"loss": 6.7243,
"step": 246
},
{
"epoch": 0.21,
"learning_rate": 0.0001458,
"loss": 7.2358,
"step": 247
},
{
"epoch": 0.21,
"learning_rate": 0.00014639999999999998,
"loss": 6.6169,
"step": 248
},
{
"epoch": 0.21,
"learning_rate": 0.000147,
"loss": 6.5757,
"step": 249
},
{
"epoch": 0.21,
"learning_rate": 0.00014759999999999998,
"loss": 6.2448,
"step": 250
},
{
"epoch": 0.21,
"learning_rate": 0.0001482,
"loss": 7.0981,
"step": 251
},
{
"epoch": 0.21,
"learning_rate": 0.00014879999999999998,
"loss": 6.8921,
"step": 252
},
{
"epoch": 0.21,
"learning_rate": 0.0001494,
"loss": 7.1193,
"step": 253
},
{
"epoch": 0.21,
"learning_rate": 0.00015,
"loss": 6.8786,
"step": 254
},
{
"epoch": 0.21,
"learning_rate": 0.00015059999999999997,
"loss": 6.9321,
"step": 255
},
{
"epoch": 0.22,
"learning_rate": 0.0001512,
"loss": 7.0229,
"step": 256
},
{
"epoch": 0.22,
"learning_rate": 0.00015179999999999998,
"loss": 6.8696,
"step": 257
},
{
"epoch": 0.22,
"learning_rate": 0.0001524,
"loss": 7.0135,
"step": 258
},
{
"epoch": 0.22,
"learning_rate": 0.00015299999999999998,
"loss": 6.9056,
"step": 259
},
{
"epoch": 0.22,
"learning_rate": 0.0001536,
"loss": 6.8585,
"step": 260
},
{
"epoch": 0.22,
"learning_rate": 0.00015419999999999998,
"loss": 6.9343,
"step": 261
},
{
"epoch": 0.22,
"learning_rate": 0.0001548,
"loss": 6.822,
"step": 262
},
{
"epoch": 0.22,
"learning_rate": 0.00015539999999999998,
"loss": 6.8312,
"step": 263
},
{
"epoch": 0.22,
"learning_rate": 0.000156,
"loss": 6.9712,
"step": 264
},
{
"epoch": 0.22,
"learning_rate": 0.00015659999999999998,
"loss": 6.8902,
"step": 265
},
{
"epoch": 0.22,
"learning_rate": 0.0001572,
"loss": 6.9703,
"step": 266
},
{
"epoch": 0.22,
"learning_rate": 0.0001578,
"loss": 6.8423,
"step": 267
},
{
"epoch": 0.23,
"learning_rate": 0.0001584,
"loss": 6.8521,
"step": 268
},
{
"epoch": 0.23,
"learning_rate": 0.000159,
"loss": 6.9612,
"step": 269
},
{
"epoch": 0.23,
"learning_rate": 0.0001596,
"loss": 6.9591,
"step": 270
},
{
"epoch": 0.23,
"learning_rate": 0.0001602,
"loss": 7.024,
"step": 271
},
{
"epoch": 0.23,
"learning_rate": 0.0001608,
"loss": 6.8473,
"step": 272
},
{
"epoch": 0.23,
"learning_rate": 0.0001614,
"loss": 7.0068,
"step": 273
},
{
"epoch": 0.23,
"learning_rate": 0.000162,
"loss": 7.0259,
"step": 274
},
{
"epoch": 0.23,
"learning_rate": 0.0001626,
"loss": 7.0017,
"step": 275
},
{
"epoch": 0.23,
"learning_rate": 0.0001632,
"loss": 6.7601,
"step": 276
},
{
"epoch": 0.23,
"learning_rate": 0.0001638,
"loss": 6.8875,
"step": 277
},
{
"epoch": 0.23,
"learning_rate": 0.0001644,
"loss": 6.9374,
"step": 278
},
{
"epoch": 0.23,
"learning_rate": 0.000165,
"loss": 7.0433,
"step": 279
},
{
"epoch": 0.24,
"learning_rate": 0.0001656,
"loss": 6.8445,
"step": 280
},
{
"epoch": 0.24,
"learning_rate": 0.0001662,
"loss": 6.79,
"step": 281
},
{
"epoch": 0.24,
"learning_rate": 0.0001668,
"loss": 6.9064,
"step": 282
},
{
"epoch": 0.24,
"learning_rate": 0.0001674,
"loss": 7.0482,
"step": 283
},
{
"epoch": 0.24,
"learning_rate": 0.000168,
"loss": 7.016,
"step": 284
},
{
"epoch": 0.24,
"learning_rate": 0.0001686,
"loss": 6.9143,
"step": 285
},
{
"epoch": 0.24,
"learning_rate": 0.00016919999999999997,
"loss": 6.8954,
"step": 286
},
{
"epoch": 0.24,
"learning_rate": 0.00016979999999999998,
"loss": 6.8511,
"step": 287
},
{
"epoch": 0.24,
"learning_rate": 0.00017039999999999997,
"loss": 6.8074,
"step": 288
},
{
"epoch": 0.24,
"learning_rate": 0.00017099999999999998,
"loss": 6.9775,
"step": 289
},
{
"epoch": 0.24,
"learning_rate": 0.00017159999999999997,
"loss": 6.8545,
"step": 290
},
{
"epoch": 0.24,
"learning_rate": 0.00017219999999999998,
"loss": 7.0779,
"step": 291
},
{
"epoch": 0.25,
"learning_rate": 0.00017279999999999997,
"loss": 6.9559,
"step": 292
},
{
"epoch": 0.25,
"learning_rate": 0.00017339999999999996,
"loss": 6.6196,
"step": 293
},
{
"epoch": 0.25,
"learning_rate": 0.00017399999999999997,
"loss": 6.6656,
"step": 294
},
{
"epoch": 0.25,
"learning_rate": 0.00017459999999999996,
"loss": 6.8798,
"step": 295
},
{
"epoch": 0.25,
"learning_rate": 0.00017519999999999998,
"loss": 7.0263,
"step": 296
},
{
"epoch": 0.25,
"learning_rate": 0.00017579999999999996,
"loss": 6.5448,
"step": 297
},
{
"epoch": 0.25,
"learning_rate": 0.00017639999999999998,
"loss": 6.6762,
"step": 298
},
{
"epoch": 0.25,
"learning_rate": 0.00017699999999999997,
"loss": 6.8317,
"step": 299
},
{
"epoch": 0.25,
"learning_rate": 0.00017759999999999998,
"loss": 6.4708,
"step": 300
},
{
"epoch": 0.25,
"learning_rate": 0.00017819999999999997,
"loss": 6.9424,
"step": 301
},
{
"epoch": 0.25,
"learning_rate": 0.00017879999999999998,
"loss": 7.038,
"step": 302
},
{
"epoch": 0.25,
"learning_rate": 0.00017939999999999997,
"loss": 7.1493,
"step": 303
},
{
"epoch": 0.26,
"learning_rate": 0.00017999999999999998,
"loss": 6.8401,
"step": 304
},
{
"epoch": 0.26,
"learning_rate": 0.00018059999999999997,
"loss": 6.9445,
"step": 305
},
{
"epoch": 0.26,
"learning_rate": 0.00018119999999999999,
"loss": 6.8652,
"step": 306
},
{
"epoch": 0.26,
"learning_rate": 0.00018179999999999997,
"loss": 6.9303,
"step": 307
},
{
"epoch": 0.26,
"learning_rate": 0.0001824,
"loss": 6.8556,
"step": 308
},
{
"epoch": 0.26,
"learning_rate": 0.00018299999999999998,
"loss": 6.7537,
"step": 309
},
{
"epoch": 0.26,
"learning_rate": 0.0001836,
"loss": 7.0273,
"step": 310
},
{
"epoch": 0.26,
"learning_rate": 0.00018419999999999998,
"loss": 6.7255,
"step": 311
},
{
"epoch": 0.26,
"learning_rate": 0.0001848,
"loss": 6.7813,
"step": 312
},
{
"epoch": 0.26,
"learning_rate": 0.00018539999999999998,
"loss": 6.8344,
"step": 313
},
{
"epoch": 0.26,
"learning_rate": 0.000186,
"loss": 6.9307,
"step": 314
},
{
"epoch": 0.26,
"learning_rate": 0.00018659999999999998,
"loss": 6.8175,
"step": 315
},
{
"epoch": 0.27,
"learning_rate": 0.0001872,
"loss": 7.0431,
"step": 316
},
{
"epoch": 0.27,
"learning_rate": 0.00018779999999999998,
"loss": 6.9237,
"step": 317
},
{
"epoch": 0.27,
"learning_rate": 0.00018839999999999997,
"loss": 6.951,
"step": 318
},
{
"epoch": 0.27,
"learning_rate": 0.00018899999999999999,
"loss": 6.8204,
"step": 319
},
{
"epoch": 0.27,
"learning_rate": 0.00018959999999999997,
"loss": 6.7867,
"step": 320
},
{
"epoch": 0.27,
"learning_rate": 0.0001902,
"loss": 6.832,
"step": 321
},
{
"epoch": 0.27,
"learning_rate": 0.00019079999999999998,
"loss": 6.7993,
"step": 322
},
{
"epoch": 0.27,
"learning_rate": 0.0001914,
"loss": 6.7834,
"step": 323
},
{
"epoch": 0.27,
"learning_rate": 0.00019199999999999998,
"loss": 6.8469,
"step": 324
},
{
"epoch": 0.27,
"learning_rate": 0.0001926,
"loss": 6.7455,
"step": 325
},
{
"epoch": 0.27,
"learning_rate": 0.00019319999999999998,
"loss": 6.9278,
"step": 326
},
{
"epoch": 0.27,
"learning_rate": 0.0001938,
"loss": 6.8588,
"step": 327
},
{
"epoch": 0.28,
"learning_rate": 0.00019439999999999998,
"loss": 6.9268,
"step": 328
},
{
"epoch": 0.28,
"learning_rate": 0.000195,
"loss": 6.8046,
"step": 329
},
{
"epoch": 0.28,
"learning_rate": 0.00019559999999999998,
"loss": 7.0146,
"step": 330
},
{
"epoch": 0.28,
"learning_rate": 0.0001962,
"loss": 6.88,
"step": 331
},
{
"epoch": 0.28,
"learning_rate": 0.00019679999999999999,
"loss": 6.9614,
"step": 332
},
{
"epoch": 0.28,
"learning_rate": 0.0001974,
"loss": 7.0782,
"step": 333
},
{
"epoch": 0.28,
"learning_rate": 0.000198,
"loss": 6.8374,
"step": 334
},
{
"epoch": 0.28,
"learning_rate": 0.0001986,
"loss": 6.873,
"step": 335
},
{
"epoch": 0.28,
"learning_rate": 0.0001992,
"loss": 6.9961,
"step": 336
},
{
"epoch": 0.28,
"learning_rate": 0.0001998,
"loss": 6.7669,
"step": 337
},
{
"epoch": 0.28,
"learning_rate": 0.0002004,
"loss": 7.0401,
"step": 338
},
{
"epoch": 0.28,
"learning_rate": 0.000201,
"loss": 6.8328,
"step": 339
},
{
"epoch": 0.29,
"learning_rate": 0.0002016,
"loss": 6.9227,
"step": 340
},
{
"epoch": 0.29,
"learning_rate": 0.0002022,
"loss": 6.7859,
"step": 341
},
{
"epoch": 0.29,
"learning_rate": 0.0002028,
"loss": 7.0195,
"step": 342
},
{
"epoch": 0.29,
"learning_rate": 0.00020339999999999998,
"loss": 6.8173,
"step": 343
},
{
"epoch": 0.29,
"learning_rate": 0.000204,
"loss": 6.9585,
"step": 344
},
{
"epoch": 0.29,
"learning_rate": 0.00020459999999999999,
"loss": 6.7353,
"step": 345
},
{
"epoch": 0.29,
"learning_rate": 0.0002052,
"loss": 6.6473,
"step": 346
},
{
"epoch": 0.29,
"learning_rate": 0.0002058,
"loss": 6.8274,
"step": 347
},
{
"epoch": 0.29,
"learning_rate": 0.00020639999999999998,
"loss": 6.9286,
"step": 348
},
{
"epoch": 0.29,
"learning_rate": 0.00020699999999999996,
"loss": 6.4948,
"step": 349
},
{
"epoch": 0.29,
"learning_rate": 0.00020759999999999998,
"loss": 6.6249,
"step": 350
},
{
"epoch": 0.3,
"learning_rate": 0.00020819999999999996,
"loss": 7.2221,
"step": 351
},
{
"epoch": 0.3,
"learning_rate": 0.00020879999999999998,
"loss": 7.0396,
"step": 352
},
{
"epoch": 0.3,
"learning_rate": 0.00020939999999999997,
"loss": 6.9473,
"step": 353
},
{
"epoch": 0.3,
"learning_rate": 0.00020999999999999998,
"loss": 7.0059,
"step": 354
},
{
"epoch": 0.3,
"learning_rate": 0.00021059999999999997,
"loss": 6.8829,
"step": 355
},
{
"epoch": 0.3,
"learning_rate": 0.00021119999999999996,
"loss": 6.9656,
"step": 356
},
{
"epoch": 0.3,
"learning_rate": 0.00021179999999999997,
"loss": 6.9957,
"step": 357
},
{
"epoch": 0.3,
"learning_rate": 0.00021239999999999996,
"loss": 6.8132,
"step": 358
},
{
"epoch": 0.3,
"learning_rate": 0.00021299999999999997,
"loss": 6.9307,
"step": 359
},
{
"epoch": 0.3,
"learning_rate": 0.00021359999999999996,
"loss": 7.0266,
"step": 360
},
{
"epoch": 0.3,
"learning_rate": 0.00021419999999999998,
"loss": 6.7915,
"step": 361
},
{
"epoch": 0.3,
"learning_rate": 0.00021479999999999996,
"loss": 6.7976,
"step": 362
},
{
"epoch": 0.31,
"learning_rate": 0.00021539999999999998,
"loss": 6.9853,
"step": 363
},
{
"epoch": 0.31,
"learning_rate": 0.00021599999999999996,
"loss": 6.9288,
"step": 364
},
{
"epoch": 0.31,
"learning_rate": 0.00021659999999999998,
"loss": 6.8851,
"step": 365
},
{
"epoch": 0.31,
"learning_rate": 0.00021719999999999997,
"loss": 6.9602,
"step": 366
},
{
"epoch": 0.31,
"learning_rate": 0.00021779999999999998,
"loss": 6.913,
"step": 367
},
{
"epoch": 0.31,
"learning_rate": 0.00021839999999999997,
"loss": 7.077,
"step": 368
},
{
"epoch": 0.31,
"learning_rate": 0.00021899999999999998,
"loss": 6.9495,
"step": 369
},
{
"epoch": 0.31,
"learning_rate": 0.00021959999999999997,
"loss": 6.9922,
"step": 370
},
{
"epoch": 0.31,
"learning_rate": 0.00022019999999999999,
"loss": 6.7844,
"step": 371
},
{
"epoch": 0.31,
"learning_rate": 0.00022079999999999997,
"loss": 6.8582,
"step": 372
},
{
"epoch": 0.31,
"learning_rate": 0.0002214,
"loss": 6.7354,
"step": 373
},
{
"epoch": 0.31,
"learning_rate": 0.00022199999999999998,
"loss": 6.8776,
"step": 374
},
{
"epoch": 0.32,
"learning_rate": 0.0002226,
"loss": 6.8208,
"step": 375
},
{
"epoch": 0.32,
"learning_rate": 0.00022319999999999998,
"loss": 6.968,
"step": 376
},
{
"epoch": 0.32,
"learning_rate": 0.0002238,
"loss": 6.8381,
"step": 377
},
{
"epoch": 0.32,
"learning_rate": 0.00022439999999999998,
"loss": 6.8864,
"step": 378
},
{
"epoch": 0.32,
"learning_rate": 0.000225,
"loss": 6.9176,
"step": 379
},
{
"epoch": 0.32,
"learning_rate": 0.00022559999999999998,
"loss": 6.8812,
"step": 380
},
{
"epoch": 0.32,
"learning_rate": 0.00022619999999999997,
"loss": 6.8497,
"step": 381
},
{
"epoch": 0.32,
"learning_rate": 0.00022679999999999998,
"loss": 6.8634,
"step": 382
},
{
"epoch": 0.32,
"learning_rate": 0.00022739999999999997,
"loss": 6.7056,
"step": 383
},
{
"epoch": 0.32,
"learning_rate": 0.00022799999999999999,
"loss": 7.0611,
"step": 384
},
{
"epoch": 0.32,
"learning_rate": 0.00022859999999999997,
"loss": 6.8537,
"step": 385
},
{
"epoch": 0.32,
"learning_rate": 0.0002292,
"loss": 6.7578,
"step": 386
},
{
"epoch": 0.33,
"learning_rate": 0.00022979999999999997,
"loss": 6.807,
"step": 387
},
{
"epoch": 0.33,
"learning_rate": 0.0002304,
"loss": 6.883,
"step": 388
},
{
"epoch": 0.33,
"learning_rate": 0.00023099999999999998,
"loss": 6.9673,
"step": 389
},
{
"epoch": 0.33,
"learning_rate": 0.0002316,
"loss": 7.1346,
"step": 390
},
{
"epoch": 0.33,
"learning_rate": 0.00023219999999999998,
"loss": 7.0001,
"step": 391
},
{
"epoch": 0.33,
"learning_rate": 0.0002328,
"loss": 6.8242,
"step": 392
},
{
"epoch": 0.33,
"learning_rate": 0.00023339999999999998,
"loss": 6.7149,
"step": 393
},
{
"epoch": 0.33,
"learning_rate": 0.000234,
"loss": 6.8308,
"step": 394
},
{
"epoch": 0.33,
"learning_rate": 0.00023459999999999998,
"loss": 6.6752,
"step": 395
},
{
"epoch": 0.33,
"learning_rate": 0.0002352,
"loss": 6.7412,
"step": 396
},
{
"epoch": 0.33,
"learning_rate": 0.00023579999999999999,
"loss": 6.4683,
"step": 397
},
{
"epoch": 0.33,
"learning_rate": 0.0002364,
"loss": 6.5592,
"step": 398
},
{
"epoch": 0.34,
"learning_rate": 0.000237,
"loss": 6.4819,
"step": 399
},
{
"epoch": 0.34,
"learning_rate": 0.0002376,
"loss": 6.2644,
"step": 400
},
{
"epoch": 0.34,
"learning_rate": 0.0002382,
"loss": 6.9606,
"step": 401
},
{
"epoch": 0.34,
"learning_rate": 0.0002388,
"loss": 6.937,
"step": 402
},
{
"epoch": 0.34,
"learning_rate": 0.0002394,
"loss": 6.9723,
"step": 403
},
{
"epoch": 0.34,
"learning_rate": 0.00023999999999999998,
"loss": 7.0195,
"step": 404
},
{
"epoch": 0.34,
"learning_rate": 0.0002406,
"loss": 6.7774,
"step": 405
},
{
"epoch": 0.34,
"learning_rate": 0.00024119999999999998,
"loss": 6.8926,
"step": 406
},
{
"epoch": 0.34,
"learning_rate": 0.0002418,
"loss": 6.9179,
"step": 407
},
{
"epoch": 0.34,
"learning_rate": 0.00024239999999999998,
"loss": 6.9387,
"step": 408
},
{
"epoch": 0.34,
"learning_rate": 0.000243,
"loss": 6.8276,
"step": 409
},
{
"epoch": 0.34,
"learning_rate": 0.00024359999999999999,
"loss": 7.0094,
"step": 410
},
{
"epoch": 0.35,
"learning_rate": 0.00024419999999999997,
"loss": 6.9919,
"step": 411
},
{
"epoch": 0.35,
"learning_rate": 0.0002448,
"loss": 6.9548,
"step": 412
},
{
"epoch": 0.35,
"learning_rate": 0.00024539999999999995,
"loss": 6.8655,
"step": 413
},
{
"epoch": 0.35,
"learning_rate": 0.00024599999999999996,
"loss": 6.7854,
"step": 414
},
{
"epoch": 0.35,
"learning_rate": 0.0002466,
"loss": 6.8031,
"step": 415
},
{
"epoch": 0.35,
"learning_rate": 0.0002472,
"loss": 7.0633,
"step": 416
},
{
"epoch": 0.35,
"learning_rate": 0.00024779999999999995,
"loss": 6.8614,
"step": 417
},
{
"epoch": 0.35,
"learning_rate": 0.00024839999999999997,
"loss": 6.7194,
"step": 418
},
{
"epoch": 0.35,
"learning_rate": 0.000249,
"loss": 6.8107,
"step": 419
},
{
"epoch": 0.35,
"learning_rate": 0.00024959999999999994,
"loss": 6.9937,
"step": 420
},
{
"epoch": 0.35,
"learning_rate": 0.00025019999999999996,
"loss": 6.8863,
"step": 421
},
{
"epoch": 0.35,
"learning_rate": 0.00025079999999999997,
"loss": 6.7743,
"step": 422
},
{
"epoch": 0.36,
"learning_rate": 0.0002514,
"loss": 6.8863,
"step": 423
},
{
"epoch": 0.36,
"learning_rate": 0.00025199999999999995,
"loss": 6.7851,
"step": 424
},
{
"epoch": 0.36,
"learning_rate": 0.00025259999999999996,
"loss": 7.0815,
"step": 425
},
{
"epoch": 0.36,
"learning_rate": 0.0002532,
"loss": 6.8696,
"step": 426
},
{
"epoch": 0.36,
"learning_rate": 0.0002538,
"loss": 6.8131,
"step": 427
},
{
"epoch": 0.36,
"learning_rate": 0.00025439999999999995,
"loss": 6.8926,
"step": 428
},
{
"epoch": 0.36,
"learning_rate": 0.00025499999999999996,
"loss": 6.9502,
"step": 429
},
{
"epoch": 0.36,
"learning_rate": 0.0002556,
"loss": 6.8884,
"step": 430
},
{
"epoch": 0.36,
"learning_rate": 0.0002562,
"loss": 6.9901,
"step": 431
},
{
"epoch": 0.36,
"learning_rate": 0.00025679999999999995,
"loss": 6.666,
"step": 432
},
{
"epoch": 0.36,
"learning_rate": 0.00025739999999999997,
"loss": 6.9318,
"step": 433
},
{
"epoch": 0.36,
"learning_rate": 0.000258,
"loss": 6.8993,
"step": 434
},
{
"epoch": 0.37,
"learning_rate": 0.0002586,
"loss": 6.9596,
"step": 435
},
{
"epoch": 0.37,
"learning_rate": 0.00025919999999999996,
"loss": 6.6773,
"step": 436
},
{
"epoch": 0.37,
"learning_rate": 0.00025979999999999997,
"loss": 6.7725,
"step": 437
},
{
"epoch": 0.37,
"learning_rate": 0.0002604,
"loss": 6.7497,
"step": 438
},
{
"epoch": 0.37,
"learning_rate": 0.000261,
"loss": 7.0099,
"step": 439
},
{
"epoch": 0.37,
"learning_rate": 0.00026159999999999996,
"loss": 6.7874,
"step": 440
},
{
"epoch": 0.37,
"learning_rate": 0.0002622,
"loss": 6.8823,
"step": 441
},
{
"epoch": 0.37,
"learning_rate": 0.0002628,
"loss": 6.767,
"step": 442
},
{
"epoch": 0.37,
"learning_rate": 0.00026339999999999995,
"loss": 6.8022,
"step": 443
},
{
"epoch": 0.37,
"learning_rate": 0.00026399999999999997,
"loss": 6.6632,
"step": 444
},
{
"epoch": 0.37,
"learning_rate": 0.0002646,
"loss": 6.5436,
"step": 445
},
{
"epoch": 0.37,
"learning_rate": 0.0002652,
"loss": 6.759,
"step": 446
},
{
"epoch": 0.38,
"learning_rate": 0.00026579999999999996,
"loss": 6.672,
"step": 447
},
{
"epoch": 0.38,
"learning_rate": 0.00026639999999999997,
"loss": 6.8294,
"step": 448
},
{
"epoch": 0.38,
"learning_rate": 0.000267,
"loss": 6.5779,
"step": 449
},
{
"epoch": 0.38,
"learning_rate": 0.0002676,
"loss": 6.3936,
"step": 450
},
{
"epoch": 0.38,
"learning_rate": 0.00026819999999999996,
"loss": 6.9043,
"step": 451
},
{
"epoch": 0.38,
"learning_rate": 0.0002688,
"loss": 6.8242,
"step": 452
},
{
"epoch": 0.38,
"learning_rate": 0.0002694,
"loss": 6.8922,
"step": 453
},
{
"epoch": 0.38,
"learning_rate": 0.00027,
"loss": 6.8742,
"step": 454
},
{
"epoch": 0.38,
"learning_rate": 0.00027059999999999996,
"loss": 6.8983,
"step": 455
},
{
"epoch": 0.38,
"learning_rate": 0.0002712,
"loss": 6.8824,
"step": 456
},
{
"epoch": 0.38,
"learning_rate": 0.0002718,
"loss": 6.8802,
"step": 457
},
{
"epoch": 0.39,
"learning_rate": 0.0002724,
"loss": 7.005,
"step": 458
},
{
"epoch": 0.39,
"learning_rate": 0.00027299999999999997,
"loss": 6.9349,
"step": 459
},
{
"epoch": 0.39,
"learning_rate": 0.0002736,
"loss": 6.9665,
"step": 460
},
{
"epoch": 0.39,
"learning_rate": 0.0002742,
"loss": 6.8687,
"step": 461
},
{
"epoch": 0.39,
"learning_rate": 0.0002748,
"loss": 6.837,
"step": 462
},
{
"epoch": 0.39,
"learning_rate": 0.00027539999999999997,
"loss": 6.9516,
"step": 463
},
{
"epoch": 0.39,
"learning_rate": 0.000276,
"loss": 6.9546,
"step": 464
},
{
"epoch": 0.39,
"learning_rate": 0.0002766,
"loss": 6.9346,
"step": 465
},
{
"epoch": 0.39,
"learning_rate": 0.0002772,
"loss": 6.8403,
"step": 466
},
{
"epoch": 0.39,
"learning_rate": 0.0002778,
"loss": 6.7476,
"step": 467
},
{
"epoch": 0.39,
"learning_rate": 0.0002784,
"loss": 6.814,
"step": 468
},
{
"epoch": 0.39,
"learning_rate": 0.000279,
"loss": 6.6635,
"step": 469
},
{
"epoch": 0.4,
"learning_rate": 0.00027959999999999997,
"loss": 7.0382,
"step": 470
},
{
"epoch": 0.4,
"learning_rate": 0.0002802,
"loss": 6.8237,
"step": 471
},
{
"epoch": 0.4,
"learning_rate": 0.0002808,
"loss": 6.9813,
"step": 472
},
{
"epoch": 0.4,
"learning_rate": 0.00028139999999999996,
"loss": 6.9854,
"step": 473
},
{
"epoch": 0.4,
"learning_rate": 0.00028199999999999997,
"loss": 6.9881,
"step": 474
},
{
"epoch": 0.4,
"learning_rate": 0.0002826,
"loss": 6.8715,
"step": 475
},
{
"epoch": 0.4,
"learning_rate": 0.00028319999999999994,
"loss": 6.7408,
"step": 476
},
{
"epoch": 0.4,
"learning_rate": 0.00028379999999999996,
"loss": 6.661,
"step": 477
},
{
"epoch": 0.4,
"learning_rate": 0.0002844,
"loss": 6.8297,
"step": 478
},
{
"epoch": 0.4,
"learning_rate": 0.000285,
"loss": 6.7972,
"step": 479
},
{
"epoch": 0.4,
"learning_rate": 0.00028559999999999995,
"loss": 6.9775,
"step": 480
},
{
"epoch": 0.4,
"learning_rate": 0.00028619999999999996,
"loss": 6.694,
"step": 481
},
{
"epoch": 0.41,
"learning_rate": 0.0002868,
"loss": 6.7856,
"step": 482
},
{
"epoch": 0.41,
"learning_rate": 0.00028739999999999994,
"loss": 6.8002,
"step": 483
},
{
"epoch": 0.41,
"learning_rate": 0.00028799999999999995,
"loss": 6.903,
"step": 484
},
{
"epoch": 0.41,
"learning_rate": 0.00028859999999999997,
"loss": 6.8838,
"step": 485
},
{
"epoch": 0.41,
"learning_rate": 0.0002892,
"loss": 6.7277,
"step": 486
},
{
"epoch": 0.41,
"learning_rate": 0.00028979999999999994,
"loss": 6.8032,
"step": 487
},
{
"epoch": 0.41,
"learning_rate": 0.00029039999999999996,
"loss": 6.8841,
"step": 488
},
{
"epoch": 0.41,
"learning_rate": 0.00029099999999999997,
"loss": 6.9144,
"step": 489
},
{
"epoch": 0.41,
"learning_rate": 0.0002916,
"loss": 6.783,
"step": 490
},
{
"epoch": 0.41,
"learning_rate": 0.00029219999999999995,
"loss": 6.9195,
"step": 491
},
{
"epoch": 0.41,
"learning_rate": 0.00029279999999999996,
"loss": 6.9088,
"step": 492
},
{
"epoch": 0.41,
"learning_rate": 0.0002934,
"loss": 6.9646,
"step": 493
},
{
"epoch": 0.42,
"learning_rate": 0.000294,
"loss": 6.8541,
"step": 494
},
{
"epoch": 0.42,
"learning_rate": 0.00029459999999999995,
"loss": 6.7656,
"step": 495
},
{
"epoch": 0.42,
"learning_rate": 0.00029519999999999997,
"loss": 6.7223,
"step": 496
},
{
"epoch": 0.42,
"learning_rate": 0.0002958,
"loss": 6.694,
"step": 497
},
{
"epoch": 0.42,
"learning_rate": 0.0002964,
"loss": 6.592,
"step": 498
},
{
"epoch": 0.42,
"learning_rate": 0.00029699999999999996,
"loss": 6.5935,
"step": 499
},
{
"epoch": 0.42,
"learning_rate": 0.00029759999999999997,
"loss": 6.5385,
"step": 500
},
{
"epoch": 0.42,
"eval_loss": 6.84136962890625,
"eval_runtime": 492.9707,
"eval_samples_per_second": 5.359,
"eval_steps_per_second": 0.448,
"eval_wer": 1.0500029763676408,
"step": 500
},
{
"epoch": 0.42,
"learning_rate": 0.0002982,
"loss": 6.9981,
"step": 501
},
{
"epoch": 0.42,
"learning_rate": 0.0002988,
"loss": 6.9229,
"step": 502
},
{
"epoch": 0.42,
"learning_rate": 0.00029939999999999996,
"loss": 6.959,
"step": 503
},
{
"epoch": 0.42,
"learning_rate": 0.0003,
"loss": 6.8615,
"step": 504
},
{
"epoch": 0.42,
"learning_rate": 0.0002995645863570392,
"loss": 6.8716,
"step": 505
},
{
"epoch": 0.43,
"learning_rate": 0.0002991291727140783,
"loss": 6.9566,
"step": 506
},
{
"epoch": 0.43,
"learning_rate": 0.00029869375907111753,
"loss": 6.8458,
"step": 507
},
{
"epoch": 0.43,
"learning_rate": 0.00029825834542815676,
"loss": 6.8495,
"step": 508
},
{
"epoch": 0.43,
"learning_rate": 0.00029782293178519587,
"loss": 6.8151,
"step": 509
},
{
"epoch": 0.43,
"learning_rate": 0.0002973875181422351,
"loss": 6.9038,
"step": 510
},
{
"epoch": 0.43,
"learning_rate": 0.00029695210449927426,
"loss": 6.7452,
"step": 511
},
{
"epoch": 0.43,
"learning_rate": 0.0002965166908563135,
"loss": 6.9479,
"step": 512
},
{
"epoch": 0.43,
"learning_rate": 0.00029608127721335266,
"loss": 6.8531,
"step": 513
},
{
"epoch": 0.43,
"learning_rate": 0.0002956458635703918,
"loss": 6.7814,
"step": 514
},
{
"epoch": 0.43,
"learning_rate": 0.00029521044992743105,
"loss": 6.9125,
"step": 515
},
{
"epoch": 0.43,
"learning_rate": 0.0002947750362844702,
"loss": 6.7181,
"step": 516
},
{
"epoch": 0.43,
"learning_rate": 0.0002943396226415094,
"loss": 6.9138,
"step": 517
},
{
"epoch": 0.44,
"learning_rate": 0.0002939042089985486,
"loss": 6.7617,
"step": 518
},
{
"epoch": 0.44,
"learning_rate": 0.0002934687953555878,
"loss": 7.0497,
"step": 519
},
{
"epoch": 0.44,
"learning_rate": 0.00029303338171262695,
"loss": 6.7293,
"step": 520
},
{
"epoch": 0.44,
"learning_rate": 0.00029259796806966617,
"loss": 6.8915,
"step": 521
},
{
"epoch": 0.44,
"learning_rate": 0.00029216255442670534,
"loss": 6.8637,
"step": 522
},
{
"epoch": 0.44,
"learning_rate": 0.0002917271407837445,
"loss": 6.9329,
"step": 523
},
{
"epoch": 0.44,
"learning_rate": 0.00029129172714078373,
"loss": 6.8185,
"step": 524
},
{
"epoch": 0.44,
"learning_rate": 0.0002908563134978229,
"loss": 6.8207,
"step": 525
},
{
"epoch": 0.44,
"learning_rate": 0.00029042089985486207,
"loss": 6.726,
"step": 526
},
{
"epoch": 0.44,
"learning_rate": 0.0002899854862119013,
"loss": 6.8523,
"step": 527
},
{
"epoch": 0.44,
"learning_rate": 0.00028955007256894046,
"loss": 6.8668,
"step": 528
},
{
"epoch": 0.44,
"learning_rate": 0.0002891146589259796,
"loss": 6.857,
"step": 529
},
{
"epoch": 0.45,
"learning_rate": 0.00028867924528301885,
"loss": 6.8504,
"step": 530
},
{
"epoch": 0.45,
"learning_rate": 0.000288243831640058,
"loss": 6.8464,
"step": 531
},
{
"epoch": 0.45,
"learning_rate": 0.0002878084179970972,
"loss": 6.8743,
"step": 532
},
{
"epoch": 0.45,
"learning_rate": 0.0002873730043541364,
"loss": 6.6959,
"step": 533
},
{
"epoch": 0.45,
"learning_rate": 0.0002869375907111756,
"loss": 6.7821,
"step": 534
},
{
"epoch": 0.45,
"learning_rate": 0.00028650217706821475,
"loss": 6.9036,
"step": 535
},
{
"epoch": 0.45,
"learning_rate": 0.00028606676342525397,
"loss": 6.7307,
"step": 536
},
{
"epoch": 0.45,
"learning_rate": 0.00028563134978229314,
"loss": 6.8073,
"step": 537
},
{
"epoch": 0.45,
"learning_rate": 0.0002851959361393323,
"loss": 6.6503,
"step": 538
},
{
"epoch": 0.45,
"learning_rate": 0.00028476052249637153,
"loss": 6.8456,
"step": 539
},
{
"epoch": 0.45,
"learning_rate": 0.0002843251088534107,
"loss": 6.8926,
"step": 540
},
{
"epoch": 0.45,
"learning_rate": 0.0002838896952104499,
"loss": 6.9259,
"step": 541
},
{
"epoch": 0.46,
"learning_rate": 0.0002834542815674891,
"loss": 6.7483,
"step": 542
},
{
"epoch": 0.46,
"learning_rate": 0.00028301886792452826,
"loss": 6.5999,
"step": 543
},
{
"epoch": 0.46,
"learning_rate": 0.0002825834542815675,
"loss": 6.9423,
"step": 544
},
{
"epoch": 0.46,
"learning_rate": 0.00028214804063860665,
"loss": 6.8099,
"step": 545
},
{
"epoch": 0.46,
"learning_rate": 0.0002817126269956458,
"loss": 6.7497,
"step": 546
},
{
"epoch": 0.46,
"learning_rate": 0.00028127721335268504,
"loss": 6.4958,
"step": 547
},
{
"epoch": 0.46,
"learning_rate": 0.0002808417997097242,
"loss": 6.6541,
"step": 548
},
{
"epoch": 0.46,
"learning_rate": 0.0002804063860667634,
"loss": 6.4972,
"step": 549
},
{
"epoch": 0.46,
"learning_rate": 0.0002799709724238026,
"loss": 6.3706,
"step": 550
},
{
"epoch": 0.46,
"learning_rate": 0.0002795355587808418,
"loss": 7.0303,
"step": 551
},
{
"epoch": 0.46,
"learning_rate": 0.00027910014513788094,
"loss": 6.9142,
"step": 552
},
{
"epoch": 0.46,
"learning_rate": 0.00027866473149492017,
"loss": 6.934,
"step": 553
},
{
"epoch": 0.47,
"learning_rate": 0.00027822931785195933,
"loss": 6.8109,
"step": 554
},
{
"epoch": 0.47,
"learning_rate": 0.0002777939042089985,
"loss": 6.9807,
"step": 555
},
{
"epoch": 0.47,
"learning_rate": 0.0002773584905660377,
"loss": 6.8507,
"step": 556
},
{
"epoch": 0.47,
"learning_rate": 0.0002769230769230769,
"loss": 6.8355,
"step": 557
},
{
"epoch": 0.47,
"learning_rate": 0.00027648766328011606,
"loss": 6.7796,
"step": 558
},
{
"epoch": 0.47,
"learning_rate": 0.0002760522496371553,
"loss": 6.9876,
"step": 559
},
{
"epoch": 0.47,
"learning_rate": 0.00027561683599419446,
"loss": 6.7679,
"step": 560
},
{
"epoch": 0.47,
"learning_rate": 0.0002751814223512336,
"loss": 7.0946,
"step": 561
},
{
"epoch": 0.47,
"learning_rate": 0.00027474600870827285,
"loss": 6.6743,
"step": 562
},
{
"epoch": 0.47,
"learning_rate": 0.000274310595065312,
"loss": 6.946,
"step": 563
},
{
"epoch": 0.47,
"learning_rate": 0.0002738751814223512,
"loss": 6.5808,
"step": 564
},
{
"epoch": 0.47,
"learning_rate": 0.0002734397677793904,
"loss": 6.7715,
"step": 565
},
{
"epoch": 0.48,
"learning_rate": 0.0002730043541364296,
"loss": 6.8293,
"step": 566
},
{
"epoch": 0.48,
"learning_rate": 0.00027256894049346875,
"loss": 6.7443,
"step": 567
},
{
"epoch": 0.48,
"learning_rate": 0.00027213352685050797,
"loss": 6.7721,
"step": 568
},
{
"epoch": 0.48,
"learning_rate": 0.00027169811320754714,
"loss": 6.8,
"step": 569
},
{
"epoch": 0.48,
"learning_rate": 0.00027126269956458636,
"loss": 6.8229,
"step": 570
},
{
"epoch": 0.48,
"learning_rate": 0.00027082728592162553,
"loss": 6.8486,
"step": 571
},
{
"epoch": 0.48,
"learning_rate": 0.0002703918722786647,
"loss": 6.8454,
"step": 572
},
{
"epoch": 0.48,
"learning_rate": 0.0002699564586357039,
"loss": 6.8049,
"step": 573
},
{
"epoch": 0.48,
"learning_rate": 0.0002695210449927431,
"loss": 6.8674,
"step": 574
},
{
"epoch": 0.48,
"learning_rate": 0.00026908563134978226,
"loss": 7.0463,
"step": 575
},
{
"epoch": 0.48,
"learning_rate": 0.0002686502177068215,
"loss": 6.9523,
"step": 576
},
{
"epoch": 0.49,
"learning_rate": 0.00026821480406386065,
"loss": 6.908,
"step": 577
},
{
"epoch": 0.49,
"learning_rate": 0.0002677793904208998,
"loss": 6.8119,
"step": 578
},
{
"epoch": 0.49,
"learning_rate": 0.00026734397677793904,
"loss": 6.7422,
"step": 579
},
{
"epoch": 0.49,
"learning_rate": 0.0002669085631349782,
"loss": 6.9766,
"step": 580
},
{
"epoch": 0.49,
"learning_rate": 0.0002664731494920174,
"loss": 6.9451,
"step": 581
},
{
"epoch": 0.49,
"learning_rate": 0.0002660377358490566,
"loss": 6.7142,
"step": 582
},
{
"epoch": 0.49,
"learning_rate": 0.00026560232220609577,
"loss": 6.7909,
"step": 583
},
{
"epoch": 0.49,
"learning_rate": 0.00026516690856313494,
"loss": 6.7317,
"step": 584
},
{
"epoch": 0.49,
"learning_rate": 0.00026473149492017416,
"loss": 6.9627,
"step": 585
},
{
"epoch": 0.49,
"learning_rate": 0.00026429608127721333,
"loss": 6.8146,
"step": 586
},
{
"epoch": 0.49,
"learning_rate": 0.0002638606676342525,
"loss": 6.8995,
"step": 587
},
{
"epoch": 0.49,
"learning_rate": 0.0002634252539912917,
"loss": 6.7827,
"step": 588
},
{
"epoch": 0.5,
"learning_rate": 0.0002629898403483309,
"loss": 6.6971,
"step": 589
},
{
"epoch": 0.5,
"learning_rate": 0.00026255442670537006,
"loss": 6.7937,
"step": 590
},
{
"epoch": 0.5,
"learning_rate": 0.0002621190130624093,
"loss": 6.8905,
"step": 591
},
{
"epoch": 0.5,
"learning_rate": 0.00026168359941944845,
"loss": 6.7583,
"step": 592
},
{
"epoch": 0.5,
"learning_rate": 0.0002612481857764876,
"loss": 6.8475,
"step": 593
},
{
"epoch": 0.5,
"learning_rate": 0.00026081277213352684,
"loss": 6.6513,
"step": 594
},
{
"epoch": 0.5,
"learning_rate": 0.000260377358490566,
"loss": 6.5678,
"step": 595
},
{
"epoch": 0.5,
"learning_rate": 0.0002599419448476052,
"loss": 6.5821,
"step": 596
},
{
"epoch": 0.5,
"learning_rate": 0.0002595065312046444,
"loss": 6.5131,
"step": 597
},
{
"epoch": 0.5,
"learning_rate": 0.0002590711175616836,
"loss": 6.5356,
"step": 598
},
{
"epoch": 0.5,
"learning_rate": 0.00025863570391872274,
"loss": 6.3458,
"step": 599
},
{
"epoch": 0.5,
"learning_rate": 0.00025820029027576197,
"loss": 6.2131,
"step": 600
},
{
"epoch": 0.51,
"learning_rate": 0.00025776487663280113,
"loss": 6.8324,
"step": 601
},
{
"epoch": 0.51,
"learning_rate": 0.00025732946298984036,
"loss": 7.035,
"step": 602
},
{
"epoch": 0.51,
"learning_rate": 0.0002568940493468795,
"loss": 6.9698,
"step": 603
},
{
"epoch": 0.51,
"learning_rate": 0.0002564586357039187,
"loss": 6.796,
"step": 604
},
{
"epoch": 0.51,
"learning_rate": 0.0002560232220609579,
"loss": 6.9368,
"step": 605
},
{
"epoch": 0.51,
"learning_rate": 0.0002555878084179971,
"loss": 7.0629,
"step": 606
},
{
"epoch": 0.51,
"learning_rate": 0.00025515239477503626,
"loss": 6.7385,
"step": 607
},
{
"epoch": 0.51,
"learning_rate": 0.0002547169811320755,
"loss": 6.9024,
"step": 608
},
{
"epoch": 0.51,
"learning_rate": 0.00025428156748911465,
"loss": 6.8677,
"step": 609
},
{
"epoch": 0.51,
"learning_rate": 0.0002538461538461538,
"loss": 6.8823,
"step": 610
},
{
"epoch": 0.51,
"learning_rate": 0.00025341074020319304,
"loss": 6.9585,
"step": 611
},
{
"epoch": 0.51,
"learning_rate": 0.0002529753265602322,
"loss": 6.7669,
"step": 612
},
{
"epoch": 0.52,
"learning_rate": 0.0002525399129172714,
"loss": 6.7928,
"step": 613
},
{
"epoch": 0.52,
"learning_rate": 0.0002521044992743106,
"loss": 6.7403,
"step": 614
},
{
"epoch": 0.52,
"learning_rate": 0.00025166908563134977,
"loss": 6.9072,
"step": 615
},
{
"epoch": 0.52,
"learning_rate": 0.00025123367198838894,
"loss": 6.7032,
"step": 616
},
{
"epoch": 0.52,
"learning_rate": 0.00025079825834542816,
"loss": 6.7789,
"step": 617
},
{
"epoch": 0.52,
"learning_rate": 0.00025036284470246733,
"loss": 6.7426,
"step": 618
},
{
"epoch": 0.52,
"learning_rate": 0.0002499274310595065,
"loss": 6.8267,
"step": 619
},
{
"epoch": 0.52,
"learning_rate": 0.0002494920174165457,
"loss": 6.921,
"step": 620
},
{
"epoch": 0.52,
"learning_rate": 0.0002490566037735849,
"loss": 6.6063,
"step": 621
},
{
"epoch": 0.52,
"learning_rate": 0.00024862119013062406,
"loss": 6.7041,
"step": 622
},
{
"epoch": 0.52,
"learning_rate": 0.0002481857764876633,
"loss": 6.7416,
"step": 623
},
{
"epoch": 0.52,
"learning_rate": 0.00024775036284470245,
"loss": 6.8333,
"step": 624
},
{
"epoch": 0.53,
"learning_rate": 0.0002473149492017416,
"loss": 6.8254,
"step": 625
},
{
"epoch": 0.53,
"learning_rate": 0.00024687953555878084,
"loss": 6.9049,
"step": 626
},
{
"epoch": 0.53,
"learning_rate": 0.00024644412191582,
"loss": 6.894,
"step": 627
},
{
"epoch": 0.53,
"learning_rate": 0.0002460087082728592,
"loss": 6.6467,
"step": 628
},
{
"epoch": 0.53,
"learning_rate": 0.0002455732946298984,
"loss": 6.7039,
"step": 629
},
{
"epoch": 0.53,
"learning_rate": 0.00024513788098693757,
"loss": 6.7366,
"step": 630
},
{
"epoch": 0.53,
"learning_rate": 0.0002447024673439768,
"loss": 6.835,
"step": 631
},
{
"epoch": 0.53,
"learning_rate": 0.00024426705370101596,
"loss": 6.9096,
"step": 632
},
{
"epoch": 0.53,
"learning_rate": 0.00024383164005805513,
"loss": 6.7752,
"step": 633
},
{
"epoch": 0.53,
"learning_rate": 0.00024339622641509433,
"loss": 6.8582,
"step": 634
},
{
"epoch": 0.53,
"learning_rate": 0.0002429608127721335,
"loss": 6.8206,
"step": 635
},
{
"epoch": 0.53,
"learning_rate": 0.0002425253991291727,
"loss": 6.8518,
"step": 636
},
{
"epoch": 0.54,
"learning_rate": 0.0002420899854862119,
"loss": 6.7277,
"step": 637
},
{
"epoch": 0.54,
"learning_rate": 0.00024165457184325106,
"loss": 6.7397,
"step": 638
},
{
"epoch": 0.54,
"learning_rate": 0.00024121915820029025,
"loss": 6.7723,
"step": 639
},
{
"epoch": 0.54,
"learning_rate": 0.00024078374455732945,
"loss": 6.9939,
"step": 640
},
{
"epoch": 0.54,
"learning_rate": 0.00024034833091436862,
"loss": 6.7244,
"step": 641
},
{
"epoch": 0.54,
"learning_rate": 0.0002399129172714078,
"loss": 6.6818,
"step": 642
},
{
"epoch": 0.54,
"learning_rate": 0.000239477503628447,
"loss": 6.8975,
"step": 643
},
{
"epoch": 0.54,
"learning_rate": 0.0002390420899854862,
"loss": 6.7538,
"step": 644
},
{
"epoch": 0.54,
"learning_rate": 0.00023860667634252537,
"loss": 6.7806,
"step": 645
},
{
"epoch": 0.54,
"learning_rate": 0.00023817126269956457,
"loss": 6.6033,
"step": 646
},
{
"epoch": 0.54,
"learning_rate": 0.00023773584905660377,
"loss": 6.2432,
"step": 647
},
{
"epoch": 0.54,
"learning_rate": 0.00023730043541364293,
"loss": 6.7507,
"step": 648
},
{
"epoch": 0.55,
"learning_rate": 0.00023686502177068213,
"loss": 6.7112,
"step": 649
},
{
"epoch": 0.55,
"learning_rate": 0.00023642960812772133,
"loss": 6.2708,
"step": 650
},
{
"epoch": 0.55,
"learning_rate": 0.0002359941944847605,
"loss": 7.1084,
"step": 651
},
{
"epoch": 0.55,
"learning_rate": 0.0002355587808417997,
"loss": 6.8681,
"step": 652
},
{
"epoch": 0.55,
"learning_rate": 0.00023512336719883889,
"loss": 6.9773,
"step": 653
},
{
"epoch": 0.55,
"learning_rate": 0.00023468795355587805,
"loss": 6.9661,
"step": 654
},
{
"epoch": 0.55,
"learning_rate": 0.00023425253991291725,
"loss": 6.9394,
"step": 655
},
{
"epoch": 0.55,
"learning_rate": 0.00023381712626995645,
"loss": 6.8747,
"step": 656
},
{
"epoch": 0.55,
"learning_rate": 0.00023338171262699562,
"loss": 6.7762,
"step": 657
},
{
"epoch": 0.55,
"learning_rate": 0.0002329462989840348,
"loss": 6.8591,
"step": 658
},
{
"epoch": 0.55,
"learning_rate": 0.000232510885341074,
"loss": 6.8248,
"step": 659
},
{
"epoch": 0.55,
"learning_rate": 0.0002320754716981132,
"loss": 6.8163,
"step": 660
},
{
"epoch": 0.56,
"learning_rate": 0.00023164005805515237,
"loss": 6.7533,
"step": 661
},
{
"epoch": 0.56,
"learning_rate": 0.00023120464441219157,
"loss": 6.8689,
"step": 662
},
{
"epoch": 0.56,
"learning_rate": 0.00023076923076923076,
"loss": 6.6898,
"step": 663
},
{
"epoch": 0.56,
"learning_rate": 0.00023033381712626993,
"loss": 6.9015,
"step": 664
},
{
"epoch": 0.56,
"learning_rate": 0.00022989840348330913,
"loss": 6.8106,
"step": 665
},
{
"epoch": 0.56,
"learning_rate": 0.00022946298984034832,
"loss": 6.6644,
"step": 666
},
{
"epoch": 0.56,
"learning_rate": 0.0002290275761973875,
"loss": 6.8867,
"step": 667
},
{
"epoch": 0.56,
"learning_rate": 0.0002285921625544267,
"loss": 6.7743,
"step": 668
},
{
"epoch": 0.56,
"learning_rate": 0.00022815674891146588,
"loss": 7.0751,
"step": 669
},
{
"epoch": 0.56,
"learning_rate": 0.00022772133526850505,
"loss": 6.9112,
"step": 670
},
{
"epoch": 0.56,
"learning_rate": 0.00022728592162554425,
"loss": 6.808,
"step": 671
},
{
"epoch": 0.56,
"learning_rate": 0.00022685050798258345,
"loss": 6.7739,
"step": 672
},
{
"epoch": 0.57,
"learning_rate": 0.00022641509433962264,
"loss": 6.7714,
"step": 673
},
{
"epoch": 0.57,
"learning_rate": 0.0002259796806966618,
"loss": 6.7218,
"step": 674
},
{
"epoch": 0.57,
"learning_rate": 0.000225544267053701,
"loss": 6.8919,
"step": 675
},
{
"epoch": 0.57,
"learning_rate": 0.0002251088534107402,
"loss": 6.7952,
"step": 676
},
{
"epoch": 0.57,
"learning_rate": 0.00022467343976777937,
"loss": 6.9655,
"step": 677
},
{
"epoch": 0.57,
"learning_rate": 0.00022423802612481857,
"loss": 6.9544,
"step": 678
},
{
"epoch": 0.57,
"learning_rate": 0.00022380261248185776,
"loss": 6.8098,
"step": 679
},
{
"epoch": 0.57,
"learning_rate": 0.00022336719883889693,
"loss": 6.9409,
"step": 680
},
{
"epoch": 0.57,
"learning_rate": 0.00022293178519593613,
"loss": 6.5972,
"step": 681
},
{
"epoch": 0.57,
"learning_rate": 0.00022249637155297532,
"loss": 6.7181,
"step": 682
},
{
"epoch": 0.57,
"learning_rate": 0.0002220609579100145,
"loss": 6.7576,
"step": 683
},
{
"epoch": 0.58,
"learning_rate": 0.0002216255442670537,
"loss": 6.6885,
"step": 684
},
{
"epoch": 0.58,
"learning_rate": 0.00022119013062409288,
"loss": 6.7601,
"step": 685
},
{
"epoch": 0.58,
"learning_rate": 0.00022075471698113205,
"loss": 6.7339,
"step": 686
},
{
"epoch": 0.58,
"learning_rate": 0.00022031930333817125,
"loss": 6.8451,
"step": 687
},
{
"epoch": 0.58,
"learning_rate": 0.00021988388969521044,
"loss": 6.7911,
"step": 688
},
{
"epoch": 0.58,
"learning_rate": 0.00021944847605224964,
"loss": 6.9605,
"step": 689
},
{
"epoch": 0.58,
"learning_rate": 0.0002190130624092888,
"loss": 6.7434,
"step": 690
},
{
"epoch": 0.58,
"learning_rate": 0.000218577648766328,
"loss": 6.9479,
"step": 691
},
{
"epoch": 0.58,
"learning_rate": 0.0002181422351233672,
"loss": 6.7066,
"step": 692
},
{
"epoch": 0.58,
"learning_rate": 0.00021770682148040637,
"loss": 6.7052,
"step": 693
},
{
"epoch": 0.58,
"learning_rate": 0.00021727140783744556,
"loss": 6.8302,
"step": 694
},
{
"epoch": 0.58,
"learning_rate": 0.00021683599419448476,
"loss": 6.7822,
"step": 695
},
{
"epoch": 0.59,
"learning_rate": 0.00021640058055152393,
"loss": 6.4113,
"step": 696
},
{
"epoch": 0.59,
"learning_rate": 0.00021596516690856313,
"loss": 6.8164,
"step": 697
},
{
"epoch": 0.59,
"learning_rate": 0.00021552975326560232,
"loss": 6.7107,
"step": 698
},
{
"epoch": 0.59,
"learning_rate": 0.00021509433962264146,
"loss": 6.2754,
"step": 699
},
{
"epoch": 0.59,
"learning_rate": 0.00021465892597968069,
"loss": 6.5328,
"step": 700
},
{
"epoch": 0.59,
"learning_rate": 0.00021422351233671988,
"loss": 7.0301,
"step": 701
},
{
"epoch": 0.59,
"learning_rate": 0.00021378809869375908,
"loss": 6.795,
"step": 702
},
{
"epoch": 0.59,
"learning_rate": 0.00021335268505079825,
"loss": 6.9513,
"step": 703
},
{
"epoch": 0.59,
"learning_rate": 0.00021291727140783744,
"loss": 6.7841,
"step": 704
},
{
"epoch": 0.59,
"learning_rate": 0.00021248185776487664,
"loss": 6.8209,
"step": 705
},
{
"epoch": 0.59,
"learning_rate": 0.00021204644412191578,
"loss": 6.9158,
"step": 706
},
{
"epoch": 0.59,
"learning_rate": 0.000211611030478955,
"loss": 6.8196,
"step": 707
},
{
"epoch": 0.6,
"learning_rate": 0.0002111756168359942,
"loss": 6.7451,
"step": 708
},
{
"epoch": 0.6,
"learning_rate": 0.00021074020319303334,
"loss": 6.8313,
"step": 709
},
{
"epoch": 0.6,
"learning_rate": 0.00021030478955007256,
"loss": 6.9144,
"step": 710
},
{
"epoch": 0.6,
"learning_rate": 0.00020986937590711176,
"loss": 6.6823,
"step": 711
},
{
"epoch": 0.6,
"learning_rate": 0.0002094339622641509,
"loss": 7.0659,
"step": 712
},
{
"epoch": 0.6,
"learning_rate": 0.0002089985486211901,
"loss": 6.8574,
"step": 713
},
{
"epoch": 0.6,
"learning_rate": 0.00020856313497822932,
"loss": 6.8299,
"step": 714
},
{
"epoch": 0.6,
"learning_rate": 0.00020812772133526846,
"loss": 6.7282,
"step": 715
},
{
"epoch": 0.6,
"learning_rate": 0.00020769230769230766,
"loss": 6.7405,
"step": 716
},
{
"epoch": 0.6,
"learning_rate": 0.00020725689404934685,
"loss": 6.806,
"step": 717
},
{
"epoch": 0.6,
"learning_rate": 0.00020682148040638608,
"loss": 6.8661,
"step": 718
},
{
"epoch": 0.6,
"learning_rate": 0.00020638606676342522,
"loss": 6.8123,
"step": 719
},
{
"epoch": 0.61,
"learning_rate": 0.00020595065312046441,
"loss": 6.7411,
"step": 720
},
{
"epoch": 0.61,
"learning_rate": 0.00020551523947750364,
"loss": 6.6403,
"step": 721
},
{
"epoch": 0.61,
"learning_rate": 0.00020507982583454278,
"loss": 6.6972,
"step": 722
},
{
"epoch": 0.61,
"learning_rate": 0.00020464441219158197,
"loss": 6.75,
"step": 723
},
{
"epoch": 0.61,
"learning_rate": 0.00020420899854862117,
"loss": 6.6767,
"step": 724
},
{
"epoch": 0.61,
"learning_rate": 0.00020377358490566034,
"loss": 6.8202,
"step": 725
},
{
"epoch": 0.61,
"learning_rate": 0.00020333817126269953,
"loss": 6.724,
"step": 726
},
{
"epoch": 0.61,
"learning_rate": 0.00020290275761973873,
"loss": 6.7514,
"step": 727
},
{
"epoch": 0.61,
"learning_rate": 0.0002024673439767779,
"loss": 6.8455,
"step": 728
},
{
"epoch": 0.61,
"learning_rate": 0.0002020319303338171,
"loss": 6.761,
"step": 729
},
{
"epoch": 0.61,
"learning_rate": 0.0002015965166908563,
"loss": 6.8119,
"step": 730
},
{
"epoch": 0.61,
"learning_rate": 0.00020116110304789546,
"loss": 6.7588,
"step": 731
},
{
"epoch": 0.62,
"learning_rate": 0.00020072568940493466,
"loss": 6.795,
"step": 732
},
{
"epoch": 0.62,
"learning_rate": 0.00020029027576197385,
"loss": 6.8161,
"step": 733
},
{
"epoch": 0.62,
"learning_rate": 0.00019985486211901305,
"loss": 6.7052,
"step": 734
},
{
"epoch": 0.62,
"learning_rate": 0.00019941944847605222,
"loss": 6.814,
"step": 735
},
{
"epoch": 0.62,
"learning_rate": 0.0001989840348330914,
"loss": 6.7445,
"step": 736
},
{
"epoch": 0.62,
"learning_rate": 0.0001985486211901306,
"loss": 6.9489,
"step": 737
},
{
"epoch": 0.62,
"learning_rate": 0.00019811320754716978,
"loss": 6.8612,
"step": 738
},
{
"epoch": 0.62,
"learning_rate": 0.00019767779390420897,
"loss": 6.837,
"step": 739
},
{
"epoch": 0.62,
"learning_rate": 0.00019724238026124817,
"loss": 6.7348,
"step": 740
},
{
"epoch": 0.62,
"learning_rate": 0.00019680696661828734,
"loss": 6.8755,
"step": 741
},
{
"epoch": 0.62,
"learning_rate": 0.00019637155297532653,
"loss": 6.8379,
"step": 742
},
{
"epoch": 0.62,
"learning_rate": 0.00019593613933236573,
"loss": 6.8679,
"step": 743
},
{
"epoch": 0.63,
"learning_rate": 0.0001955007256894049,
"loss": 6.6225,
"step": 744
},
{
"epoch": 0.63,
"learning_rate": 0.0001950653120464441,
"loss": 6.5989,
"step": 745
},
{
"epoch": 0.63,
"learning_rate": 0.0001946298984034833,
"loss": 6.5353,
"step": 746
},
{
"epoch": 0.63,
"learning_rate": 0.00019419448476052249,
"loss": 6.6957,
"step": 747
},
{
"epoch": 0.63,
"learning_rate": 0.00019375907111756165,
"loss": 6.836,
"step": 748
},
{
"epoch": 0.63,
"learning_rate": 0.00019332365747460085,
"loss": 6.3925,
"step": 749
},
{
"epoch": 0.63,
"learning_rate": 0.00019288824383164005,
"loss": 5.9944,
"step": 750
},
{
"epoch": 0.63,
"learning_rate": 0.00019245283018867922,
"loss": 6.8929,
"step": 751
},
{
"epoch": 0.63,
"learning_rate": 0.0001920174165457184,
"loss": 7.029,
"step": 752
},
{
"epoch": 0.63,
"learning_rate": 0.0001915820029027576,
"loss": 6.7746,
"step": 753
},
{
"epoch": 0.63,
"learning_rate": 0.00019114658925979678,
"loss": 6.7398,
"step": 754
},
{
"epoch": 0.63,
"learning_rate": 0.00019071117561683597,
"loss": 6.9454,
"step": 755
},
{
"epoch": 0.64,
"learning_rate": 0.00019027576197387517,
"loss": 6.8805,
"step": 756
},
{
"epoch": 0.64,
"learning_rate": 0.00018984034833091434,
"loss": 6.8015,
"step": 757
},
{
"epoch": 0.64,
"learning_rate": 0.00018940493468795353,
"loss": 6.9505,
"step": 758
},
{
"epoch": 0.64,
"learning_rate": 0.00018896952104499273,
"loss": 6.701,
"step": 759
},
{
"epoch": 0.64,
"learning_rate": 0.0001885341074020319,
"loss": 6.8635,
"step": 760
},
{
"epoch": 0.64,
"learning_rate": 0.0001880986937590711,
"loss": 6.7672,
"step": 761
},
{
"epoch": 0.64,
"learning_rate": 0.0001876632801161103,
"loss": 6.6565,
"step": 762
},
{
"epoch": 0.64,
"learning_rate": 0.00018722786647314948,
"loss": 6.7407,
"step": 763
},
{
"epoch": 0.64,
"learning_rate": 0.00018679245283018865,
"loss": 6.6525,
"step": 764
},
{
"epoch": 0.64,
"learning_rate": 0.00018635703918722785,
"loss": 7.0426,
"step": 765
},
{
"epoch": 0.64,
"learning_rate": 0.00018592162554426704,
"loss": 6.8255,
"step": 766
},
{
"epoch": 0.64,
"learning_rate": 0.00018548621190130621,
"loss": 6.8092,
"step": 767
},
{
"epoch": 0.65,
"learning_rate": 0.0001850507982583454,
"loss": 6.6206,
"step": 768
},
{
"epoch": 0.65,
"learning_rate": 0.0001846153846153846,
"loss": 6.7722,
"step": 769
},
{
"epoch": 0.65,
"learning_rate": 0.00018417997097242377,
"loss": 6.8224,
"step": 770
},
{
"epoch": 0.65,
"learning_rate": 0.00018374455732946297,
"loss": 6.7581,
"step": 771
},
{
"epoch": 0.65,
"learning_rate": 0.00018330914368650217,
"loss": 6.892,
"step": 772
},
{
"epoch": 0.65,
"learning_rate": 0.00018287373004354133,
"loss": 6.717,
"step": 773
},
{
"epoch": 0.65,
"learning_rate": 0.00018243831640058053,
"loss": 6.593,
"step": 774
},
{
"epoch": 0.65,
"learning_rate": 0.00018200290275761973,
"loss": 6.851,
"step": 775
},
{
"epoch": 0.65,
"learning_rate": 0.00018156748911465892,
"loss": 6.7269,
"step": 776
},
{
"epoch": 0.65,
"learning_rate": 0.0001811320754716981,
"loss": 6.7105,
"step": 777
},
{
"epoch": 0.65,
"learning_rate": 0.0001806966618287373,
"loss": 6.7603,
"step": 778
},
{
"epoch": 0.65,
"learning_rate": 0.00018026124818577648,
"loss": 6.7945,
"step": 779
},
{
"epoch": 0.66,
"learning_rate": 0.00017982583454281565,
"loss": 6.9823,
"step": 780
},
{
"epoch": 0.66,
"learning_rate": 0.00017939042089985485,
"loss": 6.8197,
"step": 781
},
{
"epoch": 0.66,
"learning_rate": 0.00017895500725689404,
"loss": 6.6885,
"step": 782
},
{
"epoch": 0.66,
"learning_rate": 0.0001785195936139332,
"loss": 6.6007,
"step": 783
},
{
"epoch": 0.66,
"learning_rate": 0.0001780841799709724,
"loss": 6.7959,
"step": 784
},
{
"epoch": 0.66,
"learning_rate": 0.0001776487663280116,
"loss": 6.8707,
"step": 785
},
{
"epoch": 0.66,
"learning_rate": 0.00017721335268505077,
"loss": 6.6443,
"step": 786
},
{
"epoch": 0.66,
"learning_rate": 0.00017677793904208997,
"loss": 6.8777,
"step": 787
},
{
"epoch": 0.66,
"learning_rate": 0.00017634252539912916,
"loss": 6.7187,
"step": 788
},
{
"epoch": 0.66,
"learning_rate": 0.00017590711175616833,
"loss": 6.7757,
"step": 789
},
{
"epoch": 0.66,
"learning_rate": 0.00017547169811320753,
"loss": 6.6934,
"step": 790
},
{
"epoch": 0.66,
"learning_rate": 0.00017503628447024673,
"loss": 6.8231,
"step": 791
},
{
"epoch": 0.67,
"learning_rate": 0.00017460087082728592,
"loss": 6.7268,
"step": 792
},
{
"epoch": 0.67,
"learning_rate": 0.0001741654571843251,
"loss": 6.6948,
"step": 793
},
{
"epoch": 0.67,
"learning_rate": 0.00017373004354136429,
"loss": 6.548,
"step": 794
},
{
"epoch": 0.67,
"learning_rate": 0.00017329462989840348,
"loss": 6.5613,
"step": 795
},
{
"epoch": 0.67,
"learning_rate": 0.00017285921625544265,
"loss": 6.4696,
"step": 796
},
{
"epoch": 0.67,
"learning_rate": 0.00017242380261248185,
"loss": 6.4884,
"step": 797
},
{
"epoch": 0.67,
"learning_rate": 0.00017198838896952104,
"loss": 6.4172,
"step": 798
},
{
"epoch": 0.67,
"learning_rate": 0.0001715529753265602,
"loss": 6.3739,
"step": 799
},
{
"epoch": 0.67,
"learning_rate": 0.0001711175616835994,
"loss": 6.0278,
"step": 800
},
{
"epoch": 0.67,
"learning_rate": 0.0001706821480406386,
"loss": 6.8293,
"step": 801
},
{
"epoch": 0.67,
"learning_rate": 0.00017024673439767777,
"loss": 6.6272,
"step": 802
},
{
"epoch": 0.68,
"learning_rate": 0.00016981132075471697,
"loss": 6.7779,
"step": 803
},
{
"epoch": 0.68,
"learning_rate": 0.00016937590711175616,
"loss": 6.8186,
"step": 804
},
{
"epoch": 0.68,
"learning_rate": 0.00016894049346879536,
"loss": 6.8187,
"step": 805
},
{
"epoch": 0.68,
"learning_rate": 0.00016850507982583453,
"loss": 6.7101,
"step": 806
},
{
"epoch": 0.68,
"learning_rate": 0.00016806966618287372,
"loss": 6.7569,
"step": 807
},
{
"epoch": 0.68,
"learning_rate": 0.00016763425253991292,
"loss": 6.7976,
"step": 808
},
{
"epoch": 0.68,
"learning_rate": 0.0001671988388969521,
"loss": 6.7636,
"step": 809
},
{
"epoch": 0.68,
"learning_rate": 0.00016676342525399128,
"loss": 6.7901,
"step": 810
},
{
"epoch": 0.68,
"learning_rate": 0.00016632801161103048,
"loss": 6.6903,
"step": 811
},
{
"epoch": 0.68,
"learning_rate": 0.00016589259796806965,
"loss": 6.7479,
"step": 812
},
{
"epoch": 0.68,
"learning_rate": 0.00016545718432510884,
"loss": 6.6714,
"step": 813
},
{
"epoch": 0.68,
"learning_rate": 0.00016502177068214804,
"loss": 6.9813,
"step": 814
},
{
"epoch": 0.69,
"learning_rate": 0.0001645863570391872,
"loss": 6.9063,
"step": 815
},
{
"epoch": 0.69,
"learning_rate": 0.0001641509433962264,
"loss": 6.6002,
"step": 816
},
{
"epoch": 0.69,
"learning_rate": 0.0001637155297532656,
"loss": 6.8063,
"step": 817
},
{
"epoch": 0.69,
"learning_rate": 0.00016328011611030477,
"loss": 6.5225,
"step": 818
},
{
"epoch": 0.69,
"learning_rate": 0.00016284470246734397,
"loss": 6.7463,
"step": 819
},
{
"epoch": 0.69,
"learning_rate": 0.00016240928882438316,
"loss": 6.7756,
"step": 820
},
{
"epoch": 0.69,
"learning_rate": 0.00016197387518142236,
"loss": 6.7644,
"step": 821
},
{
"epoch": 0.69,
"learning_rate": 0.00016153846153846153,
"loss": 6.6968,
"step": 822
},
{
"epoch": 0.69,
"learning_rate": 0.00016110304789550072,
"loss": 6.7608,
"step": 823
},
{
"epoch": 0.69,
"learning_rate": 0.00016066763425253992,
"loss": 6.8758,
"step": 824
},
{
"epoch": 0.69,
"learning_rate": 0.0001602322206095791,
"loss": 6.7152,
"step": 825
},
{
"epoch": 0.69,
"learning_rate": 0.00015979680696661828,
"loss": 6.7612,
"step": 826
},
{
"epoch": 0.7,
"learning_rate": 0.00015936139332365748,
"loss": 6.8222,
"step": 827
},
{
"epoch": 0.7,
"learning_rate": 0.00015892597968069665,
"loss": 6.8338,
"step": 828
},
{
"epoch": 0.7,
"learning_rate": 0.00015849056603773584,
"loss": 6.762,
"step": 829
},
{
"epoch": 0.7,
"learning_rate": 0.00015805515239477504,
"loss": 6.9651,
"step": 830
},
{
"epoch": 0.7,
"learning_rate": 0.0001576197387518142,
"loss": 6.7731,
"step": 831
},
{
"epoch": 0.7,
"learning_rate": 0.0001571843251088534,
"loss": 6.5439,
"step": 832
},
{
"epoch": 0.7,
"learning_rate": 0.0001567489114658926,
"loss": 6.6829,
"step": 833
},
{
"epoch": 0.7,
"learning_rate": 0.0001563134978229318,
"loss": 6.7925,
"step": 834
},
{
"epoch": 0.7,
"learning_rate": 0.00015587808417997096,
"loss": 6.8057,
"step": 835
},
{
"epoch": 0.7,
"learning_rate": 0.00015544267053701016,
"loss": 6.8506,
"step": 836
},
{
"epoch": 0.7,
"learning_rate": 0.00015500725689404936,
"loss": 6.7867,
"step": 837
},
{
"epoch": 0.7,
"learning_rate": 0.00015457184325108852,
"loss": 6.4587,
"step": 838
},
{
"epoch": 0.71,
"learning_rate": 0.00015413642960812772,
"loss": 6.9501,
"step": 839
},
{
"epoch": 0.71,
"learning_rate": 0.00015370101596516692,
"loss": 6.6549,
"step": 840
},
{
"epoch": 0.71,
"learning_rate": 0.00015326560232220606,
"loss": 6.7495,
"step": 841
},
{
"epoch": 0.71,
"learning_rate": 0.00015283018867924528,
"loss": 6.6763,
"step": 842
},
{
"epoch": 0.71,
"learning_rate": 0.00015239477503628448,
"loss": 6.944,
"step": 843
},
{
"epoch": 0.71,
"learning_rate": 0.00015195936139332362,
"loss": 6.5565,
"step": 844
},
{
"epoch": 0.71,
"learning_rate": 0.00015152394775036284,
"loss": 6.6403,
"step": 845
},
{
"epoch": 0.71,
"learning_rate": 0.00015108853410740204,
"loss": 6.6138,
"step": 846
},
{
"epoch": 0.71,
"learning_rate": 0.00015065312046444118,
"loss": 6.5652,
"step": 847
},
{
"epoch": 0.71,
"learning_rate": 0.00015021770682148038,
"loss": 6.2956,
"step": 848
},
{
"epoch": 0.71,
"learning_rate": 0.0001497822931785196,
"loss": 6.6097,
"step": 849
},
{
"epoch": 0.71,
"learning_rate": 0.00014934687953555877,
"loss": 6.4595,
"step": 850
},
{
"epoch": 0.72,
"learning_rate": 0.00014891146589259794,
"loss": 7.0019,
"step": 851
},
{
"epoch": 0.72,
"learning_rate": 0.00014847605224963713,
"loss": 6.7696,
"step": 852
},
{
"epoch": 0.72,
"learning_rate": 0.00014804063860667633,
"loss": 6.774,
"step": 853
},
{
"epoch": 0.72,
"learning_rate": 0.00014760522496371552,
"loss": 6.8851,
"step": 854
},
{
"epoch": 0.72,
"learning_rate": 0.0001471698113207547,
"loss": 6.7991,
"step": 855
},
{
"epoch": 0.72,
"learning_rate": 0.0001467343976777939,
"loss": 6.8332,
"step": 856
},
{
"epoch": 0.72,
"learning_rate": 0.00014629898403483308,
"loss": 6.669,
"step": 857
},
{
"epoch": 0.72,
"learning_rate": 0.00014586357039187225,
"loss": 6.7986,
"step": 858
},
{
"epoch": 0.72,
"learning_rate": 0.00014542815674891145,
"loss": 6.6709,
"step": 859
},
{
"epoch": 0.72,
"learning_rate": 0.00014499274310595064,
"loss": 6.7066,
"step": 860
},
{
"epoch": 0.72,
"learning_rate": 0.0001445573294629898,
"loss": 6.744,
"step": 861
},
{
"epoch": 0.72,
"learning_rate": 0.000144121915820029,
"loss": 6.7785,
"step": 862
},
{
"epoch": 0.73,
"learning_rate": 0.0001436865021770682,
"loss": 6.6285,
"step": 863
},
{
"epoch": 0.73,
"learning_rate": 0.00014325108853410737,
"loss": 6.8053,
"step": 864
},
{
"epoch": 0.73,
"learning_rate": 0.00014281567489114657,
"loss": 6.8246,
"step": 865
},
{
"epoch": 0.73,
"learning_rate": 0.00014238026124818577,
"loss": 6.6805,
"step": 866
},
{
"epoch": 0.73,
"learning_rate": 0.00014194484760522496,
"loss": 6.6297,
"step": 867
},
{
"epoch": 0.73,
"learning_rate": 0.00014150943396226413,
"loss": 6.6603,
"step": 868
},
{
"epoch": 0.73,
"learning_rate": 0.00014107402031930333,
"loss": 6.6392,
"step": 869
},
{
"epoch": 0.73,
"learning_rate": 0.00014063860667634252,
"loss": 6.5813,
"step": 870
},
{
"epoch": 0.73,
"learning_rate": 0.0001402031930333817,
"loss": 6.6803,
"step": 871
},
{
"epoch": 0.73,
"learning_rate": 0.0001397677793904209,
"loss": 6.5655,
"step": 872
},
{
"epoch": 0.73,
"learning_rate": 0.00013933236574746008,
"loss": 6.8468,
"step": 873
},
{
"epoch": 0.73,
"learning_rate": 0.00013889695210449925,
"loss": 6.7848,
"step": 874
},
{
"epoch": 0.74,
"learning_rate": 0.00013846153846153845,
"loss": 6.82,
"step": 875
},
{
"epoch": 0.74,
"learning_rate": 0.00013802612481857764,
"loss": 6.599,
"step": 876
},
{
"epoch": 0.74,
"learning_rate": 0.0001375907111756168,
"loss": 6.6633,
"step": 877
},
{
"epoch": 0.74,
"learning_rate": 0.000137155297532656,
"loss": 6.8072,
"step": 878
},
{
"epoch": 0.74,
"learning_rate": 0.0001367198838896952,
"loss": 6.6585,
"step": 879
},
{
"epoch": 0.74,
"learning_rate": 0.00013628447024673437,
"loss": 6.8383,
"step": 880
},
{
"epoch": 0.74,
"learning_rate": 0.00013584905660377357,
"loss": 6.6623,
"step": 881
},
{
"epoch": 0.74,
"learning_rate": 0.00013541364296081276,
"loss": 6.769,
"step": 882
},
{
"epoch": 0.74,
"learning_rate": 0.00013497822931785196,
"loss": 6.7631,
"step": 883
},
{
"epoch": 0.74,
"learning_rate": 0.00013454281567489113,
"loss": 6.7775,
"step": 884
},
{
"epoch": 0.74,
"learning_rate": 0.00013410740203193032,
"loss": 6.6173,
"step": 885
},
{
"epoch": 0.74,
"learning_rate": 0.00013367198838896952,
"loss": 6.6929,
"step": 886
},
{
"epoch": 0.75,
"learning_rate": 0.0001332365747460087,
"loss": 6.6906,
"step": 887
},
{
"epoch": 0.75,
"learning_rate": 0.00013280116110304789,
"loss": 6.7141,
"step": 888
},
{
"epoch": 0.75,
"learning_rate": 0.00013236574746008708,
"loss": 6.7914,
"step": 889
},
{
"epoch": 0.75,
"learning_rate": 0.00013193033381712625,
"loss": 6.6043,
"step": 890
},
{
"epoch": 0.75,
"learning_rate": 0.00013149492017416545,
"loss": 6.8003,
"step": 891
},
{
"epoch": 0.75,
"learning_rate": 0.00013105950653120464,
"loss": 6.7134,
"step": 892
},
{
"epoch": 0.75,
"learning_rate": 0.0001306240928882438,
"loss": 6.5845,
"step": 893
},
{
"epoch": 0.75,
"learning_rate": 0.000130188679245283,
"loss": 6.5482,
"step": 894
},
{
"epoch": 0.75,
"learning_rate": 0.0001297532656023222,
"loss": 6.6262,
"step": 895
},
{
"epoch": 0.75,
"learning_rate": 0.00012931785195936137,
"loss": 6.5822,
"step": 896
},
{
"epoch": 0.75,
"learning_rate": 0.00012888243831640057,
"loss": 6.3595,
"step": 897
},
{
"epoch": 0.75,
"learning_rate": 0.00012844702467343976,
"loss": 6.1957,
"step": 898
},
{
"epoch": 0.76,
"learning_rate": 0.00012801161103047896,
"loss": 6.3524,
"step": 899
},
{
"epoch": 0.76,
"learning_rate": 0.00012757619738751813,
"loss": 5.8456,
"step": 900
},
{
"epoch": 0.76,
"learning_rate": 0.00012714078374455732,
"loss": 6.8956,
"step": 901
},
{
"epoch": 0.76,
"learning_rate": 0.00012670537010159652,
"loss": 6.8253,
"step": 902
},
{
"epoch": 0.76,
"learning_rate": 0.0001262699564586357,
"loss": 6.8014,
"step": 903
},
{
"epoch": 0.76,
"learning_rate": 0.00012583454281567488,
"loss": 6.614,
"step": 904
},
{
"epoch": 0.76,
"learning_rate": 0.00012539912917271408,
"loss": 6.7152,
"step": 905
},
{
"epoch": 0.76,
"learning_rate": 0.00012496371552975325,
"loss": 6.7951,
"step": 906
},
{
"epoch": 0.76,
"learning_rate": 0.00012452830188679244,
"loss": 6.706,
"step": 907
},
{
"epoch": 0.76,
"learning_rate": 0.00012409288824383164,
"loss": 7.0412,
"step": 908
},
{
"epoch": 0.76,
"learning_rate": 0.0001236574746008708,
"loss": 6.7611,
"step": 909
},
{
"epoch": 0.77,
"learning_rate": 0.00012322206095791,
"loss": 6.785,
"step": 910
},
{
"epoch": 0.77,
"learning_rate": 0.0001227866473149492,
"loss": 6.7113,
"step": 911
},
{
"epoch": 0.77,
"learning_rate": 0.0001223512336719884,
"loss": 6.7938,
"step": 912
},
{
"epoch": 0.77,
"learning_rate": 0.00012191582002902757,
"loss": 6.7073,
"step": 913
},
{
"epoch": 0.77,
"learning_rate": 0.00012148040638606675,
"loss": 6.6513,
"step": 914
},
{
"epoch": 0.77,
"learning_rate": 0.00012104499274310594,
"loss": 6.7098,
"step": 915
},
{
"epoch": 0.77,
"learning_rate": 0.00012060957910014513,
"loss": 6.7984,
"step": 916
},
{
"epoch": 0.77,
"learning_rate": 0.00012017416545718431,
"loss": 6.7216,
"step": 917
},
{
"epoch": 0.77,
"learning_rate": 0.0001197387518142235,
"loss": 6.7223,
"step": 918
},
{
"epoch": 0.77,
"learning_rate": 0.00011930333817126269,
"loss": 6.8197,
"step": 919
},
{
"epoch": 0.77,
"learning_rate": 0.00011886792452830188,
"loss": 6.7176,
"step": 920
},
{
"epoch": 0.77,
"learning_rate": 0.00011843251088534106,
"loss": 6.6393,
"step": 921
},
{
"epoch": 0.78,
"learning_rate": 0.00011799709724238025,
"loss": 6.853,
"step": 922
},
{
"epoch": 0.78,
"learning_rate": 0.00011756168359941944,
"loss": 6.7337,
"step": 923
},
{
"epoch": 0.78,
"learning_rate": 0.00011712626995645863,
"loss": 6.779,
"step": 924
},
{
"epoch": 0.78,
"learning_rate": 0.00011669085631349781,
"loss": 6.6274,
"step": 925
},
{
"epoch": 0.78,
"learning_rate": 0.000116255442670537,
"loss": 6.6959,
"step": 926
},
{
"epoch": 0.78,
"learning_rate": 0.00011582002902757619,
"loss": 6.8175,
"step": 927
},
{
"epoch": 0.78,
"learning_rate": 0.00011538461538461538,
"loss": 6.7653,
"step": 928
},
{
"epoch": 0.78,
"learning_rate": 0.00011494920174165456,
"loss": 6.6164,
"step": 929
},
{
"epoch": 0.78,
"learning_rate": 0.00011451378809869375,
"loss": 6.677,
"step": 930
},
{
"epoch": 0.78,
"learning_rate": 0.00011407837445573294,
"loss": 6.7269,
"step": 931
},
{
"epoch": 0.78,
"learning_rate": 0.00011364296081277212,
"loss": 6.7803,
"step": 932
},
{
"epoch": 0.78,
"learning_rate": 0.00011320754716981132,
"loss": 6.8275,
"step": 933
},
{
"epoch": 0.79,
"learning_rate": 0.0001127721335268505,
"loss": 6.7024,
"step": 934
},
{
"epoch": 0.79,
"learning_rate": 0.00011233671988388969,
"loss": 6.6885,
"step": 935
},
{
"epoch": 0.79,
"learning_rate": 0.00011190130624092888,
"loss": 6.6577,
"step": 936
},
{
"epoch": 0.79,
"learning_rate": 0.00011146589259796806,
"loss": 6.712,
"step": 937
},
{
"epoch": 0.79,
"learning_rate": 0.00011103047895500725,
"loss": 6.8692,
"step": 938
},
{
"epoch": 0.79,
"learning_rate": 0.00011059506531204644,
"loss": 6.7184,
"step": 939
},
{
"epoch": 0.79,
"learning_rate": 0.00011015965166908562,
"loss": 6.6514,
"step": 940
},
{
"epoch": 0.79,
"learning_rate": 0.00010972423802612482,
"loss": 6.6002,
"step": 941
},
{
"epoch": 0.79,
"learning_rate": 0.000109288824383164,
"loss": 6.714,
"step": 942
},
{
"epoch": 0.79,
"learning_rate": 0.00010885341074020318,
"loss": 6.3859,
"step": 943
},
{
"epoch": 0.79,
"learning_rate": 0.00010841799709724238,
"loss": 6.3666,
"step": 944
},
{
"epoch": 0.79,
"learning_rate": 0.00010798258345428156,
"loss": 6.7387,
"step": 945
},
{
"epoch": 0.8,
"learning_rate": 0.00010754716981132073,
"loss": 6.5286,
"step": 946
},
{
"epoch": 0.8,
"learning_rate": 0.00010711175616835994,
"loss": 6.3192,
"step": 947
},
{
"epoch": 0.8,
"learning_rate": 0.00010667634252539912,
"loss": 6.2644,
"step": 948
},
{
"epoch": 0.8,
"learning_rate": 0.00010624092888243832,
"loss": 6.1977,
"step": 949
},
{
"epoch": 0.8,
"learning_rate": 0.0001058055152394775,
"loss": 6.0231,
"step": 950
},
{
"epoch": 0.8,
"learning_rate": 0.00010537010159651667,
"loss": 6.8315,
"step": 951
},
{
"epoch": 0.8,
"learning_rate": 0.00010493468795355588,
"loss": 6.8403,
"step": 952
},
{
"epoch": 0.8,
"learning_rate": 0.00010449927431059505,
"loss": 6.7027,
"step": 953
},
{
"epoch": 0.8,
"learning_rate": 0.00010406386066763423,
"loss": 6.5141,
"step": 954
},
{
"epoch": 0.8,
"learning_rate": 0.00010362844702467343,
"loss": 6.7009,
"step": 955
},
{
"epoch": 0.8,
"learning_rate": 0.00010319303338171261,
"loss": 6.8507,
"step": 956
},
{
"epoch": 0.8,
"learning_rate": 0.00010275761973875182,
"loss": 6.745,
"step": 957
},
{
"epoch": 0.81,
"learning_rate": 0.00010232220609579099,
"loss": 6.8369,
"step": 958
},
{
"epoch": 0.81,
"learning_rate": 0.00010188679245283017,
"loss": 6.7382,
"step": 959
},
{
"epoch": 0.81,
"learning_rate": 0.00010145137880986937,
"loss": 6.6531,
"step": 960
},
{
"epoch": 0.81,
"learning_rate": 0.00010101596516690855,
"loss": 6.6775,
"step": 961
},
{
"epoch": 0.81,
"learning_rate": 0.00010058055152394773,
"loss": 6.654,
"step": 962
},
{
"epoch": 0.81,
"learning_rate": 0.00010014513788098693,
"loss": 6.5752,
"step": 963
},
{
"epoch": 0.81,
"learning_rate": 9.970972423802611e-05,
"loss": 6.6846,
"step": 964
},
{
"epoch": 0.81,
"learning_rate": 9.92743105950653e-05,
"loss": 6.7512,
"step": 965
},
{
"epoch": 0.81,
"learning_rate": 9.883889695210449e-05,
"loss": 6.7307,
"step": 966
},
{
"epoch": 0.81,
"learning_rate": 9.840348330914367e-05,
"loss": 6.8927,
"step": 967
},
{
"epoch": 0.81,
"learning_rate": 9.796806966618286e-05,
"loss": 6.7615,
"step": 968
},
{
"epoch": 0.81,
"learning_rate": 9.753265602322205e-05,
"loss": 6.4604,
"step": 969
},
{
"epoch": 0.82,
"learning_rate": 9.709724238026124e-05,
"loss": 6.7544,
"step": 970
},
{
"epoch": 0.82,
"learning_rate": 9.666182873730043e-05,
"loss": 6.5679,
"step": 971
},
{
"epoch": 0.82,
"learning_rate": 9.622641509433961e-05,
"loss": 6.5403,
"step": 972
},
{
"epoch": 0.82,
"learning_rate": 9.57910014513788e-05,
"loss": 6.6384,
"step": 973
},
{
"epoch": 0.82,
"learning_rate": 9.535558780841799e-05,
"loss": 6.7412,
"step": 974
},
{
"epoch": 0.82,
"learning_rate": 9.492017416545717e-05,
"loss": 6.7564,
"step": 975
},
{
"epoch": 0.82,
"learning_rate": 9.448476052249636e-05,
"loss": 6.7782,
"step": 976
},
{
"epoch": 0.82,
"learning_rate": 9.404934687953555e-05,
"loss": 6.6239,
"step": 977
},
{
"epoch": 0.82,
"learning_rate": 9.361393323657474e-05,
"loss": 6.632,
"step": 978
},
{
"epoch": 0.82,
"learning_rate": 9.317851959361392e-05,
"loss": 6.6647,
"step": 979
},
{
"epoch": 0.82,
"learning_rate": 9.274310595065311e-05,
"loss": 6.7482,
"step": 980
},
{
"epoch": 0.82,
"learning_rate": 9.23076923076923e-05,
"loss": 6.9126,
"step": 981
},
{
"epoch": 0.83,
"learning_rate": 9.187227866473149e-05,
"loss": 6.7464,
"step": 982
},
{
"epoch": 0.83,
"learning_rate": 9.143686502177067e-05,
"loss": 6.7075,
"step": 983
},
{
"epoch": 0.83,
"learning_rate": 9.100145137880986e-05,
"loss": 6.9183,
"step": 984
},
{
"epoch": 0.83,
"learning_rate": 9.056603773584905e-05,
"loss": 6.5806,
"step": 985
},
{
"epoch": 0.83,
"learning_rate": 9.013062409288824e-05,
"loss": 6.5799,
"step": 986
},
{
"epoch": 0.83,
"learning_rate": 8.969521044992742e-05,
"loss": 6.7535,
"step": 987
},
{
"epoch": 0.83,
"learning_rate": 8.92597968069666e-05,
"loss": 6.6896,
"step": 988
},
{
"epoch": 0.83,
"learning_rate": 8.88243831640058e-05,
"loss": 6.654,
"step": 989
},
{
"epoch": 0.83,
"learning_rate": 8.838896952104498e-05,
"loss": 6.574,
"step": 990
},
{
"epoch": 0.83,
"learning_rate": 8.795355587808417e-05,
"loss": 6.6576,
"step": 991
},
{
"epoch": 0.83,
"learning_rate": 8.751814223512336e-05,
"loss": 6.6829,
"step": 992
},
{
"epoch": 0.83,
"learning_rate": 8.708272859216254e-05,
"loss": 6.5027,
"step": 993
},
{
"epoch": 0.84,
"learning_rate": 8.664731494920174e-05,
"loss": 6.767,
"step": 994
},
{
"epoch": 0.84,
"learning_rate": 8.621190130624092e-05,
"loss": 6.6885,
"step": 995
},
{
"epoch": 0.84,
"learning_rate": 8.57764876632801e-05,
"loss": 6.8854,
"step": 996
},
{
"epoch": 0.84,
"learning_rate": 8.53410740203193e-05,
"loss": 6.5446,
"step": 997
},
{
"epoch": 0.84,
"learning_rate": 8.490566037735848e-05,
"loss": 6.4119,
"step": 998
},
{
"epoch": 0.84,
"learning_rate": 8.447024673439768e-05,
"loss": 6.295,
"step": 999
},
{
"epoch": 0.84,
"learning_rate": 8.403483309143686e-05,
"loss": 6.1702,
"step": 1000
},
{
"epoch": 0.84,
"eval_loss": 6.585080146789551,
"eval_runtime": 754.776,
"eval_samples_per_second": 3.5,
"eval_steps_per_second": 0.293,
"eval_wer": 1.867511955076691,
"step": 1000
},
{
"epoch": 0.84,
"learning_rate": 8.359941944847604e-05,
"loss": 6.6907,
"step": 1001
},
{
"epoch": 0.84,
"learning_rate": 8.316400580551524e-05,
"loss": 6.757,
"step": 1002
},
{
"epoch": 0.84,
"learning_rate": 8.272859216255442e-05,
"loss": 6.7429,
"step": 1003
},
{
"epoch": 0.84,
"learning_rate": 8.22931785195936e-05,
"loss": 6.7758,
"step": 1004
},
{
"epoch": 0.84,
"learning_rate": 8.18577648766328e-05,
"loss": 6.7114,
"step": 1005
},
{
"epoch": 0.85,
"learning_rate": 8.142235123367198e-05,
"loss": 6.7421,
"step": 1006
},
{
"epoch": 0.85,
"learning_rate": 8.098693759071118e-05,
"loss": 6.6519,
"step": 1007
},
{
"epoch": 0.85,
"learning_rate": 8.055152394775036e-05,
"loss": 6.5508,
"step": 1008
},
{
"epoch": 0.85,
"learning_rate": 8.011611030478954e-05,
"loss": 6.7702,
"step": 1009
},
{
"epoch": 0.85,
"learning_rate": 7.968069666182874e-05,
"loss": 6.5349,
"step": 1010
},
{
"epoch": 0.85,
"learning_rate": 7.924528301886792e-05,
"loss": 6.6104,
"step": 1011
},
{
"epoch": 0.85,
"learning_rate": 7.88098693759071e-05,
"loss": 6.638,
"step": 1012
},
{
"epoch": 0.85,
"learning_rate": 7.83744557329463e-05,
"loss": 6.5846,
"step": 1013
},
{
"epoch": 0.85,
"learning_rate": 7.793904208998548e-05,
"loss": 6.5231,
"step": 1014
},
{
"epoch": 0.85,
"learning_rate": 7.750362844702468e-05,
"loss": 6.5827,
"step": 1015
},
{
"epoch": 0.85,
"learning_rate": 7.706821480406386e-05,
"loss": 6.755,
"step": 1016
},
{
"epoch": 0.85,
"learning_rate": 7.663280116110303e-05,
"loss": 6.5473,
"step": 1017
},
{
"epoch": 0.86,
"learning_rate": 7.619738751814224e-05,
"loss": 6.6718,
"step": 1018
},
{
"epoch": 0.86,
"learning_rate": 7.576197387518142e-05,
"loss": 6.8111,
"step": 1019
},
{
"epoch": 0.86,
"learning_rate": 7.532656023222059e-05,
"loss": 6.5385,
"step": 1020
},
{
"epoch": 0.86,
"learning_rate": 7.48911465892598e-05,
"loss": 6.7338,
"step": 1021
},
{
"epoch": 0.86,
"learning_rate": 7.445573294629897e-05,
"loss": 6.7642,
"step": 1022
},
{
"epoch": 0.86,
"learning_rate": 7.402031930333816e-05,
"loss": 6.8209,
"step": 1023
},
{
"epoch": 0.86,
"learning_rate": 7.358490566037735e-05,
"loss": 6.6269,
"step": 1024
},
{
"epoch": 0.86,
"learning_rate": 7.314949201741654e-05,
"loss": 6.5789,
"step": 1025
},
{
"epoch": 0.86,
"learning_rate": 7.271407837445572e-05,
"loss": 6.5876,
"step": 1026
},
{
"epoch": 0.86,
"learning_rate": 7.22786647314949e-05,
"loss": 6.5396,
"step": 1027
},
{
"epoch": 0.86,
"learning_rate": 7.18432510885341e-05,
"loss": 6.6626,
"step": 1028
},
{
"epoch": 0.87,
"learning_rate": 7.140783744557328e-05,
"loss": 6.6721,
"step": 1029
},
{
"epoch": 0.87,
"learning_rate": 7.097242380261248e-05,
"loss": 6.5971,
"step": 1030
},
{
"epoch": 0.87,
"learning_rate": 7.053701015965166e-05,
"loss": 6.6966,
"step": 1031
},
{
"epoch": 0.87,
"learning_rate": 7.010159651669085e-05,
"loss": 6.4928,
"step": 1032
},
{
"epoch": 0.87,
"learning_rate": 6.966618287373004e-05,
"loss": 6.6723,
"step": 1033
},
{
"epoch": 0.87,
"learning_rate": 6.923076923076922e-05,
"loss": 6.6167,
"step": 1034
},
{
"epoch": 0.87,
"learning_rate": 6.87953555878084e-05,
"loss": 6.6542,
"step": 1035
},
{
"epoch": 0.87,
"learning_rate": 6.83599419448476e-05,
"loss": 6.7551,
"step": 1036
},
{
"epoch": 0.87,
"learning_rate": 6.792452830188678e-05,
"loss": 6.5051,
"step": 1037
},
{
"epoch": 0.87,
"learning_rate": 6.748911465892598e-05,
"loss": 6.7267,
"step": 1038
},
{
"epoch": 0.87,
"learning_rate": 6.705370101596516e-05,
"loss": 6.6521,
"step": 1039
},
{
"epoch": 0.87,
"learning_rate": 6.661828737300434e-05,
"loss": 6.5425,
"step": 1040
},
{
"epoch": 0.88,
"learning_rate": 6.618287373004354e-05,
"loss": 6.5041,
"step": 1041
},
{
"epoch": 0.88,
"learning_rate": 6.574746008708272e-05,
"loss": 6.559,
"step": 1042
},
{
"epoch": 0.88,
"learning_rate": 6.53120464441219e-05,
"loss": 6.5408,
"step": 1043
},
{
"epoch": 0.88,
"learning_rate": 6.48766328011611e-05,
"loss": 6.5498,
"step": 1044
},
{
"epoch": 0.88,
"learning_rate": 6.444121915820028e-05,
"loss": 6.573,
"step": 1045
},
{
"epoch": 0.88,
"learning_rate": 6.400580551523948e-05,
"loss": 6.4869,
"step": 1046
},
{
"epoch": 0.88,
"learning_rate": 6.357039187227866e-05,
"loss": 6.301,
"step": 1047
},
{
"epoch": 0.88,
"learning_rate": 6.313497822931784e-05,
"loss": 6.4788,
"step": 1048
},
{
"epoch": 0.88,
"learning_rate": 6.269956458635704e-05,
"loss": 6.524,
"step": 1049
},
{
"epoch": 0.88,
"learning_rate": 6.226415094339622e-05,
"loss": 6.042,
"step": 1050
},
{
"epoch": 0.88,
"learning_rate": 6.18287373004354e-05,
"loss": 6.8218,
"step": 1051
},
{
"epoch": 0.88,
"learning_rate": 6.13933236574746e-05,
"loss": 6.6262,
"step": 1052
},
{
"epoch": 0.89,
"learning_rate": 6.095791001451378e-05,
"loss": 6.6762,
"step": 1053
},
{
"epoch": 0.89,
"learning_rate": 6.052249637155297e-05,
"loss": 6.74,
"step": 1054
},
{
"epoch": 0.89,
"learning_rate": 6.0087082728592154e-05,
"loss": 6.7309,
"step": 1055
},
{
"epoch": 0.89,
"learning_rate": 5.965166908563134e-05,
"loss": 6.7538,
"step": 1056
},
{
"epoch": 0.89,
"learning_rate": 5.921625544267053e-05,
"loss": 6.7058,
"step": 1057
},
{
"epoch": 0.89,
"learning_rate": 5.878084179970972e-05,
"loss": 6.5754,
"step": 1058
},
{
"epoch": 0.89,
"learning_rate": 5.8345428156748904e-05,
"loss": 6.755,
"step": 1059
},
{
"epoch": 0.89,
"learning_rate": 5.791001451378809e-05,
"loss": 6.7243,
"step": 1060
},
{
"epoch": 0.89,
"learning_rate": 5.747460087082728e-05,
"loss": 6.7692,
"step": 1061
},
{
"epoch": 0.89,
"learning_rate": 5.703918722786647e-05,
"loss": 6.6864,
"step": 1062
},
{
"epoch": 0.89,
"learning_rate": 5.660377358490566e-05,
"loss": 6.7534,
"step": 1063
},
{
"epoch": 0.89,
"learning_rate": 5.616835994194484e-05,
"loss": 6.6074,
"step": 1064
},
{
"epoch": 0.9,
"learning_rate": 5.573294629898403e-05,
"loss": 6.6383,
"step": 1065
},
{
"epoch": 0.9,
"learning_rate": 5.529753265602322e-05,
"loss": 6.6331,
"step": 1066
},
{
"epoch": 0.9,
"learning_rate": 5.486211901306241e-05,
"loss": 6.6365,
"step": 1067
},
{
"epoch": 0.9,
"learning_rate": 5.442670537010159e-05,
"loss": 6.6805,
"step": 1068
},
{
"epoch": 0.9,
"learning_rate": 5.399129172714078e-05,
"loss": 6.7947,
"step": 1069
},
{
"epoch": 0.9,
"learning_rate": 5.355587808417997e-05,
"loss": 6.7136,
"step": 1070
},
{
"epoch": 0.9,
"learning_rate": 5.312046444121916e-05,
"loss": 6.5652,
"step": 1071
},
{
"epoch": 0.9,
"learning_rate": 5.2685050798258335e-05,
"loss": 6.6649,
"step": 1072
},
{
"epoch": 0.9,
"learning_rate": 5.2249637155297524e-05,
"loss": 6.4699,
"step": 1073
},
{
"epoch": 0.9,
"learning_rate": 5.181422351233671e-05,
"loss": 6.6203,
"step": 1074
},
{
"epoch": 0.9,
"learning_rate": 5.137880986937591e-05,
"loss": 6.3756,
"step": 1075
},
{
"epoch": 0.9,
"learning_rate": 5.0943396226415085e-05,
"loss": 6.6922,
"step": 1076
},
{
"epoch": 0.91,
"learning_rate": 5.0507982583454274e-05,
"loss": 6.8846,
"step": 1077
},
{
"epoch": 0.91,
"learning_rate": 5.007256894049346e-05,
"loss": 6.732,
"step": 1078
},
{
"epoch": 0.91,
"learning_rate": 4.963715529753265e-05,
"loss": 6.537,
"step": 1079
},
{
"epoch": 0.91,
"learning_rate": 4.9201741654571834e-05,
"loss": 6.5756,
"step": 1080
},
{
"epoch": 0.91,
"learning_rate": 4.8766328011611024e-05,
"loss": 6.6429,
"step": 1081
},
{
"epoch": 0.91,
"learning_rate": 4.833091436865021e-05,
"loss": 6.9331,
"step": 1082
},
{
"epoch": 0.91,
"learning_rate": 4.78955007256894e-05,
"loss": 6.4924,
"step": 1083
},
{
"epoch": 0.91,
"learning_rate": 4.7460087082728584e-05,
"loss": 6.6273,
"step": 1084
},
{
"epoch": 0.91,
"learning_rate": 4.702467343976777e-05,
"loss": 6.6147,
"step": 1085
},
{
"epoch": 0.91,
"learning_rate": 4.658925979680696e-05,
"loss": 6.6515,
"step": 1086
},
{
"epoch": 0.91,
"learning_rate": 4.615384615384615e-05,
"loss": 6.566,
"step": 1087
},
{
"epoch": 0.91,
"learning_rate": 4.5718432510885334e-05,
"loss": 6.6589,
"step": 1088
},
{
"epoch": 0.92,
"learning_rate": 4.528301886792452e-05,
"loss": 6.649,
"step": 1089
},
{
"epoch": 0.92,
"learning_rate": 4.484760522496371e-05,
"loss": 6.5398,
"step": 1090
},
{
"epoch": 0.92,
"learning_rate": 4.44121915820029e-05,
"loss": 6.5297,
"step": 1091
},
{
"epoch": 0.92,
"learning_rate": 4.397677793904208e-05,
"loss": 6.4694,
"step": 1092
},
{
"epoch": 0.92,
"learning_rate": 4.354136429608127e-05,
"loss": 6.4292,
"step": 1093
},
{
"epoch": 0.92,
"learning_rate": 4.310595065312046e-05,
"loss": 6.637,
"step": 1094
},
{
"epoch": 0.92,
"learning_rate": 4.267053701015965e-05,
"loss": 6.4221,
"step": 1095
},
{
"epoch": 0.92,
"learning_rate": 4.223512336719884e-05,
"loss": 6.5716,
"step": 1096
},
{
"epoch": 0.92,
"learning_rate": 4.179970972423802e-05,
"loss": 6.3362,
"step": 1097
},
{
"epoch": 0.92,
"learning_rate": 4.136429608127721e-05,
"loss": 6.5254,
"step": 1098
},
{
"epoch": 0.92,
"learning_rate": 4.09288824383164e-05,
"loss": 6.4824,
"step": 1099
},
{
"epoch": 0.92,
"learning_rate": 4.049346879535559e-05,
"loss": 5.7116,
"step": 1100
},
{
"epoch": 0.93,
"learning_rate": 4.005805515239477e-05,
"loss": 6.7332,
"step": 1101
},
{
"epoch": 0.93,
"learning_rate": 3.962264150943396e-05,
"loss": 6.7109,
"step": 1102
},
{
"epoch": 0.93,
"learning_rate": 3.918722786647315e-05,
"loss": 6.7897,
"step": 1103
},
{
"epoch": 0.93,
"learning_rate": 3.875181422351234e-05,
"loss": 6.6304,
"step": 1104
},
{
"epoch": 0.93,
"learning_rate": 3.8316400580551515e-05,
"loss": 6.5934,
"step": 1105
},
{
"epoch": 0.93,
"learning_rate": 3.788098693759071e-05,
"loss": 6.5387,
"step": 1106
},
{
"epoch": 0.93,
"learning_rate": 3.74455732946299e-05,
"loss": 6.916,
"step": 1107
},
{
"epoch": 0.93,
"learning_rate": 3.701015965166908e-05,
"loss": 6.8103,
"step": 1108
},
{
"epoch": 0.93,
"learning_rate": 3.657474600870827e-05,
"loss": 6.7109,
"step": 1109
},
{
"epoch": 0.93,
"learning_rate": 3.613933236574745e-05,
"loss": 6.531,
"step": 1110
},
{
"epoch": 0.93,
"learning_rate": 3.570391872278664e-05,
"loss": 6.625,
"step": 1111
},
{
"epoch": 0.93,
"learning_rate": 3.526850507982583e-05,
"loss": 6.7393,
"step": 1112
},
{
"epoch": 0.94,
"learning_rate": 3.483309143686502e-05,
"loss": 6.5094,
"step": 1113
},
{
"epoch": 0.94,
"learning_rate": 3.43976777939042e-05,
"loss": 6.5362,
"step": 1114
},
{
"epoch": 0.94,
"learning_rate": 3.396226415094339e-05,
"loss": 6.7859,
"step": 1115
},
{
"epoch": 0.94,
"learning_rate": 3.352685050798258e-05,
"loss": 6.7046,
"step": 1116
},
{
"epoch": 0.94,
"learning_rate": 3.309143686502177e-05,
"loss": 6.7309,
"step": 1117
},
{
"epoch": 0.94,
"learning_rate": 3.265602322206095e-05,
"loss": 6.6637,
"step": 1118
},
{
"epoch": 0.94,
"learning_rate": 3.222060957910014e-05,
"loss": 6.4719,
"step": 1119
},
{
"epoch": 0.94,
"learning_rate": 3.178519593613933e-05,
"loss": 6.7841,
"step": 1120
},
{
"epoch": 0.94,
"learning_rate": 3.134978229317852e-05,
"loss": 6.6491,
"step": 1121
},
{
"epoch": 0.94,
"learning_rate": 3.09143686502177e-05,
"loss": 6.8233,
"step": 1122
},
{
"epoch": 0.94,
"learning_rate": 3.047895500725689e-05,
"loss": 6.5963,
"step": 1123
},
{
"epoch": 0.94,
"learning_rate": 3.0043541364296077e-05,
"loss": 6.7396,
"step": 1124
},
{
"epoch": 0.95,
"learning_rate": 2.9608127721335266e-05,
"loss": 6.8136,
"step": 1125
},
{
"epoch": 0.95,
"learning_rate": 2.9172714078374452e-05,
"loss": 6.545,
"step": 1126
},
{
"epoch": 0.95,
"learning_rate": 2.873730043541364e-05,
"loss": 6.634,
"step": 1127
},
{
"epoch": 0.95,
"learning_rate": 2.830188679245283e-05,
"loss": 6.5989,
"step": 1128
},
{
"epoch": 0.95,
"learning_rate": 2.7866473149492016e-05,
"loss": 6.65,
"step": 1129
},
{
"epoch": 0.95,
"learning_rate": 2.7431059506531205e-05,
"loss": 6.6033,
"step": 1130
},
{
"epoch": 0.95,
"learning_rate": 2.699564586357039e-05,
"loss": 6.4993,
"step": 1131
},
{
"epoch": 0.95,
"learning_rate": 2.656023222060958e-05,
"loss": 6.4688,
"step": 1132
},
{
"epoch": 0.95,
"learning_rate": 2.6124818577648762e-05,
"loss": 6.7395,
"step": 1133
},
{
"epoch": 0.95,
"learning_rate": 2.5689404934687955e-05,
"loss": 6.6679,
"step": 1134
},
{
"epoch": 0.95,
"learning_rate": 2.5253991291727137e-05,
"loss": 6.7816,
"step": 1135
},
{
"epoch": 0.96,
"learning_rate": 2.4818577648766326e-05,
"loss": 6.5733,
"step": 1136
},
{
"epoch": 0.96,
"learning_rate": 2.4383164005805512e-05,
"loss": 6.6597,
"step": 1137
},
{
"epoch": 0.96,
"learning_rate": 2.39477503628447e-05,
"loss": 6.6227,
"step": 1138
},
{
"epoch": 0.96,
"learning_rate": 2.3512336719883887e-05,
"loss": 6.6052,
"step": 1139
},
{
"epoch": 0.96,
"learning_rate": 2.3076923076923076e-05,
"loss": 6.4697,
"step": 1140
},
{
"epoch": 0.96,
"learning_rate": 2.264150943396226e-05,
"loss": 6.7342,
"step": 1141
},
{
"epoch": 0.96,
"learning_rate": 2.220609579100145e-05,
"loss": 6.6098,
"step": 1142
},
{
"epoch": 0.96,
"learning_rate": 2.1770682148040636e-05,
"loss": 6.5866,
"step": 1143
},
{
"epoch": 0.96,
"learning_rate": 2.1335268505079825e-05,
"loss": 6.6129,
"step": 1144
},
{
"epoch": 0.96,
"learning_rate": 2.089985486211901e-05,
"loss": 6.3888,
"step": 1145
},
{
"epoch": 0.96,
"learning_rate": 2.04644412191582e-05,
"loss": 6.5824,
"step": 1146
},
{
"epoch": 0.96,
"learning_rate": 2.0029027576197386e-05,
"loss": 6.3109,
"step": 1147
},
{
"epoch": 0.97,
"learning_rate": 1.9593613933236575e-05,
"loss": 6.1473,
"step": 1148
},
{
"epoch": 0.97,
"learning_rate": 1.9158200290275757e-05,
"loss": 6.303,
"step": 1149
},
{
"epoch": 0.97,
"learning_rate": 1.872278664731495e-05,
"loss": 6.0496,
"step": 1150
},
{
"epoch": 0.97,
"learning_rate": 1.8287373004354136e-05,
"loss": 6.7155,
"step": 1151
},
{
"epoch": 0.97,
"learning_rate": 1.785195936139332e-05,
"loss": 6.438,
"step": 1152
},
{
"epoch": 0.97,
"learning_rate": 1.741654571843251e-05,
"loss": 6.7806,
"step": 1153
},
{
"epoch": 0.97,
"learning_rate": 1.6981132075471696e-05,
"loss": 6.6463,
"step": 1154
},
{
"epoch": 0.97,
"learning_rate": 1.6545718432510885e-05,
"loss": 6.6742,
"step": 1155
},
{
"epoch": 0.97,
"learning_rate": 1.611030478955007e-05,
"loss": 6.8502,
"step": 1156
},
{
"epoch": 0.97,
"learning_rate": 1.567489114658926e-05,
"loss": 6.6052,
"step": 1157
},
{
"epoch": 0.97,
"learning_rate": 1.5239477503628446e-05,
"loss": 6.564,
"step": 1158
},
{
"epoch": 0.97,
"learning_rate": 1.4804063860667633e-05,
"loss": 6.5864,
"step": 1159
},
{
"epoch": 0.98,
"learning_rate": 1.436865021770682e-05,
"loss": 6.662,
"step": 1160
},
{
"epoch": 0.98,
"learning_rate": 1.3933236574746008e-05,
"loss": 6.787,
"step": 1161
},
{
"epoch": 0.98,
"learning_rate": 1.3497822931785195e-05,
"loss": 6.7509,
"step": 1162
},
{
"epoch": 0.98,
"learning_rate": 1.3062409288824381e-05,
"loss": 6.7778,
"step": 1163
},
{
"epoch": 0.98,
"learning_rate": 1.2626995645863568e-05,
"loss": 6.6383,
"step": 1164
},
{
"epoch": 0.98,
"learning_rate": 1.2191582002902756e-05,
"loss": 6.6005,
"step": 1165
},
{
"epoch": 0.98,
"learning_rate": 1.1756168359941943e-05,
"loss": 6.8618,
"step": 1166
},
{
"epoch": 0.98,
"learning_rate": 1.132075471698113e-05,
"loss": 6.4564,
"step": 1167
},
{
"epoch": 0.98,
"learning_rate": 1.0885341074020318e-05,
"loss": 6.6056,
"step": 1168
},
{
"epoch": 0.98,
"learning_rate": 1.0449927431059506e-05,
"loss": 6.5271,
"step": 1169
},
{
"epoch": 0.98,
"learning_rate": 1.0014513788098693e-05,
"loss": 6.5189,
"step": 1170
},
{
"epoch": 0.98,
"learning_rate": 9.579100145137879e-06,
"loss": 6.6099,
"step": 1171
},
{
"epoch": 0.99,
"learning_rate": 9.143686502177068e-06,
"loss": 6.5129,
"step": 1172
},
{
"epoch": 0.99,
"learning_rate": 8.708272859216255e-06,
"loss": 6.6832,
"step": 1173
},
{
"epoch": 0.99,
"learning_rate": 8.272859216255443e-06,
"loss": 6.7196,
"step": 1174
},
{
"epoch": 0.99,
"learning_rate": 7.83744557329463e-06,
"loss": 6.4536,
"step": 1175
},
{
"epoch": 0.99,
"learning_rate": 7.4020319303338166e-06,
"loss": 6.394,
"step": 1176
},
{
"epoch": 0.99,
"learning_rate": 6.966618287373004e-06,
"loss": 6.5681,
"step": 1177
},
{
"epoch": 0.99,
"learning_rate": 6.5312046444121905e-06,
"loss": 6.754,
"step": 1178
},
{
"epoch": 0.99,
"learning_rate": 6.095791001451378e-06,
"loss": 6.4237,
"step": 1179
},
{
"epoch": 0.99,
"learning_rate": 5.660377358490565e-06,
"loss": 6.8175,
"step": 1180
},
{
"epoch": 0.99,
"learning_rate": 5.224963715529753e-06,
"loss": 6.5249,
"step": 1181
},
{
"epoch": 0.99,
"learning_rate": 4.789550072568939e-06,
"loss": 6.6056,
"step": 1182
},
{
"epoch": 0.99,
"learning_rate": 4.354136429608128e-06,
"loss": 6.5389,
"step": 1183
},
{
"epoch": 1.0,
"learning_rate": 3.918722786647315e-06,
"loss": 6.5179,
"step": 1184
},
{
"epoch": 1.0,
"learning_rate": 3.483309143686502e-06,
"loss": 6.4921,
"step": 1185
},
{
"epoch": 1.0,
"learning_rate": 3.047895500725689e-06,
"loss": 6.3926,
"step": 1186
},
{
"epoch": 1.0,
"learning_rate": 2.6124818577648764e-06,
"loss": 6.2185,
"step": 1187
},
{
"epoch": 1.0,
"learning_rate": 2.177068214804064e-06,
"loss": 6.1024,
"step": 1188
},
{
"epoch": 1.0,
"learning_rate": 1.741654571843251e-06,
"loss": 6.1685,
"step": 1189
},
{
"epoch": 1.0,
"step": 1189,
"total_flos": 0.0,
"train_loss": 6.8302552445382005,
"train_runtime": 6161.6809,
"train_samples_per_second": 4.632,
"train_steps_per_second": 0.193
}
],
"max_steps": 1189,
"num_train_epochs": 1,
"total_flos": 0.0,
"trial_name": null,
"trial_params": null
}