{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0079798404031919,
"eval_steps": 500,
"global_step": 600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 0.4196510314941406,
"learning_rate": 3.7037037037037036e-07,
"loss": 1.4334,
"step": 1
},
{
"epoch": 0.0,
"grad_norm": 0.44790390133857727,
"learning_rate": 7.407407407407407e-07,
"loss": 1.4029,
"step": 2
},
{
"epoch": 0.01,
"grad_norm": 0.439674973487854,
"learning_rate": 1.111111111111111e-06,
"loss": 1.461,
"step": 3
},
{
"epoch": 0.01,
"grad_norm": 0.43389058113098145,
"learning_rate": 1.4814814814814815e-06,
"loss": 1.4012,
"step": 4
},
{
"epoch": 0.01,
"grad_norm": 0.5680592060089111,
"learning_rate": 1.8518518518518519e-06,
"loss": 1.445,
"step": 5
},
{
"epoch": 0.01,
"grad_norm": 0.3744407296180725,
"learning_rate": 2.222222222222222e-06,
"loss": 1.3717,
"step": 6
},
{
"epoch": 0.01,
"grad_norm": 0.36713480949401855,
"learning_rate": 2.5925925925925925e-06,
"loss": 1.4689,
"step": 7
},
{
"epoch": 0.01,
"grad_norm": 0.32056745886802673,
"learning_rate": 2.962962962962963e-06,
"loss": 1.4354,
"step": 8
},
{
"epoch": 0.02,
"grad_norm": 0.3159842789173126,
"learning_rate": 3.3333333333333333e-06,
"loss": 1.4229,
"step": 9
},
{
"epoch": 0.02,
"grad_norm": 0.2807372510433197,
"learning_rate": 3.7037037037037037e-06,
"loss": 1.3873,
"step": 10
},
{
"epoch": 0.02,
"grad_norm": 0.27728646993637085,
"learning_rate": 4.074074074074074e-06,
"loss": 1.3989,
"step": 11
},
{
"epoch": 0.02,
"grad_norm": 0.280068039894104,
"learning_rate": 4.444444444444444e-06,
"loss": 1.4782,
"step": 12
},
{
"epoch": 0.02,
"grad_norm": 0.236131489276886,
"learning_rate": 4.814814814814815e-06,
"loss": 1.3919,
"step": 13
},
{
"epoch": 0.02,
"grad_norm": 0.2324475198984146,
"learning_rate": 5.185185185185185e-06,
"loss": 1.483,
"step": 14
},
{
"epoch": 0.03,
"grad_norm": 0.27377304434776306,
"learning_rate": 5.555555555555557e-06,
"loss": 1.3518,
"step": 15
},
{
"epoch": 0.03,
"grad_norm": 0.23332859575748444,
"learning_rate": 5.925925925925926e-06,
"loss": 1.3781,
"step": 16
},
{
"epoch": 0.03,
"grad_norm": 0.24840319156646729,
"learning_rate": 6.296296296296297e-06,
"loss": 1.3759,
"step": 17
},
{
"epoch": 0.03,
"grad_norm": 0.22274506092071533,
"learning_rate": 6.666666666666667e-06,
"loss": 1.3676,
"step": 18
},
{
"epoch": 0.03,
"grad_norm": 0.29411837458610535,
"learning_rate": 7.0370370370370375e-06,
"loss": 1.4396,
"step": 19
},
{
"epoch": 0.03,
"grad_norm": 0.292468786239624,
"learning_rate": 7.4074074074074075e-06,
"loss": 1.3571,
"step": 20
},
{
"epoch": 0.04,
"grad_norm": 0.22091880440711975,
"learning_rate": 7.77777777777778e-06,
"loss": 1.3387,
"step": 21
},
{
"epoch": 0.04,
"grad_norm": 0.21235232055187225,
"learning_rate": 8.148148148148148e-06,
"loss": 1.3793,
"step": 22
},
{
"epoch": 0.04,
"grad_norm": 0.23756876587867737,
"learning_rate": 8.518518518518519e-06,
"loss": 1.4069,
"step": 23
},
{
"epoch": 0.04,
"grad_norm": 0.21084965765476227,
"learning_rate": 8.888888888888888e-06,
"loss": 1.3839,
"step": 24
},
{
"epoch": 0.04,
"grad_norm": 0.21281524002552032,
"learning_rate": 9.25925925925926e-06,
"loss": 1.4341,
"step": 25
},
{
"epoch": 0.04,
"grad_norm": 0.21351666748523712,
"learning_rate": 9.62962962962963e-06,
"loss": 1.3967,
"step": 26
},
{
"epoch": 0.05,
"grad_norm": 0.21163725852966309,
"learning_rate": 1e-05,
"loss": 1.3992,
"step": 27
},
{
"epoch": 0.05,
"grad_norm": 0.2063266783952713,
"learning_rate": 1.037037037037037e-05,
"loss": 1.3997,
"step": 28
},
{
"epoch": 0.05,
"grad_norm": 0.2602396607398987,
"learning_rate": 1.0740740740740742e-05,
"loss": 1.3798,
"step": 29
},
{
"epoch": 0.05,
"grad_norm": 0.20360125601291656,
"learning_rate": 1.1111111111111113e-05,
"loss": 1.385,
"step": 30
},
{
"epoch": 0.05,
"grad_norm": 0.2125891149044037,
"learning_rate": 1.1481481481481482e-05,
"loss": 1.4193,
"step": 31
},
{
"epoch": 0.05,
"grad_norm": 0.21324987709522247,
"learning_rate": 1.1851851851851852e-05,
"loss": 1.3213,
"step": 32
},
{
"epoch": 0.06,
"grad_norm": 0.1991666704416275,
"learning_rate": 1.2222222222222224e-05,
"loss": 1.3575,
"step": 33
},
{
"epoch": 0.06,
"grad_norm": 0.22300179302692413,
"learning_rate": 1.2592592592592593e-05,
"loss": 1.4578,
"step": 34
},
{
"epoch": 0.06,
"grad_norm": 0.20070216059684753,
"learning_rate": 1.2962962962962964e-05,
"loss": 1.412,
"step": 35
},
{
"epoch": 0.06,
"grad_norm": 0.1976468563079834,
"learning_rate": 1.3333333333333333e-05,
"loss": 1.3627,
"step": 36
},
{
"epoch": 0.06,
"grad_norm": 0.19419902563095093,
"learning_rate": 1.3703703703703706e-05,
"loss": 1.3384,
"step": 37
},
{
"epoch": 0.06,
"grad_norm": 0.19124644994735718,
"learning_rate": 1.4074074074074075e-05,
"loss": 1.3381,
"step": 38
},
{
"epoch": 0.07,
"grad_norm": 0.21110524237155914,
"learning_rate": 1.4444444444444446e-05,
"loss": 1.4208,
"step": 39
},
{
"epoch": 0.07,
"grad_norm": 0.1895744502544403,
"learning_rate": 1.4814814814814815e-05,
"loss": 1.3003,
"step": 40
},
{
"epoch": 0.07,
"grad_norm": 0.20592844486236572,
"learning_rate": 1.5185185185185187e-05,
"loss": 1.3216,
"step": 41
},
{
"epoch": 0.07,
"grad_norm": 0.20679640769958496,
"learning_rate": 1.555555555555556e-05,
"loss": 1.4045,
"step": 42
},
{
"epoch": 0.07,
"grad_norm": 0.19197501242160797,
"learning_rate": 1.5925925925925926e-05,
"loss": 1.4138,
"step": 43
},
{
"epoch": 0.07,
"grad_norm": 0.32921022176742554,
"learning_rate": 1.6296296296296297e-05,
"loss": 1.3683,
"step": 44
},
{
"epoch": 0.08,
"grad_norm": 0.21861666440963745,
"learning_rate": 1.6666666666666667e-05,
"loss": 1.3522,
"step": 45
},
{
"epoch": 0.08,
"grad_norm": 0.1850321739912033,
"learning_rate": 1.7037037037037038e-05,
"loss": 1.293,
"step": 46
},
{
"epoch": 0.08,
"grad_norm": 0.36730220913887024,
"learning_rate": 1.740740740740741e-05,
"loss": 1.4122,
"step": 47
},
{
"epoch": 0.08,
"grad_norm": 0.1949268877506256,
"learning_rate": 1.7777777777777777e-05,
"loss": 1.4002,
"step": 48
},
{
"epoch": 0.08,
"grad_norm": 0.20806071162223816,
"learning_rate": 1.814814814814815e-05,
"loss": 1.3386,
"step": 49
},
{
"epoch": 0.08,
"grad_norm": 0.2157718390226364,
"learning_rate": 1.851851851851852e-05,
"loss": 1.3323,
"step": 50
},
{
"epoch": 0.09,
"grad_norm": 0.19431230425834656,
"learning_rate": 1.888888888888889e-05,
"loss": 1.3844,
"step": 51
},
{
"epoch": 0.09,
"grad_norm": 0.20404817163944244,
"learning_rate": 1.925925925925926e-05,
"loss": 1.2329,
"step": 52
},
{
"epoch": 0.09,
"grad_norm": 0.18933957815170288,
"learning_rate": 1.962962962962963e-05,
"loss": 1.2489,
"step": 53
},
{
"epoch": 0.09,
"grad_norm": 0.21972468495368958,
"learning_rate": 2e-05,
"loss": 1.3139,
"step": 54
},
{
"epoch": 0.09,
"grad_norm": 0.2144392728805542,
"learning_rate": 1.999998353068657e-05,
"loss": 1.3223,
"step": 55
},
{
"epoch": 0.09,
"grad_norm": 0.335573673248291,
"learning_rate": 1.9999934122800527e-05,
"loss": 1.3846,
"step": 56
},
{
"epoch": 0.1,
"grad_norm": 0.19709603488445282,
"learning_rate": 1.9999851776504614e-05,
"loss": 1.4115,
"step": 57
},
{
"epoch": 0.1,
"grad_norm": 0.20725642144680023,
"learning_rate": 1.9999736492070065e-05,
"loss": 1.3915,
"step": 58
},
{
"epoch": 0.1,
"grad_norm": 0.2252136766910553,
"learning_rate": 1.9999588269876614e-05,
"loss": 1.3825,
"step": 59
},
{
"epoch": 0.1,
"grad_norm": 0.18642185628414154,
"learning_rate": 1.999940711041249e-05,
"loss": 1.341,
"step": 60
},
{
"epoch": 0.1,
"grad_norm": 0.2533683180809021,
"learning_rate": 1.9999193014274393e-05,
"loss": 1.3444,
"step": 61
},
{
"epoch": 0.1,
"grad_norm": 0.21690671145915985,
"learning_rate": 1.9998945982167542e-05,
"loss": 1.344,
"step": 62
},
{
"epoch": 0.11,
"grad_norm": 0.19395007193088531,
"learning_rate": 1.9998666014905617e-05,
"loss": 1.3215,
"step": 63
},
{
"epoch": 0.11,
"grad_norm": 0.2266397327184677,
"learning_rate": 1.9998353113410796e-05,
"loss": 1.4338,
"step": 64
},
{
"epoch": 0.11,
"grad_norm": 0.19096337258815765,
"learning_rate": 1.999800727871373e-05,
"loss": 1.394,
"step": 65
},
{
"epoch": 0.11,
"grad_norm": 0.17608919739723206,
"learning_rate": 1.9997628511953554e-05,
"loss": 1.3472,
"step": 66
},
{
"epoch": 0.11,
"grad_norm": 0.19728903472423553,
"learning_rate": 1.9997216814377875e-05,
"loss": 1.3919,
"step": 67
},
{
"epoch": 0.11,
"grad_norm": 0.2579798102378845,
"learning_rate": 1.9996772187342764e-05,
"loss": 1.3303,
"step": 68
},
{
"epoch": 0.12,
"grad_norm": 0.1914272904396057,
"learning_rate": 1.9996294632312766e-05,
"loss": 1.3093,
"step": 69
},
{
"epoch": 0.12,
"grad_norm": 0.18615896999835968,
"learning_rate": 1.999578415086088e-05,
"loss": 1.4049,
"step": 70
},
{
"epoch": 0.12,
"grad_norm": 0.18626686930656433,
"learning_rate": 1.9995240744668555e-05,
"loss": 1.2712,
"step": 71
},
{
"epoch": 0.12,
"grad_norm": 0.18953536450862885,
"learning_rate": 1.999466441552571e-05,
"loss": 1.311,
"step": 72
},
{
"epoch": 0.12,
"grad_norm": 0.20964953303337097,
"learning_rate": 1.9994055165330683e-05,
"loss": 1.383,
"step": 73
},
{
"epoch": 0.12,
"grad_norm": 1.15555739402771,
"learning_rate": 1.9993412996090265e-05,
"loss": 1.2798,
"step": 74
},
{
"epoch": 0.13,
"grad_norm": 0.18615145981311798,
"learning_rate": 1.9992737909919672e-05,
"loss": 1.3288,
"step": 75
},
{
"epoch": 0.13,
"grad_norm": 0.17394311726093292,
"learning_rate": 1.9992029909042542e-05,
"loss": 1.3833,
"step": 76
},
{
"epoch": 0.13,
"grad_norm": 0.2260170429944992,
"learning_rate": 1.999128899579094e-05,
"loss": 1.3483,
"step": 77
},
{
"epoch": 0.13,
"grad_norm": 0.18870756030082703,
"learning_rate": 1.999051517260533e-05,
"loss": 1.3463,
"step": 78
},
{
"epoch": 0.13,
"grad_norm": 0.17621129751205444,
"learning_rate": 1.9989708442034573e-05,
"loss": 1.2911,
"step": 79
},
{
"epoch": 0.13,
"grad_norm": 0.19401304423809052,
"learning_rate": 1.9988868806735938e-05,
"loss": 1.2942,
"step": 80
},
{
"epoch": 0.14,
"grad_norm": 0.21740129590034485,
"learning_rate": 1.9987996269475063e-05,
"loss": 1.3842,
"step": 81
},
{
"epoch": 0.14,
"grad_norm": 0.24701721966266632,
"learning_rate": 1.9987090833125967e-05,
"loss": 1.4095,
"step": 82
},
{
"epoch": 0.14,
"grad_norm": 0.18182536959648132,
"learning_rate": 1.998615250067103e-05,
"loss": 1.3559,
"step": 83
},
{
"epoch": 0.14,
"grad_norm": 0.31561610102653503,
"learning_rate": 1.9985181275201e-05,
"loss": 1.3771,
"step": 84
},
{
"epoch": 0.14,
"grad_norm": 0.21156860888004303,
"learning_rate": 1.998417715991495e-05,
"loss": 1.3526,
"step": 85
},
{
"epoch": 0.14,
"grad_norm": 0.251007080078125,
"learning_rate": 1.9983140158120304e-05,
"loss": 1.3858,
"step": 86
},
{
"epoch": 0.15,
"grad_norm": 0.18290047347545624,
"learning_rate": 1.9982070273232796e-05,
"loss": 1.2914,
"step": 87
},
{
"epoch": 0.15,
"grad_norm": 0.18279613554477692,
"learning_rate": 1.9980967508776486e-05,
"loss": 1.2838,
"step": 88
},
{
"epoch": 0.15,
"grad_norm": 0.26361390948295593,
"learning_rate": 1.997983186838373e-05,
"loss": 1.4331,
"step": 89
},
{
"epoch": 0.15,
"grad_norm": 0.2679239511489868,
"learning_rate": 1.9978663355795167e-05,
"loss": 1.339,
"step": 90
},
{
"epoch": 0.15,
"grad_norm": 0.1776888072490692,
"learning_rate": 1.997746197485972e-05,
"loss": 1.3149,
"step": 91
},
{
"epoch": 0.15,
"grad_norm": 0.19774815440177917,
"learning_rate": 1.997622772953457e-05,
"loss": 1.3608,
"step": 92
},
{
"epoch": 0.16,
"grad_norm": 0.1756751984357834,
"learning_rate": 1.9974960623885158e-05,
"loss": 1.3813,
"step": 93
},
{
"epoch": 0.16,
"grad_norm": 0.17813633382320404,
"learning_rate": 1.9973660662085152e-05,
"loss": 1.3573,
"step": 94
},
{
"epoch": 0.16,
"grad_norm": 0.17685261368751526,
"learning_rate": 1.9972327848416442e-05,
"loss": 1.318,
"step": 95
},
{
"epoch": 0.16,
"grad_norm": 0.16881057620048523,
"learning_rate": 1.9970962187269144e-05,
"loss": 1.3384,
"step": 96
},
{
"epoch": 0.16,
"grad_norm": 0.1850956529378891,
"learning_rate": 1.996956368314155e-05,
"loss": 1.3708,
"step": 97
},
{
"epoch": 0.16,
"grad_norm": 0.1764199137687683,
"learning_rate": 1.996813234064014e-05,
"loss": 1.3025,
"step": 98
},
{
"epoch": 0.17,
"grad_norm": 0.1872323602437973,
"learning_rate": 1.9966668164479567e-05,
"loss": 1.3671,
"step": 99
},
{
"epoch": 0.17,
"grad_norm": 0.15457874536514282,
"learning_rate": 1.9965171159482623e-05,
"loss": 1.3341,
"step": 100
},
{
"epoch": 0.17,
"grad_norm": 0.18440444767475128,
"learning_rate": 1.9963641330580237e-05,
"loss": 1.3263,
"step": 101
},
{
"epoch": 0.17,
"grad_norm": 0.17000797390937805,
"learning_rate": 1.996207868281145e-05,
"loss": 1.3522,
"step": 102
},
{
"epoch": 0.17,
"grad_norm": 0.1682623326778412,
"learning_rate": 1.9960483221323417e-05,
"loss": 1.3363,
"step": 103
},
{
"epoch": 0.17,
"grad_norm": 0.16398635506629944,
"learning_rate": 1.9958854951371364e-05,
"loss": 1.297,
"step": 104
},
{
"epoch": 0.18,
"grad_norm": 0.16423435509204865,
"learning_rate": 1.9957193878318594e-05,
"loss": 1.3561,
"step": 105
},
{
"epoch": 0.18,
"grad_norm": 0.16171465814113617,
"learning_rate": 1.9955500007636445e-05,
"loss": 1.4085,
"step": 106
},
{
"epoch": 0.18,
"grad_norm": 0.17358383536338806,
"learning_rate": 1.9953773344904303e-05,
"loss": 1.3456,
"step": 107
},
{
"epoch": 0.18,
"grad_norm": 0.16697920858860016,
"learning_rate": 1.995201389580955e-05,
"loss": 1.2939,
"step": 108
},
{
"epoch": 0.18,
"grad_norm": 0.17273786664009094,
"learning_rate": 1.995022166614758e-05,
"loss": 1.2898,
"step": 109
},
{
"epoch": 0.18,
"grad_norm": 0.15889504551887512,
"learning_rate": 1.9948396661821742e-05,
"loss": 1.341,
"step": 110
},
{
"epoch": 0.19,
"grad_norm": 0.1592029333114624,
"learning_rate": 1.994653888884335e-05,
"loss": 1.3076,
"step": 111
},
{
"epoch": 0.19,
"grad_norm": 0.21797503530979156,
"learning_rate": 1.9944648353331663e-05,
"loss": 1.3441,
"step": 112
},
{
"epoch": 0.19,
"grad_norm": 0.17762970924377441,
"learning_rate": 1.9942725061513833e-05,
"loss": 1.3432,
"step": 113
},
{
"epoch": 0.19,
"grad_norm": 0.1651773303747177,
"learning_rate": 1.9940769019724926e-05,
"loss": 1.3499,
"step": 114
},
{
"epoch": 0.19,
"grad_norm": 0.1721709966659546,
"learning_rate": 1.9938780234407876e-05,
"loss": 1.361,
"step": 115
},
{
"epoch": 0.19,
"grad_norm": 0.16519513726234436,
"learning_rate": 1.9936758712113464e-05,
"loss": 1.3823,
"step": 116
},
{
"epoch": 0.2,
"grad_norm": 0.17121821641921997,
"learning_rate": 1.9934704459500312e-05,
"loss": 1.378,
"step": 117
},
{
"epoch": 0.2,
"grad_norm": 0.1555752456188202,
"learning_rate": 1.993261748333484e-05,
"loss": 1.2965,
"step": 118
},
{
"epoch": 0.2,
"grad_norm": 0.15821285545825958,
"learning_rate": 1.9930497790491265e-05,
"loss": 1.3857,
"step": 119
},
{
"epoch": 0.2,
"grad_norm": 0.1656782180070877,
"learning_rate": 1.9928345387951563e-05,
"loss": 1.3373,
"step": 120
},
{
"epoch": 0.2,
"grad_norm": 0.19061700999736786,
"learning_rate": 1.9926160282805456e-05,
"loss": 1.4497,
"step": 121
},
{
"epoch": 0.2,
"grad_norm": 0.16132259368896484,
"learning_rate": 1.9923942482250374e-05,
"loss": 1.3188,
"step": 122
},
{
"epoch": 0.21,
"grad_norm": 0.16741512715816498,
"learning_rate": 1.9921691993591455e-05,
"loss": 1.4353,
"step": 123
},
{
"epoch": 0.21,
"grad_norm": 0.1629590541124344,
"learning_rate": 1.991940882424149e-05,
"loss": 1.4078,
"step": 124
},
{
"epoch": 0.21,
"grad_norm": 0.158173069357872,
"learning_rate": 1.9917092981720934e-05,
"loss": 1.3877,
"step": 125
},
{
"epoch": 0.21,
"grad_norm": 0.15278945863246918,
"learning_rate": 1.991474447365785e-05,
"loss": 1.2866,
"step": 126
},
{
"epoch": 0.21,
"grad_norm": 0.15629614889621735,
"learning_rate": 1.9912363307787902e-05,
"loss": 1.3685,
"step": 127
},
{
"epoch": 0.22,
"grad_norm": 0.1634289026260376,
"learning_rate": 1.9909949491954325e-05,
"loss": 1.3361,
"step": 128
},
{
"epoch": 0.22,
"grad_norm": 0.1760614961385727,
"learning_rate": 1.9907503034107893e-05,
"loss": 1.3366,
"step": 129
},
{
"epoch": 0.22,
"grad_norm": 0.16594205796718597,
"learning_rate": 1.99050239423069e-05,
"loss": 1.3213,
"step": 130
},
{
"epoch": 0.22,
"grad_norm": 0.16287142038345337,
"learning_rate": 1.9902512224717147e-05,
"loss": 1.3046,
"step": 131
},
{
"epoch": 0.22,
"grad_norm": 0.16176696121692657,
"learning_rate": 1.9899967889611876e-05,
"loss": 1.3995,
"step": 132
},
{
"epoch": 0.22,
"grad_norm": 0.15077176690101624,
"learning_rate": 1.9897390945371778e-05,
"loss": 1.2764,
"step": 133
},
{
"epoch": 0.23,
"grad_norm": 0.17957943677902222,
"learning_rate": 1.9894781400484956e-05,
"loss": 1.3251,
"step": 134
},
{
"epoch": 0.23,
"grad_norm": 0.15552657842636108,
"learning_rate": 1.989213926354689e-05,
"loss": 1.2835,
"step": 135
},
{
"epoch": 0.23,
"grad_norm": 0.1609240621328354,
"learning_rate": 1.988946454326042e-05,
"loss": 1.3284,
"step": 136
},
{
"epoch": 0.23,
"grad_norm": 0.18385426700115204,
"learning_rate": 1.9886757248435705e-05,
"loss": 1.3721,
"step": 137
},
{
"epoch": 0.23,
"grad_norm": 0.14970523118972778,
"learning_rate": 1.9884017387990205e-05,
"loss": 1.2491,
"step": 138
},
{
"epoch": 0.23,
"grad_norm": 0.1609608381986618,
"learning_rate": 1.988124497094864e-05,
"loss": 1.4012,
"step": 139
},
{
"epoch": 0.24,
"grad_norm": 0.1574031412601471,
"learning_rate": 1.9878440006442972e-05,
"loss": 1.3694,
"step": 140
},
{
"epoch": 0.24,
"grad_norm": 0.15436096489429474,
"learning_rate": 1.987560250371237e-05,
"loss": 1.3719,
"step": 141
},
{
"epoch": 0.24,
"grad_norm": 0.15626446902751923,
"learning_rate": 1.987273247210318e-05,
"loss": 1.2816,
"step": 142
},
{
"epoch": 0.24,
"grad_norm": 0.15678633749485016,
"learning_rate": 1.9869829921068892e-05,
"loss": 1.294,
"step": 143
},
{
"epoch": 0.24,
"grad_norm": 0.16196396946907043,
"learning_rate": 1.9866894860170104e-05,
"loss": 1.3519,
"step": 144
},
{
"epoch": 0.24,
"grad_norm": 0.16082963347434998,
"learning_rate": 1.986392729907451e-05,
"loss": 1.2813,
"step": 145
},
{
"epoch": 0.25,
"grad_norm": 0.17131298780441284,
"learning_rate": 1.9860927247556846e-05,
"loss": 1.333,
"step": 146
},
{
"epoch": 0.25,
"grad_norm": 0.14682374894618988,
"learning_rate": 1.9857894715498873e-05,
"loss": 1.3217,
"step": 147
},
{
"epoch": 0.25,
"grad_norm": 0.1597621738910675,
"learning_rate": 1.9854829712889333e-05,
"loss": 1.3088,
"step": 148
},
{
"epoch": 0.25,
"grad_norm": 0.16332492232322693,
"learning_rate": 1.9851732249823924e-05,
"loss": 1.3484,
"step": 149
},
{
"epoch": 0.25,
"grad_norm": 0.15406523644924164,
"learning_rate": 1.9848602336505262e-05,
"loss": 1.3402,
"step": 150
},
{
"epoch": 0.25,
"grad_norm": 0.14765764772891998,
"learning_rate": 1.9845439983242857e-05,
"loss": 1.3036,
"step": 151
},
{
"epoch": 0.26,
"grad_norm": 0.14781777560710907,
"learning_rate": 1.984224520045306e-05,
"loss": 1.2671,
"step": 152
},
{
"epoch": 0.26,
"grad_norm": 0.1507379412651062,
"learning_rate": 1.983901799865905e-05,
"loss": 1.3549,
"step": 153
},
{
"epoch": 0.26,
"grad_norm": 0.16611751914024353,
"learning_rate": 1.9835758388490783e-05,
"loss": 1.3412,
"step": 154
},
{
"epoch": 0.26,
"grad_norm": 0.1613897681236267,
"learning_rate": 1.9832466380684976e-05,
"loss": 1.3356,
"step": 155
},
{
"epoch": 0.26,
"grad_norm": 0.15234436094760895,
"learning_rate": 1.9829141986085046e-05,
"loss": 1.3136,
"step": 156
},
{
"epoch": 0.26,
"grad_norm": 0.15218184888362885,
"learning_rate": 1.9825785215641088e-05,
"loss": 1.3919,
"step": 157
},
{
"epoch": 0.27,
"grad_norm": 0.14976489543914795,
"learning_rate": 1.9822396080409848e-05,
"loss": 1.3189,
"step": 158
},
{
"epoch": 0.27,
"grad_norm": 0.16917568445205688,
"learning_rate": 1.9818974591554668e-05,
"loss": 1.3115,
"step": 159
},
{
"epoch": 0.27,
"grad_norm": 0.16448311507701874,
"learning_rate": 1.981552076034547e-05,
"loss": 1.3304,
"step": 160
},
{
"epoch": 0.27,
"grad_norm": 0.17942431569099426,
"learning_rate": 1.981203459815869e-05,
"loss": 1.3738,
"step": 161
},
{
"epoch": 0.27,
"grad_norm": 0.15682873129844666,
"learning_rate": 1.9808516116477278e-05,
"loss": 1.3997,
"step": 162
},
{
"epoch": 0.27,
"grad_norm": 0.15899716317653656,
"learning_rate": 1.980496532689062e-05,
"loss": 1.3303,
"step": 163
},
{
"epoch": 0.28,
"grad_norm": 0.15109188854694366,
"learning_rate": 1.9801382241094532e-05,
"loss": 1.3162,
"step": 164
},
{
"epoch": 0.28,
"grad_norm": 0.15101797878742218,
"learning_rate": 1.979776687089121e-05,
"loss": 1.2961,
"step": 165
},
{
"epoch": 0.28,
"grad_norm": 0.15570296347141266,
"learning_rate": 1.9794119228189185e-05,
"loss": 1.3783,
"step": 166
},
{
"epoch": 0.28,
"grad_norm": 0.15653923153877258,
"learning_rate": 1.979043932500329e-05,
"loss": 1.3272,
"step": 167
},
{
"epoch": 0.28,
"grad_norm": 0.1806183159351349,
"learning_rate": 1.9786727173454623e-05,
"loss": 1.353,
"step": 168
},
{
"epoch": 0.28,
"grad_norm": 0.16590842604637146,
"learning_rate": 1.9782982785770497e-05,
"loss": 1.3478,
"step": 169
},
{
"epoch": 0.29,
"grad_norm": 0.15598629415035248,
"learning_rate": 1.9779206174284417e-05,
"loss": 1.3925,
"step": 170
},
{
"epoch": 0.29,
"grad_norm": 0.15137127041816711,
"learning_rate": 1.977539735143602e-05,
"loss": 1.3713,
"step": 171
},
{
"epoch": 0.29,
"grad_norm": 0.1489367038011551,
"learning_rate": 1.9771556329771043e-05,
"loss": 1.2586,
"step": 172
},
{
"epoch": 0.29,
"grad_norm": 0.17419755458831787,
"learning_rate": 1.976768312194129e-05,
"loss": 1.3376,
"step": 173
},
{
"epoch": 0.29,
"grad_norm": 0.15900814533233643,
"learning_rate": 1.9763777740704572e-05,
"loss": 1.2978,
"step": 174
},
{
"epoch": 0.29,
"grad_norm": 0.16462573409080505,
"learning_rate": 1.9759840198924674e-05,
"loss": 1.3108,
"step": 175
},
{
"epoch": 0.3,
"grad_norm": 0.3415735960006714,
"learning_rate": 1.9755870509571324e-05,
"loss": 1.2359,
"step": 176
},
{
"epoch": 0.3,
"grad_norm": 0.1518816500902176,
"learning_rate": 1.9751868685720136e-05,
"loss": 1.3125,
"step": 177
},
{
"epoch": 0.3,
"grad_norm": 0.15554927289485931,
"learning_rate": 1.974783474055256e-05,
"loss": 1.263,
"step": 178
},
{
"epoch": 0.3,
"grad_norm": 0.16101278364658356,
"learning_rate": 1.974376868735586e-05,
"loss": 1.3046,
"step": 179
},
{
"epoch": 0.3,
"grad_norm": 0.17000462114810944,
"learning_rate": 1.9739670539523065e-05,
"loss": 1.3244,
"step": 180
},
{
"epoch": 0.3,
"grad_norm": 0.1521628350019455,
"learning_rate": 1.97355403105529e-05,
"loss": 1.3207,
"step": 181
},
{
"epoch": 0.31,
"grad_norm": 0.18179717659950256,
"learning_rate": 1.973137801404978e-05,
"loss": 1.3671,
"step": 182
},
{
"epoch": 0.31,
"grad_norm": 0.16239362955093384,
"learning_rate": 1.972718366372373e-05,
"loss": 1.3399,
"step": 183
},
{
"epoch": 0.31,
"grad_norm": 0.7205573916435242,
"learning_rate": 1.9722957273390377e-05,
"loss": 1.3471,
"step": 184
},
{
"epoch": 0.31,
"grad_norm": 0.193926602602005,
"learning_rate": 1.9718698856970855e-05,
"loss": 1.3089,
"step": 185
},
{
"epoch": 0.31,
"grad_norm": 0.16461925208568573,
"learning_rate": 1.9714408428491817e-05,
"loss": 1.2604,
"step": 186
},
{
"epoch": 0.31,
"grad_norm": 0.15286822617053986,
"learning_rate": 1.9710086002085335e-05,
"loss": 1.3008,
"step": 187
},
{
"epoch": 0.32,
"grad_norm": 0.169696643948555,
"learning_rate": 1.9705731591988892e-05,
"loss": 1.365,
"step": 188
},
{
"epoch": 0.32,
"grad_norm": 0.15688490867614746,
"learning_rate": 1.970134521254532e-05,
"loss": 1.3128,
"step": 189
},
{
"epoch": 0.32,
"grad_norm": 0.15926450490951538,
"learning_rate": 1.9696926878202746e-05,
"loss": 1.2673,
"step": 190
},
{
"epoch": 0.32,
"grad_norm": 0.16111700236797333,
"learning_rate": 1.9692476603514555e-05,
"loss": 1.372,
"step": 191
},
{
"epoch": 0.32,
"grad_norm": 0.651131272315979,
"learning_rate": 1.9687994403139347e-05,
"loss": 1.3098,
"step": 192
},
{
"epoch": 0.32,
"grad_norm": 0.17667540907859802,
"learning_rate": 1.968348029184087e-05,
"loss": 1.3809,
"step": 193
},
{
"epoch": 0.33,
"grad_norm": 0.1771472841501236,
"learning_rate": 1.9678934284487988e-05,
"loss": 1.373,
"step": 194
},
{
"epoch": 0.33,
"grad_norm": 0.14985056221485138,
"learning_rate": 1.9674356396054624e-05,
"loss": 1.2685,
"step": 195
},
{
"epoch": 0.33,
"grad_norm": 0.15360082685947418,
"learning_rate": 1.9669746641619717e-05,
"loss": 1.3862,
"step": 196
},
{
"epoch": 0.33,
"grad_norm": 0.15841621160507202,
"learning_rate": 1.966510503636716e-05,
"loss": 1.3033,
"step": 197
},
{
"epoch": 0.33,
"grad_norm": 0.1631457805633545,
"learning_rate": 1.9660431595585773e-05,
"loss": 1.3405,
"step": 198
},
{
"epoch": 0.33,
"grad_norm": 0.15417036414146423,
"learning_rate": 1.9655726334669216e-05,
"loss": 1.3001,
"step": 199
},
{
"epoch": 0.34,
"grad_norm": 0.1631031632423401,
"learning_rate": 1.965098926911598e-05,
"loss": 1.3334,
"step": 200
},
{
"epoch": 0.34,
"grad_norm": 0.15555128455162048,
"learning_rate": 1.9646220414529305e-05,
"loss": 1.338,
"step": 201
},
{
"epoch": 0.34,
"grad_norm": 0.16267068684101105,
"learning_rate": 1.9641419786617143e-05,
"loss": 1.3814,
"step": 202
},
{
"epoch": 0.34,
"grad_norm": 0.15920913219451904,
"learning_rate": 1.963658740119211e-05,
"loss": 1.3267,
"step": 203
},
{
"epoch": 0.34,
"grad_norm": 0.15759576857089996,
"learning_rate": 1.9631723274171412e-05,
"loss": 1.3323,
"step": 204
},
{
"epoch": 0.34,
"grad_norm": 0.15223319828510284,
"learning_rate": 1.9626827421576816e-05,
"loss": 1.3074,
"step": 205
},
{
"epoch": 0.35,
"grad_norm": 0.16610486805438995,
"learning_rate": 1.9621899859534594e-05,
"loss": 1.3468,
"step": 206
},
{
"epoch": 0.35,
"grad_norm": 0.1576014757156372,
"learning_rate": 1.9616940604275454e-05,
"loss": 1.3221,
"step": 207
},
{
"epoch": 0.35,
"grad_norm": 0.15478689968585968,
"learning_rate": 1.9611949672134506e-05,
"loss": 1.3689,
"step": 208
},
{
"epoch": 0.35,
"grad_norm": 0.16408346593379974,
"learning_rate": 1.9606927079551187e-05,
"loss": 1.3231,
"step": 209
},
{
"epoch": 0.35,
"grad_norm": 0.14498670399188995,
"learning_rate": 1.9601872843069236e-05,
"loss": 1.2924,
"step": 210
},
{
"epoch": 0.35,
"grad_norm": 0.15980365872383118,
"learning_rate": 1.959678697933661e-05,
"loss": 1.3131,
"step": 211
},
{
"epoch": 0.36,
"grad_norm": 0.15579000115394592,
"learning_rate": 1.9591669505105448e-05,
"loss": 1.2685,
"step": 212
},
{
"epoch": 0.36,
"grad_norm": 0.15318329632282257,
"learning_rate": 1.9586520437232e-05,
"loss": 1.4043,
"step": 213
},
{
"epoch": 0.36,
"grad_norm": 0.15656523406505585,
"learning_rate": 1.9581339792676602e-05,
"loss": 1.371,
"step": 214
},
{
"epoch": 0.36,
"grad_norm": 0.15681013464927673,
"learning_rate": 1.957612758850357e-05,
"loss": 1.3142,
"step": 215
},
{
"epoch": 0.36,
"grad_norm": 0.1462039202451706,
"learning_rate": 1.9570883841881204e-05,
"loss": 1.3148,
"step": 216
},
{
"epoch": 0.36,
"grad_norm": 0.17131565511226654,
"learning_rate": 1.9565608570081674e-05,
"loss": 1.3454,
"step": 217
},
{
"epoch": 0.37,
"grad_norm": 0.15185542404651642,
"learning_rate": 1.9560301790481005e-05,
"loss": 1.2898,
"step": 218
},
{
"epoch": 0.37,
"grad_norm": 0.16285483539104462,
"learning_rate": 1.9554963520559003e-05,
"loss": 1.3785,
"step": 219
},
{
"epoch": 0.37,
"grad_norm": 0.15012206137180328,
"learning_rate": 1.9549593777899193e-05,
"loss": 1.2226,
"step": 220
},
{
"epoch": 0.37,
"grad_norm": 0.15580691397190094,
"learning_rate": 1.9544192580188766e-05,
"loss": 1.2281,
"step": 221
},
{
"epoch": 0.37,
"grad_norm": 0.18193548917770386,
"learning_rate": 1.9538759945218534e-05,
"loss": 1.2269,
"step": 222
},
{
"epoch": 0.37,
"grad_norm": 0.14431047439575195,
"learning_rate": 1.9533295890882848e-05,
"loss": 1.3628,
"step": 223
},
{
"epoch": 0.38,
"grad_norm": 0.155436709523201,
"learning_rate": 1.9527800435179548e-05,
"loss": 1.3569,
"step": 224
},
{
"epoch": 0.38,
"grad_norm": 0.15050619840621948,
"learning_rate": 1.952227359620992e-05,
"loss": 1.3103,
"step": 225
},
{
"epoch": 0.38,
"grad_norm": 0.18012791872024536,
"learning_rate": 1.95167153921786e-05,
"loss": 1.3798,
"step": 226
},
{
"epoch": 0.38,
"grad_norm": 0.14088086783885956,
"learning_rate": 1.951112584139356e-05,
"loss": 1.2466,
"step": 227
},
{
"epoch": 0.38,
"grad_norm": 0.17008265852928162,
"learning_rate": 1.9505504962266005e-05,
"loss": 1.3097,
"step": 228
},
{
"epoch": 0.38,
"grad_norm": 0.17221464216709137,
"learning_rate": 1.9499852773310345e-05,
"loss": 1.3605,
"step": 229
},
{
"epoch": 0.39,
"grad_norm": 0.15964262187480927,
"learning_rate": 1.9494169293144106e-05,
"loss": 1.3642,
"step": 230
},
{
"epoch": 0.39,
"grad_norm": 0.15858659148216248,
"learning_rate": 1.94884545404879e-05,
"loss": 1.3011,
"step": 231
},
{
"epoch": 0.39,
"grad_norm": 0.15182028710842133,
"learning_rate": 1.9482708534165337e-05,
"loss": 1.3169,
"step": 232
},
{
"epoch": 0.39,
"grad_norm": 0.17947536706924438,
"learning_rate": 1.947693129310297e-05,
"loss": 1.3268,
"step": 233
},
{
"epoch": 0.39,
"grad_norm": 0.1559893637895584,
"learning_rate": 1.9471122836330236e-05,
"loss": 1.3257,
"step": 234
},
{
"epoch": 0.39,
"grad_norm": 0.14727315306663513,
"learning_rate": 1.9465283182979396e-05,
"loss": 1.3423,
"step": 235
},
{
"epoch": 0.4,
"grad_norm": 0.15041925013065338,
"learning_rate": 1.9459412352285467e-05,
"loss": 1.2961,
"step": 236
},
{
"epoch": 0.4,
"grad_norm": 0.15340714156627655,
"learning_rate": 1.9453510363586155e-05,
"loss": 1.3252,
"step": 237
},
{
"epoch": 0.4,
"grad_norm": 0.1636042594909668,
"learning_rate": 1.9447577236321806e-05,
"loss": 1.3028,
"step": 238
},
{
"epoch": 0.4,
"grad_norm": 0.15641403198242188,
"learning_rate": 1.9441612990035324e-05,
"loss": 1.3845,
"step": 239
},
{
"epoch": 0.4,
"grad_norm": 0.1722465455532074,
"learning_rate": 1.943561764437212e-05,
"loss": 1.3624,
"step": 240
},
{
"epoch": 0.4,
"grad_norm": 0.1619051992893219,
"learning_rate": 1.9429591219080033e-05,
"loss": 1.3246,
"step": 241
},
{
"epoch": 0.41,
"grad_norm": 0.14947140216827393,
"learning_rate": 1.9423533734009287e-05,
"loss": 1.3551,
"step": 242
},
{
"epoch": 0.41,
"grad_norm": 0.18179036676883698,
"learning_rate": 1.94174452091124e-05,
"loss": 1.3495,
"step": 243
},
{
"epoch": 0.41,
"grad_norm": 0.15705548226833344,
"learning_rate": 1.9411325664444143e-05,
"loss": 1.2849,
"step": 244
},
{
"epoch": 0.41,
"grad_norm": 0.16060465574264526,
"learning_rate": 1.9405175120161453e-05,
"loss": 1.3548,
"step": 245
},
{
"epoch": 0.41,
"grad_norm": 0.16014854609966278,
"learning_rate": 1.939899359652338e-05,
"loss": 1.2388,
"step": 246
},
{
"epoch": 0.41,
"grad_norm": 0.1549297571182251,
"learning_rate": 1.939278111389101e-05,
"loss": 1.4098,
"step": 247
},
{
"epoch": 0.42,
"grad_norm": 0.15960820019245148,
"learning_rate": 1.938653769272741e-05,
"loss": 1.4058,
"step": 248
},
{
"epoch": 0.42,
"grad_norm": 0.2083292156457901,
"learning_rate": 1.9380263353597553e-05,
"loss": 1.3557,
"step": 249
},
{
"epoch": 0.42,
"grad_norm": 0.1558627188205719,
"learning_rate": 1.937395811716825e-05,
"loss": 1.3245,
"step": 250
},
{
"epoch": 0.42,
"grad_norm": 0.17779141664505005,
"learning_rate": 1.936762200420808e-05,
"loss": 1.2765,
"step": 251
},
{
"epoch": 0.42,
"grad_norm": 0.15625056624412537,
"learning_rate": 1.9361255035587338e-05,
"loss": 1.3106,
"step": 252
},
{
"epoch": 0.43,
"grad_norm": 0.15355145931243896,
"learning_rate": 1.9354857232277938e-05,
"loss": 1.3444,
"step": 253
},
{
"epoch": 0.43,
"grad_norm": 0.16163015365600586,
"learning_rate": 1.934842861535337e-05,
"loss": 1.3189,
"step": 254
},
{
"epoch": 0.43,
"grad_norm": 0.16402465105056763,
"learning_rate": 1.9341969205988605e-05,
"loss": 1.3453,
"step": 255
},
{
"epoch": 0.43,
"grad_norm": 0.1472061276435852,
"learning_rate": 1.933547902546006e-05,
"loss": 1.3715,
"step": 256
},
{
"epoch": 0.43,
"grad_norm": 0.19225962460041046,
"learning_rate": 1.93289580951455e-05,
"loss": 1.2784,
"step": 257
},
{
"epoch": 0.43,
"grad_norm": 0.15546953678131104,
"learning_rate": 1.9322406436523965e-05,
"loss": 1.3568,
"step": 258
},
{
"epoch": 0.44,
"grad_norm": 0.1593058705329895,
"learning_rate": 1.931582407117573e-05,
"loss": 1.3299,
"step": 259
},
{
"epoch": 0.44,
"grad_norm": 0.15713098645210266,
"learning_rate": 1.9309211020782192e-05,
"loss": 1.3397,
"step": 260
},
{
"epoch": 0.44,
"grad_norm": 0.152445986866951,
"learning_rate": 1.9302567307125836e-05,
"loss": 1.3449,
"step": 261
},
{
"epoch": 0.44,
"grad_norm": 0.15717758238315582,
"learning_rate": 1.9295892952090143e-05,
"loss": 1.3241,
"step": 262
},
{
"epoch": 0.44,
"grad_norm": 0.1531231552362442,
"learning_rate": 1.928918797765952e-05,
"loss": 1.3516,
"step": 263
},
{
"epoch": 0.44,
"grad_norm": 0.15562032163143158,
"learning_rate": 1.9282452405919235e-05,
"loss": 1.343,
"step": 264
},
{
"epoch": 0.45,
"grad_norm": 0.15916050970554352,
"learning_rate": 1.9275686259055334e-05,
"loss": 1.3511,
"step": 265
},
{
"epoch": 0.45,
"grad_norm": 0.17035844922065735,
"learning_rate": 1.9268889559354576e-05,
"loss": 1.3714,
"step": 266
},
{
"epoch": 0.45,
"grad_norm": 0.1496887505054474,
"learning_rate": 1.9262062329204355e-05,
"loss": 1.3194,
"step": 267
},
{
"epoch": 0.45,
"grad_norm": 0.1466931402683258,
"learning_rate": 1.9255204591092634e-05,
"loss": 1.3235,
"step": 268
},
{
"epoch": 0.45,
"grad_norm": 0.1472676545381546,
"learning_rate": 1.9248316367607858e-05,
"loss": 1.2518,
"step": 269
},
{
"epoch": 0.45,
"grad_norm": 0.16347859799861908,
"learning_rate": 1.9241397681438887e-05,
"loss": 1.3375,
"step": 270
},
{
"epoch": 0.46,
"grad_norm": 0.1519639641046524,
"learning_rate": 1.923444855537493e-05,
"loss": 1.3081,
"step": 271
},
{
"epoch": 0.46,
"grad_norm": 0.15689344704151154,
"learning_rate": 1.9227469012305448e-05,
"loss": 1.3512,
"step": 272
},
{
"epoch": 0.46,
"grad_norm": 0.15142163634300232,
"learning_rate": 1.9220459075220095e-05,
"loss": 1.3023,
"step": 273
},
{
"epoch": 0.46,
"grad_norm": 0.1454487144947052,
"learning_rate": 1.9213418767208648e-05,
"loss": 1.3305,
"step": 274
},
{
"epoch": 0.46,
"grad_norm": 0.15581363439559937,
"learning_rate": 1.9206348111460914e-05,
"loss": 1.3362,
"step": 275
},
{
"epoch": 0.46,
"grad_norm": 0.1467771828174591,
"learning_rate": 1.919924713126666e-05,
"loss": 1.2882,
"step": 276
},
{
"epoch": 0.47,
"grad_norm": 0.1589370220899582,
"learning_rate": 1.919211585001554e-05,
"loss": 1.3969,
"step": 277
},
{
"epoch": 0.47,
"grad_norm": 0.1439632922410965,
"learning_rate": 1.9184954291197013e-05,
"loss": 1.3405,
"step": 278
},
{
"epoch": 0.47,
"grad_norm": 0.14654165506362915,
"learning_rate": 1.9177762478400276e-05,
"loss": 1.3628,
"step": 279
},
{
"epoch": 0.47,
"grad_norm": 0.15207156538963318,
"learning_rate": 1.9170540435314167e-05,
"loss": 1.3192,
"step": 280
},
{
"epoch": 0.47,
"grad_norm": 0.1550934612751007,
"learning_rate": 1.916328818572711e-05,
"loss": 1.2997,
"step": 281
},
{
"epoch": 0.47,
"grad_norm": 0.15896658599376678,
"learning_rate": 1.9156005753527013e-05,
"loss": 1.313,
"step": 282
},
{
"epoch": 0.48,
"grad_norm": 0.1461910903453827,
"learning_rate": 1.914869316270121e-05,
"loss": 1.2686,
"step": 283
},
{
"epoch": 0.48,
"grad_norm": 0.2558024823665619,
"learning_rate": 1.9141350437336374e-05,
"loss": 1.3308,
"step": 284
},
{
"epoch": 0.48,
"grad_norm": 0.16172054409980774,
"learning_rate": 1.9133977601618433e-05,
"loss": 1.3029,
"step": 285
},
{
"epoch": 0.48,
"grad_norm": 0.143438458442688,
"learning_rate": 1.912657467983249e-05,
"loss": 1.3168,
"step": 286
},
{
"epoch": 0.48,
"grad_norm": 0.1616157591342926,
"learning_rate": 1.911914169636276e-05,
"loss": 1.3542,
"step": 287
},
{
"epoch": 0.48,
"grad_norm": 0.1510392427444458,
"learning_rate": 1.9111678675692468e-05,
"loss": 1.2391,
"step": 288
},
{
"epoch": 0.49,
"grad_norm": 0.15160228312015533,
"learning_rate": 1.9104185642403775e-05,
"loss": 1.3301,
"step": 289
},
{
"epoch": 0.49,
"grad_norm": 0.15298177301883698,
"learning_rate": 1.909666262117771e-05,
"loss": 1.3385,
"step": 290
},
{
"epoch": 0.49,
"grad_norm": 0.14593380689620972,
"learning_rate": 1.908910963679407e-05,
"loss": 1.3486,
"step": 291
},
{
"epoch": 0.49,
"grad_norm": 0.23284722864627838,
"learning_rate": 1.9081526714131345e-05,
"loss": 1.3028,
"step": 292
},
{
"epoch": 0.49,
"grad_norm": 0.1577797383069992,
"learning_rate": 1.9073913878166642e-05,
"loss": 1.2827,
"step": 293
},
{
"epoch": 0.49,
"grad_norm": 0.15135832130908966,
"learning_rate": 1.9066271153975602e-05,
"loss": 1.3179,
"step": 294
},
{
"epoch": 0.5,
"grad_norm": 0.1568109542131424,
"learning_rate": 1.90585985667323e-05,
"loss": 1.2906,
"step": 295
},
{
"epoch": 0.5,
"grad_norm": 0.1509489119052887,
"learning_rate": 1.9050896141709198e-05,
"loss": 1.3785,
"step": 296
},
{
"epoch": 0.5,
"grad_norm": 0.14765121042728424,
"learning_rate": 1.904316390427702e-05,
"loss": 1.2586,
"step": 297
},
{
"epoch": 0.5,
"grad_norm": 0.14666473865509033,
"learning_rate": 1.9035401879904687e-05,
"loss": 1.3265,
"step": 298
},
{
"epoch": 0.5,
"grad_norm": 0.1482343077659607,
"learning_rate": 1.902761009415925e-05,
"loss": 1.3288,
"step": 299
},
{
"epoch": 0.5,
"grad_norm": 0.15392710268497467,
"learning_rate": 1.901978857270578e-05,
"loss": 1.3739,
"step": 300
},
{
"epoch": 0.51,
"grad_norm": 0.15929271280765533,
"learning_rate": 1.9011937341307292e-05,
"loss": 1.4441,
"step": 301
},
{
"epoch": 0.51,
"grad_norm": 0.15494123101234436,
"learning_rate": 1.9004056425824672e-05,
"loss": 1.2291,
"step": 302
},
{
"epoch": 0.51,
"grad_norm": 0.15912345051765442,
"learning_rate": 1.899614585221656e-05,
"loss": 1.3614,
"step": 303
},
{
"epoch": 0.51,
"grad_norm": 0.14854875206947327,
"learning_rate": 1.898820564653931e-05,
"loss": 1.3281,
"step": 304
},
{
"epoch": 0.51,
"grad_norm": 0.1518874615430832,
"learning_rate": 1.898023583494687e-05,
"loss": 1.3495,
"step": 305
},
{
"epoch": 0.51,
"grad_norm": 0.15471160411834717,
"learning_rate": 1.8972236443690694e-05,
"loss": 1.3025,
"step": 306
},
{
"epoch": 0.52,
"grad_norm": 0.14632518589496613,
"learning_rate": 1.8964207499119686e-05,
"loss": 1.2828,
"step": 307
},
{
"epoch": 0.52,
"grad_norm": 0.14436408877372742,
"learning_rate": 1.8956149027680085e-05,
"loss": 1.3215,
"step": 308
},
{
"epoch": 0.52,
"grad_norm": 0.1432180106639862,
"learning_rate": 1.8948061055915395e-05,
"loss": 1.3367,
"step": 309
},
{
"epoch": 0.52,
"grad_norm": 0.15100084245204926,
"learning_rate": 1.893994361046628e-05,
"loss": 1.3321,
"step": 310
},
{
"epoch": 0.52,
"grad_norm": 0.1476551741361618,
"learning_rate": 1.8931796718070487e-05,
"loss": 1.3087,
"step": 311
},
{
"epoch": 0.52,
"grad_norm": 0.15668947994709015,
"learning_rate": 1.8923620405562768e-05,
"loss": 1.4232,
"step": 312
},
{
"epoch": 0.53,
"grad_norm": 0.14729134738445282,
"learning_rate": 1.891541469987477e-05,
"loss": 1.3333,
"step": 313
},
{
"epoch": 0.53,
"grad_norm": 0.15708036720752716,
"learning_rate": 1.8907179628034957e-05,
"loss": 1.2837,
"step": 314
},
{
"epoch": 0.53,
"grad_norm": 0.14895762503147125,
"learning_rate": 1.8898915217168528e-05,
"loss": 1.3339,
"step": 315
},
{
"epoch": 0.53,
"grad_norm": 0.16167424619197845,
"learning_rate": 1.8890621494497322e-05,
"loss": 1.357,
"step": 316
},
{
"epoch": 0.53,
"grad_norm": 0.1540985405445099,
"learning_rate": 1.888229848733972e-05,
"loss": 1.3907,
"step": 317
},
{
"epoch": 0.53,
"grad_norm": 0.16398704051971436,
"learning_rate": 1.8873946223110564e-05,
"loss": 1.3349,
"step": 318
},
{
"epoch": 0.54,
"grad_norm": 0.15464505553245544,
"learning_rate": 1.8865564729321062e-05,
"loss": 1.2991,
"step": 319
},
{
"epoch": 0.54,
"grad_norm": 0.1430719792842865,
"learning_rate": 1.885715403357871e-05,
"loss": 1.3175,
"step": 320
},
{
"epoch": 0.54,
"grad_norm": 0.1606094092130661,
"learning_rate": 1.8848714163587178e-05,
"loss": 1.3142,
"step": 321
},
{
"epoch": 0.54,
"grad_norm": 0.1566372811794281,
"learning_rate": 1.8840245147146245e-05,
"loss": 1.3133,
"step": 322
},
{
"epoch": 0.54,
"grad_norm": 0.15060843527317047,
"learning_rate": 1.8831747012151687e-05,
"loss": 1.3333,
"step": 323
},
{
"epoch": 0.54,
"grad_norm": 0.15068593621253967,
"learning_rate": 1.882321978659519e-05,
"loss": 1.3938,
"step": 324
},
{
"epoch": 0.55,
"grad_norm": 0.16211798787117004,
"learning_rate": 1.8814663498564267e-05,
"loss": 1.3081,
"step": 325
},
{
"epoch": 0.55,
"grad_norm": 0.1806996911764145,
"learning_rate": 1.8806078176242158e-05,
"loss": 1.273,
"step": 326
},
{
"epoch": 0.55,
"grad_norm": 0.14823010563850403,
"learning_rate": 1.8797463847907732e-05,
"loss": 1.2599,
"step": 327
},
{
"epoch": 0.55,
"grad_norm": 0.15183423459529877,
"learning_rate": 1.8788820541935403e-05,
"loss": 1.3626,
"step": 328
},
{
"epoch": 0.55,
"grad_norm": 0.15430210530757904,
"learning_rate": 1.8780148286795037e-05,
"loss": 1.3076,
"step": 329
},
{
"epoch": 0.55,
"grad_norm": 0.158985897898674,
"learning_rate": 1.8771447111051854e-05,
"loss": 1.363,
"step": 330
},
{
"epoch": 0.56,
"grad_norm": 0.15100038051605225,
"learning_rate": 1.8762717043366324e-05,
"loss": 1.2647,
"step": 331
},
{
"epoch": 0.56,
"grad_norm": 0.14728271961212158,
"learning_rate": 1.87539581124941e-05,
"loss": 1.323,
"step": 332
},
{
"epoch": 0.56,
"grad_norm": 0.18443995714187622,
"learning_rate": 1.874517034728589e-05,
"loss": 1.3526,
"step": 333
},
{
"epoch": 0.56,
"grad_norm": 0.1705956757068634,
"learning_rate": 1.873635377668739e-05,
"loss": 1.3675,
"step": 334
},
{
"epoch": 0.56,
"grad_norm": 0.14912624657154083,
"learning_rate": 1.8727508429739172e-05,
"loss": 1.303,
"step": 335
},
{
"epoch": 0.56,
"grad_norm": 0.15988433361053467,
"learning_rate": 1.87186343355766e-05,
"loss": 1.3503,
"step": 336
},
{
"epoch": 0.57,
"grad_norm": 0.1975414901971817,
"learning_rate": 1.870973152342971e-05,
"loss": 1.3636,
"step": 337
},
{
"epoch": 0.57,
"grad_norm": 0.1463659554719925,
"learning_rate": 1.870080002262315e-05,
"loss": 1.2705,
"step": 338
},
{
"epoch": 0.57,
"grad_norm": 0.14839382469654083,
"learning_rate": 1.869183986257606e-05,
"loss": 1.2638,
"step": 339
},
{
"epoch": 0.57,
"grad_norm": 0.14624640345573425,
"learning_rate": 1.868285107280197e-05,
"loss": 1.2893,
"step": 340
},
{
"epoch": 0.57,
"grad_norm": 0.1559121459722519,
"learning_rate": 1.8673833682908724e-05,
"loss": 1.3034,
"step": 341
},
{
"epoch": 0.57,
"grad_norm": 0.1462070792913437,
"learning_rate": 1.866478772259836e-05,
"loss": 1.3006,
"step": 342
},
{
"epoch": 0.58,
"grad_norm": 0.15308046340942383,
"learning_rate": 1.865571322166704e-05,
"loss": 1.3051,
"step": 343
},
{
"epoch": 0.58,
"grad_norm": 0.14594431221485138,
"learning_rate": 1.8646610210004912e-05,
"loss": 1.2814,
"step": 344
},
{
"epoch": 0.58,
"grad_norm": 0.149860218167305,
"learning_rate": 1.8637478717596056e-05,
"loss": 1.3348,
"step": 345
},
{
"epoch": 0.58,
"grad_norm": 0.14472465217113495,
"learning_rate": 1.8628318774518347e-05,
"loss": 1.3192,
"step": 346
},
{
"epoch": 0.58,
"grad_norm": 0.14928650856018066,
"learning_rate": 1.8619130410943386e-05,
"loss": 1.3442,
"step": 347
},
{
"epoch": 0.58,
"grad_norm": 0.1469736397266388,
"learning_rate": 1.8609913657136375e-05,
"loss": 1.4005,
"step": 348
},
{
"epoch": 0.59,
"grad_norm": 0.16151031851768494,
"learning_rate": 1.860066854345604e-05,
"loss": 1.3263,
"step": 349
},
{
"epoch": 0.59,
"grad_norm": 0.1534898281097412,
"learning_rate": 1.859139510035451e-05,
"loss": 1.3609,
"step": 350
},
{
"epoch": 0.59,
"grad_norm": 0.1836443841457367,
"learning_rate": 1.8582093358377245e-05,
"loss": 1.3158,
"step": 351
},
{
"epoch": 0.59,
"grad_norm": 0.14370837807655334,
"learning_rate": 1.8572763348162893e-05,
"loss": 1.326,
"step": 352
},
{
"epoch": 0.59,
"grad_norm": 0.33690914511680603,
"learning_rate": 1.8563405100443233e-05,
"loss": 1.3528,
"step": 353
},
{
"epoch": 0.59,
"grad_norm": 0.1471925675868988,
"learning_rate": 1.8554018646043045e-05,
"loss": 1.2951,
"step": 354
},
{
"epoch": 0.6,
"grad_norm": 0.1465420424938202,
"learning_rate": 1.8544604015880024e-05,
"loss": 1.3042,
"step": 355
},
{
"epoch": 0.6,
"grad_norm": 0.15233609080314636,
"learning_rate": 1.8535161240964666e-05,
"loss": 1.3348,
"step": 356
},
{
"epoch": 0.6,
"grad_norm": 0.14329326152801514,
"learning_rate": 1.852569035240018e-05,
"loss": 1.2539,
"step": 357
},
{
"epoch": 0.6,
"grad_norm": 0.1572476178407669,
"learning_rate": 1.8516191381382367e-05,
"loss": 1.3509,
"step": 358
},
{
"epoch": 0.6,
"grad_norm": 0.14190582931041718,
"learning_rate": 1.8506664359199536e-05,
"loss": 1.2363,
"step": 359
},
{
"epoch": 0.6,
"grad_norm": 0.1511862426996231,
"learning_rate": 1.8497109317232386e-05,
"loss": 1.2926,
"step": 360
},
{
"epoch": 0.61,
"grad_norm": 0.15441784262657166,
"learning_rate": 1.8487526286953922e-05,
"loss": 1.3389,
"step": 361
},
{
"epoch": 0.61,
"grad_norm": 0.1593203991651535,
"learning_rate": 1.8477915299929317e-05,
"loss": 1.3323,
"step": 362
},
{
"epoch": 0.61,
"grad_norm": 0.17237433791160583,
"learning_rate": 1.8468276387815853e-05,
"loss": 1.2756,
"step": 363
},
{
"epoch": 0.61,
"grad_norm": 0.15212436020374298,
"learning_rate": 1.8458609582362783e-05,
"loss": 1.3487,
"step": 364
},
{
"epoch": 0.61,
"grad_norm": 0.1509844809770584,
"learning_rate": 1.8448914915411227e-05,
"loss": 1.2997,
"step": 365
},
{
"epoch": 0.61,
"grad_norm": 0.1497824341058731,
"learning_rate": 1.8439192418894096e-05,
"loss": 1.2858,
"step": 366
},
{
"epoch": 0.62,
"grad_norm": 0.1543625295162201,
"learning_rate": 1.8429442124835955e-05,
"loss": 1.2635,
"step": 367
},
{
"epoch": 0.62,
"grad_norm": 0.14757508039474487,
"learning_rate": 1.8419664065352933e-05,
"loss": 1.2856,
"step": 368
},
{
"epoch": 0.62,
"grad_norm": 0.23889374732971191,
"learning_rate": 1.840985827265262e-05,
"loss": 1.2669,
"step": 369
},
{
"epoch": 0.62,
"grad_norm": 0.14553211629390717,
"learning_rate": 1.8400024779033944e-05,
"loss": 1.3678,
"step": 370
},
{
"epoch": 0.62,
"grad_norm": 0.15173819661140442,
"learning_rate": 1.839016361688709e-05,
"loss": 1.3373,
"step": 371
},
{
"epoch": 0.62,
"grad_norm": 0.14966481924057007,
"learning_rate": 1.8380274818693366e-05,
"loss": 1.3075,
"step": 372
},
{
"epoch": 0.63,
"grad_norm": 0.15706394612789154,
"learning_rate": 1.8370358417025116e-05,
"loss": 1.3383,
"step": 373
},
{
"epoch": 0.63,
"grad_norm": 0.2577916979789734,
"learning_rate": 1.8360414444545608e-05,
"loss": 1.3489,
"step": 374
},
{
"epoch": 0.63,
"grad_norm": 0.24489471316337585,
"learning_rate": 1.835044293400892e-05,
"loss": 1.2881,
"step": 375
},
{
"epoch": 0.63,
"grad_norm": 0.14937125146389008,
"learning_rate": 1.8340443918259842e-05,
"loss": 1.393,
"step": 376
},
{
"epoch": 0.63,
"grad_norm": 0.14557434618473053,
"learning_rate": 1.8330417430233756e-05,
"loss": 1.3315,
"step": 377
},
{
"epoch": 0.64,
"grad_norm": 0.22447769343852997,
"learning_rate": 1.8320363502956533e-05,
"loss": 1.331,
"step": 378
},
{
"epoch": 0.64,
"grad_norm": 0.1532454937696457,
"learning_rate": 1.8310282169544436e-05,
"loss": 1.3164,
"step": 379
},
{
"epoch": 0.64,
"grad_norm": 0.15634536743164062,
"learning_rate": 1.8300173463203986e-05,
"loss": 1.4203,
"step": 380
},
{
"epoch": 0.64,
"grad_norm": 0.166867196559906,
"learning_rate": 1.829003741723188e-05,
"loss": 1.3482,
"step": 381
},
{
"epoch": 0.64,
"grad_norm": 0.14226773381233215,
"learning_rate": 1.827987406501486e-05,
"loss": 1.341,
"step": 382
},
{
"epoch": 0.64,
"grad_norm": 0.14897264540195465,
"learning_rate": 1.826968344002961e-05,
"loss": 1.3164,
"step": 383
},
{
"epoch": 0.65,
"grad_norm": 0.14921049773693085,
"learning_rate": 1.825946557584265e-05,
"loss": 1.3575,
"step": 384
},
{
"epoch": 0.65,
"grad_norm": 0.1691223531961441,
"learning_rate": 1.8249220506110225e-05,
"loss": 1.3106,
"step": 385
},
{
"epoch": 0.65,
"grad_norm": 0.14653781056404114,
"learning_rate": 1.8238948264578185e-05,
"loss": 1.3769,
"step": 386
},
{
"epoch": 0.65,
"grad_norm": 0.1483498364686966,
"learning_rate": 1.8228648885081886e-05,
"loss": 1.3134,
"step": 387
},
{
"epoch": 0.65,
"grad_norm": 0.17251162230968475,
"learning_rate": 1.8218322401546063e-05,
"loss": 1.28,
"step": 388
},
{
"epoch": 0.65,
"grad_norm": 0.15499843657016754,
"learning_rate": 1.820796884798474e-05,
"loss": 1.2516,
"step": 389
},
{
"epoch": 0.66,
"grad_norm": 0.1524689644575119,
"learning_rate": 1.81975882585011e-05,
"loss": 1.2891,
"step": 390
},
{
"epoch": 0.66,
"grad_norm": 0.18753460049629211,
"learning_rate": 1.818718066728738e-05,
"loss": 1.3317,
"step": 391
},
{
"epoch": 0.66,
"grad_norm": 0.15421994030475616,
"learning_rate": 1.8176746108624756e-05,
"loss": 1.4153,
"step": 392
},
{
"epoch": 0.66,
"grad_norm": 0.17222876846790314,
"learning_rate": 1.816628461688323e-05,
"loss": 1.3106,
"step": 393
},
{
"epoch": 0.66,
"grad_norm": 0.16242216527462006,
"learning_rate": 1.8155796226521522e-05,
"loss": 1.3807,
"step": 394
},
{
"epoch": 0.66,
"grad_norm": 0.1497281938791275,
"learning_rate": 1.8145280972086946e-05,
"loss": 1.3209,
"step": 395
},
{
"epoch": 0.67,
"grad_norm": 0.15559233725070953,
"learning_rate": 1.8134738888215308e-05,
"loss": 1.3251,
"step": 396
},
{
"epoch": 0.67,
"grad_norm": 0.2383277863264084,
"learning_rate": 1.8124170009630788e-05,
"loss": 1.2843,
"step": 397
},
{
"epoch": 0.67,
"grad_norm": 0.1535133570432663,
"learning_rate": 1.8113574371145817e-05,
"loss": 1.3841,
"step": 398
},
{
"epoch": 0.67,
"grad_norm": 0.15431390702724457,
"learning_rate": 1.810295200766097e-05,
"loss": 1.2803,
"step": 399
},
{
"epoch": 0.67,
"grad_norm": 0.1573055535554886,
"learning_rate": 1.8092302954164863e-05,
"loss": 1.3007,
"step": 400
},
{
"epoch": 0.67,
"grad_norm": 0.16370755434036255,
"learning_rate": 1.808162724573401e-05,
"loss": 1.2791,
"step": 401
},
{
"epoch": 0.68,
"grad_norm": 0.20099897682666779,
"learning_rate": 1.8070924917532725e-05,
"loss": 1.2399,
"step": 402
},
{
"epoch": 0.68,
"grad_norm": 0.29741019010543823,
"learning_rate": 1.8060196004813013e-05,
"loss": 1.2888,
"step": 403
},
{
"epoch": 0.68,
"grad_norm": 0.14683599770069122,
"learning_rate": 1.8049440542914438e-05,
"loss": 1.301,
"step": 404
},
{
"epoch": 0.68,
"grad_norm": 0.14803573489189148,
"learning_rate": 1.803865856726402e-05,
"loss": 1.2875,
"step": 405
},
{
"epoch": 0.68,
"grad_norm": 0.14676880836486816,
"learning_rate": 1.8027850113376092e-05,
"loss": 1.3015,
"step": 406
},
{
"epoch": 0.68,
"grad_norm": 0.14843180775642395,
"learning_rate": 1.801701521685223e-05,
"loss": 1.3201,
"step": 407
},
{
"epoch": 0.69,
"grad_norm": 0.15639850497245789,
"learning_rate": 1.8006153913381098e-05,
"loss": 1.3571,
"step": 408
},
{
"epoch": 0.69,
"grad_norm": 0.1461266577243805,
"learning_rate": 1.7995266238738324e-05,
"loss": 1.3085,
"step": 409
},
{
"epoch": 0.69,
"grad_norm": 0.1541517972946167,
"learning_rate": 1.798435222878642e-05,
"loss": 1.3525,
"step": 410
},
{
"epoch": 0.69,
"grad_norm": 0.15596306324005127,
"learning_rate": 1.797341191947464e-05,
"loss": 1.3209,
"step": 411
},
{
"epoch": 0.69,
"grad_norm": 0.1753711998462677,
"learning_rate": 1.7962445346838856e-05,
"loss": 1.2966,
"step": 412
},
{
"epoch": 0.69,
"grad_norm": 0.16760778427124023,
"learning_rate": 1.7951452547001454e-05,
"loss": 1.2777,
"step": 413
},
{
"epoch": 0.7,
"grad_norm": 0.14851422607898712,
"learning_rate": 1.794043355617121e-05,
"loss": 1.2955,
"step": 414
},
{
"epoch": 0.7,
"grad_norm": 0.14428943395614624,
"learning_rate": 1.792938841064316e-05,
"loss": 1.322,
"step": 415
},
{
"epoch": 0.7,
"grad_norm": 0.147806778550148,
"learning_rate": 1.79183171467985e-05,
"loss": 1.4356,
"step": 416
},
{
"epoch": 0.7,
"grad_norm": 0.14978933334350586,
"learning_rate": 1.7907219801104458e-05,
"loss": 1.3197,
"step": 417
},
{
"epoch": 0.7,
"grad_norm": 0.15199780464172363,
"learning_rate": 1.789609641011416e-05,
"loss": 1.2553,
"step": 418
},
{
"epoch": 0.7,
"grad_norm": 0.18434306979179382,
"learning_rate": 1.7884947010466533e-05,
"loss": 1.2829,
"step": 419
},
{
"epoch": 0.71,
"grad_norm": 0.1494559347629547,
"learning_rate": 1.7873771638886163e-05,
"loss": 1.3004,
"step": 420
},
{
"epoch": 0.71,
"grad_norm": 0.15020005404949188,
"learning_rate": 1.7862570332183193e-05,
"loss": 1.2689,
"step": 421
},
{
"epoch": 0.71,
"grad_norm": 0.21198858320713043,
"learning_rate": 1.785134312725319e-05,
"loss": 1.2978,
"step": 422
},
{
"epoch": 0.71,
"grad_norm": 0.14668674767017365,
"learning_rate": 1.7840090061077023e-05,
"loss": 1.2458,
"step": 423
},
{
"epoch": 0.71,
"grad_norm": 0.15473228693008423,
"learning_rate": 1.782881117072075e-05,
"loss": 1.3109,
"step": 424
},
{
"epoch": 0.71,
"grad_norm": 0.15694057941436768,
"learning_rate": 1.7817506493335485e-05,
"loss": 1.2955,
"step": 425
},
{
"epoch": 0.72,
"grad_norm": 0.14998142421245575,
"learning_rate": 1.7806176066157285e-05,
"loss": 1.3279,
"step": 426
},
{
"epoch": 0.72,
"grad_norm": 0.22829650342464447,
"learning_rate": 1.7794819926507013e-05,
"loss": 1.3223,
"step": 427
},
{
"epoch": 0.72,
"grad_norm": 0.15419165790081024,
"learning_rate": 1.7783438111790247e-05,
"loss": 1.2545,
"step": 428
},
{
"epoch": 0.72,
"grad_norm": 0.14832891523838043,
"learning_rate": 1.7772030659497112e-05,
"loss": 1.3429,
"step": 429
},
{
"epoch": 0.72,
"grad_norm": 0.15142028033733368,
"learning_rate": 1.7760597607202195e-05,
"loss": 1.229,
"step": 430
},
{
"epoch": 0.72,
"grad_norm": 0.1493011862039566,
"learning_rate": 1.7749138992564395e-05,
"loss": 1.3663,
"step": 431
},
{
"epoch": 0.73,
"grad_norm": 0.19301459193229675,
"learning_rate": 1.7737654853326818e-05,
"loss": 1.2877,
"step": 432
},
{
"epoch": 0.73,
"grad_norm": 0.2175184041261673,
"learning_rate": 1.7726145227316642e-05,
"loss": 1.2534,
"step": 433
},
{
"epoch": 0.73,
"grad_norm": 0.170121967792511,
"learning_rate": 1.7714610152444994e-05,
"loss": 1.3817,
"step": 434
},
{
"epoch": 0.73,
"grad_norm": 0.14693012833595276,
"learning_rate": 1.7703049666706826e-05,
"loss": 1.3544,
"step": 435
},
{
"epoch": 0.73,
"grad_norm": 0.15702098608016968,
"learning_rate": 1.7691463808180797e-05,
"loss": 1.3407,
"step": 436
},
{
"epoch": 0.73,
"grad_norm": 0.1530607044696808,
"learning_rate": 1.7679852615029124e-05,
"loss": 1.3075,
"step": 437
},
{
"epoch": 0.74,
"grad_norm": 0.15074275434017181,
"learning_rate": 1.7668216125497487e-05,
"loss": 1.415,
"step": 438
},
{
"epoch": 0.74,
"grad_norm": 0.17382702231407166,
"learning_rate": 1.7656554377914887e-05,
"loss": 1.2609,
"step": 439
},
{
"epoch": 0.74,
"grad_norm": 0.15414436161518097,
"learning_rate": 1.7644867410693517e-05,
"loss": 1.3245,
"step": 440
},
{
"epoch": 0.74,
"grad_norm": 0.14748679101467133,
"learning_rate": 1.7633155262328644e-05,
"loss": 1.3218,
"step": 441
},
{
"epoch": 0.74,
"grad_norm": 0.14213520288467407,
"learning_rate": 1.7621417971398472e-05,
"loss": 1.3097,
"step": 442
},
{
"epoch": 0.74,
"grad_norm": 0.1499529927968979,
"learning_rate": 1.760965557656403e-05,
"loss": 1.3159,
"step": 443
},
{
"epoch": 0.75,
"grad_norm": 0.15373371541500092,
"learning_rate": 1.7597868116569036e-05,
"loss": 1.3102,
"step": 444
},
{
"epoch": 0.75,
"grad_norm": 0.1485459953546524,
"learning_rate": 1.7586055630239755e-05,
"loss": 1.3018,
"step": 445
},
{
"epoch": 0.75,
"grad_norm": 0.15774905681610107,
"learning_rate": 1.7574218156484896e-05,
"loss": 1.2975,
"step": 446
},
{
"epoch": 0.75,
"grad_norm": 0.1483812928199768,
"learning_rate": 1.7562355734295478e-05,
"loss": 1.2975,
"step": 447
},
{
"epoch": 0.75,
"grad_norm": 0.16566769778728485,
"learning_rate": 1.7550468402744685e-05,
"loss": 1.3145,
"step": 448
},
{
"epoch": 0.75,
"grad_norm": 0.14695635437965393,
"learning_rate": 1.753855620098776e-05,
"loss": 1.2927,
"step": 449
},
{
"epoch": 0.76,
"grad_norm": 0.1469867080450058,
"learning_rate": 1.7526619168261856e-05,
"loss": 1.2627,
"step": 450
},
{
"epoch": 0.76,
"grad_norm": 0.15066002309322357,
"learning_rate": 1.7514657343885923e-05,
"loss": 1.3224,
"step": 451
},
{
"epoch": 0.76,
"grad_norm": 0.15294314920902252,
"learning_rate": 1.7502670767260562e-05,
"loss": 1.3363,
"step": 452
},
{
"epoch": 0.76,
"grad_norm": 0.16633671522140503,
"learning_rate": 1.749065947786792e-05,
"loss": 1.3089,
"step": 453
},
{
"epoch": 0.76,
"grad_norm": 0.15498566627502441,
"learning_rate": 1.7478623515271526e-05,
"loss": 1.2816,
"step": 454
},
{
"epoch": 0.76,
"grad_norm": 0.15707740187644958,
"learning_rate": 1.7466562919116192e-05,
"loss": 1.2932,
"step": 455
},
{
"epoch": 0.77,
"grad_norm": 0.165644109249115,
"learning_rate": 1.745447772912787e-05,
"loss": 1.3519,
"step": 456
},
{
"epoch": 0.77,
"grad_norm": 0.1507626622915268,
"learning_rate": 1.7442367985113504e-05,
"loss": 1.3745,
"step": 457
},
{
"epoch": 0.77,
"grad_norm": 0.15195555984973907,
"learning_rate": 1.743023372696094e-05,
"loss": 1.3419,
"step": 458
},
{
"epoch": 0.77,
"grad_norm": 0.15092672407627106,
"learning_rate": 1.7418074994638752e-05,
"loss": 1.3152,
"step": 459
},
{
"epoch": 0.77,
"grad_norm": 0.15098002552986145,
"learning_rate": 1.7405891828196138e-05,
"loss": 1.315,
"step": 460
},
{
"epoch": 0.77,
"grad_norm": 0.14881564676761627,
"learning_rate": 1.7393684267762773e-05,
"loss": 1.2516,
"step": 461
},
{
"epoch": 0.78,
"grad_norm": 0.15771602094173431,
"learning_rate": 1.7381452353548687e-05,
"loss": 1.294,
"step": 462
},
{
"epoch": 0.78,
"grad_norm": 0.19221486151218414,
"learning_rate": 1.7369196125844124e-05,
"loss": 1.3867,
"step": 463
},
{
"epoch": 0.78,
"grad_norm": 0.1485375463962555,
"learning_rate": 1.7356915625019416e-05,
"loss": 1.3202,
"step": 464
},
{
"epoch": 0.78,
"grad_norm": 0.1480698585510254,
"learning_rate": 1.734461089152484e-05,
"loss": 1.301,
"step": 465
},
{
"epoch": 0.78,
"grad_norm": 0.1499594748020172,
"learning_rate": 1.7332281965890513e-05,
"loss": 1.3391,
"step": 466
},
{
"epoch": 0.78,
"grad_norm": 0.15874969959259033,
"learning_rate": 1.731992888872621e-05,
"loss": 1.3246,
"step": 467
},
{
"epoch": 0.79,
"grad_norm": 0.14509308338165283,
"learning_rate": 1.7307551700721273e-05,
"loss": 1.2611,
"step": 468
},
{
"epoch": 0.79,
"grad_norm": 0.15065301954746246,
"learning_rate": 1.729515044264447e-05,
"loss": 1.28,
"step": 469
},
{
"epoch": 0.79,
"grad_norm": 0.1511443853378296,
"learning_rate": 1.7282725155343828e-05,
"loss": 1.37,
"step": 470
},
{
"epoch": 0.79,
"grad_norm": 0.16135594248771667,
"learning_rate": 1.7270275879746547e-05,
"loss": 1.2823,
"step": 471
},
{
"epoch": 0.79,
"grad_norm": 0.1686365306377411,
"learning_rate": 1.7257802656858826e-05,
"loss": 1.3356,
"step": 472
},
{
"epoch": 0.79,
"grad_norm": 0.16540803015232086,
"learning_rate": 1.724530552776575e-05,
"loss": 1.3159,
"step": 473
},
{
"epoch": 0.8,
"grad_norm": 0.23215636610984802,
"learning_rate": 1.7232784533631148e-05,
"loss": 1.2724,
"step": 474
},
{
"epoch": 0.8,
"grad_norm": 0.15948258340358734,
"learning_rate": 1.7220239715697455e-05,
"loss": 1.2742,
"step": 475
},
{
"epoch": 0.8,
"grad_norm": 0.1488332599401474,
"learning_rate": 1.7207671115285577e-05,
"loss": 1.3531,
"step": 476
},
{
"epoch": 0.8,
"grad_norm": 0.1468440741300583,
"learning_rate": 1.7195078773794756e-05,
"loss": 1.2834,
"step": 477
},
{
"epoch": 0.8,
"grad_norm": 0.14220255613327026,
"learning_rate": 1.7182462732702442e-05,
"loss": 1.2984,
"step": 478
},
{
"epoch": 0.8,
"grad_norm": 0.1553741842508316,
"learning_rate": 1.7169823033564137e-05,
"loss": 1.3449,
"step": 479
},
{
"epoch": 0.81,
"grad_norm": 0.15940803289413452,
"learning_rate": 1.7157159718013275e-05,
"loss": 1.3179,
"step": 480
},
{
"epoch": 0.81,
"grad_norm": 0.16116777062416077,
"learning_rate": 1.714447282776108e-05,
"loss": 1.3078,
"step": 481
},
{
"epoch": 0.81,
"grad_norm": 0.16338405013084412,
"learning_rate": 1.7131762404596424e-05,
"loss": 1.3704,
"step": 482
},
{
"epoch": 0.81,
"grad_norm": 0.14362382888793945,
"learning_rate": 1.7119028490385703e-05,
"loss": 1.2434,
"step": 483
},
{
"epoch": 0.81,
"grad_norm": 0.16018328070640564,
"learning_rate": 1.7106271127072672e-05,
"loss": 1.3293,
"step": 484
},
{
"epoch": 0.81,
"grad_norm": 0.15323656797409058,
"learning_rate": 1.7093490356678338e-05,
"loss": 1.3381,
"step": 485
},
{
"epoch": 0.82,
"grad_norm": 0.14089493453502655,
"learning_rate": 1.7080686221300804e-05,
"loss": 1.269,
"step": 486
},
{
"epoch": 0.82,
"grad_norm": 0.15111474692821503,
"learning_rate": 1.706785876311513e-05,
"loss": 1.3165,
"step": 487
},
{
"epoch": 0.82,
"grad_norm": 0.1574653536081314,
"learning_rate": 1.7055008024373214e-05,
"loss": 1.3204,
"step": 488
},
{
"epoch": 0.82,
"grad_norm": 0.15187577903270721,
"learning_rate": 1.7042134047403613e-05,
"loss": 1.3261,
"step": 489
},
{
"epoch": 0.82,
"grad_norm": 0.15277227759361267,
"learning_rate": 1.7029236874611444e-05,
"loss": 1.3667,
"step": 490
},
{
"epoch": 0.82,
"grad_norm": 0.17707929015159607,
"learning_rate": 1.7016316548478217e-05,
"loss": 1.3065,
"step": 491
},
{
"epoch": 0.83,
"grad_norm": 0.1623065024614334,
"learning_rate": 1.700337311156172e-05,
"loss": 1.3231,
"step": 492
},
{
"epoch": 0.83,
"grad_norm": 0.17087838053703308,
"learning_rate": 1.6990406606495854e-05,
"loss": 1.3201,
"step": 493
},
{
"epoch": 0.83,
"grad_norm": 0.16629967093467712,
"learning_rate": 1.6977417075990508e-05,
"loss": 1.3758,
"step": 494
},
{
"epoch": 0.83,
"grad_norm": 0.14834420382976532,
"learning_rate": 1.696440456283141e-05,
"loss": 1.3256,
"step": 495
},
{
"epoch": 0.83,
"grad_norm": 0.1470130831003189,
"learning_rate": 1.6951369109879987e-05,
"loss": 1.2848,
"step": 496
},
{
"epoch": 0.83,
"grad_norm": 0.14451508224010468,
"learning_rate": 1.693831076007324e-05,
"loss": 1.2913,
"step": 497
},
{
"epoch": 0.84,
"grad_norm": 0.15200211107730865,
"learning_rate": 1.692522955642357e-05,
"loss": 1.3047,
"step": 498
},
{
"epoch": 0.84,
"grad_norm": 0.14897271990776062,
"learning_rate": 1.6912125542018675e-05,
"loss": 1.2377,
"step": 499
},
{
"epoch": 0.84,
"grad_norm": 0.1497008055448532,
"learning_rate": 1.6898998760021376e-05,
"loss": 1.3308,
"step": 500
},
{
"epoch": 0.84,
"grad_norm": 0.15045613050460815,
"learning_rate": 1.6885849253669492e-05,
"loss": 1.3119,
"step": 501
},
{
"epoch": 0.84,
"grad_norm": 0.15971465408802032,
"learning_rate": 1.6872677066275685e-05,
"loss": 1.342,
"step": 502
},
{
"epoch": 0.85,
"grad_norm": 0.15068389475345612,
"learning_rate": 1.6859482241227337e-05,
"loss": 1.3444,
"step": 503
},
{
"epoch": 0.85,
"grad_norm": 0.14832505583763123,
"learning_rate": 1.684626482198639e-05,
"loss": 1.2658,
"step": 504
},
{
"epoch": 0.85,
"grad_norm": 0.16742801666259766,
"learning_rate": 1.6833024852089207e-05,
"loss": 1.2719,
"step": 505
},
{
"epoch": 0.85,
"grad_norm": 0.14772577583789825,
"learning_rate": 1.6819762375146427e-05,
"loss": 1.3055,
"step": 506
},
{
"epoch": 0.85,
"grad_norm": 0.1421637088060379,
"learning_rate": 1.6806477434842835e-05,
"loss": 1.3252,
"step": 507
},
{
"epoch": 0.85,
"grad_norm": 0.15263010561466217,
"learning_rate": 1.6793170074937196e-05,
"loss": 1.3379,
"step": 508
},
{
"epoch": 0.86,
"grad_norm": 0.1491978019475937,
"learning_rate": 1.6779840339262128e-05,
"loss": 1.3355,
"step": 509
},
{
"epoch": 0.86,
"grad_norm": 0.16020318865776062,
"learning_rate": 1.6766488271723948e-05,
"loss": 1.3056,
"step": 510
},
{
"epoch": 0.86,
"grad_norm": 0.1441843956708908,
"learning_rate": 1.675311391630253e-05,
"loss": 1.2625,
"step": 511
},
{
"epoch": 0.86,
"grad_norm": 0.15116120874881744,
"learning_rate": 1.6739717317051173e-05,
"loss": 1.3625,
"step": 512
},
{
"epoch": 0.86,
"grad_norm": 0.16190451383590698,
"learning_rate": 1.6726298518096427e-05,
"loss": 1.3478,
"step": 513
},
{
"epoch": 0.86,
"grad_norm": 0.2721071243286133,
"learning_rate": 1.671285756363798e-05,
"loss": 1.278,
"step": 514
},
{
"epoch": 0.87,
"grad_norm": 0.15258455276489258,
"learning_rate": 1.6699394497948486e-05,
"loss": 1.3409,
"step": 515
},
{
"epoch": 0.87,
"grad_norm": 0.1480305790901184,
"learning_rate": 1.6685909365373435e-05,
"loss": 1.2828,
"step": 516
},
{
"epoch": 0.87,
"grad_norm": 0.15107598900794983,
"learning_rate": 1.6672402210331002e-05,
"loss": 1.3185,
"step": 517
},
{
"epoch": 0.87,
"grad_norm": 0.16138269007205963,
"learning_rate": 1.6658873077311902e-05,
"loss": 1.2488,
"step": 518
},
{
"epoch": 0.87,
"grad_norm": 0.1458127200603485,
"learning_rate": 1.6645322010879242e-05,
"loss": 1.3045,
"step": 519
},
{
"epoch": 0.87,
"grad_norm": 0.1560029536485672,
"learning_rate": 1.663174905566837e-05,
"loss": 1.3145,
"step": 520
},
{
"epoch": 0.88,
"grad_norm": 0.15814390778541565,
"learning_rate": 1.6618154256386746e-05,
"loss": 1.2516,
"step": 521
},
{
"epoch": 0.88,
"grad_norm": 0.1609438955783844,
"learning_rate": 1.660453765781376e-05,
"loss": 1.3517,
"step": 522
},
{
"epoch": 0.88,
"grad_norm": 0.1503395289182663,
"learning_rate": 1.6590899304800626e-05,
"loss": 1.2836,
"step": 523
},
{
"epoch": 0.88,
"grad_norm": 0.15777058899402618,
"learning_rate": 1.6577239242270208e-05,
"loss": 1.3166,
"step": 524
},
{
"epoch": 0.88,
"grad_norm": 0.15574100613594055,
"learning_rate": 1.6563557515216868e-05,
"loss": 1.3431,
"step": 525
},
{
"epoch": 0.88,
"grad_norm": 0.31943216919898987,
"learning_rate": 1.654985416870634e-05,
"loss": 1.3003,
"step": 526
},
{
"epoch": 0.89,
"grad_norm": 0.1493591070175171,
"learning_rate": 1.6536129247875568e-05,
"loss": 1.3107,
"step": 527
},
{
"epoch": 0.89,
"grad_norm": 0.16144278645515442,
"learning_rate": 1.6522382797932555e-05,
"loss": 1.3139,
"step": 528
},
{
"epoch": 0.89,
"grad_norm": 0.15227490663528442,
"learning_rate": 1.6508614864156218e-05,
"loss": 1.3107,
"step": 529
},
{
"epoch": 0.89,
"grad_norm": 0.1584673821926117,
"learning_rate": 1.6494825491896246e-05,
"loss": 1.3199,
"step": 530
},
{
"epoch": 0.89,
"grad_norm": 0.16497363150119781,
"learning_rate": 1.6481014726572924e-05,
"loss": 1.3562,
"step": 531
},
{
"epoch": 0.89,
"grad_norm": 0.15551984310150146,
"learning_rate": 1.6467182613677035e-05,
"loss": 1.2964,
"step": 532
},
{
"epoch": 0.9,
"grad_norm": 0.15545369684696198,
"learning_rate": 1.645332919876965e-05,
"loss": 1.3617,
"step": 533
},
{
"epoch": 0.9,
"grad_norm": 0.15759220719337463,
"learning_rate": 1.6439454527482014e-05,
"loss": 1.3197,
"step": 534
},
{
"epoch": 0.9,
"grad_norm": 0.16849841177463531,
"learning_rate": 1.642555864551539e-05,
"loss": 1.3545,
"step": 535
},
{
"epoch": 0.9,
"grad_norm": 0.16088669002056122,
"learning_rate": 1.6411641598640908e-05,
"loss": 1.3041,
"step": 536
},
{
"epoch": 0.9,
"grad_norm": 0.18748100101947784,
"learning_rate": 1.6397703432699404e-05,
"loss": 1.3261,
"step": 537
},
{
"epoch": 0.9,
"grad_norm": 0.15511862933635712,
"learning_rate": 1.6383744193601293e-05,
"loss": 1.2566,
"step": 538
},
{
"epoch": 0.91,
"grad_norm": 0.15378575026988983,
"learning_rate": 1.6369763927326378e-05,
"loss": 1.3685,
"step": 539
},
{
"epoch": 0.91,
"grad_norm": 0.15075547993183136,
"learning_rate": 1.6355762679923744e-05,
"loss": 1.3565,
"step": 540
},
{
"epoch": 0.91,
"grad_norm": 0.17224986851215363,
"learning_rate": 1.6341740497511584e-05,
"loss": 1.3599,
"step": 541
},
{
"epoch": 0.91,
"grad_norm": 0.16651223599910736,
"learning_rate": 1.632769742627703e-05,
"loss": 1.2676,
"step": 542
},
{
"epoch": 0.91,
"grad_norm": 0.1479247361421585,
"learning_rate": 1.6313633512476034e-05,
"loss": 1.3511,
"step": 543
},
{
"epoch": 0.91,
"grad_norm": 0.14475040137767792,
"learning_rate": 1.62995488024332e-05,
"loss": 1.281,
"step": 544
},
{
"epoch": 0.92,
"grad_norm": 0.14610210061073303,
"learning_rate": 1.628544334254162e-05,
"loss": 1.2803,
"step": 545
},
{
"epoch": 0.92,
"grad_norm": 0.15002188086509705,
"learning_rate": 1.6271317179262754e-05,
"loss": 1.3114,
"step": 546
},
{
"epoch": 0.92,
"grad_norm": 0.15048658847808838,
"learning_rate": 1.6257170359126237e-05,
"loss": 1.342,
"step": 547
},
{
"epoch": 0.92,
"grad_norm": 0.14822815358638763,
"learning_rate": 1.6243002928729752e-05,
"loss": 1.3811,
"step": 548
},
{
"epoch": 0.92,
"grad_norm": 0.14471104741096497,
"learning_rate": 1.6228814934738873e-05,
"loss": 1.3415,
"step": 549
},
{
"epoch": 0.92,
"grad_norm": 0.16976451873779297,
"learning_rate": 1.6214606423886903e-05,
"loss": 1.246,
"step": 550
},
{
"epoch": 0.93,
"grad_norm": 0.15606847405433655,
"learning_rate": 1.6200377442974723e-05,
"loss": 1.3191,
"step": 551
},
{
"epoch": 0.93,
"grad_norm": 0.15066905319690704,
"learning_rate": 1.6186128038870644e-05,
"loss": 1.3419,
"step": 552
},
{
"epoch": 0.93,
"grad_norm": 0.1576036810874939,
"learning_rate": 1.6171858258510246e-05,
"loss": 1.2499,
"step": 553
},
{
"epoch": 0.93,
"grad_norm": 0.16077366471290588,
"learning_rate": 1.6157568148896227e-05,
"loss": 1.2236,
"step": 554
},
{
"epoch": 0.93,
"grad_norm": 0.15549123287200928,
"learning_rate": 1.6143257757098242e-05,
"loss": 1.2914,
"step": 555
},
{
"epoch": 0.93,
"grad_norm": 0.15645426511764526,
"learning_rate": 1.6128927130252764e-05,
"loss": 1.344,
"step": 556
},
{
"epoch": 0.94,
"grad_norm": 0.1562529355287552,
"learning_rate": 1.61145763155629e-05,
"loss": 1.3898,
"step": 557
},
{
"epoch": 0.94,
"grad_norm": 0.14849528670310974,
"learning_rate": 1.6100205360298277e-05,
"loss": 1.2746,
"step": 558
},
{
"epoch": 0.94,
"grad_norm": 0.15014708042144775,
"learning_rate": 1.6085814311794833e-05,
"loss": 1.355,
"step": 559
},
{
"epoch": 0.94,
"grad_norm": 0.15373437106609344,
"learning_rate": 1.607140321745472e-05,
"loss": 1.3893,
"step": 560
},
{
"epoch": 0.94,
"grad_norm": 0.14762701094150543,
"learning_rate": 1.605697212474609e-05,
"loss": 1.2868,
"step": 561
},
{
"epoch": 0.94,
"grad_norm": 0.16601255536079407,
"learning_rate": 1.604252108120299e-05,
"loss": 1.3123,
"step": 562
},
{
"epoch": 0.95,
"grad_norm": 0.15280282497406006,
"learning_rate": 1.6028050134425172e-05,
"loss": 1.2975,
"step": 563
},
{
"epoch": 0.95,
"grad_norm": 0.15927375853061676,
"learning_rate": 1.6013559332077945e-05,
"loss": 1.371,
"step": 564
},
{
"epoch": 0.95,
"grad_norm": 0.22685493528842926,
"learning_rate": 1.5999048721892024e-05,
"loss": 1.3319,
"step": 565
},
{
"epoch": 0.95,
"grad_norm": 0.16215336322784424,
"learning_rate": 1.5984518351663365e-05,
"loss": 1.3597,
"step": 566
},
{
"epoch": 0.95,
"grad_norm": 0.15450100600719452,
"learning_rate": 1.5969968269253014e-05,
"loss": 1.3647,
"step": 567
},
{
"epoch": 0.95,
"grad_norm": 0.156316339969635,
"learning_rate": 1.5955398522586942e-05,
"loss": 1.3858,
"step": 568
},
{
"epoch": 0.96,
"grad_norm": 0.1615144908428192,
"learning_rate": 1.5940809159655898e-05,
"loss": 1.3203,
"step": 569
},
{
"epoch": 0.96,
"grad_norm": 0.1442023366689682,
"learning_rate": 1.5926200228515235e-05,
"loss": 1.3644,
"step": 570
},
{
"epoch": 0.96,
"grad_norm": 0.15228597819805145,
"learning_rate": 1.591157177728477e-05,
"loss": 1.2107,
"step": 571
},
{
"epoch": 0.96,
"grad_norm": 0.15035109221935272,
"learning_rate": 1.5896923854148613e-05,
"loss": 1.2465,
"step": 572
},
{
"epoch": 0.96,
"grad_norm": 0.1499544084072113,
"learning_rate": 1.588225650735501e-05,
"loss": 1.3081,
"step": 573
},
{
"epoch": 0.96,
"grad_norm": 0.152054563164711,
"learning_rate": 1.5867569785216187e-05,
"loss": 1.2955,
"step": 574
},
{
"epoch": 0.97,
"grad_norm": 0.15024514496326447,
"learning_rate": 1.585286373610819e-05,
"loss": 1.2715,
"step": 575
},
{
"epoch": 0.97,
"grad_norm": 0.15844179689884186,
"learning_rate": 1.5838138408470723e-05,
"loss": 1.3345,
"step": 576
},
{
"epoch": 0.97,
"grad_norm": 0.1464165300130844,
"learning_rate": 1.5823393850807e-05,
"loss": 1.2863,
"step": 577
},
{
"epoch": 0.97,
"grad_norm": 0.16190552711486816,
"learning_rate": 1.5808630111683566e-05,
"loss": 1.2433,
"step": 578
},
{
"epoch": 0.97,
"grad_norm": 0.15056265890598297,
"learning_rate": 1.5793847239730148e-05,
"loss": 1.3604,
"step": 579
},
{
"epoch": 0.97,
"grad_norm": 0.1699298769235611,
"learning_rate": 1.5779045283639495e-05,
"loss": 1.3728,
"step": 580
},
{
"epoch": 0.98,
"grad_norm": 0.1500501036643982,
"learning_rate": 1.5764224292167223e-05,
"loss": 1.3597,
"step": 581
},
{
"epoch": 0.98,
"grad_norm": 0.15492461621761322,
"learning_rate": 1.5749384314131643e-05,
"loss": 1.312,
"step": 582
},
{
"epoch": 0.98,
"grad_norm": 0.15505270659923553,
"learning_rate": 1.5734525398413597e-05,
"loss": 1.3137,
"step": 583
},
{
"epoch": 0.98,
"grad_norm": 0.14275893568992615,
"learning_rate": 1.5719647593956328e-05,
"loss": 1.283,
"step": 584
},
{
"epoch": 0.98,
"grad_norm": 0.14876849949359894,
"learning_rate": 1.5704750949765263e-05,
"loss": 1.3035,
"step": 585
},
{
"epoch": 0.98,
"grad_norm": 0.1965971291065216,
"learning_rate": 1.5689835514907916e-05,
"loss": 1.258,
"step": 586
},
{
"epoch": 0.99,
"grad_norm": 0.14744770526885986,
"learning_rate": 1.5674901338513672e-05,
"loss": 1.3358,
"step": 587
},
{
"epoch": 0.99,
"grad_norm": 0.1537308543920517,
"learning_rate": 1.5659948469773665e-05,
"loss": 1.3796,
"step": 588
},
{
"epoch": 0.99,
"grad_norm": 0.15412989258766174,
"learning_rate": 1.5644976957940586e-05,
"loss": 1.2337,
"step": 589
},
{
"epoch": 0.99,
"grad_norm": 0.15361419320106506,
"learning_rate": 1.5629986852328546e-05,
"loss": 1.3013,
"step": 590
},
{
"epoch": 0.99,
"grad_norm": 0.15221114456653595,
"learning_rate": 1.5614978202312886e-05,
"loss": 1.2744,
"step": 591
},
{
"epoch": 0.99,
"grad_norm": 0.2714800238609314,
"learning_rate": 1.559995105733004e-05,
"loss": 1.2738,
"step": 592
},
{
"epoch": 1.0,
"grad_norm": 0.15101397037506104,
"learning_rate": 1.558490546687737e-05,
"loss": 1.3258,
"step": 593
},
{
"epoch": 1.0,
"grad_norm": 0.1766865998506546,
"learning_rate": 1.5569841480512972e-05,
"loss": 1.2578,
"step": 594
},
{
"epoch": 1.0,
"grad_norm": 0.15496864914894104,
"learning_rate": 1.5554759147855554e-05,
"loss": 1.315,
"step": 595
},
{
"epoch": 1.0,
"grad_norm": 0.20621678233146667,
"learning_rate": 1.5539658518584248e-05,
"loss": 1.2806,
"step": 596
},
{
"epoch": 1.0,
"grad_norm": 0.18250219523906708,
"learning_rate": 1.552453964243845e-05,
"loss": 1.1638,
"step": 597
},
{
"epoch": 1.0,
"grad_norm": 0.15596213936805725,
"learning_rate": 1.5509402569217677e-05,
"loss": 1.2473,
"step": 598
},
{
"epoch": 1.01,
"grad_norm": 0.15193147957324982,
"learning_rate": 1.549424734878135e-05,
"loss": 1.1939,
"step": 599
},
{
"epoch": 1.01,
"grad_norm": 0.1586681604385376,
"learning_rate": 1.5479074031048695e-05,
"loss": 1.2696,
"step": 600
}
],
"logging_steps": 1.0,
"max_steps": 1785,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 50,
"total_flos": 3.2809320102708118e+19,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}