{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.000740192450037,
"eval_steps": 507,
"global_step": 1014,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 92.56075816528,
"learning_rate": 5.000000000000001e-07,
"loss": 1.6849,
"step": 1
},
{
"epoch": 0.0,
"eval_loss": 1.7294352054595947,
"eval_runtime": 1884.169,
"eval_samples_per_second": 0.701,
"eval_steps_per_second": 0.078,
"step": 1
},
{
"epoch": 0.0,
"grad_norm": 62.47252473867764,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.4808,
"step": 2
},
{
"epoch": 0.0,
"grad_norm": 86.16163524072095,
"learning_rate": 1.5e-06,
"loss": 1.622,
"step": 3
},
{
"epoch": 0.0,
"grad_norm": 60.0955549964575,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.5835,
"step": 4
},
{
"epoch": 0.0,
"grad_norm": 29.984043062746373,
"learning_rate": 2.5e-06,
"loss": 1.5242,
"step": 5
},
{
"epoch": 0.01,
"grad_norm": 301.1618364383654,
"learning_rate": 3e-06,
"loss": 1.2748,
"step": 6
},
{
"epoch": 0.01,
"grad_norm": 109.29021464248359,
"learning_rate": 3.5e-06,
"loss": 1.1112,
"step": 7
},
{
"epoch": 0.01,
"grad_norm": 10.444328634170152,
"learning_rate": 4.000000000000001e-06,
"loss": 0.9269,
"step": 8
},
{
"epoch": 0.01,
"grad_norm": 6.523925938807769,
"learning_rate": 4.5e-06,
"loss": 0.8644,
"step": 9
},
{
"epoch": 0.01,
"grad_norm": 17.340636604619814,
"learning_rate": 5e-06,
"loss": 0.8207,
"step": 10
},
{
"epoch": 0.01,
"grad_norm": 44.61126816135743,
"learning_rate": 4.9999969645113385e-06,
"loss": 1.1849,
"step": 11
},
{
"epoch": 0.01,
"grad_norm": 4.3252416457498315,
"learning_rate": 4.999987858052726e-06,
"loss": 0.885,
"step": 12
},
{
"epoch": 0.01,
"grad_norm": 2.9355463597411195,
"learning_rate": 4.999972680646276e-06,
"loss": 0.8329,
"step": 13
},
{
"epoch": 0.01,
"grad_norm": 2.0811257726357524,
"learning_rate": 4.999951432328846e-06,
"loss": 0.7869,
"step": 14
},
{
"epoch": 0.01,
"grad_norm": 1.9698959240524336,
"learning_rate": 4.999924113152034e-06,
"loss": 0.7948,
"step": 15
},
{
"epoch": 0.02,
"grad_norm": 1.6266575755898287,
"learning_rate": 4.999890723182183e-06,
"loss": 0.8005,
"step": 16
},
{
"epoch": 0.02,
"grad_norm": 1.419625838626686,
"learning_rate": 4.999851262500375e-06,
"loss": 0.7565,
"step": 17
},
{
"epoch": 0.02,
"grad_norm": 1.4352187159776242,
"learning_rate": 4.999805731202437e-06,
"loss": 0.7602,
"step": 18
},
{
"epoch": 0.02,
"grad_norm": 1.2355497382357647,
"learning_rate": 4.999754129398938e-06,
"loss": 0.7564,
"step": 19
},
{
"epoch": 0.02,
"grad_norm": 1.2700121185974393,
"learning_rate": 4.999696457215186e-06,
"loss": 0.8125,
"step": 20
},
{
"epoch": 0.02,
"grad_norm": 1.1702941851369728,
"learning_rate": 4.9996327147912315e-06,
"loss": 0.7382,
"step": 21
},
{
"epoch": 0.02,
"grad_norm": 1.1798174730304447,
"learning_rate": 4.999562902281866e-06,
"loss": 0.7742,
"step": 22
},
{
"epoch": 0.02,
"grad_norm": 1.1364491950081737,
"learning_rate": 4.999487019856623e-06,
"loss": 0.7282,
"step": 23
},
{
"epoch": 0.02,
"grad_norm": 1.1131547945883817,
"learning_rate": 4.999405067699773e-06,
"loss": 0.7475,
"step": 24
},
{
"epoch": 0.02,
"grad_norm": 1.1506149455536359,
"learning_rate": 4.9993170460103295e-06,
"loss": 0.7072,
"step": 25
},
{
"epoch": 0.03,
"grad_norm": 1.2889319229905287,
"learning_rate": 4.999222955002041e-06,
"loss": 0.8064,
"step": 26
},
{
"epoch": 0.03,
"grad_norm": 1.059562137107165,
"learning_rate": 4.999122794903399e-06,
"loss": 0.7147,
"step": 27
},
{
"epoch": 0.03,
"grad_norm": 1.149120672076531,
"learning_rate": 4.999016565957633e-06,
"loss": 0.7792,
"step": 28
},
{
"epoch": 0.03,
"grad_norm": 1.1596428841302342,
"learning_rate": 4.998904268422705e-06,
"loss": 0.7586,
"step": 29
},
{
"epoch": 0.03,
"grad_norm": 0.980994033140509,
"learning_rate": 4.998785902571319e-06,
"loss": 0.7721,
"step": 30
},
{
"epoch": 0.03,
"grad_norm": 0.9969455208276253,
"learning_rate": 4.998661468690914e-06,
"loss": 0.7482,
"step": 31
},
{
"epoch": 0.03,
"grad_norm": 1.2088730805217034,
"learning_rate": 4.998530967083664e-06,
"loss": 0.7687,
"step": 32
},
{
"epoch": 0.03,
"grad_norm": 1.1772131571685314,
"learning_rate": 4.998394398066477e-06,
"loss": 0.7336,
"step": 33
},
{
"epoch": 0.03,
"grad_norm": 1.0095424896414942,
"learning_rate": 4.998251761970997e-06,
"loss": 0.7454,
"step": 34
},
{
"epoch": 0.03,
"grad_norm": 1.058399487048206,
"learning_rate": 4.998103059143599e-06,
"loss": 0.7424,
"step": 35
},
{
"epoch": 0.04,
"grad_norm": 1.0553895642417541,
"learning_rate": 4.997948289945392e-06,
"loss": 0.7034,
"step": 36
},
{
"epoch": 0.04,
"grad_norm": 0.9737916869473869,
"learning_rate": 4.997787454752217e-06,
"loss": 0.7225,
"step": 37
},
{
"epoch": 0.04,
"grad_norm": 1.0497906961287207,
"learning_rate": 4.997620553954645e-06,
"loss": 0.7545,
"step": 38
},
{
"epoch": 0.04,
"grad_norm": 1.0351771937970027,
"learning_rate": 4.9974475879579745e-06,
"loss": 0.6999,
"step": 39
},
{
"epoch": 0.04,
"grad_norm": 1.025178950396183,
"learning_rate": 4.9972685571822355e-06,
"loss": 0.6836,
"step": 40
},
{
"epoch": 0.04,
"grad_norm": 0.8990499683406185,
"learning_rate": 4.997083462062185e-06,
"loss": 0.749,
"step": 41
},
{
"epoch": 0.04,
"grad_norm": 1.1934033398652506,
"learning_rate": 4.996892303047306e-06,
"loss": 0.7118,
"step": 42
},
{
"epoch": 0.04,
"grad_norm": 1.0093747627659915,
"learning_rate": 4.9966950806018075e-06,
"loss": 0.6931,
"step": 43
},
{
"epoch": 0.04,
"grad_norm": 1.1259320175035152,
"learning_rate": 4.996491795204623e-06,
"loss": 0.7011,
"step": 44
},
{
"epoch": 0.04,
"grad_norm": 0.9340587879454492,
"learning_rate": 4.996282447349408e-06,
"loss": 0.7031,
"step": 45
},
{
"epoch": 0.05,
"grad_norm": 1.093497345220761,
"learning_rate": 4.996067037544542e-06,
"loss": 0.6984,
"step": 46
},
{
"epoch": 0.05,
"grad_norm": 0.9253425670099831,
"learning_rate": 4.9958455663131235e-06,
"loss": 0.7211,
"step": 47
},
{
"epoch": 0.05,
"grad_norm": 1.1321089016179777,
"learning_rate": 4.99561803419297e-06,
"loss": 0.6809,
"step": 48
},
{
"epoch": 0.05,
"grad_norm": 0.9225335387284348,
"learning_rate": 4.995384441736622e-06,
"loss": 0.6926,
"step": 49
},
{
"epoch": 0.05,
"grad_norm": 1.03537386787448,
"learning_rate": 4.9951447895113305e-06,
"loss": 0.6868,
"step": 50
},
{
"epoch": 0.05,
"grad_norm": 0.9586247752600078,
"learning_rate": 4.994899078099064e-06,
"loss": 0.6973,
"step": 51
},
{
"epoch": 0.05,
"grad_norm": 1.164879075983742,
"learning_rate": 4.994647308096509e-06,
"loss": 0.6976,
"step": 52
},
{
"epoch": 0.05,
"grad_norm": 1.007151099232745,
"learning_rate": 4.994389480115059e-06,
"loss": 0.6728,
"step": 53
},
{
"epoch": 0.05,
"grad_norm": 1.194002279613801,
"learning_rate": 4.9941255947808226e-06,
"loss": 0.7386,
"step": 54
},
{
"epoch": 0.05,
"grad_norm": 1.2903234642197774,
"learning_rate": 4.993855652734616e-06,
"loss": 0.6884,
"step": 55
},
{
"epoch": 0.06,
"grad_norm": 0.9696246122793456,
"learning_rate": 4.993579654631963e-06,
"loss": 0.7175,
"step": 56
},
{
"epoch": 0.06,
"grad_norm": 1.1155328763962573,
"learning_rate": 4.993297601143095e-06,
"loss": 0.7146,
"step": 57
},
{
"epoch": 0.06,
"grad_norm": 0.9599001918058567,
"learning_rate": 4.993009492952951e-06,
"loss": 0.7054,
"step": 58
},
{
"epoch": 0.06,
"grad_norm": 0.9180819748743216,
"learning_rate": 4.992715330761167e-06,
"loss": 0.6984,
"step": 59
},
{
"epoch": 0.06,
"grad_norm": 0.9088984413752035,
"learning_rate": 4.992415115282085e-06,
"loss": 0.7001,
"step": 60
},
{
"epoch": 0.06,
"grad_norm": 1.1141716959997088,
"learning_rate": 4.992108847244746e-06,
"loss": 0.6756,
"step": 61
},
{
"epoch": 0.06,
"grad_norm": 0.8636787369748702,
"learning_rate": 4.991796527392888e-06,
"loss": 0.7548,
"step": 62
},
{
"epoch": 0.06,
"grad_norm": 1.0780460703120494,
"learning_rate": 4.991478156484945e-06,
"loss": 0.7039,
"step": 63
},
{
"epoch": 0.06,
"grad_norm": 0.9164733455688905,
"learning_rate": 4.9911537352940485e-06,
"loss": 0.7385,
"step": 64
},
{
"epoch": 0.06,
"grad_norm": 1.0268015729747888,
"learning_rate": 4.990823264608017e-06,
"loss": 0.7283,
"step": 65
},
{
"epoch": 0.07,
"grad_norm": 0.9924581532223246,
"learning_rate": 4.990486745229364e-06,
"loss": 0.7315,
"step": 66
},
{
"epoch": 0.07,
"grad_norm": 0.953526608505078,
"learning_rate": 4.99014417797529e-06,
"loss": 0.7194,
"step": 67
},
{
"epoch": 0.07,
"grad_norm": 0.9721089885130799,
"learning_rate": 4.9897955636776824e-06,
"loss": 0.6786,
"step": 68
},
{
"epoch": 0.07,
"grad_norm": 1.095580212055755,
"learning_rate": 4.989440903183112e-06,
"loss": 0.6547,
"step": 69
},
{
"epoch": 0.07,
"grad_norm": 1.0176000454229972,
"learning_rate": 4.989080197352834e-06,
"loss": 0.7024,
"step": 70
},
{
"epoch": 0.07,
"grad_norm": 0.9764964284876755,
"learning_rate": 4.988713447062784e-06,
"loss": 0.6897,
"step": 71
},
{
"epoch": 0.07,
"grad_norm": 0.9720697960414958,
"learning_rate": 4.9883406532035735e-06,
"loss": 0.7368,
"step": 72
},
{
"epoch": 0.07,
"grad_norm": 0.8924650786548242,
"learning_rate": 4.987961816680493e-06,
"loss": 0.6986,
"step": 73
},
{
"epoch": 0.07,
"grad_norm": 1.0583230570989346,
"learning_rate": 4.987576938413504e-06,
"loss": 0.6488,
"step": 74
},
{
"epoch": 0.07,
"grad_norm": 0.8532905196520026,
"learning_rate": 4.9871860193372424e-06,
"loss": 0.6545,
"step": 75
},
{
"epoch": 0.08,
"grad_norm": 1.110205997163892,
"learning_rate": 4.9867890604010125e-06,
"loss": 0.7296,
"step": 76
},
{
"epoch": 0.08,
"grad_norm": 0.8585453329590255,
"learning_rate": 4.986386062568787e-06,
"loss": 0.6478,
"step": 77
},
{
"epoch": 0.08,
"grad_norm": 1.072721980278167,
"learning_rate": 4.9859770268192e-06,
"loss": 0.683,
"step": 78
},
{
"epoch": 0.08,
"grad_norm": 0.9114010594463168,
"learning_rate": 4.985561954145552e-06,
"loss": 0.7278,
"step": 79
},
{
"epoch": 0.08,
"grad_norm": 0.9407855861796833,
"learning_rate": 4.985140845555799e-06,
"loss": 0.6767,
"step": 80
},
{
"epoch": 0.08,
"grad_norm": 0.9716353726299576,
"learning_rate": 4.984713702072561e-06,
"loss": 0.7058,
"step": 81
},
{
"epoch": 0.08,
"grad_norm": 0.9289919706551977,
"learning_rate": 4.984280524733107e-06,
"loss": 0.7465,
"step": 82
},
{
"epoch": 0.08,
"grad_norm": 0.9719373864834964,
"learning_rate": 4.9838413145893615e-06,
"loss": 0.7173,
"step": 83
},
{
"epoch": 0.08,
"grad_norm": 0.8656006720690229,
"learning_rate": 4.983396072707898e-06,
"loss": 0.7138,
"step": 84
},
{
"epoch": 0.08,
"grad_norm": 0.9405397793417023,
"learning_rate": 4.982944800169939e-06,
"loss": 0.7172,
"step": 85
},
{
"epoch": 0.08,
"grad_norm": 0.8968931033898299,
"learning_rate": 4.982487498071349e-06,
"loss": 0.6959,
"step": 86
},
{
"epoch": 0.09,
"grad_norm": 0.969922689772122,
"learning_rate": 4.982024167522638e-06,
"loss": 0.706,
"step": 87
},
{
"epoch": 0.09,
"grad_norm": 0.8691971891272707,
"learning_rate": 4.981554809648952e-06,
"loss": 0.7156,
"step": 88
},
{
"epoch": 0.09,
"grad_norm": 0.9182420056058627,
"learning_rate": 4.981079425590078e-06,
"loss": 0.6694,
"step": 89
},
{
"epoch": 0.09,
"grad_norm": 0.9014669660783321,
"learning_rate": 4.980598016500431e-06,
"loss": 0.6842,
"step": 90
},
{
"epoch": 0.09,
"grad_norm": 0.8598791805359901,
"learning_rate": 4.980110583549062e-06,
"loss": 0.6707,
"step": 91
},
{
"epoch": 0.09,
"grad_norm": 0.8569795805490359,
"learning_rate": 4.97961712791965e-06,
"loss": 0.6637,
"step": 92
},
{
"epoch": 0.09,
"grad_norm": 0.8939370143328529,
"learning_rate": 4.979117650810495e-06,
"loss": 0.6927,
"step": 93
},
{
"epoch": 0.09,
"grad_norm": 0.9152075194521869,
"learning_rate": 4.978612153434527e-06,
"loss": 0.6803,
"step": 94
},
{
"epoch": 0.09,
"grad_norm": 0.9298563182624136,
"learning_rate": 4.978100637019287e-06,
"loss": 0.6776,
"step": 95
},
{
"epoch": 0.09,
"grad_norm": 0.9132188893536833,
"learning_rate": 4.97758310280694e-06,
"loss": 0.7032,
"step": 96
},
{
"epoch": 0.1,
"grad_norm": 0.9574161232781394,
"learning_rate": 4.97705955205426e-06,
"loss": 0.6731,
"step": 97
},
{
"epoch": 0.1,
"grad_norm": 0.8327779171567138,
"learning_rate": 4.976529986032632e-06,
"loss": 0.6754,
"step": 98
},
{
"epoch": 0.1,
"grad_norm": 1.1175664554073028,
"learning_rate": 4.975994406028051e-06,
"loss": 0.6774,
"step": 99
},
{
"epoch": 0.1,
"grad_norm": 0.9122529617157105,
"learning_rate": 4.9754528133411144e-06,
"loss": 0.6905,
"step": 100
},
{
"epoch": 0.1,
"grad_norm": 0.9495918841010983,
"learning_rate": 4.97490520928702e-06,
"loss": 0.6683,
"step": 101
},
{
"epoch": 0.1,
"grad_norm": 0.9199809745368361,
"learning_rate": 4.9743515951955655e-06,
"loss": 0.6512,
"step": 102
},
{
"epoch": 0.1,
"grad_norm": 0.9371967382154123,
"learning_rate": 4.973791972411142e-06,
"loss": 0.7004,
"step": 103
},
{
"epoch": 0.1,
"grad_norm": 1.0249455893660548,
"learning_rate": 4.973226342292732e-06,
"loss": 0.6902,
"step": 104
},
{
"epoch": 0.1,
"grad_norm": 1.007634999853075,
"learning_rate": 4.972654706213906e-06,
"loss": 0.712,
"step": 105
},
{
"epoch": 0.1,
"grad_norm": 0.9151412270511807,
"learning_rate": 4.9720770655628216e-06,
"loss": 0.689,
"step": 106
},
{
"epoch": 0.11,
"grad_norm": 0.9263042922044208,
"learning_rate": 4.971493421742215e-06,
"loss": 0.7188,
"step": 107
},
{
"epoch": 0.11,
"grad_norm": 0.9875225370080567,
"learning_rate": 4.970903776169403e-06,
"loss": 0.7313,
"step": 108
},
{
"epoch": 0.11,
"grad_norm": 0.9329951904410932,
"learning_rate": 4.970308130276273e-06,
"loss": 0.6473,
"step": 109
},
{
"epoch": 0.11,
"grad_norm": 0.952122809501081,
"learning_rate": 4.969706485509287e-06,
"loss": 0.6866,
"step": 110
},
{
"epoch": 0.11,
"grad_norm": 0.8675487151632736,
"learning_rate": 4.969098843329475e-06,
"loss": 0.6621,
"step": 111
},
{
"epoch": 0.11,
"grad_norm": 1.0124220925484857,
"learning_rate": 4.968485205212429e-06,
"loss": 0.696,
"step": 112
},
{
"epoch": 0.11,
"grad_norm": 0.8692279553117837,
"learning_rate": 4.967865572648302e-06,
"loss": 0.6503,
"step": 113
},
{
"epoch": 0.11,
"grad_norm": 0.8684082104461566,
"learning_rate": 4.967239947141803e-06,
"loss": 0.717,
"step": 114
},
{
"epoch": 0.11,
"grad_norm": 0.9363131782505817,
"learning_rate": 4.966608330212198e-06,
"loss": 0.67,
"step": 115
},
{
"epoch": 0.11,
"grad_norm": 0.9483280670290893,
"learning_rate": 4.965970723393297e-06,
"loss": 0.6645,
"step": 116
},
{
"epoch": 0.12,
"grad_norm": 0.9645550818323707,
"learning_rate": 4.965327128233462e-06,
"loss": 0.6402,
"step": 117
},
{
"epoch": 0.12,
"grad_norm": 0.9020494921501199,
"learning_rate": 4.96467754629559e-06,
"loss": 0.6848,
"step": 118
},
{
"epoch": 0.12,
"grad_norm": 0.9273157548895928,
"learning_rate": 4.964021979157122e-06,
"loss": 0.6888,
"step": 119
},
{
"epoch": 0.12,
"grad_norm": 1.0083484650433756,
"learning_rate": 4.963360428410031e-06,
"loss": 0.7223,
"step": 120
},
{
"epoch": 0.12,
"grad_norm": 0.9556311862868435,
"learning_rate": 4.96269289566082e-06,
"loss": 0.6924,
"step": 121
},
{
"epoch": 0.12,
"grad_norm": 0.8565613256126575,
"learning_rate": 4.962019382530521e-06,
"loss": 0.6551,
"step": 122
},
{
"epoch": 0.12,
"grad_norm": 1.0082880819703033,
"learning_rate": 4.961339890654685e-06,
"loss": 0.6208,
"step": 123
},
{
"epoch": 0.12,
"grad_norm": 1.0159617953712363,
"learning_rate": 4.9606544216833865e-06,
"loss": 0.7223,
"step": 124
},
{
"epoch": 0.12,
"grad_norm": 0.9941249295005459,
"learning_rate": 4.95996297728121e-06,
"loss": 0.6853,
"step": 125
},
{
"epoch": 0.12,
"grad_norm": 0.980987656479997,
"learning_rate": 4.959265559127253e-06,
"loss": 0.6531,
"step": 126
},
{
"epoch": 0.13,
"grad_norm": 0.8906826655489122,
"learning_rate": 4.958562168915121e-06,
"loss": 0.6822,
"step": 127
},
{
"epoch": 0.13,
"grad_norm": 0.9447717033103807,
"learning_rate": 4.95785280835292e-06,
"loss": 0.6735,
"step": 128
},
{
"epoch": 0.13,
"grad_norm": 0.9322226016156214,
"learning_rate": 4.957137479163253e-06,
"loss": 0.6853,
"step": 129
},
{
"epoch": 0.13,
"grad_norm": 0.9016538169637961,
"learning_rate": 4.9564161830832214e-06,
"loss": 0.6546,
"step": 130
},
{
"epoch": 0.13,
"grad_norm": 0.9425807696655617,
"learning_rate": 4.955688921864411e-06,
"loss": 0.6874,
"step": 131
},
{
"epoch": 0.13,
"grad_norm": 0.939091235820591,
"learning_rate": 4.954955697272899e-06,
"loss": 0.7002,
"step": 132
},
{
"epoch": 0.13,
"grad_norm": 0.8787416221103219,
"learning_rate": 4.954216511089242e-06,
"loss": 0.7502,
"step": 133
},
{
"epoch": 0.13,
"grad_norm": 0.9742122954687594,
"learning_rate": 4.95347136510847e-06,
"loss": 0.6657,
"step": 134
},
{
"epoch": 0.13,
"grad_norm": 0.8801095585358768,
"learning_rate": 4.95272026114009e-06,
"loss": 0.6555,
"step": 135
},
{
"epoch": 0.13,
"grad_norm": 0.9481198118358727,
"learning_rate": 4.9519632010080765e-06,
"loss": 0.6737,
"step": 136
},
{
"epoch": 0.14,
"grad_norm": 0.9764499477724559,
"learning_rate": 4.951200186550868e-06,
"loss": 0.6616,
"step": 137
},
{
"epoch": 0.14,
"grad_norm": 0.9677532996854814,
"learning_rate": 4.9504312196213596e-06,
"loss": 0.6604,
"step": 138
},
{
"epoch": 0.14,
"grad_norm": 0.9262840329326826,
"learning_rate": 4.949656302086907e-06,
"loss": 0.6634,
"step": 139
},
{
"epoch": 0.14,
"grad_norm": 0.9913169871400999,
"learning_rate": 4.94887543582931e-06,
"loss": 0.6834,
"step": 140
},
{
"epoch": 0.14,
"grad_norm": 1.037107833577886,
"learning_rate": 4.948088622744819e-06,
"loss": 0.7047,
"step": 141
},
{
"epoch": 0.14,
"grad_norm": 1.0270598791002519,
"learning_rate": 4.947295864744121e-06,
"loss": 0.6637,
"step": 142
},
{
"epoch": 0.14,
"grad_norm": 0.9780188865838979,
"learning_rate": 4.9464971637523465e-06,
"loss": 0.7196,
"step": 143
},
{
"epoch": 0.14,
"grad_norm": 0.9564318039750372,
"learning_rate": 4.94569252170905e-06,
"loss": 0.661,
"step": 144
},
{
"epoch": 0.14,
"grad_norm": 1.070366815703729,
"learning_rate": 4.944881940568219e-06,
"loss": 0.7288,
"step": 145
},
{
"epoch": 0.14,
"grad_norm": 0.951311961745425,
"learning_rate": 4.944065422298262e-06,
"loss": 0.6846,
"step": 146
},
{
"epoch": 0.15,
"grad_norm": 0.9924454965931206,
"learning_rate": 4.943242968882002e-06,
"loss": 0.6382,
"step": 147
},
{
"epoch": 0.15,
"grad_norm": 0.899515518807678,
"learning_rate": 4.942414582316679e-06,
"loss": 0.6949,
"step": 148
},
{
"epoch": 0.15,
"grad_norm": 0.9872873524132176,
"learning_rate": 4.94158026461394e-06,
"loss": 0.6765,
"step": 149
},
{
"epoch": 0.15,
"grad_norm": 0.8805014666801008,
"learning_rate": 4.9407400177998335e-06,
"loss": 0.6288,
"step": 150
},
{
"epoch": 0.15,
"grad_norm": 0.9546823658223654,
"learning_rate": 4.939893843914808e-06,
"loss": 0.6749,
"step": 151
},
{
"epoch": 0.15,
"grad_norm": 0.9963934911162335,
"learning_rate": 4.939041745013703e-06,
"loss": 0.6433,
"step": 152
},
{
"epoch": 0.15,
"grad_norm": 0.9708720605788677,
"learning_rate": 4.93818372316575e-06,
"loss": 0.6774,
"step": 153
},
{
"epoch": 0.15,
"grad_norm": 0.9473055224950518,
"learning_rate": 4.937319780454559e-06,
"loss": 0.685,
"step": 154
},
{
"epoch": 0.15,
"grad_norm": 0.9819312296607249,
"learning_rate": 4.936449918978123e-06,
"loss": 0.6611,
"step": 155
},
{
"epoch": 0.15,
"grad_norm": 0.9603793426190534,
"learning_rate": 4.9355741408488045e-06,
"loss": 0.6939,
"step": 156
},
{
"epoch": 0.15,
"grad_norm": 1.123192378969838,
"learning_rate": 4.9346924481933345e-06,
"loss": 0.6596,
"step": 157
},
{
"epoch": 0.16,
"grad_norm": 1.049989264664389,
"learning_rate": 4.933804843152808e-06,
"loss": 0.6664,
"step": 158
},
{
"epoch": 0.16,
"grad_norm": 0.9287929732517263,
"learning_rate": 4.932911327882679e-06,
"loss": 0.6401,
"step": 159
},
{
"epoch": 0.16,
"grad_norm": 0.913529924198974,
"learning_rate": 4.932011904552749e-06,
"loss": 0.6525,
"step": 160
},
{
"epoch": 0.16,
"grad_norm": 0.8908499663624638,
"learning_rate": 4.931106575347171e-06,
"loss": 0.6781,
"step": 161
},
{
"epoch": 0.16,
"grad_norm": 0.8871778748426131,
"learning_rate": 4.930195342464437e-06,
"loss": 0.6816,
"step": 162
},
{
"epoch": 0.16,
"grad_norm": 0.8819674351946986,
"learning_rate": 4.929278208117378e-06,
"loss": 0.6153,
"step": 163
},
{
"epoch": 0.16,
"grad_norm": 0.9892043785009771,
"learning_rate": 4.928355174533153e-06,
"loss": 0.6716,
"step": 164
},
{
"epoch": 0.16,
"grad_norm": 0.9215453869699415,
"learning_rate": 4.927426243953252e-06,
"loss": 0.6784,
"step": 165
},
{
"epoch": 0.16,
"grad_norm": 0.9652774517156478,
"learning_rate": 4.9264914186334775e-06,
"loss": 0.7372,
"step": 166
},
{
"epoch": 0.16,
"grad_norm": 0.8716382127920056,
"learning_rate": 4.925550700843953e-06,
"loss": 0.6594,
"step": 167
},
{
"epoch": 0.17,
"grad_norm": 1.02563864851558,
"learning_rate": 4.924604092869109e-06,
"loss": 0.6678,
"step": 168
},
{
"epoch": 0.17,
"grad_norm": 0.980206197810093,
"learning_rate": 4.923651597007679e-06,
"loss": 0.7016,
"step": 169
},
{
"epoch": 0.17,
"grad_norm": 0.9257361907115577,
"learning_rate": 4.922693215572695e-06,
"loss": 0.6537,
"step": 170
},
{
"epoch": 0.17,
"grad_norm": 0.9407972324104933,
"learning_rate": 4.9217289508914836e-06,
"loss": 0.6906,
"step": 171
},
{
"epoch": 0.17,
"grad_norm": 0.953792733030727,
"learning_rate": 4.920758805305654e-06,
"loss": 0.6874,
"step": 172
},
{
"epoch": 0.17,
"grad_norm": 0.9679520423391571,
"learning_rate": 4.919782781171101e-06,
"loss": 0.7057,
"step": 173
},
{
"epoch": 0.17,
"grad_norm": 0.8643570019367653,
"learning_rate": 4.918800880857991e-06,
"loss": 0.6224,
"step": 174
},
{
"epoch": 0.17,
"grad_norm": 0.9696150692028724,
"learning_rate": 4.917813106750763e-06,
"loss": 0.6623,
"step": 175
},
{
"epoch": 0.17,
"grad_norm": 0.9068337402094646,
"learning_rate": 4.916819461248119e-06,
"loss": 0.6756,
"step": 176
},
{
"epoch": 0.17,
"grad_norm": 0.9032967004326324,
"learning_rate": 4.915819946763017e-06,
"loss": 0.6624,
"step": 177
},
{
"epoch": 0.18,
"grad_norm": 0.9042931017160107,
"learning_rate": 4.914814565722671e-06,
"loss": 0.6711,
"step": 178
},
{
"epoch": 0.18,
"grad_norm": 0.8611902344265154,
"learning_rate": 4.913803320568538e-06,
"loss": 0.6652,
"step": 179
},
{
"epoch": 0.18,
"grad_norm": 1.1208348114642743,
"learning_rate": 4.912786213756316e-06,
"loss": 0.7054,
"step": 180
},
{
"epoch": 0.18,
"grad_norm": 0.8514149510371133,
"learning_rate": 4.91176324775594e-06,
"loss": 0.6777,
"step": 181
},
{
"epoch": 0.18,
"grad_norm": 1.0707648844359314,
"learning_rate": 4.91073442505157e-06,
"loss": 0.6793,
"step": 182
},
{
"epoch": 0.18,
"grad_norm": 1.007444952147854,
"learning_rate": 4.9096997481415885e-06,
"loss": 0.6532,
"step": 183
},
{
"epoch": 0.18,
"grad_norm": 0.8430194319590454,
"learning_rate": 4.908659219538598e-06,
"loss": 0.6532,
"step": 184
},
{
"epoch": 0.18,
"grad_norm": 0.9867088678023405,
"learning_rate": 4.907612841769407e-06,
"loss": 0.7107,
"step": 185
},
{
"epoch": 0.18,
"grad_norm": 0.9369756600022385,
"learning_rate": 4.90656061737503e-06,
"loss": 0.7306,
"step": 186
},
{
"epoch": 0.18,
"grad_norm": 0.8672758137612632,
"learning_rate": 4.905502548910681e-06,
"loss": 0.6412,
"step": 187
},
{
"epoch": 0.19,
"grad_norm": 0.8706341698734509,
"learning_rate": 4.904438638945761e-06,
"loss": 0.7022,
"step": 188
},
{
"epoch": 0.19,
"grad_norm": 0.9114920575560276,
"learning_rate": 4.903368890063861e-06,
"loss": 0.6328,
"step": 189
},
{
"epoch": 0.19,
"grad_norm": 1.0338522059220963,
"learning_rate": 4.9022933048627496e-06,
"loss": 0.7328,
"step": 190
},
{
"epoch": 0.19,
"grad_norm": 0.8878198226644961,
"learning_rate": 4.901211885954367e-06,
"loss": 0.6687,
"step": 191
},
{
"epoch": 0.19,
"grad_norm": 0.9423243153918482,
"learning_rate": 4.900124635964823e-06,
"loss": 0.6604,
"step": 192
},
{
"epoch": 0.19,
"grad_norm": 0.9129637670735964,
"learning_rate": 4.899031557534383e-06,
"loss": 0.6731,
"step": 193
},
{
"epoch": 0.19,
"grad_norm": 0.9896776529578333,
"learning_rate": 4.8979326533174696e-06,
"loss": 0.6608,
"step": 194
},
{
"epoch": 0.19,
"grad_norm": 0.8896434182833951,
"learning_rate": 4.896827925982654e-06,
"loss": 0.7074,
"step": 195
},
{
"epoch": 0.19,
"grad_norm": 0.8194553474739297,
"learning_rate": 4.895717378212644e-06,
"loss": 0.6335,
"step": 196
},
{
"epoch": 0.19,
"grad_norm": 0.9346190741848343,
"learning_rate": 4.894601012704284e-06,
"loss": 0.7068,
"step": 197
},
{
"epoch": 0.2,
"grad_norm": 0.874199819168889,
"learning_rate": 4.893478832168545e-06,
"loss": 0.6641,
"step": 198
},
{
"epoch": 0.2,
"grad_norm": 0.9159502482028847,
"learning_rate": 4.8923508393305224e-06,
"loss": 0.6731,
"step": 199
},
{
"epoch": 0.2,
"grad_norm": 0.8558744309352562,
"learning_rate": 4.891217036929422e-06,
"loss": 0.6083,
"step": 200
},
{
"epoch": 0.2,
"grad_norm": 0.936605889796404,
"learning_rate": 4.89007742771856e-06,
"loss": 0.6863,
"step": 201
},
{
"epoch": 0.2,
"grad_norm": 0.8664253450652424,
"learning_rate": 4.8889320144653525e-06,
"loss": 0.6693,
"step": 202
},
{
"epoch": 0.2,
"grad_norm": 0.859861919600542,
"learning_rate": 4.887780799951311e-06,
"loss": 0.63,
"step": 203
},
{
"epoch": 0.2,
"grad_norm": 0.8390583343121363,
"learning_rate": 4.8866237869720334e-06,
"loss": 0.7058,
"step": 204
},
{
"epoch": 0.2,
"grad_norm": 0.9197736229674498,
"learning_rate": 4.885460978337201e-06,
"loss": 0.6782,
"step": 205
},
{
"epoch": 0.2,
"grad_norm": 0.9686243956595285,
"learning_rate": 4.884292376870567e-06,
"loss": 0.6619,
"step": 206
},
{
"epoch": 0.2,
"grad_norm": 0.8454295473139757,
"learning_rate": 4.883117985409954e-06,
"loss": 0.6551,
"step": 207
},
{
"epoch": 0.21,
"grad_norm": 0.9658575404117379,
"learning_rate": 4.881937806807241e-06,
"loss": 0.647,
"step": 208
},
{
"epoch": 0.21,
"grad_norm": 0.8533364242044601,
"learning_rate": 4.880751843928365e-06,
"loss": 0.6563,
"step": 209
},
{
"epoch": 0.21,
"grad_norm": 0.9045931704682527,
"learning_rate": 4.879560099653306e-06,
"loss": 0.6667,
"step": 210
},
{
"epoch": 0.21,
"grad_norm": 0.8723782063352707,
"learning_rate": 4.8783625768760865e-06,
"loss": 0.6425,
"step": 211
},
{
"epoch": 0.21,
"grad_norm": 0.8891929748147736,
"learning_rate": 4.877159278504759e-06,
"loss": 0.638,
"step": 212
},
{
"epoch": 0.21,
"grad_norm": 0.8842932376273528,
"learning_rate": 4.875950207461403e-06,
"loss": 0.6437,
"step": 213
},
{
"epoch": 0.21,
"grad_norm": 0.8634008607871101,
"learning_rate": 4.8747353666821155e-06,
"loss": 0.6229,
"step": 214
},
{
"epoch": 0.21,
"grad_norm": 0.8749107029341481,
"learning_rate": 4.873514759117004e-06,
"loss": 0.6785,
"step": 215
},
{
"epoch": 0.21,
"grad_norm": 0.8531375678504259,
"learning_rate": 4.872288387730182e-06,
"loss": 0.637,
"step": 216
},
{
"epoch": 0.21,
"grad_norm": 0.8582175373389599,
"learning_rate": 4.871056255499758e-06,
"loss": 0.6444,
"step": 217
},
{
"epoch": 0.22,
"grad_norm": 1.0656353331347395,
"learning_rate": 4.86981836541783e-06,
"loss": 0.6756,
"step": 218
},
{
"epoch": 0.22,
"grad_norm": 0.8937004109826505,
"learning_rate": 4.8685747204904796e-06,
"loss": 0.652,
"step": 219
},
{
"epoch": 0.22,
"grad_norm": 0.9024529450351501,
"learning_rate": 4.867325323737765e-06,
"loss": 0.6606,
"step": 220
},
{
"epoch": 0.22,
"grad_norm": 0.847448497225188,
"learning_rate": 4.866070178193707e-06,
"loss": 0.6445,
"step": 221
},
{
"epoch": 0.22,
"grad_norm": 0.9790412024382392,
"learning_rate": 4.86480928690629e-06,
"loss": 0.6377,
"step": 222
},
{
"epoch": 0.22,
"grad_norm": 0.9166672541084665,
"learning_rate": 4.863542652937453e-06,
"loss": 0.6568,
"step": 223
},
{
"epoch": 0.22,
"grad_norm": 0.9053859567083279,
"learning_rate": 4.862270279363076e-06,
"loss": 0.6729,
"step": 224
},
{
"epoch": 0.22,
"grad_norm": 0.8963082010943075,
"learning_rate": 4.860992169272981e-06,
"loss": 0.7187,
"step": 225
},
{
"epoch": 0.22,
"grad_norm": 0.8625928467278408,
"learning_rate": 4.859708325770919e-06,
"loss": 0.6569,
"step": 226
},
{
"epoch": 0.22,
"grad_norm": 0.9572711715249694,
"learning_rate": 4.858418751974564e-06,
"loss": 0.6858,
"step": 227
},
{
"epoch": 0.23,
"grad_norm": 0.9116206947499983,
"learning_rate": 4.857123451015503e-06,
"loss": 0.6341,
"step": 228
},
{
"epoch": 0.23,
"grad_norm": 0.9469293485174248,
"learning_rate": 4.855822426039236e-06,
"loss": 0.6475,
"step": 229
},
{
"epoch": 0.23,
"grad_norm": 0.9320786521604542,
"learning_rate": 4.854515680205159e-06,
"loss": 0.7021,
"step": 230
},
{
"epoch": 0.23,
"grad_norm": 1.038184288949622,
"learning_rate": 4.853203216686562e-06,
"loss": 0.5999,
"step": 231
},
{
"epoch": 0.23,
"grad_norm": 0.904022305585284,
"learning_rate": 4.851885038670618e-06,
"loss": 0.6555,
"step": 232
},
{
"epoch": 0.23,
"grad_norm": 1.323477712215313,
"learning_rate": 4.8505611493583815e-06,
"loss": 0.6297,
"step": 233
},
{
"epoch": 0.23,
"grad_norm": 1.2176660817908431,
"learning_rate": 4.849231551964771e-06,
"loss": 0.6771,
"step": 234
},
{
"epoch": 0.23,
"grad_norm": 0.8930100103847493,
"learning_rate": 4.84789624971857e-06,
"loss": 0.6296,
"step": 235
},
{
"epoch": 0.23,
"grad_norm": 1.0104643222544036,
"learning_rate": 4.846555245862413e-06,
"loss": 0.6329,
"step": 236
},
{
"epoch": 0.23,
"grad_norm": 1.0006315975644342,
"learning_rate": 4.845208543652783e-06,
"loss": 0.6448,
"step": 237
},
{
"epoch": 0.23,
"grad_norm": 0.9719470767684136,
"learning_rate": 4.843856146359999e-06,
"loss": 0.6538,
"step": 238
},
{
"epoch": 0.24,
"grad_norm": 1.025734064898448,
"learning_rate": 4.842498057268209e-06,
"loss": 0.6335,
"step": 239
},
{
"epoch": 0.24,
"grad_norm": 0.8825687655634383,
"learning_rate": 4.841134279675386e-06,
"loss": 0.6728,
"step": 240
},
{
"epoch": 0.24,
"grad_norm": 0.9411597091872842,
"learning_rate": 4.839764816893315e-06,
"loss": 0.6756,
"step": 241
},
{
"epoch": 0.24,
"grad_norm": 0.8838664041310971,
"learning_rate": 4.838389672247585e-06,
"loss": 0.6593,
"step": 242
},
{
"epoch": 0.24,
"grad_norm": 0.8745260448073756,
"learning_rate": 4.837008849077588e-06,
"loss": 0.6397,
"step": 243
},
{
"epoch": 0.24,
"grad_norm": 0.8847557689287797,
"learning_rate": 4.835622350736499e-06,
"loss": 0.659,
"step": 244
},
{
"epoch": 0.24,
"grad_norm": 0.875366668905105,
"learning_rate": 4.8342301805912815e-06,
"loss": 0.627,
"step": 245
},
{
"epoch": 0.24,
"grad_norm": 0.8537326681265562,
"learning_rate": 4.832832342022666e-06,
"loss": 0.6477,
"step": 246
},
{
"epoch": 0.24,
"grad_norm": 0.8593654316451845,
"learning_rate": 4.831428838425153e-06,
"loss": 0.6698,
"step": 247
},
{
"epoch": 0.24,
"grad_norm": 0.9220753709893383,
"learning_rate": 4.830019673206997e-06,
"loss": 0.6477,
"step": 248
},
{
"epoch": 0.25,
"grad_norm": 0.8570602383594389,
"learning_rate": 4.828604849790201e-06,
"loss": 0.6815,
"step": 249
},
{
"epoch": 0.25,
"grad_norm": 0.9015787695209869,
"learning_rate": 4.827184371610511e-06,
"loss": 0.6293,
"step": 250
},
{
"epoch": 0.25,
"grad_norm": 0.900429307913389,
"learning_rate": 4.8257582421174025e-06,
"loss": 0.6799,
"step": 251
},
{
"epoch": 0.25,
"grad_norm": 0.8833032292011391,
"learning_rate": 4.824326464774076e-06,
"loss": 0.6665,
"step": 252
},
{
"epoch": 0.25,
"grad_norm": 0.9806435575510756,
"learning_rate": 4.822889043057446e-06,
"loss": 0.6828,
"step": 253
},
{
"epoch": 0.25,
"grad_norm": 0.8273111947815865,
"learning_rate": 4.821445980458134e-06,
"loss": 0.6265,
"step": 254
},
{
"epoch": 0.25,
"grad_norm": 0.9867943428090185,
"learning_rate": 4.8199972804804615e-06,
"loss": 0.6856,
"step": 255
},
{
"epoch": 0.25,
"grad_norm": 0.941670204697157,
"learning_rate": 4.8185429466424375e-06,
"loss": 0.6241,
"step": 256
},
{
"epoch": 0.25,
"grad_norm": 0.950190491805473,
"learning_rate": 4.817082982475753e-06,
"loss": 0.6733,
"step": 257
},
{
"epoch": 0.25,
"grad_norm": 0.9059424297489737,
"learning_rate": 4.815617391525772e-06,
"loss": 0.6717,
"step": 258
},
{
"epoch": 0.26,
"grad_norm": 0.833767481386628,
"learning_rate": 4.814146177351523e-06,
"loss": 0.6167,
"step": 259
},
{
"epoch": 0.26,
"grad_norm": 0.9120074119227388,
"learning_rate": 4.812669343525688e-06,
"loss": 0.6383,
"step": 260
},
{
"epoch": 0.26,
"grad_norm": 1.0846687157648525,
"learning_rate": 4.811186893634597e-06,
"loss": 0.6542,
"step": 261
},
{
"epoch": 0.26,
"grad_norm": 0.9273661121621178,
"learning_rate": 4.809698831278217e-06,
"loss": 0.6658,
"step": 262
},
{
"epoch": 0.26,
"grad_norm": 0.8614935020984111,
"learning_rate": 4.808205160070147e-06,
"loss": 0.6632,
"step": 263
},
{
"epoch": 0.26,
"grad_norm": 0.8640864376511351,
"learning_rate": 4.806705883637604e-06,
"loss": 0.6714,
"step": 264
},
{
"epoch": 0.26,
"grad_norm": 0.8588959960471779,
"learning_rate": 4.8052010056214184e-06,
"loss": 0.6365,
"step": 265
},
{
"epoch": 0.26,
"grad_norm": 0.9735734771646465,
"learning_rate": 4.80369052967602e-06,
"loss": 0.6939,
"step": 266
},
{
"epoch": 0.26,
"grad_norm": 0.9404781469926247,
"learning_rate": 4.802174459469435e-06,
"loss": 0.6291,
"step": 267
},
{
"epoch": 0.26,
"grad_norm": 0.9663046015849157,
"learning_rate": 4.800652798683277e-06,
"loss": 0.6516,
"step": 268
},
{
"epoch": 0.27,
"grad_norm": 0.8807991294468791,
"learning_rate": 4.799125551012731e-06,
"loss": 0.6364,
"step": 269
},
{
"epoch": 0.27,
"grad_norm": 0.9527629477737198,
"learning_rate": 4.7975927201665515e-06,
"loss": 0.6298,
"step": 270
},
{
"epoch": 0.27,
"grad_norm": 0.8525983935565186,
"learning_rate": 4.796054309867053e-06,
"loss": 0.6371,
"step": 271
},
{
"epoch": 0.27,
"grad_norm": 0.9405625850498484,
"learning_rate": 4.794510323850096e-06,
"loss": 0.6264,
"step": 272
},
{
"epoch": 0.27,
"grad_norm": 0.9478829826012205,
"learning_rate": 4.79296076586508e-06,
"loss": 0.6328,
"step": 273
},
{
"epoch": 0.27,
"grad_norm": 0.9599057130243777,
"learning_rate": 4.791405639674941e-06,
"loss": 0.6151,
"step": 274
},
{
"epoch": 0.27,
"grad_norm": 0.959409086045542,
"learning_rate": 4.789844949056131e-06,
"loss": 0.6369,
"step": 275
},
{
"epoch": 0.27,
"grad_norm": 0.8492616257179605,
"learning_rate": 4.788278697798619e-06,
"loss": 0.6554,
"step": 276
},
{
"epoch": 0.27,
"grad_norm": 0.9806877124101798,
"learning_rate": 4.7867068897058725e-06,
"loss": 0.6579,
"step": 277
},
{
"epoch": 0.27,
"grad_norm": 0.9000450903810971,
"learning_rate": 4.785129528594858e-06,
"loss": 0.6235,
"step": 278
},
{
"epoch": 0.28,
"grad_norm": 0.9010530779092166,
"learning_rate": 4.783546618296025e-06,
"loss": 0.658,
"step": 279
},
{
"epoch": 0.28,
"grad_norm": 0.9784681165218017,
"learning_rate": 4.781958162653298e-06,
"loss": 0.6676,
"step": 280
},
{
"epoch": 0.28,
"grad_norm": 0.9576730133216942,
"learning_rate": 4.780364165524067e-06,
"loss": 0.6228,
"step": 281
},
{
"epoch": 0.28,
"grad_norm": 1.0020359961640437,
"learning_rate": 4.778764630779184e-06,
"loss": 0.647,
"step": 282
},
{
"epoch": 0.28,
"grad_norm": 0.8688473783118098,
"learning_rate": 4.77715956230294e-06,
"loss": 0.652,
"step": 283
},
{
"epoch": 0.28,
"grad_norm": 0.9278357926734184,
"learning_rate": 4.775548963993072e-06,
"loss": 0.6291,
"step": 284
},
{
"epoch": 0.28,
"grad_norm": 0.9690649138149913,
"learning_rate": 4.77393283976074e-06,
"loss": 0.6509,
"step": 285
},
{
"epoch": 0.28,
"grad_norm": 0.9407962797966478,
"learning_rate": 4.7723111935305275e-06,
"loss": 0.6197,
"step": 286
},
{
"epoch": 0.28,
"grad_norm": 0.9768285903734765,
"learning_rate": 4.770684029240425e-06,
"loss": 0.6518,
"step": 287
},
{
"epoch": 0.28,
"grad_norm": 1.0077965365925552,
"learning_rate": 4.769051350841822e-06,
"loss": 0.6195,
"step": 288
},
{
"epoch": 0.29,
"grad_norm": 0.9474683243935313,
"learning_rate": 4.767413162299501e-06,
"loss": 0.6504,
"step": 289
},
{
"epoch": 0.29,
"grad_norm": 0.8585900243531269,
"learning_rate": 4.765769467591626e-06,
"loss": 0.6539,
"step": 290
},
{
"epoch": 0.29,
"grad_norm": 0.8904877736571806,
"learning_rate": 4.764120270709727e-06,
"loss": 0.6321,
"step": 291
},
{
"epoch": 0.29,
"grad_norm": 0.8628282782617729,
"learning_rate": 4.7624655756587e-06,
"loss": 0.648,
"step": 292
},
{
"epoch": 0.29,
"grad_norm": 2.669416127497495,
"learning_rate": 4.760805386456793e-06,
"loss": 0.6594,
"step": 293
},
{
"epoch": 0.29,
"grad_norm": 0.8981381784754892,
"learning_rate": 4.759139707135592e-06,
"loss": 0.6769,
"step": 294
},
{
"epoch": 0.29,
"grad_norm": 0.7879080017640494,
"learning_rate": 4.757468541740019e-06,
"loss": 0.6281,
"step": 295
},
{
"epoch": 0.29,
"grad_norm": 0.8871289305729931,
"learning_rate": 4.755791894328317e-06,
"loss": 0.6321,
"step": 296
},
{
"epoch": 0.29,
"grad_norm": 0.8299591322141577,
"learning_rate": 4.75410976897204e-06,
"loss": 0.6505,
"step": 297
},
{
"epoch": 0.29,
"grad_norm": 0.8909590264299827,
"learning_rate": 4.752422169756048e-06,
"loss": 0.6811,
"step": 298
},
{
"epoch": 0.3,
"grad_norm": 0.9254976675249351,
"learning_rate": 4.75072910077849e-06,
"loss": 0.6545,
"step": 299
},
{
"epoch": 0.3,
"grad_norm": 0.8588476509107275,
"learning_rate": 4.7490305661508006e-06,
"loss": 0.667,
"step": 300
},
{
"epoch": 0.3,
"grad_norm": 0.8412766854913993,
"learning_rate": 4.747326569997684e-06,
"loss": 0.6003,
"step": 301
},
{
"epoch": 0.3,
"grad_norm": 0.8760795055942986,
"learning_rate": 4.74561711645711e-06,
"loss": 0.6364,
"step": 302
},
{
"epoch": 0.3,
"grad_norm": 0.83956409107638,
"learning_rate": 4.7439022096803024e-06,
"loss": 0.6575,
"step": 303
},
{
"epoch": 0.3,
"grad_norm": 0.8380872276529072,
"learning_rate": 4.742181853831721e-06,
"loss": 0.6424,
"step": 304
},
{
"epoch": 0.3,
"grad_norm": 0.8125631213297304,
"learning_rate": 4.740456053089065e-06,
"loss": 0.6663,
"step": 305
},
{
"epoch": 0.3,
"grad_norm": 0.8823004853380154,
"learning_rate": 4.7387248116432524e-06,
"loss": 0.6301,
"step": 306
},
{
"epoch": 0.3,
"grad_norm": 0.8640770572532905,
"learning_rate": 4.736988133698416e-06,
"loss": 0.6406,
"step": 307
},
{
"epoch": 0.3,
"grad_norm": 0.8823776031811036,
"learning_rate": 4.735246023471885e-06,
"loss": 0.6115,
"step": 308
},
{
"epoch": 0.3,
"grad_norm": 0.993013360555103,
"learning_rate": 4.733498485194188e-06,
"loss": 0.6643,
"step": 309
},
{
"epoch": 0.31,
"grad_norm": 0.8904288915714781,
"learning_rate": 4.731745523109029e-06,
"loss": 0.6555,
"step": 310
},
{
"epoch": 0.31,
"grad_norm": 0.8926370506160105,
"learning_rate": 4.729987141473286e-06,
"loss": 0.6601,
"step": 311
},
{
"epoch": 0.31,
"grad_norm": 0.953062196989694,
"learning_rate": 4.728223344556996e-06,
"loss": 0.6809,
"step": 312
},
{
"epoch": 0.31,
"grad_norm": 0.8761001219987443,
"learning_rate": 4.7264541366433495e-06,
"loss": 0.6833,
"step": 313
},
{
"epoch": 0.31,
"grad_norm": 0.9566539767394971,
"learning_rate": 4.724679522028672e-06,
"loss": 0.6385,
"step": 314
},
{
"epoch": 0.31,
"grad_norm": 0.9891726403966534,
"learning_rate": 4.722899505022424e-06,
"loss": 0.6999,
"step": 315
},
{
"epoch": 0.31,
"grad_norm": 0.8037801938204335,
"learning_rate": 4.721114089947181e-06,
"loss": 0.5879,
"step": 316
},
{
"epoch": 0.31,
"grad_norm": 0.9130617389052504,
"learning_rate": 4.71932328113863e-06,
"loss": 0.6373,
"step": 317
},
{
"epoch": 0.31,
"grad_norm": 0.8259173345318234,
"learning_rate": 4.717527082945555e-06,
"loss": 0.6019,
"step": 318
},
{
"epoch": 0.31,
"grad_norm": 0.8371433538017989,
"learning_rate": 4.715725499729826e-06,
"loss": 0.6329,
"step": 319
},
{
"epoch": 0.32,
"grad_norm": 0.9057753463438688,
"learning_rate": 4.713918535866392e-06,
"loss": 0.6281,
"step": 320
},
{
"epoch": 0.32,
"grad_norm": 0.9029311795134953,
"learning_rate": 4.712106195743269e-06,
"loss": 0.6324,
"step": 321
},
{
"epoch": 0.32,
"grad_norm": 0.8858712104718339,
"learning_rate": 4.710288483761524e-06,
"loss": 0.6032,
"step": 322
},
{
"epoch": 0.32,
"grad_norm": 0.8645912795937802,
"learning_rate": 4.708465404335277e-06,
"loss": 0.6165,
"step": 323
},
{
"epoch": 0.32,
"grad_norm": 0.8006257669108777,
"learning_rate": 4.706636961891673e-06,
"loss": 0.6829,
"step": 324
},
{
"epoch": 0.32,
"grad_norm": 0.8617001997741787,
"learning_rate": 4.704803160870888e-06,
"loss": 0.6338,
"step": 325
},
{
"epoch": 0.32,
"grad_norm": 0.8966975029556187,
"learning_rate": 4.702964005726106e-06,
"loss": 0.6447,
"step": 326
},
{
"epoch": 0.32,
"grad_norm": 1.0966911622953233,
"learning_rate": 4.701119500923516e-06,
"loss": 0.6652,
"step": 327
},
{
"epoch": 0.32,
"grad_norm": 0.882038752989712,
"learning_rate": 4.699269650942296e-06,
"loss": 0.6022,
"step": 328
},
{
"epoch": 0.32,
"grad_norm": 0.9811354762670036,
"learning_rate": 4.697414460274605e-06,
"loss": 0.6672,
"step": 329
},
{
"epoch": 0.33,
"grad_norm": 0.8694666503441387,
"learning_rate": 4.6955539334255714e-06,
"loss": 0.6327,
"step": 330
},
{
"epoch": 0.33,
"grad_norm": 0.8671894922067316,
"learning_rate": 4.693688074913282e-06,
"loss": 0.6306,
"step": 331
},
{
"epoch": 0.33,
"grad_norm": 0.8737657642067588,
"learning_rate": 4.69181688926877e-06,
"loss": 0.6059,
"step": 332
},
{
"epoch": 0.33,
"grad_norm": 0.9300407192703628,
"learning_rate": 4.689940381036005e-06,
"loss": 0.6382,
"step": 333
},
{
"epoch": 0.33,
"grad_norm": 0.836435132546773,
"learning_rate": 4.6880585547718845e-06,
"loss": 0.6493,
"step": 334
},
{
"epoch": 0.33,
"grad_norm": 0.858710237692544,
"learning_rate": 4.686171415046217e-06,
"loss": 0.6392,
"step": 335
},
{
"epoch": 0.33,
"grad_norm": 0.9070208021531481,
"learning_rate": 4.684278966441716e-06,
"loss": 0.6074,
"step": 336
},
{
"epoch": 0.33,
"grad_norm": 0.8430640165966782,
"learning_rate": 4.682381213553986e-06,
"loss": 0.6386,
"step": 337
},
{
"epoch": 0.33,
"grad_norm": 0.9181203953874308,
"learning_rate": 4.680478160991514e-06,
"loss": 0.6246,
"step": 338
},
{
"epoch": 0.33,
"grad_norm": 0.8224529815938104,
"learning_rate": 4.678569813375654e-06,
"loss": 0.6367,
"step": 339
},
{
"epoch": 0.34,
"grad_norm": 0.8840051978175106,
"learning_rate": 4.676656175340621e-06,
"loss": 0.6802,
"step": 340
},
{
"epoch": 0.34,
"grad_norm": 0.851500472699274,
"learning_rate": 4.674737251533476e-06,
"loss": 0.6415,
"step": 341
},
{
"epoch": 0.34,
"grad_norm": 0.9237880941549678,
"learning_rate": 4.672813046614116e-06,
"loss": 0.6324,
"step": 342
},
{
"epoch": 0.34,
"grad_norm": 0.835807900767003,
"learning_rate": 4.670883565255264e-06,
"loss": 0.6196,
"step": 343
},
{
"epoch": 0.34,
"grad_norm": 0.8636603022537266,
"learning_rate": 4.668948812142454e-06,
"loss": 0.6054,
"step": 344
},
{
"epoch": 0.34,
"grad_norm": 0.8467987581796347,
"learning_rate": 4.6670087919740224e-06,
"loss": 0.6364,
"step": 345
},
{
"epoch": 0.34,
"grad_norm": 0.7976168655689396,
"learning_rate": 4.665063509461098e-06,
"loss": 0.6407,
"step": 346
},
{
"epoch": 0.34,
"grad_norm": 0.8787272073060952,
"learning_rate": 4.663112969327584e-06,
"loss": 0.6275,
"step": 347
},
{
"epoch": 0.34,
"grad_norm": 0.9257599968777892,
"learning_rate": 4.661157176310159e-06,
"loss": 0.6561,
"step": 348
},
{
"epoch": 0.34,
"grad_norm": 0.8310789526292252,
"learning_rate": 4.659196135158251e-06,
"loss": 0.6718,
"step": 349
},
{
"epoch": 0.35,
"grad_norm": 0.909951412942146,
"learning_rate": 4.657229850634033e-06,
"loss": 0.6861,
"step": 350
},
{
"epoch": 0.35,
"grad_norm": 0.8697647332483404,
"learning_rate": 4.6552583275124145e-06,
"loss": 0.6131,
"step": 351
},
{
"epoch": 0.35,
"grad_norm": 0.9004276489872411,
"learning_rate": 4.653281570581023e-06,
"loss": 0.6187,
"step": 352
},
{
"epoch": 0.35,
"grad_norm": 0.8571109763430029,
"learning_rate": 4.651299584640198e-06,
"loss": 0.6096,
"step": 353
},
{
"epoch": 0.35,
"grad_norm": 0.8734397889718317,
"learning_rate": 4.6493123745029765e-06,
"loss": 0.6915,
"step": 354
},
{
"epoch": 0.35,
"grad_norm": 0.8669799668424895,
"learning_rate": 4.64731994499508e-06,
"loss": 0.6401,
"step": 355
},
{
"epoch": 0.35,
"grad_norm": 0.8542352737214168,
"learning_rate": 4.645322300954908e-06,
"loss": 0.6559,
"step": 356
},
{
"epoch": 0.35,
"grad_norm": 0.870542275037437,
"learning_rate": 4.643319447233521e-06,
"loss": 0.6826,
"step": 357
},
{
"epoch": 0.35,
"grad_norm": 0.8583311322550404,
"learning_rate": 4.641311388694629e-06,
"loss": 0.6258,
"step": 358
},
{
"epoch": 0.35,
"grad_norm": 0.8737060383296704,
"learning_rate": 4.639298130214585e-06,
"loss": 0.6339,
"step": 359
},
{
"epoch": 0.36,
"grad_norm": 0.8912967573704136,
"learning_rate": 4.637279676682367e-06,
"loss": 0.6469,
"step": 360
},
{
"epoch": 0.36,
"grad_norm": 0.8715196658497849,
"learning_rate": 4.635256032999569e-06,
"loss": 0.6646,
"step": 361
},
{
"epoch": 0.36,
"grad_norm": 0.8662783788798808,
"learning_rate": 4.633227204080389e-06,
"loss": 0.6338,
"step": 362
},
{
"epoch": 0.36,
"grad_norm": 0.8927387239503449,
"learning_rate": 4.631193194851617e-06,
"loss": 0.6251,
"step": 363
},
{
"epoch": 0.36,
"grad_norm": 0.9498780462874753,
"learning_rate": 4.629154010252624e-06,
"loss": 0.615,
"step": 364
},
{
"epoch": 0.36,
"grad_norm": 0.956056661851013,
"learning_rate": 4.627109655235345e-06,
"loss": 0.6302,
"step": 365
},
{
"epoch": 0.36,
"grad_norm": 0.9067649800180307,
"learning_rate": 4.625060134764273e-06,
"loss": 0.6604,
"step": 366
},
{
"epoch": 0.36,
"grad_norm": 1.027192573699629,
"learning_rate": 4.623005453816447e-06,
"loss": 0.5859,
"step": 367
},
{
"epoch": 0.36,
"grad_norm": 0.9203828984363205,
"learning_rate": 4.620945617381435e-06,
"loss": 0.6202,
"step": 368
},
{
"epoch": 0.36,
"grad_norm": 0.951292364398945,
"learning_rate": 4.618880630461324e-06,
"loss": 0.65,
"step": 369
},
{
"epoch": 0.37,
"grad_norm": 0.9118476668711323,
"learning_rate": 4.6168104980707105e-06,
"loss": 0.6487,
"step": 370
},
{
"epoch": 0.37,
"grad_norm": 0.878750842802124,
"learning_rate": 4.614735225236685e-06,
"loss": 0.615,
"step": 371
},
{
"epoch": 0.37,
"grad_norm": 0.9060696025734105,
"learning_rate": 4.612654816998821e-06,
"loss": 0.6248,
"step": 372
},
{
"epoch": 0.37,
"grad_norm": 0.9609216437070113,
"learning_rate": 4.610569278409164e-06,
"loss": 0.6778,
"step": 373
},
{
"epoch": 0.37,
"grad_norm": 0.898695374253286,
"learning_rate": 4.608478614532215e-06,
"loss": 0.6336,
"step": 374
},
{
"epoch": 0.37,
"grad_norm": 0.9341703247271076,
"learning_rate": 4.606382830444925e-06,
"loss": 0.7034,
"step": 375
},
{
"epoch": 0.37,
"grad_norm": 0.9888477139866373,
"learning_rate": 4.604281931236675e-06,
"loss": 0.6101,
"step": 376
},
{
"epoch": 0.37,
"grad_norm": 0.8782761928393398,
"learning_rate": 4.602175922009272e-06,
"loss": 0.5853,
"step": 377
},
{
"epoch": 0.37,
"grad_norm": 0.9482746932782271,
"learning_rate": 4.600064807876929e-06,
"loss": 0.6652,
"step": 378
},
{
"epoch": 0.37,
"grad_norm": 0.8564815677643716,
"learning_rate": 4.597948593966256e-06,
"loss": 0.6782,
"step": 379
},
{
"epoch": 0.38,
"grad_norm": 0.9240455443174742,
"learning_rate": 4.595827285416248e-06,
"loss": 0.6275,
"step": 380
},
{
"epoch": 0.38,
"grad_norm": 0.8929439029031102,
"learning_rate": 4.59370088737827e-06,
"loss": 0.6398,
"step": 381
},
{
"epoch": 0.38,
"grad_norm": 0.9148342501219822,
"learning_rate": 4.59156940501605e-06,
"loss": 0.6359,
"step": 382
},
{
"epoch": 0.38,
"grad_norm": 0.946705923671764,
"learning_rate": 4.589432843505659e-06,
"loss": 0.6128,
"step": 383
},
{
"epoch": 0.38,
"grad_norm": 0.9114594357972271,
"learning_rate": 4.587291208035504e-06,
"loss": 0.6329,
"step": 384
},
{
"epoch": 0.38,
"grad_norm": 0.8637275857494376,
"learning_rate": 4.585144503806312e-06,
"loss": 0.6444,
"step": 385
},
{
"epoch": 0.38,
"grad_norm": 0.9422654099393534,
"learning_rate": 4.5829927360311224e-06,
"loss": 0.619,
"step": 386
},
{
"epoch": 0.38,
"grad_norm": 0.8652896213966386,
"learning_rate": 4.5808359099352675e-06,
"loss": 0.5979,
"step": 387
},
{
"epoch": 0.38,
"grad_norm": 1.088012826550748,
"learning_rate": 4.578674030756364e-06,
"loss": 0.6621,
"step": 388
},
{
"epoch": 0.38,
"grad_norm": 0.8470033759027679,
"learning_rate": 4.576507103744299e-06,
"loss": 0.6089,
"step": 389
},
{
"epoch": 0.38,
"grad_norm": 0.8949204387411863,
"learning_rate": 4.574335134161219e-06,
"loss": 0.6306,
"step": 390
},
{
"epoch": 0.39,
"grad_norm": 0.9395950836852421,
"learning_rate": 4.572158127281516e-06,
"loss": 0.6559,
"step": 391
},
{
"epoch": 0.39,
"grad_norm": 0.8035578632803461,
"learning_rate": 4.569976088391813e-06,
"loss": 0.6262,
"step": 392
},
{
"epoch": 0.39,
"grad_norm": 0.8860765726891832,
"learning_rate": 4.567789022790953e-06,
"loss": 0.6469,
"step": 393
},
{
"epoch": 0.39,
"grad_norm": 0.9318594734495474,
"learning_rate": 4.565596935789987e-06,
"loss": 0.6153,
"step": 394
},
{
"epoch": 0.39,
"grad_norm": 0.8747678520637625,
"learning_rate": 4.5633998327121595e-06,
"loss": 0.6666,
"step": 395
},
{
"epoch": 0.39,
"grad_norm": 0.8349863455158287,
"learning_rate": 4.561197718892896e-06,
"loss": 0.6017,
"step": 396
},
{
"epoch": 0.39,
"grad_norm": 0.8835445705707418,
"learning_rate": 4.558990599679787e-06,
"loss": 0.6418,
"step": 397
},
{
"epoch": 0.39,
"grad_norm": 0.8929109099342171,
"learning_rate": 4.556778480432584e-06,
"loss": 0.6326,
"step": 398
},
{
"epoch": 0.39,
"grad_norm": 0.8364797596101992,
"learning_rate": 4.554561366523176e-06,
"loss": 0.6782,
"step": 399
},
{
"epoch": 0.39,
"grad_norm": 0.9260600125305487,
"learning_rate": 4.552339263335581e-06,
"loss": 0.595,
"step": 400
},
{
"epoch": 0.4,
"grad_norm": 0.8599195336450693,
"learning_rate": 4.550112176265937e-06,
"loss": 0.5968,
"step": 401
},
{
"epoch": 0.4,
"grad_norm": 0.9826419950054966,
"learning_rate": 4.54788011072248e-06,
"loss": 0.6456,
"step": 402
},
{
"epoch": 0.4,
"grad_norm": 0.9203378835364787,
"learning_rate": 4.5456430721255384e-06,
"loss": 0.7066,
"step": 403
},
{
"epoch": 0.4,
"grad_norm": 0.8939684740473081,
"learning_rate": 4.5434010659075165e-06,
"loss": 0.647,
"step": 404
},
{
"epoch": 0.4,
"grad_norm": 0.8917246609121712,
"learning_rate": 4.541154097512881e-06,
"loss": 0.6568,
"step": 405
},
{
"epoch": 0.4,
"grad_norm": 0.9075876860734314,
"learning_rate": 4.538902172398151e-06,
"loss": 0.6798,
"step": 406
},
{
"epoch": 0.4,
"grad_norm": 0.8110519815726133,
"learning_rate": 4.53664529603188e-06,
"loss": 0.5882,
"step": 407
},
{
"epoch": 0.4,
"grad_norm": 0.8925518187452417,
"learning_rate": 4.534383473894646e-06,
"loss": 0.6181,
"step": 408
},
{
"epoch": 0.4,
"grad_norm": 0.9114329206202244,
"learning_rate": 4.532116711479039e-06,
"loss": 0.6026,
"step": 409
},
{
"epoch": 0.4,
"grad_norm": 0.846641811272952,
"learning_rate": 4.529845014289642e-06,
"loss": 0.5825,
"step": 410
},
{
"epoch": 0.41,
"grad_norm": 0.8740877163769825,
"learning_rate": 4.527568387843025e-06,
"loss": 0.6989,
"step": 411
},
{
"epoch": 0.41,
"grad_norm": 0.9447837582033838,
"learning_rate": 4.525286837667726e-06,
"loss": 0.6582,
"step": 412
},
{
"epoch": 0.41,
"grad_norm": 0.8674554766425348,
"learning_rate": 4.523000369304244e-06,
"loss": 0.6217,
"step": 413
},
{
"epoch": 0.41,
"grad_norm": 0.8500815565912371,
"learning_rate": 4.520708988305014e-06,
"loss": 0.5941,
"step": 414
},
{
"epoch": 0.41,
"grad_norm": 1.0164281137951803,
"learning_rate": 4.518412700234407e-06,
"loss": 0.6469,
"step": 415
},
{
"epoch": 0.41,
"grad_norm": 0.8420383857000564,
"learning_rate": 4.516111510668707e-06,
"loss": 0.6237,
"step": 416
},
{
"epoch": 0.41,
"grad_norm": 0.9412530438844999,
"learning_rate": 4.513805425196103e-06,
"loss": 0.6386,
"step": 417
},
{
"epoch": 0.41,
"grad_norm": 0.9175391505475148,
"learning_rate": 4.511494449416671e-06,
"loss": 0.6372,
"step": 418
},
{
"epoch": 0.41,
"grad_norm": 0.8965680547901378,
"learning_rate": 4.509178588942365e-06,
"loss": 0.6398,
"step": 419
},
{
"epoch": 0.41,
"grad_norm": 1.161349181913427,
"learning_rate": 4.506857849396998e-06,
"loss": 0.7087,
"step": 420
},
{
"epoch": 0.42,
"grad_norm": 0.9562290751417603,
"learning_rate": 4.504532236416234e-06,
"loss": 0.6969,
"step": 421
},
{
"epoch": 0.42,
"grad_norm": 0.9026029147011317,
"learning_rate": 4.502201755647571e-06,
"loss": 0.6637,
"step": 422
},
{
"epoch": 0.42,
"grad_norm": 0.9230372085626913,
"learning_rate": 4.499866412750324e-06,
"loss": 0.618,
"step": 423
},
{
"epoch": 0.42,
"grad_norm": 0.9413375641434438,
"learning_rate": 4.4975262133956235e-06,
"loss": 0.6054,
"step": 424
},
{
"epoch": 0.42,
"grad_norm": 0.9455820405339215,
"learning_rate": 4.495181163266384e-06,
"loss": 0.6606,
"step": 425
},
{
"epoch": 0.42,
"grad_norm": 0.9648271805379507,
"learning_rate": 4.492831268057307e-06,
"loss": 0.6676,
"step": 426
},
{
"epoch": 0.42,
"grad_norm": 0.8351069990624824,
"learning_rate": 4.490476533474854e-06,
"loss": 0.6479,
"step": 427
},
{
"epoch": 0.42,
"grad_norm": 0.9101496071500632,
"learning_rate": 4.488116965237244e-06,
"loss": 0.6413,
"step": 428
},
{
"epoch": 0.42,
"grad_norm": 0.8280285227695013,
"learning_rate": 4.485752569074429e-06,
"loss": 0.6223,
"step": 429
},
{
"epoch": 0.42,
"grad_norm": 0.8992598915107023,
"learning_rate": 4.4833833507280884e-06,
"loss": 0.6742,
"step": 430
},
{
"epoch": 0.43,
"grad_norm": 0.8923843348896123,
"learning_rate": 4.48100931595161e-06,
"loss": 0.5947,
"step": 431
},
{
"epoch": 0.43,
"grad_norm": 0.8664845993947492,
"learning_rate": 4.478630470510078e-06,
"loss": 0.6369,
"step": 432
},
{
"epoch": 0.43,
"grad_norm": 0.8726591582806938,
"learning_rate": 4.476246820180259e-06,
"loss": 0.6554,
"step": 433
},
{
"epoch": 0.43,
"grad_norm": 0.8410498499068318,
"learning_rate": 4.473858370750589e-06,
"loss": 0.6293,
"step": 434
},
{
"epoch": 0.43,
"grad_norm": 0.8755653995170062,
"learning_rate": 4.4714651280211555e-06,
"loss": 0.634,
"step": 435
},
{
"epoch": 0.43,
"grad_norm": 0.9524417549442475,
"learning_rate": 4.469067097803689e-06,
"loss": 0.6435,
"step": 436
},
{
"epoch": 0.43,
"grad_norm": 0.7960663615043987,
"learning_rate": 4.466664285921543e-06,
"loss": 0.6331,
"step": 437
},
{
"epoch": 0.43,
"grad_norm": 0.8888628596240454,
"learning_rate": 4.464256698209685e-06,
"loss": 0.6552,
"step": 438
},
{
"epoch": 0.43,
"grad_norm": 0.9118498141803711,
"learning_rate": 4.461844340514678e-06,
"loss": 0.5826,
"step": 439
},
{
"epoch": 0.43,
"grad_norm": 0.854812295494578,
"learning_rate": 4.459427218694671e-06,
"loss": 0.6156,
"step": 440
},
{
"epoch": 0.44,
"grad_norm": 0.8894677182707545,
"learning_rate": 4.457005338619379e-06,
"loss": 0.6316,
"step": 441
},
{
"epoch": 0.44,
"grad_norm": 0.92320969218264,
"learning_rate": 4.454578706170075e-06,
"loss": 0.6139,
"step": 442
},
{
"epoch": 0.44,
"grad_norm": 0.9227943336374008,
"learning_rate": 4.452147327239571e-06,
"loss": 0.6377,
"step": 443
},
{
"epoch": 0.44,
"grad_norm": 0.8967111083779129,
"learning_rate": 4.4497112077322045e-06,
"loss": 0.6004,
"step": 444
},
{
"epoch": 0.44,
"grad_norm": 0.8705464350911942,
"learning_rate": 4.447270353563828e-06,
"loss": 0.7149,
"step": 445
},
{
"epoch": 0.44,
"grad_norm": 0.9047682842604887,
"learning_rate": 4.444824770661788e-06,
"loss": 0.6214,
"step": 446
},
{
"epoch": 0.44,
"grad_norm": 0.8332571186935797,
"learning_rate": 4.442374464964916e-06,
"loss": 0.61,
"step": 447
},
{
"epoch": 0.44,
"grad_norm": 0.9191644952904162,
"learning_rate": 4.439919442423513e-06,
"loss": 0.6974,
"step": 448
},
{
"epoch": 0.44,
"grad_norm": 0.8351147721256271,
"learning_rate": 4.437459708999332e-06,
"loss": 0.6633,
"step": 449
},
{
"epoch": 0.44,
"grad_norm": 0.892336463583514,
"learning_rate": 4.434995270665569e-06,
"loss": 0.6406,
"step": 450
},
{
"epoch": 0.45,
"grad_norm": 0.8650896389402535,
"learning_rate": 4.432526133406843e-06,
"loss": 0.6345,
"step": 451
},
{
"epoch": 0.45,
"grad_norm": 0.8153513627930788,
"learning_rate": 4.430052303219185e-06,
"loss": 0.6188,
"step": 452
},
{
"epoch": 0.45,
"grad_norm": 0.8565464920766526,
"learning_rate": 4.42757378611002e-06,
"loss": 0.631,
"step": 453
},
{
"epoch": 0.45,
"grad_norm": 0.8399364160683044,
"learning_rate": 4.425090588098158e-06,
"loss": 0.6577,
"step": 454
},
{
"epoch": 0.45,
"grad_norm": 0.8912303770048037,
"learning_rate": 4.422602715213774e-06,
"loss": 0.6249,
"step": 455
},
{
"epoch": 0.45,
"grad_norm": 0.8665388252830907,
"learning_rate": 4.4201101734983965e-06,
"loss": 0.6273,
"step": 456
},
{
"epoch": 0.45,
"grad_norm": 0.8464141917671866,
"learning_rate": 4.4176129690048905e-06,
"loss": 0.5851,
"step": 457
},
{
"epoch": 0.45,
"grad_norm": 0.8811314655875526,
"learning_rate": 4.415111107797445e-06,
"loss": 0.6214,
"step": 458
},
{
"epoch": 0.45,
"grad_norm": 0.8083079113454086,
"learning_rate": 4.412604595951558e-06,
"loss": 0.6344,
"step": 459
},
{
"epoch": 0.45,
"grad_norm": 0.8720341388764445,
"learning_rate": 4.410093439554019e-06,
"loss": 0.632,
"step": 460
},
{
"epoch": 0.45,
"grad_norm": 0.8502476104259754,
"learning_rate": 4.407577644702897e-06,
"loss": 0.6184,
"step": 461
},
{
"epoch": 0.46,
"grad_norm": 0.8813994548710394,
"learning_rate": 4.405057217507527e-06,
"loss": 0.637,
"step": 462
},
{
"epoch": 0.46,
"grad_norm": 0.917114660031616,
"learning_rate": 4.40253216408849e-06,
"loss": 0.7224,
"step": 463
},
{
"epoch": 0.46,
"grad_norm": 0.8092450819851889,
"learning_rate": 4.400002490577604e-06,
"loss": 0.6247,
"step": 464
},
{
"epoch": 0.46,
"grad_norm": 0.9134141894112806,
"learning_rate": 4.397468203117905e-06,
"loss": 0.6813,
"step": 465
},
{
"epoch": 0.46,
"grad_norm": 0.8877869537323849,
"learning_rate": 4.394929307863633e-06,
"loss": 0.6205,
"step": 466
},
{
"epoch": 0.46,
"grad_norm": 0.866374769727955,
"learning_rate": 4.392385810980218e-06,
"loss": 0.6463,
"step": 467
},
{
"epoch": 0.46,
"grad_norm": 1.071041707971838,
"learning_rate": 4.3898377186442665e-06,
"loss": 0.6571,
"step": 468
},
{
"epoch": 0.46,
"grad_norm": 0.8411856812284965,
"learning_rate": 4.38728503704354e-06,
"loss": 0.6411,
"step": 469
},
{
"epoch": 0.46,
"grad_norm": 0.851584061852996,
"learning_rate": 4.38472777237695e-06,
"loss": 0.6966,
"step": 470
},
{
"epoch": 0.46,
"grad_norm": 0.8291807888706711,
"learning_rate": 4.382165930854534e-06,
"loss": 0.6025,
"step": 471
},
{
"epoch": 0.47,
"grad_norm": 0.8079509788666187,
"learning_rate": 4.379599518697444e-06,
"loss": 0.6461,
"step": 472
},
{
"epoch": 0.47,
"grad_norm": 0.8116984175253492,
"learning_rate": 4.377028542137933e-06,
"loss": 0.6172,
"step": 473
},
{
"epoch": 0.47,
"grad_norm": 0.8194670399241157,
"learning_rate": 4.374453007419336e-06,
"loss": 0.6244,
"step": 474
},
{
"epoch": 0.47,
"grad_norm": 0.8544501253810839,
"learning_rate": 4.371872920796059e-06,
"loss": 0.6085,
"step": 475
},
{
"epoch": 0.47,
"grad_norm": 0.8597333545854512,
"learning_rate": 4.369288288533561e-06,
"loss": 0.6757,
"step": 476
},
{
"epoch": 0.47,
"grad_norm": 0.912646868045953,
"learning_rate": 4.366699116908339e-06,
"loss": 0.6487,
"step": 477
},
{
"epoch": 0.47,
"grad_norm": 0.8476447032290744,
"learning_rate": 4.364105412207914e-06,
"loss": 0.5805,
"step": 478
},
{
"epoch": 0.47,
"grad_norm": 0.9241438674933707,
"learning_rate": 4.3615071807308165e-06,
"loss": 0.6758,
"step": 479
},
{
"epoch": 0.47,
"grad_norm": 0.8122777487524169,
"learning_rate": 4.358904428786565e-06,
"loss": 0.6153,
"step": 480
},
{
"epoch": 0.47,
"grad_norm": 0.9018439615133529,
"learning_rate": 4.356297162695662e-06,
"loss": 0.6453,
"step": 481
},
{
"epoch": 0.48,
"grad_norm": 0.9627678054923827,
"learning_rate": 4.353685388789567e-06,
"loss": 0.6282,
"step": 482
},
{
"epoch": 0.48,
"grad_norm": 0.8406101285686195,
"learning_rate": 4.351069113410689e-06,
"loss": 0.6078,
"step": 483
},
{
"epoch": 0.48,
"grad_norm": 0.835904176007674,
"learning_rate": 4.348448342912365e-06,
"loss": 0.6436,
"step": 484
},
{
"epoch": 0.48,
"grad_norm": 0.8568187537534087,
"learning_rate": 4.345823083658855e-06,
"loss": 0.6677,
"step": 485
},
{
"epoch": 0.48,
"grad_norm": 0.8347696865421858,
"learning_rate": 4.34319334202531e-06,
"loss": 0.6171,
"step": 486
},
{
"epoch": 0.48,
"grad_norm": 0.8348209799902971,
"learning_rate": 4.340559124397774e-06,
"loss": 0.6239,
"step": 487
},
{
"epoch": 0.48,
"grad_norm": 0.8852176634720593,
"learning_rate": 4.3379204371731555e-06,
"loss": 0.6573,
"step": 488
},
{
"epoch": 0.48,
"grad_norm": 0.8637190549045177,
"learning_rate": 4.335277286759218e-06,
"loss": 0.6394,
"step": 489
},
{
"epoch": 0.48,
"grad_norm": 0.8678111098963714,
"learning_rate": 4.332629679574566e-06,
"loss": 0.6011,
"step": 490
},
{
"epoch": 0.48,
"grad_norm": 0.9549679439132955,
"learning_rate": 4.3299776220486235e-06,
"loss": 0.6205,
"step": 491
},
{
"epoch": 0.49,
"grad_norm": 0.834971592773752,
"learning_rate": 4.3273211206216235e-06,
"loss": 0.6121,
"step": 492
},
{
"epoch": 0.49,
"grad_norm": 0.8863673003507534,
"learning_rate": 4.324660181744589e-06,
"loss": 0.6118,
"step": 493
},
{
"epoch": 0.49,
"grad_norm": 0.8984092199591801,
"learning_rate": 4.321994811879321e-06,
"loss": 0.6897,
"step": 494
},
{
"epoch": 0.49,
"grad_norm": 0.8880703411725283,
"learning_rate": 4.319325017498379e-06,
"loss": 0.5674,
"step": 495
},
{
"epoch": 0.49,
"grad_norm": 0.8213603864968535,
"learning_rate": 4.316650805085068e-06,
"loss": 0.5959,
"step": 496
},
{
"epoch": 0.49,
"grad_norm": 0.8521624706722543,
"learning_rate": 4.31397218113342e-06,
"loss": 0.6439,
"step": 497
},
{
"epoch": 0.49,
"grad_norm": 0.8407539759191036,
"learning_rate": 4.311289152148182e-06,
"loss": 0.6495,
"step": 498
},
{
"epoch": 0.49,
"grad_norm": 0.8832549384112272,
"learning_rate": 4.308601724644797e-06,
"loss": 0.6291,
"step": 499
},
{
"epoch": 0.49,
"grad_norm": 0.9007160602566869,
"learning_rate": 4.305909905149389e-06,
"loss": 0.6219,
"step": 500
},
{
"epoch": 0.49,
"grad_norm": 0.8798855352674151,
"learning_rate": 4.303213700198749e-06,
"loss": 0.6365,
"step": 501
},
{
"epoch": 0.5,
"grad_norm": 0.841611556568396,
"learning_rate": 4.300513116340317e-06,
"loss": 0.6464,
"step": 502
},
{
"epoch": 0.5,
"grad_norm": 0.9363258404112693,
"learning_rate": 4.297808160132165e-06,
"loss": 0.6181,
"step": 503
},
{
"epoch": 0.5,
"grad_norm": 0.8390395946351076,
"learning_rate": 4.295098838142985e-06,
"loss": 0.6186,
"step": 504
},
{
"epoch": 0.5,
"grad_norm": 0.8925228288644416,
"learning_rate": 4.292385156952069e-06,
"loss": 0.5934,
"step": 505
},
{
"epoch": 0.5,
"grad_norm": 0.8557639299914508,
"learning_rate": 4.289667123149296e-06,
"loss": 0.6458,
"step": 506
},
{
"epoch": 0.5,
"grad_norm": 0.8078616638133473,
"learning_rate": 4.2869447433351165e-06,
"loss": 0.6045,
"step": 507
},
{
"epoch": 0.5,
"eval_loss": 0.6127398610115051,
"eval_runtime": 1883.8371,
"eval_samples_per_second": 0.701,
"eval_steps_per_second": 0.078,
"step": 507
},
{
"epoch": 0.5,
"grad_norm": 0.8362298388304847,
"learning_rate": 4.284218024120531e-06,
"loss": 0.706,
"step": 508
},
{
"epoch": 0.5,
"grad_norm": 0.9282283575285319,
"learning_rate": 4.2814869721270805e-06,
"loss": 0.7029,
"step": 509
},
{
"epoch": 0.5,
"grad_norm": 0.8047669518916841,
"learning_rate": 4.278751593986826e-06,
"loss": 0.6209,
"step": 510
},
{
"epoch": 0.5,
"grad_norm": 0.8454914208142845,
"learning_rate": 4.276011896342336e-06,
"loss": 0.621,
"step": 511
},
{
"epoch": 0.51,
"grad_norm": 0.8078879153619455,
"learning_rate": 4.273267885846667e-06,
"loss": 0.6198,
"step": 512
},
{
"epoch": 0.51,
"grad_norm": 0.8462035097672782,
"learning_rate": 4.270519569163348e-06,
"loss": 0.6244,
"step": 513
},
{
"epoch": 0.51,
"grad_norm": 0.816672394583137,
"learning_rate": 4.267766952966369e-06,
"loss": 0.5808,
"step": 514
},
{
"epoch": 0.51,
"grad_norm": 0.8937772002097515,
"learning_rate": 4.2650100439401565e-06,
"loss": 0.6484,
"step": 515
},
{
"epoch": 0.51,
"grad_norm": 0.8250884650740417,
"learning_rate": 4.262248848779563e-06,
"loss": 0.6666,
"step": 516
},
{
"epoch": 0.51,
"grad_norm": 0.8610024828211696,
"learning_rate": 4.259483374189851e-06,
"loss": 0.5824,
"step": 517
},
{
"epoch": 0.51,
"grad_norm": 0.9330039890470406,
"learning_rate": 4.256713626886674e-06,
"loss": 0.6528,
"step": 518
},
{
"epoch": 0.51,
"grad_norm": 1.0641883529059932,
"learning_rate": 4.253939613596058e-06,
"loss": 0.6483,
"step": 519
},
{
"epoch": 0.51,
"grad_norm": 0.8107458510268721,
"learning_rate": 4.251161341054396e-06,
"loss": 0.5966,
"step": 520
},
{
"epoch": 0.51,
"grad_norm": 0.8834018306419973,
"learning_rate": 4.248378816008418e-06,
"loss": 0.6007,
"step": 521
},
{
"epoch": 0.52,
"grad_norm": 0.8655653950439697,
"learning_rate": 4.245592045215182e-06,
"loss": 0.6102,
"step": 522
},
{
"epoch": 0.52,
"grad_norm": 0.8412296785814055,
"learning_rate": 4.242801035442059e-06,
"loss": 0.6269,
"step": 523
},
{
"epoch": 0.52,
"grad_norm": 0.8492279489616671,
"learning_rate": 4.240005793466709e-06,
"loss": 0.6272,
"step": 524
},
{
"epoch": 0.52,
"grad_norm": 0.8524964006471626,
"learning_rate": 4.237206326077073e-06,
"loss": 0.6157,
"step": 525
},
{
"epoch": 0.52,
"grad_norm": 0.9309187782618586,
"learning_rate": 4.234402640071355e-06,
"loss": 0.6323,
"step": 526
},
{
"epoch": 0.52,
"grad_norm": 0.8767215287912865,
"learning_rate": 4.231594742257997e-06,
"loss": 0.6546,
"step": 527
},
{
"epoch": 0.52,
"grad_norm": 0.8857382937100764,
"learning_rate": 4.228782639455674e-06,
"loss": 0.5967,
"step": 528
},
{
"epoch": 0.52,
"grad_norm": 0.8878805691610596,
"learning_rate": 4.225966338493272e-06,
"loss": 0.6291,
"step": 529
},
{
"epoch": 0.52,
"grad_norm": 0.8533831355843948,
"learning_rate": 4.223145846209868e-06,
"loss": 0.6365,
"step": 530
},
{
"epoch": 0.52,
"grad_norm": 0.8780516405279318,
"learning_rate": 4.220321169454723e-06,
"loss": 0.621,
"step": 531
},
{
"epoch": 0.53,
"grad_norm": 0.8555312461179767,
"learning_rate": 4.217492315087255e-06,
"loss": 0.647,
"step": 532
},
{
"epoch": 0.53,
"grad_norm": 0.8609895152612035,
"learning_rate": 4.214659289977028e-06,
"loss": 0.5775,
"step": 533
},
{
"epoch": 0.53,
"grad_norm": 0.8296488468656131,
"learning_rate": 4.211822101003734e-06,
"loss": 0.6499,
"step": 534
},
{
"epoch": 0.53,
"grad_norm": 0.821402081187483,
"learning_rate": 4.2089807550571786e-06,
"loss": 0.5918,
"step": 535
},
{
"epoch": 0.53,
"grad_norm": 0.918003540760866,
"learning_rate": 4.20613525903726e-06,
"loss": 0.6449,
"step": 536
},
{
"epoch": 0.53,
"grad_norm": 0.8811180879144805,
"learning_rate": 4.203285619853954e-06,
"loss": 0.6343,
"step": 537
},
{
"epoch": 0.53,
"grad_norm": 0.8032697641623496,
"learning_rate": 4.200431844427299e-06,
"loss": 0.5849,
"step": 538
},
{
"epoch": 0.53,
"grad_norm": 0.9224510847524761,
"learning_rate": 4.197573939687378e-06,
"loss": 0.6671,
"step": 539
},
{
"epoch": 0.53,
"grad_norm": 0.8597523417116901,
"learning_rate": 4.1947119125743e-06,
"loss": 0.6001,
"step": 540
},
{
"epoch": 0.53,
"grad_norm": 0.858547476629971,
"learning_rate": 4.191845770038186e-06,
"loss": 0.6614,
"step": 541
},
{
"epoch": 0.53,
"grad_norm": 0.8316297295683677,
"learning_rate": 4.188975519039151e-06,
"loss": 0.7046,
"step": 542
},
{
"epoch": 0.54,
"grad_norm": 0.8750165143725187,
"learning_rate": 4.1861011665472864e-06,
"loss": 0.5857,
"step": 543
},
{
"epoch": 0.54,
"grad_norm": 0.8504473498402192,
"learning_rate": 4.183222719542643e-06,
"loss": 0.6212,
"step": 544
},
{
"epoch": 0.54,
"grad_norm": 0.8861097216987638,
"learning_rate": 4.180340185015216e-06,
"loss": 0.6298,
"step": 545
},
{
"epoch": 0.54,
"grad_norm": 0.8764506901623238,
"learning_rate": 4.177453569964925e-06,
"loss": 0.6299,
"step": 546
},
{
"epoch": 0.54,
"grad_norm": 0.8954878186353425,
"learning_rate": 4.174562881401602e-06,
"loss": 0.6435,
"step": 547
},
{
"epoch": 0.54,
"grad_norm": 0.8004194533630808,
"learning_rate": 4.171668126344968e-06,
"loss": 0.6462,
"step": 548
},
{
"epoch": 0.54,
"grad_norm": 0.8154879145019431,
"learning_rate": 4.168769311824619e-06,
"loss": 0.6249,
"step": 549
},
{
"epoch": 0.54,
"grad_norm": 0.8753691348791529,
"learning_rate": 4.1658664448800105e-06,
"loss": 0.6409,
"step": 550
},
{
"epoch": 0.54,
"grad_norm": 0.9129753096435906,
"learning_rate": 4.162959532560438e-06,
"loss": 0.6042,
"step": 551
},
{
"epoch": 0.54,
"grad_norm": 0.8135110815096118,
"learning_rate": 4.160048581925022e-06,
"loss": 0.6103,
"step": 552
},
{
"epoch": 0.55,
"grad_norm": 0.853647599461546,
"learning_rate": 4.1571336000426865e-06,
"loss": 0.6703,
"step": 553
},
{
"epoch": 0.55,
"grad_norm": 0.8443698357876949,
"learning_rate": 4.154214593992149e-06,
"loss": 0.5785,
"step": 554
},
{
"epoch": 0.55,
"grad_norm": 0.8279089687921122,
"learning_rate": 4.151291570861897e-06,
"loss": 0.5805,
"step": 555
},
{
"epoch": 0.55,
"grad_norm": 0.7909744989541445,
"learning_rate": 4.1483645377501726e-06,
"loss": 0.6439,
"step": 556
},
{
"epoch": 0.55,
"grad_norm": 0.8201980138747228,
"learning_rate": 4.145433501764958e-06,
"loss": 0.6641,
"step": 557
},
{
"epoch": 0.55,
"grad_norm": 0.8926818528101442,
"learning_rate": 4.142498470023952e-06,
"loss": 0.6016,
"step": 558
},
{
"epoch": 0.55,
"grad_norm": 0.8717064096758335,
"learning_rate": 4.139559449654561e-06,
"loss": 0.6323,
"step": 559
},
{
"epoch": 0.55,
"grad_norm": 0.850010692441458,
"learning_rate": 4.136616447793874e-06,
"loss": 0.5979,
"step": 560
},
{
"epoch": 0.55,
"grad_norm": 0.8329484168696811,
"learning_rate": 4.133669471588651e-06,
"loss": 0.6214,
"step": 561
},
{
"epoch": 0.55,
"grad_norm": 0.8319091624927326,
"learning_rate": 4.130718528195303e-06,
"loss": 0.652,
"step": 562
},
{
"epoch": 0.56,
"grad_norm": 0.9140031940509377,
"learning_rate": 4.127763624779873e-06,
"loss": 0.6062,
"step": 563
},
{
"epoch": 0.56,
"grad_norm": 0.904103009449078,
"learning_rate": 4.124804768518022e-06,
"loss": 0.6201,
"step": 564
},
{
"epoch": 0.56,
"grad_norm": 0.8410022890114313,
"learning_rate": 4.121841966595009e-06,
"loss": 0.6086,
"step": 565
},
{
"epoch": 0.56,
"grad_norm": 0.8940417054615136,
"learning_rate": 4.118875226205677e-06,
"loss": 0.6522,
"step": 566
},
{
"epoch": 0.56,
"grad_norm": 0.9155247713200788,
"learning_rate": 4.11590455455443e-06,
"loss": 0.5841,
"step": 567
},
{
"epoch": 0.56,
"grad_norm": 0.8745417589672394,
"learning_rate": 4.11292995885522e-06,
"loss": 0.6258,
"step": 568
},
{
"epoch": 0.56,
"grad_norm": 0.8552912069311052,
"learning_rate": 4.1099514463315286e-06,
"loss": 0.6056,
"step": 569
},
{
"epoch": 0.56,
"grad_norm": 0.8366769779121708,
"learning_rate": 4.106969024216348e-06,
"loss": 0.6652,
"step": 570
},
{
"epoch": 0.56,
"grad_norm": 0.8135609584995043,
"learning_rate": 4.103982699752167e-06,
"loss": 0.6228,
"step": 571
},
{
"epoch": 0.56,
"grad_norm": 0.8377970374149817,
"learning_rate": 4.1009924801909475e-06,
"loss": 0.6313,
"step": 572
},
{
"epoch": 0.57,
"grad_norm": 0.8318819640389654,
"learning_rate": 4.097998372794111e-06,
"loss": 0.6533,
"step": 573
},
{
"epoch": 0.57,
"grad_norm": 0.8758708325816799,
"learning_rate": 4.095000384832522e-06,
"loss": 0.6517,
"step": 574
},
{
"epoch": 0.57,
"grad_norm": 0.8190389257290644,
"learning_rate": 4.091998523586466e-06,
"loss": 0.6052,
"step": 575
},
{
"epoch": 0.57,
"grad_norm": 0.8446608189730106,
"learning_rate": 4.088992796345637e-06,
"loss": 0.6314,
"step": 576
},
{
"epoch": 0.57,
"grad_norm": 0.8652594802837115,
"learning_rate": 4.085983210409114e-06,
"loss": 0.6195,
"step": 577
},
{
"epoch": 0.57,
"grad_norm": 0.8351116333122931,
"learning_rate": 4.0829697730853505e-06,
"loss": 0.624,
"step": 578
},
{
"epoch": 0.57,
"grad_norm": 0.8704459285009329,
"learning_rate": 4.0799524916921475e-06,
"loss": 0.6504,
"step": 579
},
{
"epoch": 0.57,
"grad_norm": 0.903077675003753,
"learning_rate": 4.076931373556646e-06,
"loss": 0.6218,
"step": 580
},
{
"epoch": 0.57,
"grad_norm": 1.2268852729835975,
"learning_rate": 4.073906426015301e-06,
"loss": 0.6487,
"step": 581
},
{
"epoch": 0.57,
"grad_norm": 0.8429954352584018,
"learning_rate": 4.0708776564138685e-06,
"loss": 0.6383,
"step": 582
},
{
"epoch": 0.58,
"grad_norm": 0.8555120915062305,
"learning_rate": 4.067845072107384e-06,
"loss": 0.6088,
"step": 583
},
{
"epoch": 0.58,
"grad_norm": 0.8550901642574732,
"learning_rate": 4.064808680460149e-06,
"loss": 0.6196,
"step": 584
},
{
"epoch": 0.58,
"grad_norm": 0.8040403373015266,
"learning_rate": 4.061768488845707e-06,
"loss": 0.6224,
"step": 585
},
{
"epoch": 0.58,
"grad_norm": 0.8175245764774571,
"learning_rate": 4.058724504646834e-06,
"loss": 0.5801,
"step": 586
},
{
"epoch": 0.58,
"grad_norm": 0.8718375181238908,
"learning_rate": 4.055676735255513e-06,
"loss": 0.6243,
"step": 587
},
{
"epoch": 0.58,
"grad_norm": 0.7799454280424266,
"learning_rate": 4.0526251880729205e-06,
"loss": 0.6152,
"step": 588
},
{
"epoch": 0.58,
"grad_norm": 0.8395548950336932,
"learning_rate": 4.049569870509404e-06,
"loss": 0.6391,
"step": 589
},
{
"epoch": 0.58,
"grad_norm": 0.8893708670958256,
"learning_rate": 4.046510789984471e-06,
"loss": 0.6104,
"step": 590
},
{
"epoch": 0.58,
"grad_norm": 0.7958226662626613,
"learning_rate": 4.043447953926763e-06,
"loss": 0.6467,
"step": 591
},
{
"epoch": 0.58,
"grad_norm": 0.8439920849678747,
"learning_rate": 4.040381369774045e-06,
"loss": 0.5917,
"step": 592
},
{
"epoch": 0.59,
"grad_norm": 0.8561339618371849,
"learning_rate": 4.03731104497318e-06,
"loss": 0.6868,
"step": 593
},
{
"epoch": 0.59,
"grad_norm": 0.880827080603074,
"learning_rate": 4.034236986980119e-06,
"loss": 0.6217,
"step": 594
},
{
"epoch": 0.59,
"grad_norm": 0.9125142796540433,
"learning_rate": 4.031159203259876e-06,
"loss": 0.6241,
"step": 595
},
{
"epoch": 0.59,
"grad_norm": 0.8470398941235018,
"learning_rate": 4.028077701286512e-06,
"loss": 0.689,
"step": 596
},
{
"epoch": 0.59,
"grad_norm": 0.8695577595082639,
"learning_rate": 4.02499248854312e-06,
"loss": 0.6194,
"step": 597
},
{
"epoch": 0.59,
"grad_norm": 0.8400799986259637,
"learning_rate": 4.021903572521802e-06,
"loss": 0.6123,
"step": 598
},
{
"epoch": 0.59,
"grad_norm": 0.8364906173856129,
"learning_rate": 4.018810960723654e-06,
"loss": 0.6403,
"step": 599
},
{
"epoch": 0.59,
"grad_norm": 0.8426020752299488,
"learning_rate": 4.015714660658745e-06,
"loss": 0.6261,
"step": 600
},
{
"epoch": 0.59,
"grad_norm": 0.8639159393487216,
"learning_rate": 4.012614679846103e-06,
"loss": 0.6469,
"step": 601
},
{
"epoch": 0.59,
"grad_norm": 0.8426962439957932,
"learning_rate": 4.009511025813694e-06,
"loss": 0.6292,
"step": 602
},
{
"epoch": 0.6,
"grad_norm": 0.8311029210618062,
"learning_rate": 4.0064037060984015e-06,
"loss": 0.6388,
"step": 603
},
{
"epoch": 0.6,
"grad_norm": 0.8530506717592211,
"learning_rate": 4.003292728246015e-06,
"loss": 0.6716,
"step": 604
},
{
"epoch": 0.6,
"grad_norm": 0.8440602463846489,
"learning_rate": 4.000178099811203e-06,
"loss": 0.5825,
"step": 605
},
{
"epoch": 0.6,
"grad_norm": 0.8527447447182788,
"learning_rate": 3.997059828357501e-06,
"loss": 0.6405,
"step": 606
},
{
"epoch": 0.6,
"grad_norm": 0.8141210608713626,
"learning_rate": 3.993937921457292e-06,
"loss": 0.5794,
"step": 607
},
{
"epoch": 0.6,
"grad_norm": 0.8167088274295972,
"learning_rate": 3.990812386691786e-06,
"loss": 0.5785,
"step": 608
},
{
"epoch": 0.6,
"grad_norm": 0.8416848345796162,
"learning_rate": 3.987683231651003e-06,
"loss": 0.6082,
"step": 609
},
{
"epoch": 0.6,
"grad_norm": 0.841934252054496,
"learning_rate": 3.984550463933754e-06,
"loss": 0.6462,
"step": 610
},
{
"epoch": 0.6,
"grad_norm": 0.8803588200069083,
"learning_rate": 3.981414091147626e-06,
"loss": 0.5668,
"step": 611
},
{
"epoch": 0.6,
"grad_norm": 0.8446219400321544,
"learning_rate": 3.978274120908957e-06,
"loss": 0.6334,
"step": 612
},
{
"epoch": 0.6,
"grad_norm": 0.837550005498191,
"learning_rate": 3.975130560842821e-06,
"loss": 0.6179,
"step": 613
},
{
"epoch": 0.61,
"grad_norm": 1.004865035425333,
"learning_rate": 3.971983418583012e-06,
"loss": 0.6747,
"step": 614
},
{
"epoch": 0.61,
"grad_norm": 0.8557125045552572,
"learning_rate": 3.968832701772022e-06,
"loss": 0.6069,
"step": 615
},
{
"epoch": 0.61,
"grad_norm": 0.8832288543033557,
"learning_rate": 3.965678418061023e-06,
"loss": 0.6512,
"step": 616
},
{
"epoch": 0.61,
"grad_norm": 1.050854968266542,
"learning_rate": 3.962520575109849e-06,
"loss": 0.6139,
"step": 617
},
{
"epoch": 0.61,
"grad_norm": 0.8367647706094705,
"learning_rate": 3.9593591805869755e-06,
"loss": 0.6423,
"step": 618
},
{
"epoch": 0.61,
"grad_norm": 0.9902929127011927,
"learning_rate": 3.956194242169506e-06,
"loss": 0.5839,
"step": 619
},
{
"epoch": 0.61,
"grad_norm": 0.8538710862131312,
"learning_rate": 3.953025767543148e-06,
"loss": 0.6355,
"step": 620
},
{
"epoch": 0.61,
"grad_norm": 0.8406210033884886,
"learning_rate": 3.949853764402196e-06,
"loss": 0.6368,
"step": 621
},
{
"epoch": 0.61,
"grad_norm": 0.9035073071147527,
"learning_rate": 3.946678240449515e-06,
"loss": 0.6007,
"step": 622
},
{
"epoch": 0.61,
"grad_norm": 0.8519818744840768,
"learning_rate": 3.943499203396517e-06,
"loss": 0.6058,
"step": 623
},
{
"epoch": 0.62,
"grad_norm": 0.8848827749849268,
"learning_rate": 3.940316660963147e-06,
"loss": 0.6592,
"step": 624
},
{
"epoch": 0.62,
"grad_norm": 0.8514691094807268,
"learning_rate": 3.937130620877863e-06,
"loss": 0.6305,
"step": 625
},
{
"epoch": 0.62,
"grad_norm": 0.8673549125405013,
"learning_rate": 3.933941090877615e-06,
"loss": 0.6525,
"step": 626
},
{
"epoch": 0.62,
"grad_norm": 0.8170507594506436,
"learning_rate": 3.93074807870783e-06,
"loss": 0.6054,
"step": 627
},
{
"epoch": 0.62,
"grad_norm": 0.9160705523256952,
"learning_rate": 3.927551592122389e-06,
"loss": 0.646,
"step": 628
},
{
"epoch": 0.62,
"grad_norm": 0.8218544345647915,
"learning_rate": 3.92435163888361e-06,
"loss": 0.627,
"step": 629
},
{
"epoch": 0.62,
"grad_norm": 0.9097780389616746,
"learning_rate": 3.921148226762231e-06,
"loss": 0.6122,
"step": 630
},
{
"epoch": 0.62,
"grad_norm": 0.7955169076765822,
"learning_rate": 3.9179413635373895e-06,
"loss": 0.6221,
"step": 631
},
{
"epoch": 0.62,
"grad_norm": 0.9196174957020604,
"learning_rate": 3.914731056996604e-06,
"loss": 0.6248,
"step": 632
},
{
"epoch": 0.62,
"grad_norm": 0.8564166348680387,
"learning_rate": 3.911517314935752e-06,
"loss": 0.6629,
"step": 633
},
{
"epoch": 0.63,
"grad_norm": 0.861955774140363,
"learning_rate": 3.908300145159055e-06,
"loss": 0.5845,
"step": 634
},
{
"epoch": 0.63,
"grad_norm": 0.8237943505322816,
"learning_rate": 3.905079555479061e-06,
"loss": 0.5752,
"step": 635
},
{
"epoch": 0.63,
"grad_norm": 0.8254490831345543,
"learning_rate": 3.90185555371662e-06,
"loss": 0.6124,
"step": 636
},
{
"epoch": 0.63,
"grad_norm": 0.7892820407938396,
"learning_rate": 3.898628147700869e-06,
"loss": 0.6145,
"step": 637
},
{
"epoch": 0.63,
"grad_norm": 0.8608765813825437,
"learning_rate": 3.895397345269211e-06,
"loss": 0.6205,
"step": 638
},
{
"epoch": 0.63,
"grad_norm": 0.8395603666500568,
"learning_rate": 3.892163154267295e-06,
"loss": 0.6649,
"step": 639
},
{
"epoch": 0.63,
"grad_norm": 0.9131396543054635,
"learning_rate": 3.888925582549006e-06,
"loss": 0.6121,
"step": 640
},
{
"epoch": 0.63,
"grad_norm": 0.9090584462851223,
"learning_rate": 3.88568463797643e-06,
"loss": 0.5809,
"step": 641
},
{
"epoch": 0.63,
"grad_norm": 0.7938541089543004,
"learning_rate": 3.882440328419849e-06,
"loss": 0.6079,
"step": 642
},
{
"epoch": 0.63,
"grad_norm": 0.8068071932206416,
"learning_rate": 3.879192661757715e-06,
"loss": 0.5728,
"step": 643
},
{
"epoch": 0.64,
"grad_norm": 0.8578705877241041,
"learning_rate": 3.875941645876631e-06,
"loss": 0.6426,
"step": 644
},
{
"epoch": 0.64,
"grad_norm": 0.9087218932248154,
"learning_rate": 3.872687288671335e-06,
"loss": 0.5947,
"step": 645
},
{
"epoch": 0.64,
"grad_norm": 0.8484390918118917,
"learning_rate": 3.869429598044679e-06,
"loss": 0.5985,
"step": 646
},
{
"epoch": 0.64,
"grad_norm": 0.9202346447435071,
"learning_rate": 3.866168581907609e-06,
"loss": 0.6258,
"step": 647
},
{
"epoch": 0.64,
"grad_norm": 0.9340901392950748,
"learning_rate": 3.8629042481791475e-06,
"loss": 0.6289,
"step": 648
},
{
"epoch": 0.64,
"grad_norm": 0.8635034011239118,
"learning_rate": 3.859636604786372e-06,
"loss": 0.643,
"step": 649
},
{
"epoch": 0.64,
"grad_norm": 0.8415497288944668,
"learning_rate": 3.856365659664399e-06,
"loss": 0.6341,
"step": 650
},
{
"epoch": 0.64,
"grad_norm": 0.8687282559878431,
"learning_rate": 3.853091420756362e-06,
"loss": 0.6481,
"step": 651
},
{
"epoch": 0.64,
"grad_norm": 1.0072749621790154,
"learning_rate": 3.849813896013392e-06,
"loss": 0.6715,
"step": 652
},
{
"epoch": 0.64,
"grad_norm": 0.8246193635953545,
"learning_rate": 3.846533093394601e-06,
"loss": 0.5799,
"step": 653
},
{
"epoch": 0.65,
"grad_norm": 0.8558794445823575,
"learning_rate": 3.8432490208670605e-06,
"loss": 0.6019,
"step": 654
},
{
"epoch": 0.65,
"grad_norm": 0.8349778480098637,
"learning_rate": 3.839961686405782e-06,
"loss": 0.6254,
"step": 655
},
{
"epoch": 0.65,
"grad_norm": 0.8199486664572769,
"learning_rate": 3.836671097993698e-06,
"loss": 0.6129,
"step": 656
},
{
"epoch": 0.65,
"grad_norm": 0.9459661992839397,
"learning_rate": 3.833377263621646e-06,
"loss": 0.6327,
"step": 657
},
{
"epoch": 0.65,
"grad_norm": 0.8250918846236105,
"learning_rate": 3.830080191288342e-06,
"loss": 0.6636,
"step": 658
},
{
"epoch": 0.65,
"grad_norm": 0.9390093353108422,
"learning_rate": 3.826779889000366e-06,
"loss": 0.6384,
"step": 659
},
{
"epoch": 0.65,
"grad_norm": 0.7978120610077674,
"learning_rate": 3.823476364772143e-06,
"loss": 0.5865,
"step": 660
},
{
"epoch": 0.65,
"grad_norm": 0.9061114135678785,
"learning_rate": 3.82016962662592e-06,
"loss": 0.6342,
"step": 661
},
{
"epoch": 0.65,
"grad_norm": 0.8072077947758938,
"learning_rate": 3.816859682591752e-06,
"loss": 0.5847,
"step": 662
},
{
"epoch": 0.65,
"grad_norm": 0.8123266086238162,
"learning_rate": 3.8135465407074756e-06,
"loss": 0.5951,
"step": 663
},
{
"epoch": 0.66,
"grad_norm": 0.8400833347987473,
"learning_rate": 3.810230209018694e-06,
"loss": 0.6343,
"step": 664
},
{
"epoch": 0.66,
"grad_norm": 0.8774221118702056,
"learning_rate": 3.8069106955787593e-06,
"loss": 0.6027,
"step": 665
},
{
"epoch": 0.66,
"grad_norm": 0.8469674582077675,
"learning_rate": 3.8035880084487454e-06,
"loss": 0.6427,
"step": 666
},
{
"epoch": 0.66,
"grad_norm": 0.8713388963235409,
"learning_rate": 3.8002621556974367e-06,
"loss": 0.6498,
"step": 667
},
{
"epoch": 0.66,
"grad_norm": 0.8494210390587402,
"learning_rate": 3.796933145401304e-06,
"loss": 0.7154,
"step": 668
},
{
"epoch": 0.66,
"grad_norm": 0.8273270122789752,
"learning_rate": 3.7936009856444854e-06,
"loss": 0.5999,
"step": 669
},
{
"epoch": 0.66,
"grad_norm": 0.8382150794392165,
"learning_rate": 3.790265684518767e-06,
"loss": 0.626,
"step": 670
},
{
"epoch": 0.66,
"grad_norm": 0.788293287889455,
"learning_rate": 3.7869272501235644e-06,
"loss": 0.595,
"step": 671
},
{
"epoch": 0.66,
"grad_norm": 0.8272921725994223,
"learning_rate": 3.7835856905659015e-06,
"loss": 0.6007,
"step": 672
},
{
"epoch": 0.66,
"grad_norm": 0.8622000588979104,
"learning_rate": 3.7802410139603908e-06,
"loss": 0.599,
"step": 673
},
{
"epoch": 0.67,
"grad_norm": 0.8358507677494942,
"learning_rate": 3.7768932284292147e-06,
"loss": 0.641,
"step": 674
},
{
"epoch": 0.67,
"grad_norm": 0.8625794861605586,
"learning_rate": 3.773542342102105e-06,
"loss": 0.5919,
"step": 675
},
{
"epoch": 0.67,
"grad_norm": 0.8416399086204703,
"learning_rate": 3.770188363116324e-06,
"loss": 0.5554,
"step": 676
},
{
"epoch": 0.67,
"grad_norm": 0.8418267359188863,
"learning_rate": 3.766831299616644e-06,
"loss": 0.6304,
"step": 677
},
{
"epoch": 0.67,
"grad_norm": 0.8762169067425597,
"learning_rate": 3.7634711597553274e-06,
"loss": 0.6476,
"step": 678
},
{
"epoch": 0.67,
"grad_norm": 0.9404479760658981,
"learning_rate": 3.7601079516921076e-06,
"loss": 0.6499,
"step": 679
},
{
"epoch": 0.67,
"grad_norm": 0.8947001559317359,
"learning_rate": 3.7567416835941674e-06,
"loss": 0.69,
"step": 680
},
{
"epoch": 0.67,
"grad_norm": 0.7966747108540816,
"learning_rate": 3.7533723636361236e-06,
"loss": 0.6372,
"step": 681
},
{
"epoch": 0.67,
"grad_norm": 0.8544936589074825,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.5779,
"step": 682
},
{
"epoch": 0.67,
"grad_norm": 0.8178872102657411,
"learning_rate": 3.746624600875216e-06,
"loss": 0.5636,
"step": 683
},
{
"epoch": 0.68,
"grad_norm": 0.8332061128370889,
"learning_rate": 3.7432461744585595e-06,
"loss": 0.5995,
"step": 684
},
{
"epoch": 0.68,
"grad_norm": 0.863474799322471,
"learning_rate": 3.7398647289541703e-06,
"loss": 0.629,
"step": 685
},
{
"epoch": 0.68,
"grad_norm": 0.865438085828472,
"learning_rate": 3.736480272573519e-06,
"loss": 0.638,
"step": 686
},
{
"epoch": 0.68,
"grad_norm": 0.8433955975404765,
"learning_rate": 3.7330928135353904e-06,
"loss": 0.6567,
"step": 687
},
{
"epoch": 0.68,
"grad_norm": 0.9145902886360865,
"learning_rate": 3.7297023600658586e-06,
"loss": 0.6144,
"step": 688
},
{
"epoch": 0.68,
"grad_norm": 0.8556239801373773,
"learning_rate": 3.7263089203982698e-06,
"loss": 0.6016,
"step": 689
},
{
"epoch": 0.68,
"grad_norm": 0.8208625440531927,
"learning_rate": 3.722912502773224e-06,
"loss": 0.644,
"step": 690
},
{
"epoch": 0.68,
"grad_norm": 0.9949073359631555,
"learning_rate": 3.719513115438548e-06,
"loss": 0.6436,
"step": 691
},
{
"epoch": 0.68,
"grad_norm": 0.8308047358059517,
"learning_rate": 3.716110766649285e-06,
"loss": 0.5918,
"step": 692
},
{
"epoch": 0.68,
"grad_norm": 0.8488700445625043,
"learning_rate": 3.712705464667667e-06,
"loss": 0.5992,
"step": 693
},
{
"epoch": 0.68,
"grad_norm": 0.8496884126493682,
"learning_rate": 3.7092972177630998e-06,
"loss": 0.652,
"step": 694
},
{
"epoch": 0.69,
"grad_norm": 0.8883418066922215,
"learning_rate": 3.7058860342121385e-06,
"loss": 0.5873,
"step": 695
},
{
"epoch": 0.69,
"grad_norm": 0.8400599741065571,
"learning_rate": 3.7024719222984696e-06,
"loss": 0.6079,
"step": 696
},
{
"epoch": 0.69,
"grad_norm": 0.8438768318930868,
"learning_rate": 3.699054890312892e-06,
"loss": 0.6272,
"step": 697
},
{
"epoch": 0.69,
"grad_norm": 0.8881813441097738,
"learning_rate": 3.695634946553296e-06,
"loss": 0.602,
"step": 698
},
{
"epoch": 0.69,
"grad_norm": 0.8390601326013466,
"learning_rate": 3.6922120993246406e-06,
"loss": 0.6486,
"step": 699
},
{
"epoch": 0.69,
"grad_norm": 0.8182123415184759,
"learning_rate": 3.6887863569389388e-06,
"loss": 0.6081,
"step": 700
},
{
"epoch": 0.69,
"grad_norm": 0.825114071415925,
"learning_rate": 3.6853577277152295e-06,
"loss": 0.6142,
"step": 701
},
{
"epoch": 0.69,
"grad_norm": 0.8525114312935935,
"learning_rate": 3.681926219979568e-06,
"loss": 0.6433,
"step": 702
},
{
"epoch": 0.69,
"grad_norm": 0.9146597445291013,
"learning_rate": 3.6784918420649952e-06,
"loss": 0.6623,
"step": 703
},
{
"epoch": 0.69,
"grad_norm": 0.8462781824353365,
"learning_rate": 3.675054602311522e-06,
"loss": 0.66,
"step": 704
},
{
"epoch": 0.7,
"grad_norm": 0.9014059561209856,
"learning_rate": 3.6716145090661115e-06,
"loss": 0.5729,
"step": 705
},
{
"epoch": 0.7,
"grad_norm": 0.9206429639224136,
"learning_rate": 3.6681715706826555e-06,
"loss": 0.6226,
"step": 706
},
{
"epoch": 0.7,
"grad_norm": 0.8518509022390937,
"learning_rate": 3.6647257955219535e-06,
"loss": 0.6359,
"step": 707
},
{
"epoch": 0.7,
"grad_norm": 0.9151576998752515,
"learning_rate": 3.6612771919516944e-06,
"loss": 0.6264,
"step": 708
},
{
"epoch": 0.7,
"grad_norm": 0.8746527924270058,
"learning_rate": 3.6578257683464363e-06,
"loss": 0.6372,
"step": 709
},
{
"epoch": 0.7,
"grad_norm": 0.8963857822510269,
"learning_rate": 3.654371533087586e-06,
"loss": 0.6381,
"step": 710
},
{
"epoch": 0.7,
"grad_norm": 0.893619853609527,
"learning_rate": 3.6509144945633747e-06,
"loss": 0.6118,
"step": 711
},
{
"epoch": 0.7,
"grad_norm": 0.8612944499918704,
"learning_rate": 3.6474546611688446e-06,
"loss": 0.6569,
"step": 712
},
{
"epoch": 0.7,
"grad_norm": 0.9267879655807548,
"learning_rate": 3.643992041305824e-06,
"loss": 0.6296,
"step": 713
},
{
"epoch": 0.7,
"grad_norm": 0.8324239177328866,
"learning_rate": 3.640526643382908e-06,
"loss": 0.596,
"step": 714
},
{
"epoch": 0.71,
"grad_norm": 0.8303516050409866,
"learning_rate": 3.6370584758154366e-06,
"loss": 0.627,
"step": 715
},
{
"epoch": 0.71,
"grad_norm": 0.8257066905266303,
"learning_rate": 3.6335875470254763e-06,
"loss": 0.6204,
"step": 716
},
{
"epoch": 0.71,
"grad_norm": 0.8617688384824457,
"learning_rate": 3.6301138654418e-06,
"loss": 0.5991,
"step": 717
},
{
"epoch": 0.71,
"grad_norm": 0.8331747116361415,
"learning_rate": 3.626637439499864e-06,
"loss": 0.6316,
"step": 718
},
{
"epoch": 0.71,
"grad_norm": 0.9111998000553265,
"learning_rate": 3.623158277641789e-06,
"loss": 0.5996,
"step": 719
},
{
"epoch": 0.71,
"grad_norm": 0.9347960312555601,
"learning_rate": 3.6196763883163407e-06,
"loss": 0.6208,
"step": 720
},
{
"epoch": 0.71,
"grad_norm": 0.8228735877621802,
"learning_rate": 3.6161917799789076e-06,
"loss": 0.6654,
"step": 721
},
{
"epoch": 0.71,
"grad_norm": 0.7692626652167038,
"learning_rate": 3.612704461091481e-06,
"loss": 0.6309,
"step": 722
},
{
"epoch": 0.71,
"grad_norm": 0.8558647649798884,
"learning_rate": 3.6092144401226347e-06,
"loss": 0.5716,
"step": 723
},
{
"epoch": 0.71,
"grad_norm": 0.8195467347471203,
"learning_rate": 3.6057217255475034e-06,
"loss": 0.5914,
"step": 724
},
{
"epoch": 0.72,
"grad_norm": 0.8194512413081951,
"learning_rate": 3.6022263258477636e-06,
"loss": 0.6197,
"step": 725
},
{
"epoch": 0.72,
"grad_norm": 0.8704175696506994,
"learning_rate": 3.598728249511613e-06,
"loss": 0.6095,
"step": 726
},
{
"epoch": 0.72,
"grad_norm": 0.8493811573838203,
"learning_rate": 3.595227505033747e-06,
"loss": 0.6232,
"step": 727
},
{
"epoch": 0.72,
"grad_norm": 0.790925203596828,
"learning_rate": 3.5917241009153424e-06,
"loss": 0.5643,
"step": 728
},
{
"epoch": 0.72,
"grad_norm": 0.8269684214873226,
"learning_rate": 3.588218045664035e-06,
"loss": 0.5822,
"step": 729
},
{
"epoch": 0.72,
"grad_norm": 0.8194321737982796,
"learning_rate": 3.5847093477938955e-06,
"loss": 0.6121,
"step": 730
},
{
"epoch": 0.72,
"grad_norm": 1.7164330849560612,
"learning_rate": 3.5811980158254156e-06,
"loss": 0.6089,
"step": 731
},
{
"epoch": 0.72,
"grad_norm": 0.8748550271603309,
"learning_rate": 3.5776840582854815e-06,
"loss": 0.5805,
"step": 732
},
{
"epoch": 0.72,
"grad_norm": 0.8636172206842424,
"learning_rate": 3.5741674837073563e-06,
"loss": 0.6128,
"step": 733
},
{
"epoch": 0.72,
"grad_norm": 0.8282087905244047,
"learning_rate": 3.5706483006306567e-06,
"loss": 0.6459,
"step": 734
},
{
"epoch": 0.73,
"grad_norm": 0.8848259311647253,
"learning_rate": 3.5671265176013363e-06,
"loss": 0.6277,
"step": 735
},
{
"epoch": 0.73,
"grad_norm": 0.8119988447523083,
"learning_rate": 3.5636021431716604e-06,
"loss": 0.623,
"step": 736
},
{
"epoch": 0.73,
"grad_norm": 0.8342845309664513,
"learning_rate": 3.5600751859001873e-06,
"loss": 0.6282,
"step": 737
},
{
"epoch": 0.73,
"grad_norm": 0.8410918679240155,
"learning_rate": 3.556545654351749e-06,
"loss": 0.591,
"step": 738
},
{
"epoch": 0.73,
"grad_norm": 0.8796336800622948,
"learning_rate": 3.553013557097428e-06,
"loss": 0.6341,
"step": 739
},
{
"epoch": 0.73,
"grad_norm": 0.7917875470526526,
"learning_rate": 3.549478902714536e-06,
"loss": 0.604,
"step": 740
},
{
"epoch": 0.73,
"grad_norm": 0.8337369916578986,
"learning_rate": 3.545941699786596e-06,
"loss": 0.6125,
"step": 741
},
{
"epoch": 0.73,
"grad_norm": 0.8490891695105536,
"learning_rate": 3.542401956903321e-06,
"loss": 0.5988,
"step": 742
},
{
"epoch": 0.73,
"grad_norm": 0.8452367560916697,
"learning_rate": 3.5388596826605885e-06,
"loss": 0.5787,
"step": 743
},
{
"epoch": 0.73,
"grad_norm": 0.8688484430410042,
"learning_rate": 3.5353148856604265e-06,
"loss": 0.6813,
"step": 744
},
{
"epoch": 0.74,
"grad_norm": 0.8286016233816287,
"learning_rate": 3.531767574510987e-06,
"loss": 0.6342,
"step": 745
},
{
"epoch": 0.74,
"grad_norm": 0.8515417361722135,
"learning_rate": 3.5282177578265295e-06,
"loss": 0.6163,
"step": 746
},
{
"epoch": 0.74,
"grad_norm": 0.788811099590474,
"learning_rate": 3.5246654442273952e-06,
"loss": 0.5822,
"step": 747
},
{
"epoch": 0.74,
"grad_norm": 0.799045340559827,
"learning_rate": 3.521110642339991e-06,
"loss": 0.6142,
"step": 748
},
{
"epoch": 0.74,
"grad_norm": 0.8437467452590093,
"learning_rate": 3.5175533607967656e-06,
"loss": 0.5822,
"step": 749
},
{
"epoch": 0.74,
"grad_norm": 0.8563735262905348,
"learning_rate": 3.513993608236188e-06,
"loss": 0.6669,
"step": 750
},
{
"epoch": 0.74,
"grad_norm": 0.8355492451040473,
"learning_rate": 3.51043139330273e-06,
"loss": 0.6051,
"step": 751
},
{
"epoch": 0.74,
"grad_norm": 0.9036193008645055,
"learning_rate": 3.5068667246468437e-06,
"loss": 0.5772,
"step": 752
},
{
"epoch": 0.74,
"grad_norm": 0.9220169686470918,
"learning_rate": 3.503299610924935e-06,
"loss": 0.5966,
"step": 753
},
{
"epoch": 0.74,
"grad_norm": 0.8451566800102057,
"learning_rate": 3.499730060799352e-06,
"loss": 0.6292,
"step": 754
},
{
"epoch": 0.75,
"grad_norm": 0.9102528483160476,
"learning_rate": 3.496158082938359e-06,
"loss": 0.5723,
"step": 755
},
{
"epoch": 0.75,
"grad_norm": 1.0868296175811145,
"learning_rate": 3.492583686016113e-06,
"loss": 0.6884,
"step": 756
},
{
"epoch": 0.75,
"grad_norm": 0.8535175516558623,
"learning_rate": 3.4890068787126475e-06,
"loss": 0.5513,
"step": 757
},
{
"epoch": 0.75,
"grad_norm": 0.8854803755423458,
"learning_rate": 3.485427669713849e-06,
"loss": 0.5858,
"step": 758
},
{
"epoch": 0.75,
"grad_norm": 0.8274288286764514,
"learning_rate": 3.481846067711436e-06,
"loss": 0.5906,
"step": 759
},
{
"epoch": 0.75,
"grad_norm": 0.8383612336715415,
"learning_rate": 3.4782620814029376e-06,
"loss": 0.6489,
"step": 760
},
{
"epoch": 0.75,
"grad_norm": 0.8219239495153828,
"learning_rate": 3.474675719491675e-06,
"loss": 0.6328,
"step": 761
},
{
"epoch": 0.75,
"grad_norm": 0.87449995235705,
"learning_rate": 3.471086990686737e-06,
"loss": 0.5782,
"step": 762
},
{
"epoch": 0.75,
"grad_norm": 0.8486500828100978,
"learning_rate": 3.4674959037029593e-06,
"loss": 0.6338,
"step": 763
},
{
"epoch": 0.75,
"grad_norm": 0.8464185221334688,
"learning_rate": 3.4639024672609045e-06,
"loss": 0.616,
"step": 764
},
{
"epoch": 0.75,
"grad_norm": 0.8795568484465794,
"learning_rate": 3.4603066900868425e-06,
"loss": 0.62,
"step": 765
},
{
"epoch": 0.76,
"grad_norm": 0.7898963360806371,
"learning_rate": 3.4567085809127247e-06,
"loss": 0.6165,
"step": 766
},
{
"epoch": 0.76,
"grad_norm": 0.834736333485077,
"learning_rate": 3.4531081484761676e-06,
"loss": 0.6291,
"step": 767
},
{
"epoch": 0.76,
"grad_norm": 0.8198807461377275,
"learning_rate": 3.4495054015204282e-06,
"loss": 0.6341,
"step": 768
},
{
"epoch": 0.76,
"grad_norm": 0.8089496064861047,
"learning_rate": 3.4459003487943842e-06,
"loss": 0.6323,
"step": 769
},
{
"epoch": 0.76,
"grad_norm": 0.9015873348579728,
"learning_rate": 3.4422929990525133e-06,
"loss": 0.5976,
"step": 770
},
{
"epoch": 0.76,
"grad_norm": 0.8101825375577076,
"learning_rate": 3.438683361054872e-06,
"loss": 0.6088,
"step": 771
},
{
"epoch": 0.76,
"grad_norm": 0.8464641333673643,
"learning_rate": 3.4350714435670706e-06,
"loss": 0.6348,
"step": 772
},
{
"epoch": 0.76,
"grad_norm": 0.876759997482892,
"learning_rate": 3.4314572553602577e-06,
"loss": 0.6229,
"step": 773
},
{
"epoch": 0.76,
"grad_norm": 0.7958084255478849,
"learning_rate": 3.427840805211095e-06,
"loss": 0.6231,
"step": 774
},
{
"epoch": 0.76,
"grad_norm": 0.8287287958310313,
"learning_rate": 3.424222101901738e-06,
"loss": 0.5963,
"step": 775
},
{
"epoch": 0.77,
"grad_norm": 0.8142217551737962,
"learning_rate": 3.4206011542198115e-06,
"loss": 0.6336,
"step": 776
},
{
"epoch": 0.77,
"grad_norm": 0.805828999419117,
"learning_rate": 3.416977970958393e-06,
"loss": 0.5936,
"step": 777
},
{
"epoch": 0.77,
"grad_norm": 0.8224599427095683,
"learning_rate": 3.4133525609159883e-06,
"loss": 0.6746,
"step": 778
},
{
"epoch": 0.77,
"grad_norm": 0.8304652958460922,
"learning_rate": 3.409724932896509e-06,
"loss": 0.6736,
"step": 779
},
{
"epoch": 0.77,
"grad_norm": 0.7912513015896195,
"learning_rate": 3.406095095709254e-06,
"loss": 0.6085,
"step": 780
},
{
"epoch": 0.77,
"grad_norm": 0.8246365476659197,
"learning_rate": 3.4024630581688895e-06,
"loss": 0.5968,
"step": 781
},
{
"epoch": 0.77,
"grad_norm": 0.8288445554081133,
"learning_rate": 3.398828829095419e-06,
"loss": 0.6241,
"step": 782
},
{
"epoch": 0.77,
"grad_norm": 0.8548262155784426,
"learning_rate": 3.395192417314174e-06,
"loss": 0.6391,
"step": 783
},
{
"epoch": 0.77,
"grad_norm": 0.7713543131187703,
"learning_rate": 3.391553831655783e-06,
"loss": 0.5857,
"step": 784
},
{
"epoch": 0.77,
"grad_norm": 0.8085787898759782,
"learning_rate": 3.3879130809561543e-06,
"loss": 0.5905,
"step": 785
},
{
"epoch": 0.78,
"grad_norm": 0.8110232754655301,
"learning_rate": 3.384270174056454e-06,
"loss": 0.5678,
"step": 786
},
{
"epoch": 0.78,
"grad_norm": 0.8263659965643595,
"learning_rate": 3.3806251198030843e-06,
"loss": 0.6386,
"step": 787
},
{
"epoch": 0.78,
"grad_norm": 0.9161733903902445,
"learning_rate": 3.3769779270476614e-06,
"loss": 0.6271,
"step": 788
},
{
"epoch": 0.78,
"grad_norm": 0.8281838229897066,
"learning_rate": 3.3733286046469955e-06,
"loss": 0.5625,
"step": 789
},
{
"epoch": 0.78,
"grad_norm": 0.8275110443615774,
"learning_rate": 3.369677161463068e-06,
"loss": 0.606,
"step": 790
},
{
"epoch": 0.78,
"grad_norm": 0.8291505968037055,
"learning_rate": 3.3660236063630116e-06,
"loss": 0.6052,
"step": 791
},
{
"epoch": 0.78,
"grad_norm": 0.8513874699652647,
"learning_rate": 3.3623679482190834e-06,
"loss": 0.6159,
"step": 792
},
{
"epoch": 0.78,
"grad_norm": 0.861694134467597,
"learning_rate": 3.358710195908653e-06,
"loss": 0.6178,
"step": 793
},
{
"epoch": 0.78,
"grad_norm": 0.8353375015177325,
"learning_rate": 3.3550503583141726e-06,
"loss": 0.563,
"step": 794
},
{
"epoch": 0.78,
"grad_norm": 0.8544792831170283,
"learning_rate": 3.351388444323157e-06,
"loss": 0.6131,
"step": 795
},
{
"epoch": 0.79,
"grad_norm": 0.8454973440583377,
"learning_rate": 3.3477244628281667e-06,
"loss": 0.5999,
"step": 796
},
{
"epoch": 0.79,
"grad_norm": 0.7948897675289834,
"learning_rate": 3.3440584227267814e-06,
"loss": 0.6587,
"step": 797
},
{
"epoch": 0.79,
"grad_norm": 0.8207217899114025,
"learning_rate": 3.3403903329215777e-06,
"loss": 0.5899,
"step": 798
},
{
"epoch": 0.79,
"grad_norm": 0.9250944862790339,
"learning_rate": 3.3367202023201128e-06,
"loss": 0.6491,
"step": 799
},
{
"epoch": 0.79,
"grad_norm": 0.7983017822178072,
"learning_rate": 3.3330480398348988e-06,
"loss": 0.5705,
"step": 800
},
{
"epoch": 0.79,
"grad_norm": 0.8699135194548229,
"learning_rate": 3.3293738543833807e-06,
"loss": 0.6335,
"step": 801
},
{
"epoch": 0.79,
"grad_norm": 0.84153902102691,
"learning_rate": 3.3256976548879183e-06,
"loss": 0.6058,
"step": 802
},
{
"epoch": 0.79,
"grad_norm": 0.8542237620487997,
"learning_rate": 3.3220194502757602e-06,
"loss": 0.6194,
"step": 803
},
{
"epoch": 0.79,
"grad_norm": 0.7924943868095786,
"learning_rate": 3.3183392494790264e-06,
"loss": 0.6274,
"step": 804
},
{
"epoch": 0.79,
"grad_norm": 0.8336114140592508,
"learning_rate": 3.3146570614346814e-06,
"loss": 0.5773,
"step": 805
},
{
"epoch": 0.8,
"grad_norm": 0.8639260233810916,
"learning_rate": 3.3109728950845184e-06,
"loss": 0.5685,
"step": 806
},
{
"epoch": 0.8,
"grad_norm": 0.8158312287141508,
"learning_rate": 3.3072867593751335e-06,
"loss": 0.5698,
"step": 807
},
{
"epoch": 0.8,
"grad_norm": 0.8751459349914905,
"learning_rate": 3.303598663257904e-06,
"loss": 0.6269,
"step": 808
},
{
"epoch": 0.8,
"grad_norm": 0.8053527944592563,
"learning_rate": 3.2999086156889715e-06,
"loss": 0.6604,
"step": 809
},
{
"epoch": 0.8,
"grad_norm": 0.8610616380496376,
"learning_rate": 3.2962166256292116e-06,
"loss": 0.6122,
"step": 810
},
{
"epoch": 0.8,
"grad_norm": 0.8426268663118117,
"learning_rate": 3.292522702044221e-06,
"loss": 0.6023,
"step": 811
},
{
"epoch": 0.8,
"grad_norm": 0.8283059132461468,
"learning_rate": 3.288826853904289e-06,
"loss": 0.6368,
"step": 812
},
{
"epoch": 0.8,
"grad_norm": 0.9098952954025415,
"learning_rate": 3.285129090184381e-06,
"loss": 0.5802,
"step": 813
},
{
"epoch": 0.8,
"grad_norm": 0.881481454188651,
"learning_rate": 3.281429419864112e-06,
"loss": 0.5848,
"step": 814
},
{
"epoch": 0.8,
"grad_norm": 0.8281945418633309,
"learning_rate": 3.277727851927727e-06,
"loss": 0.6097,
"step": 815
},
{
"epoch": 0.81,
"grad_norm": 0.882553902183949,
"learning_rate": 3.2740243953640827e-06,
"loss": 0.6596,
"step": 816
},
{
"epoch": 0.81,
"grad_norm": 0.8657763942244997,
"learning_rate": 3.2703190591666174e-06,
"loss": 0.6209,
"step": 817
},
{
"epoch": 0.81,
"grad_norm": 0.8358399157957145,
"learning_rate": 3.2666118523333363e-06,
"loss": 0.5879,
"step": 818
},
{
"epoch": 0.81,
"grad_norm": 0.8429824602810644,
"learning_rate": 3.2629027838667863e-06,
"loss": 0.6147,
"step": 819
},
{
"epoch": 0.81,
"grad_norm": 0.895622236703056,
"learning_rate": 3.259191862774037e-06,
"loss": 0.6342,
"step": 820
},
{
"epoch": 0.81,
"grad_norm": 0.869081721554779,
"learning_rate": 3.2554790980666545e-06,
"loss": 0.6288,
"step": 821
},
{
"epoch": 0.81,
"grad_norm": 0.8548842199692267,
"learning_rate": 3.2517644987606827e-06,
"loss": 0.6071,
"step": 822
},
{
"epoch": 0.81,
"grad_norm": 0.9265871658506936,
"learning_rate": 3.2480480738766222e-06,
"loss": 0.6125,
"step": 823
},
{
"epoch": 0.81,
"grad_norm": 0.8372005330678954,
"learning_rate": 3.244329832439404e-06,
"loss": 0.6184,
"step": 824
},
{
"epoch": 0.81,
"grad_norm": 0.7956763361747755,
"learning_rate": 3.2406097834783724e-06,
"loss": 0.6217,
"step": 825
},
{
"epoch": 0.82,
"grad_norm": 0.8181556551626763,
"learning_rate": 3.236887936027261e-06,
"loss": 0.5953,
"step": 826
},
{
"epoch": 0.82,
"grad_norm": 0.8857603345703071,
"learning_rate": 3.2331642991241695e-06,
"loss": 0.6547,
"step": 827
},
{
"epoch": 0.82,
"grad_norm": 0.8699553256972498,
"learning_rate": 3.2294388818115447e-06,
"loss": 0.6462,
"step": 828
},
{
"epoch": 0.82,
"grad_norm": 0.8260959526938845,
"learning_rate": 3.225711693136156e-06,
"loss": 0.6375,
"step": 829
},
{
"epoch": 0.82,
"grad_norm": 0.8585673049197299,
"learning_rate": 3.221982742149075e-06,
"loss": 0.5986,
"step": 830
},
{
"epoch": 0.82,
"grad_norm": 0.8401813928908182,
"learning_rate": 3.2182520379056516e-06,
"loss": 0.6087,
"step": 831
},
{
"epoch": 0.82,
"grad_norm": 0.8659784868378224,
"learning_rate": 3.2145195894654947e-06,
"loss": 0.6199,
"step": 832
},
{
"epoch": 0.82,
"grad_norm": 0.9223475931615208,
"learning_rate": 3.210785405892448e-06,
"loss": 0.5873,
"step": 833
},
{
"epoch": 0.82,
"grad_norm": 0.9447111876979355,
"learning_rate": 3.207049496254569e-06,
"loss": 0.5778,
"step": 834
},
{
"epoch": 0.82,
"grad_norm": 0.9766649870230963,
"learning_rate": 3.203311869624107e-06,
"loss": 0.6555,
"step": 835
},
{
"epoch": 0.83,
"grad_norm": 0.8477023018068901,
"learning_rate": 3.199572535077481e-06,
"loss": 0.5809,
"step": 836
},
{
"epoch": 0.83,
"grad_norm": 0.8206501838600238,
"learning_rate": 3.195831501695256e-06,
"loss": 0.6056,
"step": 837
},
{
"epoch": 0.83,
"grad_norm": 0.870594387289095,
"learning_rate": 3.1920887785621233e-06,
"loss": 0.6143,
"step": 838
},
{
"epoch": 0.83,
"grad_norm": 0.8149272829160001,
"learning_rate": 3.1883443747668806e-06,
"loss": 0.6117,
"step": 839
},
{
"epoch": 0.83,
"grad_norm": 0.8529746046688006,
"learning_rate": 3.1845982994024006e-06,
"loss": 0.6374,
"step": 840
},
{
"epoch": 0.83,
"grad_norm": 0.8412258694783452,
"learning_rate": 3.180850561565621e-06,
"loss": 0.6003,
"step": 841
},
{
"epoch": 0.83,
"grad_norm": 0.8444286299374963,
"learning_rate": 3.1771011703575134e-06,
"loss": 0.6059,
"step": 842
},
{
"epoch": 0.83,
"grad_norm": 0.8312527111815757,
"learning_rate": 3.173350134883066e-06,
"loss": 0.6153,
"step": 843
},
{
"epoch": 0.83,
"grad_norm": 0.8372205351537885,
"learning_rate": 3.169597464251258e-06,
"loss": 0.5939,
"step": 844
},
{
"epoch": 0.83,
"grad_norm": 0.807605821367219,
"learning_rate": 3.1658431675750424e-06,
"loss": 0.6059,
"step": 845
},
{
"epoch": 0.83,
"grad_norm": 0.7860979892611595,
"learning_rate": 3.162087253971318e-06,
"loss": 0.6629,
"step": 846
},
{
"epoch": 0.84,
"grad_norm": 0.940414176770529,
"learning_rate": 3.1583297325609117e-06,
"loss": 0.6028,
"step": 847
},
{
"epoch": 0.84,
"grad_norm": 0.8151977099439447,
"learning_rate": 3.1545706124685553e-06,
"loss": 0.6662,
"step": 848
},
{
"epoch": 0.84,
"grad_norm": 0.8471672058322735,
"learning_rate": 3.1508099028228613e-06,
"loss": 0.5856,
"step": 849
},
{
"epoch": 0.84,
"grad_norm": 0.8663713675701746,
"learning_rate": 3.147047612756302e-06,
"loss": 0.5481,
"step": 850
},
{
"epoch": 0.84,
"grad_norm": 0.7769840915544656,
"learning_rate": 3.1432837514051893e-06,
"loss": 0.5613,
"step": 851
},
{
"epoch": 0.84,
"grad_norm": 0.8592230044240439,
"learning_rate": 3.139518327909651e-06,
"loss": 0.6063,
"step": 852
},
{
"epoch": 0.84,
"grad_norm": 0.8597869095388183,
"learning_rate": 3.1357513514136044e-06,
"loss": 0.5747,
"step": 853
},
{
"epoch": 0.84,
"grad_norm": 0.8372404454667257,
"learning_rate": 3.1319828310647437e-06,
"loss": 0.6134,
"step": 854
},
{
"epoch": 0.84,
"grad_norm": 0.9298489053411231,
"learning_rate": 3.1282127760145094e-06,
"loss": 0.6078,
"step": 855
},
{
"epoch": 0.84,
"grad_norm": 0.9416390217217425,
"learning_rate": 3.1244411954180677e-06,
"loss": 0.5963,
"step": 856
},
{
"epoch": 0.85,
"grad_norm": 0.802868802927103,
"learning_rate": 3.120668098434291e-06,
"loss": 0.6058,
"step": 857
},
{
"epoch": 0.85,
"grad_norm": 0.9220598631922068,
"learning_rate": 3.116893494225734e-06,
"loss": 0.5966,
"step": 858
},
{
"epoch": 0.85,
"grad_norm": 0.9215503847795026,
"learning_rate": 3.113117391958612e-06,
"loss": 0.6112,
"step": 859
},
{
"epoch": 0.85,
"grad_norm": 0.7987198854171116,
"learning_rate": 3.109339800802777e-06,
"loss": 0.6388,
"step": 860
},
{
"epoch": 0.85,
"grad_norm": 0.8460741120385099,
"learning_rate": 3.1055607299316966e-06,
"loss": 0.5802,
"step": 861
},
{
"epoch": 0.85,
"grad_norm": 0.9262573376433733,
"learning_rate": 3.1017801885224332e-06,
"loss": 0.6559,
"step": 862
},
{
"epoch": 0.85,
"grad_norm": 0.8649803631458961,
"learning_rate": 3.097998185755618e-06,
"loss": 0.5749,
"step": 863
},
{
"epoch": 0.85,
"grad_norm": 0.8129582206860377,
"learning_rate": 3.094214730815433e-06,
"loss": 0.623,
"step": 864
},
{
"epoch": 0.85,
"grad_norm": 0.8894384638551944,
"learning_rate": 3.0904298328895865e-06,
"loss": 0.6156,
"step": 865
},
{
"epoch": 0.85,
"grad_norm": 0.9197272755662508,
"learning_rate": 3.0866435011692884e-06,
"loss": 0.6072,
"step": 866
},
{
"epoch": 0.86,
"grad_norm": 0.833467743832381,
"learning_rate": 3.082855744849234e-06,
"loss": 0.5942,
"step": 867
},
{
"epoch": 0.86,
"grad_norm": 0.8447839217824836,
"learning_rate": 3.0790665731275764e-06,
"loss": 0.5904,
"step": 868
},
{
"epoch": 0.86,
"grad_norm": 0.8347608532015025,
"learning_rate": 3.0752759952059047e-06,
"loss": 0.6212,
"step": 869
},
{
"epoch": 0.86,
"grad_norm": 0.7674030693122568,
"learning_rate": 3.0714840202892243e-06,
"loss": 0.6607,
"step": 870
},
{
"epoch": 0.86,
"grad_norm": 0.9057260645967865,
"learning_rate": 3.0676906575859335e-06,
"loss": 0.6377,
"step": 871
},
{
"epoch": 0.86,
"grad_norm": 0.7996224752255925,
"learning_rate": 3.063895916307799e-06,
"loss": 0.6332,
"step": 872
},
{
"epoch": 0.86,
"grad_norm": 0.8266387730228465,
"learning_rate": 3.0600998056699367e-06,
"loss": 0.5976,
"step": 873
},
{
"epoch": 0.86,
"grad_norm": 0.7758894383405645,
"learning_rate": 3.056302334890786e-06,
"loss": 0.5835,
"step": 874
},
{
"epoch": 0.86,
"grad_norm": 0.8070378254400489,
"learning_rate": 3.0525035131920926e-06,
"loss": 0.5945,
"step": 875
},
{
"epoch": 0.86,
"grad_norm": 0.8667704019187145,
"learning_rate": 3.0487033497988794e-06,
"loss": 0.605,
"step": 876
},
{
"epoch": 0.87,
"grad_norm": 0.8087365050450245,
"learning_rate": 3.0449018539394274e-06,
"loss": 0.6171,
"step": 877
},
{
"epoch": 0.87,
"grad_norm": 0.8106936486403415,
"learning_rate": 3.0410990348452572e-06,
"loss": 0.5878,
"step": 878
},
{
"epoch": 0.87,
"grad_norm": 0.8343865081867896,
"learning_rate": 3.037294901751099e-06,
"loss": 0.6159,
"step": 879
},
{
"epoch": 0.87,
"grad_norm": 0.9111920750589219,
"learning_rate": 3.0334894638948753e-06,
"loss": 0.6618,
"step": 880
},
{
"epoch": 0.87,
"grad_norm": 0.7905509417329776,
"learning_rate": 3.0296827305176767e-06,
"loss": 0.6421,
"step": 881
},
{
"epoch": 0.87,
"grad_norm": 0.8528082694556166,
"learning_rate": 3.02587471086374e-06,
"loss": 0.5966,
"step": 882
},
{
"epoch": 0.87,
"grad_norm": 0.8683617424120555,
"learning_rate": 3.022065414180425e-06,
"loss": 0.614,
"step": 883
},
{
"epoch": 0.87,
"grad_norm": 0.810261949769031,
"learning_rate": 3.0182548497181946e-06,
"loss": 0.6054,
"step": 884
},
{
"epoch": 0.87,
"grad_norm": 0.8458357510533312,
"learning_rate": 3.0144430267305874e-06,
"loss": 0.6012,
"step": 885
},
{
"epoch": 0.87,
"grad_norm": 0.8221353641749821,
"learning_rate": 3.0106299544742013e-06,
"loss": 0.6554,
"step": 886
},
{
"epoch": 0.88,
"grad_norm": 0.8230859903173897,
"learning_rate": 3.006815642208665e-06,
"loss": 0.6155,
"step": 887
},
{
"epoch": 0.88,
"grad_norm": 0.8897307384369784,
"learning_rate": 3.0030000991966213e-06,
"loss": 0.5947,
"step": 888
},
{
"epoch": 0.88,
"grad_norm": 0.8616094455160354,
"learning_rate": 2.999183334703699e-06,
"loss": 0.6392,
"step": 889
},
{
"epoch": 0.88,
"grad_norm": 0.8045566406491605,
"learning_rate": 2.9953653579984945e-06,
"loss": 0.613,
"step": 890
},
{
"epoch": 0.88,
"grad_norm": 0.8491995106395055,
"learning_rate": 2.991546178352548e-06,
"loss": 0.6732,
"step": 891
},
{
"epoch": 0.88,
"grad_norm": 0.8568125335081748,
"learning_rate": 2.9877258050403214e-06,
"loss": 0.6375,
"step": 892
},
{
"epoch": 0.88,
"grad_norm": 0.8403200791165655,
"learning_rate": 2.9839042473391734e-06,
"loss": 0.6355,
"step": 893
},
{
"epoch": 0.88,
"grad_norm": 0.8160289798230667,
"learning_rate": 2.980081514529341e-06,
"loss": 0.5816,
"step": 894
},
{
"epoch": 0.88,
"grad_norm": 0.8325072686110231,
"learning_rate": 2.9762576158939127e-06,
"loss": 0.6107,
"step": 895
},
{
"epoch": 0.88,
"grad_norm": 0.8532640950528609,
"learning_rate": 2.972432560718811e-06,
"loss": 0.6047,
"step": 896
},
{
"epoch": 0.89,
"grad_norm": 0.825222634002753,
"learning_rate": 2.9686063582927636e-06,
"loss": 0.6503,
"step": 897
},
{
"epoch": 0.89,
"grad_norm": 0.8374408245675915,
"learning_rate": 2.964779017907287e-06,
"loss": 0.5852,
"step": 898
},
{
"epoch": 0.89,
"grad_norm": 0.8009118608947914,
"learning_rate": 2.9609505488566585e-06,
"loss": 0.5691,
"step": 899
},
{
"epoch": 0.89,
"grad_norm": 0.8167294558750782,
"learning_rate": 2.9571209604378993e-06,
"loss": 0.6175,
"step": 900
},
{
"epoch": 0.89,
"grad_norm": 0.8232727662689424,
"learning_rate": 2.9532902619507465e-06,
"loss": 0.5732,
"step": 901
},
{
"epoch": 0.89,
"grad_norm": 0.846978796971522,
"learning_rate": 2.9494584626976318e-06,
"loss": 0.6105,
"step": 902
},
{
"epoch": 0.89,
"grad_norm": 0.8164610234787385,
"learning_rate": 2.9456255719836648e-06,
"loss": 0.566,
"step": 903
},
{
"epoch": 0.89,
"grad_norm": 0.8582764336563975,
"learning_rate": 2.941791599116601e-06,
"loss": 0.6275,
"step": 904
},
{
"epoch": 0.89,
"grad_norm": 0.777249993314137,
"learning_rate": 2.9379565534068243e-06,
"loss": 0.5805,
"step": 905
},
{
"epoch": 0.89,
"grad_norm": 0.8500028002405607,
"learning_rate": 2.9341204441673267e-06,
"loss": 0.5914,
"step": 906
},
{
"epoch": 0.9,
"grad_norm": 0.8175240407487714,
"learning_rate": 2.93028328071368e-06,
"loss": 0.586,
"step": 907
},
{
"epoch": 0.9,
"grad_norm": 0.8046556687399473,
"learning_rate": 2.926445072364017e-06,
"loss": 0.6353,
"step": 908
},
{
"epoch": 0.9,
"grad_norm": 0.8172777770063785,
"learning_rate": 2.9226058284390093e-06,
"loss": 0.6274,
"step": 909
},
{
"epoch": 0.9,
"grad_norm": 0.9129786089314338,
"learning_rate": 2.9187655582618413e-06,
"loss": 0.6398,
"step": 910
},
{
"epoch": 0.9,
"grad_norm": 0.8123191016829922,
"learning_rate": 2.91492427115819e-06,
"loss": 0.5638,
"step": 911
},
{
"epoch": 0.9,
"grad_norm": 0.7801688483442424,
"learning_rate": 2.911081976456202e-06,
"loss": 0.5872,
"step": 912
},
{
"epoch": 0.9,
"grad_norm": 0.8096931952713965,
"learning_rate": 2.9072386834864723e-06,
"loss": 0.5545,
"step": 913
},
{
"epoch": 0.9,
"grad_norm": 0.8346885149840537,
"learning_rate": 2.903394401582017e-06,
"loss": 0.593,
"step": 914
},
{
"epoch": 0.9,
"grad_norm": 0.8708319246348792,
"learning_rate": 2.899549140078256e-06,
"loss": 0.6032,
"step": 915
},
{
"epoch": 0.9,
"grad_norm": 0.8116588188201339,
"learning_rate": 2.895702908312987e-06,
"loss": 0.6215,
"step": 916
},
{
"epoch": 0.91,
"grad_norm": 0.8068958158119758,
"learning_rate": 2.8918557156263662e-06,
"loss": 0.5682,
"step": 917
},
{
"epoch": 0.91,
"grad_norm": 0.7858662353988665,
"learning_rate": 2.888007571360879e-06,
"loss": 0.5923,
"step": 918
},
{
"epoch": 0.91,
"grad_norm": 0.8771050496241385,
"learning_rate": 2.8841584848613254e-06,
"loss": 0.6514,
"step": 919
},
{
"epoch": 0.91,
"grad_norm": 0.8296510990241097,
"learning_rate": 2.880308465474792e-06,
"loss": 0.606,
"step": 920
},
{
"epoch": 0.91,
"grad_norm": 0.7889136835873297,
"learning_rate": 2.876457522550631e-06,
"loss": 0.5862,
"step": 921
},
{
"epoch": 0.91,
"grad_norm": 2.925493035659946,
"learning_rate": 2.872605665440436e-06,
"loss": 0.6542,
"step": 922
},
{
"epoch": 0.91,
"grad_norm": 0.8507683639836144,
"learning_rate": 2.8687529034980244e-06,
"loss": 0.5491,
"step": 923
},
{
"epoch": 0.91,
"grad_norm": 0.9174925682822145,
"learning_rate": 2.8648992460794054e-06,
"loss": 0.6571,
"step": 924
},
{
"epoch": 0.91,
"grad_norm": 0.7942861243562647,
"learning_rate": 2.8610447025427685e-06,
"loss": 0.6239,
"step": 925
},
{
"epoch": 0.91,
"grad_norm": 0.8075965123150352,
"learning_rate": 2.8571892822484502e-06,
"loss": 0.6194,
"step": 926
},
{
"epoch": 0.91,
"grad_norm": 0.7953063343629782,
"learning_rate": 2.8533329945589192e-06,
"loss": 0.5954,
"step": 927
},
{
"epoch": 0.92,
"grad_norm": 0.8417169635844408,
"learning_rate": 2.849475848838749e-06,
"loss": 0.5967,
"step": 928
},
{
"epoch": 0.92,
"grad_norm": 0.8434785540022463,
"learning_rate": 2.8456178544545976e-06,
"loss": 0.5901,
"step": 929
},
{
"epoch": 0.92,
"grad_norm": 0.8173771863580619,
"learning_rate": 2.841759020775184e-06,
"loss": 0.6201,
"step": 930
},
{
"epoch": 0.92,
"grad_norm": 0.8148818341326955,
"learning_rate": 2.8378993571712638e-06,
"loss": 0.5838,
"step": 931
},
{
"epoch": 0.92,
"grad_norm": 0.8076114270975754,
"learning_rate": 2.8340388730156097e-06,
"loss": 0.5921,
"step": 932
},
{
"epoch": 0.92,
"grad_norm": 0.829687636666006,
"learning_rate": 2.8301775776829875e-06,
"loss": 0.6039,
"step": 933
},
{
"epoch": 0.92,
"grad_norm": 0.9197178593956111,
"learning_rate": 2.82631548055013e-06,
"loss": 0.5839,
"step": 934
},
{
"epoch": 0.92,
"grad_norm": 0.8011771461166681,
"learning_rate": 2.822452590995719e-06,
"loss": 0.625,
"step": 935
},
{
"epoch": 0.92,
"grad_norm": 0.8071429478414894,
"learning_rate": 2.8185889184003614e-06,
"loss": 0.6258,
"step": 936
},
{
"epoch": 0.92,
"grad_norm": 0.8473353081081582,
"learning_rate": 2.814724472146564e-06,
"loss": 0.6264,
"step": 937
},
{
"epoch": 0.93,
"grad_norm": 0.8351071263143917,
"learning_rate": 2.8108592616187135e-06,
"loss": 0.5849,
"step": 938
},
{
"epoch": 0.93,
"grad_norm": 0.8433252071598873,
"learning_rate": 2.806993296203052e-06,
"loss": 0.5811,
"step": 939
},
{
"epoch": 0.93,
"grad_norm": 1.6097167209325385,
"learning_rate": 2.8031265852876537e-06,
"loss": 0.5958,
"step": 940
},
{
"epoch": 0.93,
"grad_norm": 0.868697036480233,
"learning_rate": 2.7992591382624064e-06,
"loss": 0.6281,
"step": 941
},
{
"epoch": 0.93,
"grad_norm": 0.851428078011761,
"learning_rate": 2.7953909645189824e-06,
"loss": 0.6174,
"step": 942
},
{
"epoch": 0.93,
"grad_norm": 0.8278638035201089,
"learning_rate": 2.791522073450819e-06,
"loss": 0.6232,
"step": 943
},
{
"epoch": 0.93,
"grad_norm": 0.795856052219787,
"learning_rate": 2.7876524744530967e-06,
"loss": 0.6053,
"step": 944
},
{
"epoch": 0.93,
"grad_norm": 0.8545427598167615,
"learning_rate": 2.7837821769227154e-06,
"loss": 0.606,
"step": 945
},
{
"epoch": 0.93,
"grad_norm": 0.7872291169661336,
"learning_rate": 2.7799111902582697e-06,
"loss": 0.5937,
"step": 946
},
{
"epoch": 0.93,
"grad_norm": 0.8222342924067905,
"learning_rate": 2.7760395238600286e-06,
"loss": 0.6429,
"step": 947
},
{
"epoch": 0.94,
"grad_norm": 0.8324916626961192,
"learning_rate": 2.7721671871299115e-06,
"loss": 0.594,
"step": 948
},
{
"epoch": 0.94,
"grad_norm": 0.7949219615171061,
"learning_rate": 2.7682941894714664e-06,
"loss": 0.6344,
"step": 949
},
{
"epoch": 0.94,
"grad_norm": 0.7915081056376099,
"learning_rate": 2.764420540289845e-06,
"loss": 0.647,
"step": 950
},
{
"epoch": 0.94,
"grad_norm": 0.8221089204021264,
"learning_rate": 2.7605462489917817e-06,
"loss": 0.613,
"step": 951
},
{
"epoch": 0.94,
"grad_norm": 0.7556912305467864,
"learning_rate": 2.7566713249855715e-06,
"loss": 0.5927,
"step": 952
},
{
"epoch": 0.94,
"grad_norm": 0.8516869877340751,
"learning_rate": 2.7527957776810432e-06,
"loss": 0.5737,
"step": 953
},
{
"epoch": 0.94,
"grad_norm": 0.8954878925348272,
"learning_rate": 2.748919616489542e-06,
"loss": 0.5881,
"step": 954
},
{
"epoch": 0.94,
"grad_norm": 0.7814783166367757,
"learning_rate": 2.7450428508239024e-06,
"loss": 0.5817,
"step": 955
},
{
"epoch": 0.94,
"grad_norm": 0.7761831452626766,
"learning_rate": 2.7411654900984257e-06,
"loss": 0.6423,
"step": 956
},
{
"epoch": 0.94,
"grad_norm": 0.8610326997473378,
"learning_rate": 2.7372875437288603e-06,
"loss": 0.5698,
"step": 957
},
{
"epoch": 0.95,
"grad_norm": 0.8037534451700079,
"learning_rate": 2.733409021132377e-06,
"loss": 0.5618,
"step": 958
},
{
"epoch": 0.95,
"grad_norm": 0.8267194190693397,
"learning_rate": 2.729529931727544e-06,
"loss": 0.6146,
"step": 959
},
{
"epoch": 0.95,
"grad_norm": 0.8595187209919314,
"learning_rate": 2.7256502849343062e-06,
"loss": 0.6139,
"step": 960
},
{
"epoch": 0.95,
"grad_norm": 0.9151590718279631,
"learning_rate": 2.7217700901739637e-06,
"loss": 0.5933,
"step": 961
},
{
"epoch": 0.95,
"grad_norm": 0.7743662392639246,
"learning_rate": 2.717889356869146e-06,
"loss": 0.6067,
"step": 962
},
{
"epoch": 0.95,
"grad_norm": 0.7692375464347264,
"learning_rate": 2.7140080944437903e-06,
"loss": 0.6007,
"step": 963
},
{
"epoch": 0.95,
"grad_norm": 0.8243938846614656,
"learning_rate": 2.710126312323119e-06,
"loss": 0.6439,
"step": 964
},
{
"epoch": 0.95,
"grad_norm": 0.869984720476869,
"learning_rate": 2.706244019933618e-06,
"loss": 0.6077,
"step": 965
},
{
"epoch": 0.95,
"grad_norm": 0.8579806687629068,
"learning_rate": 2.7023612267030085e-06,
"loss": 0.6735,
"step": 966
},
{
"epoch": 0.95,
"grad_norm": 0.7651338842188523,
"learning_rate": 2.6984779420602324e-06,
"loss": 0.5633,
"step": 967
},
{
"epoch": 0.96,
"grad_norm": 0.8845999584695565,
"learning_rate": 2.694594175435422e-06,
"loss": 0.6655,
"step": 968
},
{
"epoch": 0.96,
"grad_norm": 0.9552402309050972,
"learning_rate": 2.6907099362598815e-06,
"loss": 0.6015,
"step": 969
},
{
"epoch": 0.96,
"grad_norm": 0.7881059310601948,
"learning_rate": 2.686825233966061e-06,
"loss": 0.6393,
"step": 970
},
{
"epoch": 0.96,
"grad_norm": 0.8145877809003402,
"learning_rate": 2.6829400779875377e-06,
"loss": 0.5862,
"step": 971
},
{
"epoch": 0.96,
"grad_norm": 0.8749887484577397,
"learning_rate": 2.679054477758988e-06,
"loss": 0.6127,
"step": 972
},
{
"epoch": 0.96,
"grad_norm": 0.8610636451400849,
"learning_rate": 2.6751684427161684e-06,
"loss": 0.6101,
"step": 973
},
{
"epoch": 0.96,
"grad_norm": 0.8990043854731499,
"learning_rate": 2.6712819822958917e-06,
"loss": 0.6828,
"step": 974
},
{
"epoch": 0.96,
"grad_norm": 0.8412551914209038,
"learning_rate": 2.6673951059360036e-06,
"loss": 0.613,
"step": 975
},
{
"epoch": 0.96,
"grad_norm": 0.9159241308527234,
"learning_rate": 2.663507823075358e-06,
"loss": 0.5765,
"step": 976
},
{
"epoch": 0.96,
"grad_norm": 0.8840845410112719,
"learning_rate": 2.6596201431537977e-06,
"loss": 0.6241,
"step": 977
},
{
"epoch": 0.97,
"grad_norm": 0.853193970562842,
"learning_rate": 2.6557320756121306e-06,
"loss": 0.552,
"step": 978
},
{
"epoch": 0.97,
"grad_norm": 0.8275645940427406,
"learning_rate": 2.651843629892103e-06,
"loss": 0.568,
"step": 979
},
{
"epoch": 0.97,
"grad_norm": 0.7816643401147234,
"learning_rate": 2.647954815436382e-06,
"loss": 0.6002,
"step": 980
},
{
"epoch": 0.97,
"grad_norm": 0.8122193509157052,
"learning_rate": 2.6440656416885302e-06,
"loss": 0.6705,
"step": 981
},
{
"epoch": 0.97,
"grad_norm": 0.7916641408063855,
"learning_rate": 2.6401761180929798e-06,
"loss": 0.5727,
"step": 982
},
{
"epoch": 0.97,
"grad_norm": 0.823136144697698,
"learning_rate": 2.6362862540950163e-06,
"loss": 0.6495,
"step": 983
},
{
"epoch": 0.97,
"grad_norm": 0.9635376499725725,
"learning_rate": 2.6323960591407487e-06,
"loss": 0.6187,
"step": 984
},
{
"epoch": 0.97,
"grad_norm": 0.8237142858677388,
"learning_rate": 2.6285055426770935e-06,
"loss": 0.5672,
"step": 985
},
{
"epoch": 0.97,
"grad_norm": 0.841511477918458,
"learning_rate": 2.624614714151743e-06,
"loss": 0.5995,
"step": 986
},
{
"epoch": 0.97,
"grad_norm": 0.865394366604303,
"learning_rate": 2.6207235830131517e-06,
"loss": 0.5992,
"step": 987
},
{
"epoch": 0.98,
"grad_norm": 0.8810526841971326,
"learning_rate": 2.616832158710506e-06,
"loss": 0.65,
"step": 988
},
{
"epoch": 0.98,
"grad_norm": 0.83728728954254,
"learning_rate": 2.612940450693706e-06,
"loss": 0.5785,
"step": 989
},
{
"epoch": 0.98,
"grad_norm": 0.8094909685342696,
"learning_rate": 2.6090484684133406e-06,
"loss": 0.6586,
"step": 990
},
{
"epoch": 0.98,
"grad_norm": 0.8383394818111478,
"learning_rate": 2.6051562213206633e-06,
"loss": 0.6471,
"step": 991
},
{
"epoch": 0.98,
"grad_norm": 0.8008496703545273,
"learning_rate": 2.6012637188675717e-06,
"loss": 0.6041,
"step": 992
},
{
"epoch": 0.98,
"grad_norm": 0.8631369907215043,
"learning_rate": 2.5973709705065836e-06,
"loss": 0.6147,
"step": 993
},
{
"epoch": 0.98,
"grad_norm": 0.8266779122262543,
"learning_rate": 2.593477985690815e-06,
"loss": 0.6196,
"step": 994
},
{
"epoch": 0.98,
"grad_norm": 0.7893764214118881,
"learning_rate": 2.589584773873953e-06,
"loss": 0.6189,
"step": 995
},
{
"epoch": 0.98,
"grad_norm": 0.8482167729005808,
"learning_rate": 2.5856913445102383e-06,
"loss": 0.6131,
"step": 996
},
{
"epoch": 0.98,
"grad_norm": 0.8436526224259587,
"learning_rate": 2.5817977070544408e-06,
"loss": 0.6285,
"step": 997
},
{
"epoch": 0.98,
"grad_norm": 0.8898122115831852,
"learning_rate": 2.577903870961833e-06,
"loss": 0.5756,
"step": 998
},
{
"epoch": 0.99,
"grad_norm": 0.8022952501948407,
"learning_rate": 2.5740098456881718e-06,
"loss": 0.5794,
"step": 999
},
{
"epoch": 0.99,
"grad_norm": 0.7719948709570122,
"learning_rate": 2.5701156406896726e-06,
"loss": 0.5584,
"step": 1000
},
{
"epoch": 0.99,
"grad_norm": 0.8361192518803613,
"learning_rate": 2.5662212654229884e-06,
"loss": 0.5866,
"step": 1001
},
{
"epoch": 0.99,
"grad_norm": 0.8128856577929028,
"learning_rate": 2.5623267293451827e-06,
"loss": 0.5976,
"step": 1002
},
{
"epoch": 0.99,
"grad_norm": 0.8061981083828511,
"learning_rate": 2.5584320419137127e-06,
"loss": 0.5893,
"step": 1003
},
{
"epoch": 0.99,
"grad_norm": 0.8541963254842264,
"learning_rate": 2.554537212586403e-06,
"loss": 0.5794,
"step": 1004
},
{
"epoch": 0.99,
"grad_norm": 0.8341460804008688,
"learning_rate": 2.5506422508214207e-06,
"loss": 0.5897,
"step": 1005
},
{
"epoch": 0.99,
"grad_norm": 0.8003569253426912,
"learning_rate": 2.5467471660772557e-06,
"loss": 0.62,
"step": 1006
},
{
"epoch": 0.99,
"grad_norm": 0.8162229148335823,
"learning_rate": 2.5428519678126974e-06,
"loss": 0.5881,
"step": 1007
},
{
"epoch": 0.99,
"grad_norm": 0.8407184867945466,
"learning_rate": 2.538956665486808e-06,
"loss": 0.6183,
"step": 1008
},
{
"epoch": 1.0,
"grad_norm": 0.815433687807614,
"learning_rate": 2.535061268558906e-06,
"loss": 0.6176,
"step": 1009
},
{
"epoch": 1.0,
"grad_norm": 0.7869662406073055,
"learning_rate": 2.531165786488538e-06,
"loss": 0.5672,
"step": 1010
},
{
"epoch": 1.0,
"grad_norm": 0.8189601120399821,
"learning_rate": 2.527270228735456e-06,
"loss": 0.5557,
"step": 1011
},
{
"epoch": 1.0,
"grad_norm": 0.8194269588514274,
"learning_rate": 2.5233746047595984e-06,
"loss": 0.5744,
"step": 1012
},
{
"epoch": 1.0,
"grad_norm": 0.933322233446511,
"learning_rate": 2.5194789240210623e-06,
"loss": 0.5976,
"step": 1013
},
{
"epoch": 1.0,
"grad_norm": 0.941214440145205,
"learning_rate": 2.5155831959800843e-06,
"loss": 0.5986,
"step": 1014
},
{
"epoch": 1.0,
"eval_loss": 0.5868439078330994,
"eval_runtime": 1883.2861,
"eval_samples_per_second": 0.701,
"eval_steps_per_second": 0.078,
"step": 1014
}
],
"logging_steps": 1,
"max_steps": 2026,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 507,
"total_flos": 477581908377600.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}