{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"Batch Mean": 2.64666748046875,
"accuracy": 0.484375,
"epoch": 0,
"step": 0
},
{
"epoch": 0.0025,
"grad_norm": 9.6364107131958,
"learning_rate": 2.5000000000000004e-07,
"loss": 0.723,
"step": 1
},
{
"Batch Mean": 2.6478805541992188,
"accuracy": 0.46875,
"epoch": 0.0025,
"step": 1
},
{
"epoch": 0.005,
"grad_norm": 7.393254280090332,
"learning_rate": 5.000000000000001e-07,
"loss": 0.7006,
"step": 2
},
{
"Batch Mean": 2.62982177734375,
"accuracy": 0.4453125,
"epoch": 0.005,
"step": 2
},
{
"epoch": 0.0075,
"grad_norm": 8.464498519897461,
"learning_rate": 7.5e-07,
"loss": 0.7093,
"step": 3
},
{
"Batch Mean": 2.6655197143554688,
"accuracy": 0.5,
"epoch": 0.0075,
"step": 3
},
{
"epoch": 0.01,
"grad_norm": 7.2742462158203125,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.7101,
"step": 4
},
{
"Batch Mean": 2.616424560546875,
"accuracy": 0.4296875,
"epoch": 0.01,
"step": 4
},
{
"epoch": 0.0125,
"grad_norm": 8.216493606567383,
"learning_rate": 1.25e-06,
"loss": 0.7145,
"step": 5
},
{
"Batch Mean": 2.6591033935546875,
"accuracy": 0.546875,
"epoch": 0.0125,
"step": 5
},
{
"epoch": 0.015,
"grad_norm": 8.033185958862305,
"learning_rate": 1.5e-06,
"loss": 0.6821,
"step": 6
},
{
"Batch Mean": 2.6329345703125,
"accuracy": 0.640625,
"epoch": 0.015,
"step": 6
},
{
"epoch": 0.0175,
"grad_norm": 10.476421356201172,
"learning_rate": 1.75e-06,
"loss": 0.6911,
"step": 7
},
{
"Batch Mean": 2.680908203125,
"accuracy": 0.5859375,
"epoch": 0.0175,
"step": 7
},
{
"epoch": 0.02,
"grad_norm": 9.513436317443848,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.6859,
"step": 8
},
{
"Batch Mean": 2.658843994140625,
"accuracy": 0.53125,
"epoch": 0.02,
"step": 8
},
{
"epoch": 0.0225,
"grad_norm": 7.415161609649658,
"learning_rate": 2.25e-06,
"loss": 0.6839,
"step": 9
},
{
"Batch Mean": 2.74237060546875,
"accuracy": 0.6328125,
"epoch": 0.0225,
"step": 9
},
{
"epoch": 0.025,
"grad_norm": 6.572882175445557,
"learning_rate": 2.5e-06,
"loss": 0.6557,
"step": 10
},
{
"Batch Mean": 2.9183349609375,
"accuracy": 0.65625,
"epoch": 0.025,
"step": 10
},
{
"epoch": 0.0275,
"grad_norm": 6.594631671905518,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.6336,
"step": 11
},
{
"Batch Mean": 2.929656982421875,
"accuracy": 0.546875,
"epoch": 0.0275,
"step": 11
},
{
"epoch": 0.03,
"grad_norm": 6.199557304382324,
"learning_rate": 3e-06,
"loss": 0.6784,
"step": 12
},
{
"Batch Mean": 2.938323974609375,
"accuracy": 0.59375,
"epoch": 0.03,
"step": 12
},
{
"epoch": 0.0325,
"grad_norm": 8.091628074645996,
"learning_rate": 3.2500000000000002e-06,
"loss": 0.6887,
"step": 13
},
{
"Batch Mean": 3.0481414794921875,
"accuracy": 0.6171875,
"epoch": 0.0325,
"step": 13
},
{
"epoch": 0.035,
"grad_norm": 8.876925468444824,
"learning_rate": 3.5e-06,
"loss": 0.6421,
"step": 14
},
{
"Batch Mean": 3.2818450927734375,
"accuracy": 0.703125,
"epoch": 0.035,
"step": 14
},
{
"epoch": 0.0375,
"grad_norm": 10.33357048034668,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.5853,
"step": 15
},
{
"Batch Mean": 3.2837677001953125,
"accuracy": 0.625,
"epoch": 0.0375,
"step": 15
},
{
"epoch": 0.04,
"grad_norm": 11.141650199890137,
"learning_rate": 4.000000000000001e-06,
"loss": 0.6495,
"step": 16
},
{
"Batch Mean": 3.2836532592773438,
"accuracy": 0.6015625,
"epoch": 0.04,
"step": 16
},
{
"epoch": 0.0425,
"grad_norm": 8.093420028686523,
"learning_rate": 4.25e-06,
"loss": 0.6607,
"step": 17
},
{
"Batch Mean": 3.3738174438476562,
"accuracy": 0.65625,
"epoch": 0.0425,
"step": 17
},
{
"epoch": 0.045,
"grad_norm": 8.171963691711426,
"learning_rate": 4.5e-06,
"loss": 0.5946,
"step": 18
},
{
"Batch Mean": 3.8035507202148438,
"accuracy": 0.6015625,
"epoch": 0.045,
"step": 18
},
{
"epoch": 0.0475,
"grad_norm": 10.379010200500488,
"learning_rate": 4.75e-06,
"loss": 0.62,
"step": 19
},
{
"Batch Mean": 4.241037845611572,
"accuracy": 0.7421875,
"epoch": 0.0475,
"step": 19
},
{
"epoch": 0.05,
"grad_norm": 8.072366714477539,
"learning_rate": 5e-06,
"loss": 0.6094,
"step": 20
},
{
"Batch Mean": 4.397010803222656,
"accuracy": 0.65625,
"epoch": 0.05,
"step": 20
},
{
"epoch": 0.0525,
"grad_norm": 7.2537384033203125,
"learning_rate": 4.986842105263158e-06,
"loss": 0.5916,
"step": 21
},
{
"Batch Mean": 4.36688232421875,
"accuracy": 0.671875,
"epoch": 0.0525,
"step": 21
},
{
"epoch": 0.055,
"grad_norm": 8.559549331665039,
"learning_rate": 4.973684210526316e-06,
"loss": 0.5884,
"step": 22
},
{
"Batch Mean": 4.482940673828125,
"accuracy": 0.6640625,
"epoch": 0.055,
"step": 22
},
{
"epoch": 0.0575,
"grad_norm": 8.731099128723145,
"learning_rate": 4.960526315789474e-06,
"loss": 0.6106,
"step": 23
},
{
"Batch Mean": 4.41278076171875,
"accuracy": 0.7109375,
"epoch": 0.0575,
"step": 23
},
{
"epoch": 0.06,
"grad_norm": 6.149895191192627,
"learning_rate": 4.947368421052632e-06,
"loss": 0.5569,
"step": 24
},
{
"Batch Mean": 4.1328277587890625,
"accuracy": 0.6484375,
"epoch": 0.06,
"step": 24
},
{
"epoch": 0.0625,
"grad_norm": 6.8678879737854,
"learning_rate": 4.9342105263157895e-06,
"loss": 0.5729,
"step": 25
},
{
"Batch Mean": 3.8360061645507812,
"accuracy": 0.6484375,
"epoch": 0.0625,
"step": 25
},
{
"epoch": 0.065,
"grad_norm": 6.591639041900635,
"learning_rate": 4.921052631578948e-06,
"loss": 0.5902,
"step": 26
},
{
"Batch Mean": 3.9048538208007812,
"accuracy": 0.6953125,
"epoch": 0.065,
"step": 26
},
{
"epoch": 0.0675,
"grad_norm": 5.855933666229248,
"learning_rate": 4.907894736842106e-06,
"loss": 0.5666,
"step": 27
},
{
"Batch Mean": 3.7672348022460938,
"accuracy": 0.6953125,
"epoch": 0.0675,
"step": 27
},
{
"epoch": 0.07,
"grad_norm": 5.393594264984131,
"learning_rate": 4.894736842105264e-06,
"loss": 0.5508,
"step": 28
},
{
"Batch Mean": 3.723773956298828,
"accuracy": 0.65625,
"epoch": 0.07,
"step": 28
},
{
"epoch": 0.0725,
"grad_norm": 5.428353786468506,
"learning_rate": 4.881578947368422e-06,
"loss": 0.5694,
"step": 29
},
{
"Batch Mean": 3.7498779296875,
"accuracy": 0.6953125,
"epoch": 0.0725,
"step": 29
},
{
"epoch": 0.075,
"grad_norm": 5.005551338195801,
"learning_rate": 4.8684210526315795e-06,
"loss": 0.5905,
"step": 30
},
{
"Batch Mean": 3.564330577850342,
"accuracy": 0.671875,
"epoch": 0.075,
"step": 30
},
{
"epoch": 0.0775,
"grad_norm": 6.4090681076049805,
"learning_rate": 4.855263157894737e-06,
"loss": 0.5901,
"step": 31
},
{
"Batch Mean": 3.5886306762695312,
"accuracy": 0.7421875,
"epoch": 0.0775,
"step": 31
},
{
"epoch": 0.08,
"grad_norm": 6.025192737579346,
"learning_rate": 4.842105263157895e-06,
"loss": 0.5265,
"step": 32
},
{
"Batch Mean": 3.64593505859375,
"accuracy": 0.6953125,
"epoch": 0.08,
"step": 32
},
{
"epoch": 0.0825,
"grad_norm": 6.129398345947266,
"learning_rate": 4.828947368421053e-06,
"loss": 0.5083,
"step": 33
},
{
"Batch Mean": 3.5973358154296875,
"accuracy": 0.6875,
"epoch": 0.0825,
"step": 33
},
{
"epoch": 0.085,
"grad_norm": 6.91660737991333,
"learning_rate": 4.815789473684211e-06,
"loss": 0.5365,
"step": 34
},
{
"Batch Mean": 3.6286163330078125,
"accuracy": 0.7109375,
"epoch": 0.085,
"step": 34
},
{
"epoch": 0.0875,
"grad_norm": 7.553361892700195,
"learning_rate": 4.802631578947369e-06,
"loss": 0.5196,
"step": 35
},
{
"Batch Mean": 3.5937328338623047,
"accuracy": 0.7109375,
"epoch": 0.0875,
"step": 35
},
{
"epoch": 0.09,
"grad_norm": 7.884591579437256,
"learning_rate": 4.789473684210527e-06,
"loss": 0.5533,
"step": 36
},
{
"Batch Mean": 3.28558349609375,
"accuracy": 0.6796875,
"epoch": 0.09,
"step": 36
},
{
"epoch": 0.0925,
"grad_norm": 8.118135452270508,
"learning_rate": 4.7763157894736844e-06,
"loss": 0.5145,
"step": 37
},
{
"Batch Mean": 3.079477310180664,
"accuracy": 0.7421875,
"epoch": 0.0925,
"step": 37
},
{
"epoch": 0.095,
"grad_norm": 10.431463241577148,
"learning_rate": 4.763157894736842e-06,
"loss": 0.6,
"step": 38
},
{
"Batch Mean": 2.9296703338623047,
"accuracy": 0.7578125,
"epoch": 0.095,
"step": 38
},
{
"epoch": 0.0975,
"grad_norm": 8.055816650390625,
"learning_rate": 4.75e-06,
"loss": 0.445,
"step": 39
},
{
"Batch Mean": 2.9282093048095703,
"accuracy": 0.7109375,
"epoch": 0.0975,
"step": 39
},
{
"epoch": 0.1,
"grad_norm": 9.155073165893555,
"learning_rate": 4.736842105263158e-06,
"loss": 0.5568,
"step": 40
},
{
"Batch Mean": 2.4473862648010254,
"accuracy": 0.6171875,
"epoch": 0.1,
"step": 40
},
{
"epoch": 0.1025,
"grad_norm": 8.89163589477539,
"learning_rate": 4.723684210526316e-06,
"loss": 0.5934,
"step": 41
},
{
"Batch Mean": 2.4439845085144043,
"accuracy": 0.6953125,
"epoch": 0.1025,
"step": 41
},
{
"epoch": 0.105,
"grad_norm": 7.411705017089844,
"learning_rate": 4.710526315789474e-06,
"loss": 0.5522,
"step": 42
},
{
"Batch Mean": 1.7335999011993408,
"accuracy": 0.734375,
"epoch": 0.105,
"step": 42
},
{
"epoch": 0.1075,
"grad_norm": 7.0730109214782715,
"learning_rate": 4.697368421052632e-06,
"loss": 0.5157,
"step": 43
},
{
"Batch Mean": 1.7379779815673828,
"accuracy": 0.71875,
"epoch": 0.1075,
"step": 43
},
{
"epoch": 0.11,
"grad_norm": 7.879677772521973,
"learning_rate": 4.68421052631579e-06,
"loss": 0.5405,
"step": 44
},
{
"Batch Mean": 1.7085151672363281,
"accuracy": 0.6796875,
"epoch": 0.11,
"step": 44
},
{
"epoch": 0.1125,
"grad_norm": 6.630401611328125,
"learning_rate": 4.671052631578948e-06,
"loss": 0.564,
"step": 45
},
{
"Batch Mean": 1.7431907653808594,
"accuracy": 0.734375,
"epoch": 0.1125,
"step": 45
},
{
"epoch": 0.115,
"grad_norm": 6.569342136383057,
"learning_rate": 4.657894736842106e-06,
"loss": 0.4969,
"step": 46
},
{
"Batch Mean": 2.4792041778564453,
"accuracy": 0.6953125,
"epoch": 0.115,
"step": 46
},
{
"epoch": 0.1175,
"grad_norm": 6.198298931121826,
"learning_rate": 4.6447368421052635e-06,
"loss": 0.5501,
"step": 47
},
{
"Batch Mean": 2.4960689544677734,
"accuracy": 0.7578125,
"epoch": 0.1175,
"step": 47
},
{
"epoch": 0.12,
"grad_norm": 5.984739780426025,
"learning_rate": 4.631578947368421e-06,
"loss": 0.4513,
"step": 48
},
{
"Batch Mean": 2.7437705993652344,
"accuracy": 0.71875,
"epoch": 0.12,
"step": 48
},
{
"epoch": 0.1225,
"grad_norm": 6.669623374938965,
"learning_rate": 4.618421052631579e-06,
"loss": 0.5404,
"step": 49
},
{
"Batch Mean": 2.672210693359375,
"accuracy": 0.75,
"epoch": 0.1225,
"step": 49
},
{
"epoch": 0.125,
"grad_norm": 6.217676639556885,
"learning_rate": 4.605263157894737e-06,
"loss": 0.4668,
"step": 50
},
{
"Batch Mean": 2.7935009002685547,
"accuracy": 0.75,
"epoch": 0.125,
"step": 50
},
{
"epoch": 0.1275,
"grad_norm": 7.666802883148193,
"learning_rate": 4.592105263157895e-06,
"loss": 0.5491,
"step": 51
},
{
"Batch Mean": 2.651923179626465,
"accuracy": 0.734375,
"epoch": 0.1275,
"step": 51
},
{
"epoch": 0.13,
"grad_norm": 9.799866676330566,
"learning_rate": 4.578947368421053e-06,
"loss": 0.5173,
"step": 52
},
{
"Batch Mean": 2.9287376403808594,
"accuracy": 0.734375,
"epoch": 0.13,
"step": 52
},
{
"epoch": 0.1325,
"grad_norm": 8.577540397644043,
"learning_rate": 4.565789473684211e-06,
"loss": 0.5279,
"step": 53
},
{
"Batch Mean": 3.0377674102783203,
"accuracy": 0.7265625,
"epoch": 0.1325,
"step": 53
},
{
"epoch": 0.135,
"grad_norm": 8.099453926086426,
"learning_rate": 4.552631578947369e-06,
"loss": 0.5122,
"step": 54
},
{
"Batch Mean": 3.3550057411193848,
"accuracy": 0.6953125,
"epoch": 0.135,
"step": 54
},
{
"epoch": 0.1375,
"grad_norm": 9.597841262817383,
"learning_rate": 4.539473684210527e-06,
"loss": 0.5034,
"step": 55
},
{
"Batch Mean": 3.3833670616149902,
"accuracy": 0.7578125,
"epoch": 0.1375,
"step": 55
},
{
"epoch": 0.14,
"grad_norm": 9.208806037902832,
"learning_rate": 4.526315789473685e-06,
"loss": 0.53,
"step": 56
},
{
"Batch Mean": 3.255979537963867,
"accuracy": 0.7890625,
"epoch": 0.14,
"step": 56
},
{
"epoch": 0.1425,
"grad_norm": 7.923739910125732,
"learning_rate": 4.513157894736843e-06,
"loss": 0.4315,
"step": 57
},
{
"Batch Mean": 3.003221273422241,
"accuracy": 0.7265625,
"epoch": 0.1425,
"step": 57
},
{
"epoch": 0.145,
"grad_norm": 8.64991569519043,
"learning_rate": 4.5e-06,
"loss": 0.5038,
"step": 58
},
{
"Batch Mean": 3.122058868408203,
"accuracy": 0.78125,
"epoch": 0.145,
"step": 58
},
{
"epoch": 0.1475,
"grad_norm": 8.633460998535156,
"learning_rate": 4.4868421052631584e-06,
"loss": 0.4886,
"step": 59
},
{
"Batch Mean": 2.8752236366271973,
"accuracy": 0.7109375,
"epoch": 0.1475,
"step": 59
},
{
"epoch": 0.15,
"grad_norm": 8.399279594421387,
"learning_rate": 4.473684210526316e-06,
"loss": 0.5239,
"step": 60
},
{
"Batch Mean": 2.694100856781006,
"accuracy": 0.7578125,
"epoch": 0.15,
"step": 60
},
{
"epoch": 0.1525,
"grad_norm": 7.846778869628906,
"learning_rate": 4.460526315789474e-06,
"loss": 0.4538,
"step": 61
},
{
"Batch Mean": 2.59914493560791,
"accuracy": 0.8046875,
"epoch": 0.1525,
"step": 61
},
{
"epoch": 0.155,
"grad_norm": 7.844125270843506,
"learning_rate": 4.447368421052632e-06,
"loss": 0.4234,
"step": 62
},
{
"Batch Mean": 2.431262493133545,
"accuracy": 0.75,
"epoch": 0.155,
"step": 62
},
{
"epoch": 0.1575,
"grad_norm": 8.15548038482666,
"learning_rate": 4.43421052631579e-06,
"loss": 0.4695,
"step": 63
},
{
"Batch Mean": 2.162135124206543,
"accuracy": 0.7421875,
"epoch": 0.1575,
"step": 63
},
{
"epoch": 0.16,
"grad_norm": 9.166322708129883,
"learning_rate": 4.4210526315789476e-06,
"loss": 0.4898,
"step": 64
},
{
"Batch Mean": 1.8282737731933594,
"accuracy": 0.75,
"epoch": 0.16,
"step": 64
},
{
"epoch": 0.1625,
"grad_norm": 9.52655029296875,
"learning_rate": 4.407894736842105e-06,
"loss": 0.4651,
"step": 65
},
{
"Batch Mean": 1.9480819702148438,
"accuracy": 0.7890625,
"epoch": 0.1625,
"step": 65
},
{
"epoch": 0.165,
"grad_norm": 8.491720199584961,
"learning_rate": 4.394736842105263e-06,
"loss": 0.4578,
"step": 66
},
{
"Batch Mean": 1.8442394733428955,
"accuracy": 0.7734375,
"epoch": 0.165,
"step": 66
},
{
"epoch": 0.1675,
"grad_norm": 9.01707649230957,
"learning_rate": 4.381578947368421e-06,
"loss": 0.4858,
"step": 67
},
{
"Batch Mean": 1.2759554386138916,
"accuracy": 0.6953125,
"epoch": 0.1675,
"step": 67
},
{
"epoch": 0.17,
"grad_norm": 10.790990829467773,
"learning_rate": 4.368421052631579e-06,
"loss": 0.5368,
"step": 68
},
{
"Batch Mean": 0.916658878326416,
"accuracy": 0.7109375,
"epoch": 0.17,
"step": 68
},
{
"epoch": 0.1725,
"grad_norm": 10.27574634552002,
"learning_rate": 4.3552631578947375e-06,
"loss": 0.5475,
"step": 69
},
{
"Batch Mean": 0.9389474391937256,
"accuracy": 0.6875,
"epoch": 0.1725,
"step": 69
},
{
"epoch": 0.175,
"grad_norm": 9.463624954223633,
"learning_rate": 4.342105263157895e-06,
"loss": 0.5295,
"step": 70
},
{
"Batch Mean": 0.6801865100860596,
"accuracy": 0.78125,
"epoch": 0.175,
"step": 70
},
{
"epoch": 0.1775,
"grad_norm": 7.813968658447266,
"learning_rate": 4.328947368421053e-06,
"loss": 0.4602,
"step": 71
},
{
"Batch Mean": 0.6634016036987305,
"accuracy": 0.78125,
"epoch": 0.1775,
"step": 71
},
{
"epoch": 0.18,
"grad_norm": 7.322629928588867,
"learning_rate": 4.315789473684211e-06,
"loss": 0.4328,
"step": 72
},
{
"Batch Mean": 0.518225908279419,
"accuracy": 0.8359375,
"epoch": 0.18,
"step": 72
},
{
"epoch": 0.1825,
"grad_norm": 7.121971130371094,
"learning_rate": 4.302631578947369e-06,
"loss": 0.3997,
"step": 73
},
{
"Batch Mean": 0.6286078691482544,
"accuracy": 0.75,
"epoch": 0.1825,
"step": 73
},
{
"epoch": 0.185,
"grad_norm": 8.812067031860352,
"learning_rate": 4.289473684210527e-06,
"loss": 0.5345,
"step": 74
},
{
"Batch Mean": 0.4055500030517578,
"accuracy": 0.78125,
"epoch": 0.185,
"step": 74
},
{
"epoch": 0.1875,
"grad_norm": 8.031818389892578,
"learning_rate": 4.276315789473684e-06,
"loss": 0.4436,
"step": 75
},
{
"Batch Mean": 0.6844688057899475,
"accuracy": 0.6640625,
"epoch": 0.1875,
"step": 75
},
{
"epoch": 0.19,
"grad_norm": 9.14492130279541,
"learning_rate": 4.2631578947368425e-06,
"loss": 0.5727,
"step": 76
},
{
"Batch Mean": 0.549891471862793,
"accuracy": 0.7578125,
"epoch": 0.19,
"step": 76
},
{
"epoch": 0.1925,
"grad_norm": 8.721407890319824,
"learning_rate": 4.25e-06,
"loss": 0.4997,
"step": 77
},
{
"Batch Mean": 0.3632768988609314,
"accuracy": 0.703125,
"epoch": 0.1925,
"step": 77
},
{
"epoch": 0.195,
"grad_norm": 8.776328086853027,
"learning_rate": 4.236842105263158e-06,
"loss": 0.5442,
"step": 78
},
{
"Batch Mean": 0.4604153633117676,
"accuracy": 0.7421875,
"epoch": 0.195,
"step": 78
},
{
"epoch": 0.1975,
"grad_norm": 7.281062126159668,
"learning_rate": 4.223684210526316e-06,
"loss": 0.4793,
"step": 79
},
{
"Batch Mean": 0.605268120765686,
"accuracy": 0.765625,
"epoch": 0.1975,
"step": 79
},
{
"epoch": 0.2,
"grad_norm": 7.533178329467773,
"learning_rate": 4.210526315789474e-06,
"loss": 0.4338,
"step": 80
},
{
"Batch Mean": 0.9789919257164001,
"accuracy": 0.75,
"epoch": 0.2,
"step": 80
},
{
"epoch": 0.2025,
"grad_norm": 7.2433671951293945,
"learning_rate": 4.197368421052632e-06,
"loss": 0.4785,
"step": 81
},
{
"Batch Mean": 1.114579200744629,
"accuracy": 0.71875,
"epoch": 0.2025,
"step": 81
},
{
"epoch": 0.205,
"grad_norm": 7.2934064865112305,
"learning_rate": 4.18421052631579e-06,
"loss": 0.4836,
"step": 82
},
{
"Batch Mean": 1.0528812408447266,
"accuracy": 0.7578125,
"epoch": 0.205,
"step": 82
},
{
"epoch": 0.2075,
"grad_norm": 7.544822692871094,
"learning_rate": 4.171052631578948e-06,
"loss": 0.5077,
"step": 83
},
{
"Batch Mean": 1.1365894079208374,
"accuracy": 0.671875,
"epoch": 0.2075,
"step": 83
},
{
"epoch": 0.21,
"grad_norm": 8.340349197387695,
"learning_rate": 4.157894736842106e-06,
"loss": 0.579,
"step": 84
},
{
"Batch Mean": 1.7320103645324707,
"accuracy": 0.8125,
"epoch": 0.21,
"step": 84
},
{
"epoch": 0.2125,
"grad_norm": 6.645724296569824,
"learning_rate": 4.144736842105263e-06,
"loss": 0.4114,
"step": 85
},
{
"Batch Mean": 1.443530559539795,
"accuracy": 0.7578125,
"epoch": 0.2125,
"step": 85
},
{
"epoch": 0.215,
"grad_norm": 6.647582530975342,
"learning_rate": 4.1315789473684216e-06,
"loss": 0.4369,
"step": 86
},
{
"Batch Mean": 1.5358202457427979,
"accuracy": 0.7578125,
"epoch": 0.215,
"step": 86
},
{
"epoch": 0.2175,
"grad_norm": 7.315903186798096,
"learning_rate": 4.118421052631579e-06,
"loss": 0.4895,
"step": 87
},
{
"Batch Mean": 1.7256877422332764,
"accuracy": 0.7734375,
"epoch": 0.2175,
"step": 87
},
{
"epoch": 0.22,
"grad_norm": 8.285420417785645,
"learning_rate": 4.105263157894737e-06,
"loss": 0.5035,
"step": 88
},
{
"Batch Mean": 1.5997413396835327,
"accuracy": 0.75,
"epoch": 0.22,
"step": 88
},
{
"epoch": 0.2225,
"grad_norm": 7.990715026855469,
"learning_rate": 4.092105263157895e-06,
"loss": 0.51,
"step": 89
},
{
"Batch Mean": 1.6425485610961914,
"accuracy": 0.734375,
"epoch": 0.2225,
"step": 89
},
{
"epoch": 0.225,
"grad_norm": 8.72641658782959,
"learning_rate": 4.078947368421053e-06,
"loss": 0.4908,
"step": 90
},
{
"Batch Mean": 1.9212679862976074,
"accuracy": 0.84375,
"epoch": 0.225,
"step": 90
},
{
"epoch": 0.2275,
"grad_norm": 7.567220211029053,
"learning_rate": 4.065789473684211e-06,
"loss": 0.4152,
"step": 91
},
{
"Batch Mean": 1.6822540760040283,
"accuracy": 0.8046875,
"epoch": 0.2275,
"step": 91
},
{
"epoch": 0.23,
"grad_norm": 7.402642726898193,
"learning_rate": 4.052631578947368e-06,
"loss": 0.4054,
"step": 92
},
{
"Batch Mean": 1.5377750396728516,
"accuracy": 0.75,
"epoch": 0.23,
"step": 92
},
{
"epoch": 0.2325,
"grad_norm": 8.574016571044922,
"learning_rate": 4.0394736842105265e-06,
"loss": 0.4739,
"step": 93
},
{
"Batch Mean": 1.332385540008545,
"accuracy": 0.734375,
"epoch": 0.2325,
"step": 93
},
{
"epoch": 0.235,
"grad_norm": 8.827500343322754,
"learning_rate": 4.026315789473684e-06,
"loss": 0.4983,
"step": 94
},
{
"Batch Mean": 1.0707147121429443,
"accuracy": 0.8046875,
"epoch": 0.235,
"step": 94
},
{
"epoch": 0.2375,
"grad_norm": 7.975078105926514,
"learning_rate": 4.013157894736842e-06,
"loss": 0.4709,
"step": 95
},
{
"Batch Mean": 1.1354491710662842,
"accuracy": 0.75,
"epoch": 0.2375,
"step": 95
},
{
"epoch": 0.24,
"grad_norm": 7.764720916748047,
"learning_rate": 4.000000000000001e-06,
"loss": 0.4697,
"step": 96
},
{
"Batch Mean": 0.7834959030151367,
"accuracy": 0.765625,
"epoch": 0.24,
"step": 96
},
{
"epoch": 0.2425,
"grad_norm": 8.38682746887207,
"learning_rate": 3.986842105263158e-06,
"loss": 0.46,
"step": 97
},
{
"Batch Mean": 0.9441508650779724,
"accuracy": 0.8359375,
"epoch": 0.2425,
"step": 97
},
{
"epoch": 0.245,
"grad_norm": 8.565149307250977,
"learning_rate": 3.9736842105263165e-06,
"loss": 0.4071,
"step": 98
},
{
"Batch Mean": 0.8886473178863525,
"accuracy": 0.8203125,
"epoch": 0.245,
"step": 98
},
{
"epoch": 0.2475,
"grad_norm": 7.316027641296387,
"learning_rate": 3.960526315789474e-06,
"loss": 0.413,
"step": 99
},
{
"Batch Mean": 1.3381073474884033,
"accuracy": 0.765625,
"epoch": 0.2475,
"step": 99
},
{
"epoch": 0.25,
"grad_norm": 7.380762577056885,
"learning_rate": 3.947368421052632e-06,
"loss": 0.4639,
"step": 100
},
{
"Batch Mean": 1.8181347846984863,
"accuracy": 0.78125,
"epoch": 0.25,
"step": 100
},
{
"epoch": 0.2525,
"grad_norm": 8.348108291625977,
"learning_rate": 3.93421052631579e-06,
"loss": 0.4568,
"step": 101
},
{
"Batch Mean": 1.850348711013794,
"accuracy": 0.828125,
"epoch": 0.2525,
"step": 101
},
{
"epoch": 0.255,
"grad_norm": 7.533435821533203,
"learning_rate": 3.921052631578947e-06,
"loss": 0.4471,
"step": 102
},
{
"Batch Mean": 1.8302688598632812,
"accuracy": 0.7265625,
"epoch": 0.255,
"step": 102
},
{
"epoch": 0.2575,
"grad_norm": 7.560754776000977,
"learning_rate": 3.907894736842106e-06,
"loss": 0.4922,
"step": 103
},
{
"Batch Mean": 1.832013487815857,
"accuracy": 0.7734375,
"epoch": 0.2575,
"step": 103
},
{
"epoch": 0.26,
"grad_norm": 8.4998197555542,
"learning_rate": 3.894736842105263e-06,
"loss": 0.4636,
"step": 104
},
{
"Batch Mean": 1.9380416870117188,
"accuracy": 0.7421875,
"epoch": 0.26,
"step": 104
},
{
"epoch": 0.2625,
"grad_norm": 8.759364128112793,
"learning_rate": 3.8815789473684214e-06,
"loss": 0.5117,
"step": 105
},
{
"Batch Mean": 2.0537302494049072,
"accuracy": 0.78125,
"epoch": 0.2625,
"step": 105
},
{
"epoch": 0.265,
"grad_norm": 7.300965785980225,
"learning_rate": 3.868421052631579e-06,
"loss": 0.3956,
"step": 106
},
{
"Batch Mean": 2.236231803894043,
"accuracy": 0.7890625,
"epoch": 0.265,
"step": 106
},
{
"epoch": 0.2675,
"grad_norm": 7.826519012451172,
"learning_rate": 3.855263157894737e-06,
"loss": 0.462,
"step": 107
},
{
"Batch Mean": 2.2571160793304443,
"accuracy": 0.78125,
"epoch": 0.2675,
"step": 107
},
{
"epoch": 0.27,
"grad_norm": 7.177440643310547,
"learning_rate": 3.842105263157895e-06,
"loss": 0.4264,
"step": 108
},
{
"Batch Mean": 2.3395166397094727,
"accuracy": 0.8359375,
"epoch": 0.27,
"step": 108
},
{
"epoch": 0.2725,
"grad_norm": 7.556861877441406,
"learning_rate": 3.828947368421053e-06,
"loss": 0.3825,
"step": 109
},
{
"Batch Mean": 2.5364012718200684,
"accuracy": 0.7890625,
"epoch": 0.2725,
"step": 109
},
{
"epoch": 0.275,
"grad_norm": 8.785236358642578,
"learning_rate": 3.815789473684211e-06,
"loss": 0.4915,
"step": 110
},
{
"Batch Mean": 2.545170783996582,
"accuracy": 0.71875,
"epoch": 0.275,
"step": 110
},
{
"epoch": 0.2775,
"grad_norm": 8.062837600708008,
"learning_rate": 3.802631578947369e-06,
"loss": 0.5025,
"step": 111
},
{
"Batch Mean": 2.6232008934020996,
"accuracy": 0.7734375,
"epoch": 0.2775,
"step": 111
},
{
"epoch": 0.28,
"grad_norm": 7.3966498374938965,
"learning_rate": 3.789473684210527e-06,
"loss": 0.4145,
"step": 112
},
{
"Batch Mean": 2.351048469543457,
"accuracy": 0.6953125,
"epoch": 0.28,
"step": 112
},
{
"epoch": 0.2825,
"grad_norm": 7.810081958770752,
"learning_rate": 3.7763157894736847e-06,
"loss": 0.5147,
"step": 113
},
{
"Batch Mean": 1.6083295345306396,
"accuracy": 0.796875,
"epoch": 0.2825,
"step": 113
},
{
"epoch": 0.285,
"grad_norm": 6.8766703605651855,
"learning_rate": 3.7631578947368426e-06,
"loss": 0.4275,
"step": 114
},
{
"Batch Mean": 1.594513177871704,
"accuracy": 0.8046875,
"epoch": 0.285,
"step": 114
},
{
"epoch": 0.2875,
"grad_norm": 7.07267427444458,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.4197,
"step": 115
},
{
"Batch Mean": 1.6583623886108398,
"accuracy": 0.7421875,
"epoch": 0.2875,
"step": 115
},
{
"epoch": 0.29,
"grad_norm": 8.415107727050781,
"learning_rate": 3.736842105263158e-06,
"loss": 0.4723,
"step": 116
},
{
"Batch Mean": 1.6947704553604126,
"accuracy": 0.8203125,
"epoch": 0.29,
"step": 116
},
{
"epoch": 0.2925,
"grad_norm": 7.132028102874756,
"learning_rate": 3.723684210526316e-06,
"loss": 0.4039,
"step": 117
},
{
"Batch Mean": 1.7144229412078857,
"accuracy": 0.8046875,
"epoch": 0.2925,
"step": 117
},
{
"epoch": 0.295,
"grad_norm": 6.850518703460693,
"learning_rate": 3.710526315789474e-06,
"loss": 0.4148,
"step": 118
},
{
"Batch Mean": 1.4002912044525146,
"accuracy": 0.828125,
"epoch": 0.295,
"step": 118
},
{
"epoch": 0.2975,
"grad_norm": 7.008551120758057,
"learning_rate": 3.6973684210526317e-06,
"loss": 0.3972,
"step": 119
},
{
"Batch Mean": 1.1955918073654175,
"accuracy": 0.7890625,
"epoch": 0.2975,
"step": 119
},
{
"epoch": 0.3,
"grad_norm": 8.176568984985352,
"learning_rate": 3.6842105263157896e-06,
"loss": 0.4894,
"step": 120
},
{
"Batch Mean": 1.4260971546173096,
"accuracy": 0.765625,
"epoch": 0.3,
"step": 120
},
{
"epoch": 0.3025,
"grad_norm": 7.905389308929443,
"learning_rate": 3.6710526315789476e-06,
"loss": 0.4796,
"step": 121
},
{
"Batch Mean": 1.1381725072860718,
"accuracy": 0.84375,
"epoch": 0.3025,
"step": 121
},
{
"epoch": 0.305,
"grad_norm": 7.464083194732666,
"learning_rate": 3.657894736842106e-06,
"loss": 0.387,
"step": 122
},
{
"Batch Mean": 1.064018726348877,
"accuracy": 0.8125,
"epoch": 0.305,
"step": 122
},
{
"epoch": 0.3075,
"grad_norm": 8.280699729919434,
"learning_rate": 3.644736842105264e-06,
"loss": 0.4154,
"step": 123
},
{
"Batch Mean": 1.3708758354187012,
"accuracy": 0.765625,
"epoch": 0.3075,
"step": 123
},
{
"epoch": 0.31,
"grad_norm": 9.830754280090332,
"learning_rate": 3.6315789473684217e-06,
"loss": 0.5003,
"step": 124
},
{
"Batch Mean": 1.162580132484436,
"accuracy": 0.828125,
"epoch": 0.31,
"step": 124
},
{
"epoch": 0.3125,
"grad_norm": 8.671696662902832,
"learning_rate": 3.618421052631579e-06,
"loss": 0.4389,
"step": 125
},
{
"Batch Mean": 0.671147346496582,
"accuracy": 0.8125,
"epoch": 0.3125,
"step": 125
},
{
"epoch": 0.315,
"grad_norm": 8.493339538574219,
"learning_rate": 3.605263157894737e-06,
"loss": 0.4434,
"step": 126
},
{
"Batch Mean": 0.8207384347915649,
"accuracy": 0.8203125,
"epoch": 0.315,
"step": 126
},
{
"epoch": 0.3175,
"grad_norm": 8.274676322937012,
"learning_rate": 3.592105263157895e-06,
"loss": 0.4057,
"step": 127
},
{
"Batch Mean": 0.5156043767929077,
"accuracy": 0.75,
"epoch": 0.3175,
"step": 127
},
{
"epoch": 0.32,
"grad_norm": 9.094855308532715,
"learning_rate": 3.578947368421053e-06,
"loss": 0.4661,
"step": 128
},
{
"Batch Mean": 0.43193674087524414,
"accuracy": 0.8203125,
"epoch": 0.32,
"step": 128
},
{
"epoch": 0.3225,
"grad_norm": 8.729266166687012,
"learning_rate": 3.565789473684211e-06,
"loss": 0.4586,
"step": 129
},
{
"Batch Mean": 0.30504658818244934,
"accuracy": 0.7421875,
"epoch": 0.3225,
"step": 129
},
{
"epoch": 0.325,
"grad_norm": 11.113021850585938,
"learning_rate": 3.5526315789473687e-06,
"loss": 0.4894,
"step": 130
},
{
"Batch Mean": 0.3271394371986389,
"accuracy": 0.7734375,
"epoch": 0.325,
"step": 130
},
{
"epoch": 0.3275,
"grad_norm": 8.692556381225586,
"learning_rate": 3.5394736842105266e-06,
"loss": 0.4227,
"step": 131
},
{
"Batch Mean": 0.6269352436065674,
"accuracy": 0.7734375,
"epoch": 0.3275,
"step": 131
},
{
"epoch": 0.33,
"grad_norm": 8.046867370605469,
"learning_rate": 3.5263157894736846e-06,
"loss": 0.496,
"step": 132
},
{
"Batch Mean": 0.6559182405471802,
"accuracy": 0.765625,
"epoch": 0.33,
"step": 132
},
{
"epoch": 0.3325,
"grad_norm": 7.385076999664307,
"learning_rate": 3.513157894736842e-06,
"loss": 0.4459,
"step": 133
},
{
"Batch Mean": 0.5328922271728516,
"accuracy": 0.7578125,
"epoch": 0.3325,
"step": 133
},
{
"epoch": 0.335,
"grad_norm": 7.108922481536865,
"learning_rate": 3.5e-06,
"loss": 0.428,
"step": 134
},
{
"Batch Mean": 1.1600193977355957,
"accuracy": 0.828125,
"epoch": 0.335,
"step": 134
},
{
"epoch": 0.3375,
"grad_norm": 6.962152004241943,
"learning_rate": 3.486842105263158e-06,
"loss": 0.4237,
"step": 135
},
{
"Batch Mean": 1.1642009019851685,
"accuracy": 0.6953125,
"epoch": 0.3375,
"step": 135
},
{
"epoch": 0.34,
"grad_norm": 7.879281520843506,
"learning_rate": 3.473684210526316e-06,
"loss": 0.528,
"step": 136
},
{
"Batch Mean": 1.0588830709457397,
"accuracy": 0.6953125,
"epoch": 0.34,
"step": 136
},
{
"epoch": 0.3425,
"grad_norm": 9.492472648620605,
"learning_rate": 3.460526315789474e-06,
"loss": 0.5365,
"step": 137
},
{
"Batch Mean": 1.3413481712341309,
"accuracy": 0.8046875,
"epoch": 0.3425,
"step": 137
},
{
"epoch": 0.345,
"grad_norm": 6.448349952697754,
"learning_rate": 3.447368421052632e-06,
"loss": 0.4449,
"step": 138
},
{
"Batch Mean": 1.3028790950775146,
"accuracy": 0.7578125,
"epoch": 0.345,
"step": 138
},
{
"epoch": 0.3475,
"grad_norm": 6.54162073135376,
"learning_rate": 3.43421052631579e-06,
"loss": 0.4109,
"step": 139
},
{
"Batch Mean": 1.206446886062622,
"accuracy": 0.7734375,
"epoch": 0.3475,
"step": 139
},
{
"epoch": 0.35,
"grad_norm": 7.594364166259766,
"learning_rate": 3.421052631578948e-06,
"loss": 0.4771,
"step": 140
},
{
"Batch Mean": 1.425430417060852,
"accuracy": 0.7734375,
"epoch": 0.35,
"step": 140
},
{
"epoch": 0.3525,
"grad_norm": 6.744152069091797,
"learning_rate": 3.4078947368421057e-06,
"loss": 0.4458,
"step": 141
},
{
"Batch Mean": 1.4311294555664062,
"accuracy": 0.765625,
"epoch": 0.3525,
"step": 141
},
{
"epoch": 0.355,
"grad_norm": 6.789750576019287,
"learning_rate": 3.3947368421052636e-06,
"loss": 0.4798,
"step": 142
},
{
"Batch Mean": 1.6251521110534668,
"accuracy": 0.765625,
"epoch": 0.355,
"step": 142
},
{
"epoch": 0.3575,
"grad_norm": 7.658289909362793,
"learning_rate": 3.381578947368421e-06,
"loss": 0.5209,
"step": 143
},
{
"Batch Mean": 1.5901494026184082,
"accuracy": 0.7890625,
"epoch": 0.3575,
"step": 143
},
{
"epoch": 0.36,
"grad_norm": 7.741049289703369,
"learning_rate": 3.368421052631579e-06,
"loss": 0.448,
"step": 144
},
{
"Batch Mean": 1.7485175132751465,
"accuracy": 0.71875,
"epoch": 0.36,
"step": 144
},
{
"epoch": 0.3625,
"grad_norm": 7.933503150939941,
"learning_rate": 3.355263157894737e-06,
"loss": 0.4745,
"step": 145
},
{
"Batch Mean": 1.8909566402435303,
"accuracy": 0.796875,
"epoch": 0.3625,
"step": 145
},
{
"epoch": 0.365,
"grad_norm": 7.310222148895264,
"learning_rate": 3.342105263157895e-06,
"loss": 0.4671,
"step": 146
},
{
"Batch Mean": 2.035611152648926,
"accuracy": 0.75,
"epoch": 0.365,
"step": 146
},
{
"epoch": 0.3675,
"grad_norm": 7.8623809814453125,
"learning_rate": 3.3289473684210528e-06,
"loss": 0.5079,
"step": 147
},
{
"Batch Mean": 2.0848422050476074,
"accuracy": 0.8203125,
"epoch": 0.3675,
"step": 147
},
{
"epoch": 0.37,
"grad_norm": 7.38020133972168,
"learning_rate": 3.3157894736842107e-06,
"loss": 0.5131,
"step": 148
},
{
"Batch Mean": 1.9938535690307617,
"accuracy": 0.7734375,
"epoch": 0.37,
"step": 148
},
{
"epoch": 0.3725,
"grad_norm": 7.401918411254883,
"learning_rate": 3.302631578947369e-06,
"loss": 0.4059,
"step": 149
},
{
"Batch Mean": 1.9994306564331055,
"accuracy": 0.8515625,
"epoch": 0.3725,
"step": 149
},
{
"epoch": 0.375,
"grad_norm": 7.515151500701904,
"learning_rate": 3.289473684210527e-06,
"loss": 0.4198,
"step": 150
},
{
"Batch Mean": 1.9627342224121094,
"accuracy": 0.78125,
"epoch": 0.375,
"step": 150
},
{
"epoch": 0.3775,
"grad_norm": 9.086411476135254,
"learning_rate": 3.276315789473685e-06,
"loss": 0.407,
"step": 151
},
{
"Batch Mean": 2.247910499572754,
"accuracy": 0.75,
"epoch": 0.3775,
"step": 151
},
{
"epoch": 0.38,
"grad_norm": 8.9763765335083,
"learning_rate": 3.2631578947368423e-06,
"loss": 0.5274,
"step": 152
},
{
"Batch Mean": 2.261256694793701,
"accuracy": 0.8125,
"epoch": 0.38,
"step": 152
},
{
"epoch": 0.3825,
"grad_norm": 6.913736820220947,
"learning_rate": 3.2500000000000002e-06,
"loss": 0.4049,
"step": 153
},
{
"Batch Mean": 2.136556386947632,
"accuracy": 0.78125,
"epoch": 0.3825,
"step": 153
},
{
"epoch": 0.385,
"grad_norm": 7.755904674530029,
"learning_rate": 3.236842105263158e-06,
"loss": 0.449,
"step": 154
},
{
"Batch Mean": 2.088193416595459,
"accuracy": 0.8046875,
"epoch": 0.385,
"step": 154
},
{
"epoch": 0.3875,
"grad_norm": 7.1718316078186035,
"learning_rate": 3.223684210526316e-06,
"loss": 0.4129,
"step": 155
},
{
"Batch Mean": 2.5068817138671875,
"accuracy": 0.796875,
"epoch": 0.3875,
"step": 155
},
{
"epoch": 0.39,
"grad_norm": 9.461467742919922,
"learning_rate": 3.210526315789474e-06,
"loss": 0.4147,
"step": 156
},
{
"Batch Mean": 2.0449132919311523,
"accuracy": 0.7578125,
"epoch": 0.39,
"step": 156
},
{
"epoch": 0.3925,
"grad_norm": 8.785501480102539,
"learning_rate": 3.197368421052632e-06,
"loss": 0.478,
"step": 157
},
{
"Batch Mean": 2.2665648460388184,
"accuracy": 0.8203125,
"epoch": 0.3925,
"step": 157
},
{
"epoch": 0.395,
"grad_norm": 7.906212329864502,
"learning_rate": 3.1842105263157898e-06,
"loss": 0.4083,
"step": 158
},
{
"Batch Mean": 1.8799333572387695,
"accuracy": 0.734375,
"epoch": 0.395,
"step": 158
},
{
"epoch": 0.3975,
"grad_norm": 9.25344467163086,
"learning_rate": 3.1710526315789477e-06,
"loss": 0.5007,
"step": 159
},
{
"Batch Mean": 2.178339719772339,
"accuracy": 0.828125,
"epoch": 0.3975,
"step": 159
},
{
"epoch": 0.4,
"grad_norm": 8.476263046264648,
"learning_rate": 3.157894736842105e-06,
"loss": 0.4184,
"step": 160
},
{
"Batch Mean": 1.9270563125610352,
"accuracy": 0.7421875,
"epoch": 0.4,
"step": 160
},
{
"epoch": 0.4025,
"grad_norm": 10.214958190917969,
"learning_rate": 3.144736842105263e-06,
"loss": 0.5542,
"step": 161
},
{
"Batch Mean": 1.8700056076049805,
"accuracy": 0.796875,
"epoch": 0.4025,
"step": 161
},
{
"epoch": 0.405,
"grad_norm": 8.01285171508789,
"learning_rate": 3.131578947368421e-06,
"loss": 0.4171,
"step": 162
},
{
"Batch Mean": 1.8484151363372803,
"accuracy": 0.765625,
"epoch": 0.405,
"step": 162
},
{
"epoch": 0.4075,
"grad_norm": 8.116409301757812,
"learning_rate": 3.1184210526315793e-06,
"loss": 0.4576,
"step": 163
},
{
"Batch Mean": 1.7719857692718506,
"accuracy": 0.78125,
"epoch": 0.4075,
"step": 163
},
{
"epoch": 0.41,
"grad_norm": 7.865448951721191,
"learning_rate": 3.1052631578947372e-06,
"loss": 0.4393,
"step": 164
},
{
"Batch Mean": 1.7638423442840576,
"accuracy": 0.78125,
"epoch": 0.41,
"step": 164
},
{
"epoch": 0.4125,
"grad_norm": 7.379011631011963,
"learning_rate": 3.092105263157895e-06,
"loss": 0.4278,
"step": 165
},
{
"Batch Mean": 1.7345421314239502,
"accuracy": 0.671875,
"epoch": 0.4125,
"step": 165
},
{
"epoch": 0.415,
"grad_norm": 8.332331657409668,
"learning_rate": 3.078947368421053e-06,
"loss": 0.5219,
"step": 166
},
{
"Batch Mean": 1.5037906169891357,
"accuracy": 0.8203125,
"epoch": 0.415,
"step": 166
},
{
"epoch": 0.4175,
"grad_norm": 7.413763523101807,
"learning_rate": 3.065789473684211e-06,
"loss": 0.4208,
"step": 167
},
{
"Batch Mean": 1.7763586044311523,
"accuracy": 0.84375,
"epoch": 0.4175,
"step": 167
},
{
"epoch": 0.42,
"grad_norm": 6.86100435256958,
"learning_rate": 3.052631578947369e-06,
"loss": 0.3848,
"step": 168
},
{
"Batch Mean": 1.7984695434570312,
"accuracy": 0.7421875,
"epoch": 0.42,
"step": 168
},
{
"epoch": 0.4225,
"grad_norm": 8.179668426513672,
"learning_rate": 3.0394736842105268e-06,
"loss": 0.4362,
"step": 169
},
{
"Batch Mean": 2.192814588546753,
"accuracy": 0.8046875,
"epoch": 0.4225,
"step": 169
},
{
"epoch": 0.425,
"grad_norm": 7.526256561279297,
"learning_rate": 3.0263157894736843e-06,
"loss": 0.4229,
"step": 170
},
{
"Batch Mean": 2.0280096530914307,
"accuracy": 0.75,
"epoch": 0.425,
"step": 170
},
{
"epoch": 0.4275,
"grad_norm": 9.94598388671875,
"learning_rate": 3.013157894736842e-06,
"loss": 0.4399,
"step": 171
},
{
"Batch Mean": 2.0560245513916016,
"accuracy": 0.7890625,
"epoch": 0.4275,
"step": 171
},
{
"epoch": 0.43,
"grad_norm": 8.51555347442627,
"learning_rate": 3e-06,
"loss": 0.4532,
"step": 172
},
{
"Batch Mean": 2.4426515102386475,
"accuracy": 0.796875,
"epoch": 0.43,
"step": 172
},
{
"epoch": 0.4325,
"grad_norm": 10.147135734558105,
"learning_rate": 2.986842105263158e-06,
"loss": 0.415,
"step": 173
},
{
"Batch Mean": 2.6248912811279297,
"accuracy": 0.765625,
"epoch": 0.4325,
"step": 173
},
{
"epoch": 0.435,
"grad_norm": 8.979937553405762,
"learning_rate": 2.973684210526316e-06,
"loss": 0.4281,
"step": 174
},
{
"Batch Mean": 2.5382537841796875,
"accuracy": 0.75,
"epoch": 0.435,
"step": 174
},
{
"epoch": 0.4375,
"grad_norm": 9.164665222167969,
"learning_rate": 2.960526315789474e-06,
"loss": 0.5332,
"step": 175
},
{
"Batch Mean": 2.4894802570343018,
"accuracy": 0.8515625,
"epoch": 0.4375,
"step": 175
},
{
"epoch": 0.44,
"grad_norm": 8.5990571975708,
"learning_rate": 2.9473684210526317e-06,
"loss": 0.3928,
"step": 176
},
{
"Batch Mean": 2.163804054260254,
"accuracy": 0.8359375,
"epoch": 0.44,
"step": 176
},
{
"epoch": 0.4425,
"grad_norm": 7.983039855957031,
"learning_rate": 2.93421052631579e-06,
"loss": 0.3724,
"step": 177
},
{
"Batch Mean": 2.3338851928710938,
"accuracy": 0.71875,
"epoch": 0.4425,
"step": 177
},
{
"epoch": 0.445,
"grad_norm": 11.577138900756836,
"learning_rate": 2.921052631578948e-06,
"loss": 0.5237,
"step": 178
},
{
"Batch Mean": 2.19907808303833,
"accuracy": 0.78125,
"epoch": 0.445,
"step": 178
},
{
"epoch": 0.4475,
"grad_norm": 9.622861862182617,
"learning_rate": 2.907894736842106e-06,
"loss": 0.4411,
"step": 179
},
{
"Batch Mean": 2.456371307373047,
"accuracy": 0.8203125,
"epoch": 0.4475,
"step": 179
},
{
"epoch": 0.45,
"grad_norm": 8.121110916137695,
"learning_rate": 2.8947368421052634e-06,
"loss": 0.3666,
"step": 180
},
{
"Batch Mean": 2.4925358295440674,
"accuracy": 0.8046875,
"epoch": 0.45,
"step": 180
},
{
"epoch": 0.4525,
"grad_norm": 10.249773025512695,
"learning_rate": 2.8815789473684213e-06,
"loss": 0.4987,
"step": 181
},
{
"Batch Mean": 2.46488094329834,
"accuracy": 0.84375,
"epoch": 0.4525,
"step": 181
},
{
"epoch": 0.455,
"grad_norm": 7.278886318206787,
"learning_rate": 2.868421052631579e-06,
"loss": 0.3417,
"step": 182
},
{
"Batch Mean": 2.4173011779785156,
"accuracy": 0.8125,
"epoch": 0.455,
"step": 182
},
{
"epoch": 0.4575,
"grad_norm": 8.626398086547852,
"learning_rate": 2.855263157894737e-06,
"loss": 0.4077,
"step": 183
},
{
"Batch Mean": 1.8599066734313965,
"accuracy": 0.7421875,
"epoch": 0.4575,
"step": 183
},
{
"epoch": 0.46,
"grad_norm": 9.768396377563477,
"learning_rate": 2.842105263157895e-06,
"loss": 0.4882,
"step": 184
},
{
"Batch Mean": 2.145785331726074,
"accuracy": 0.78125,
"epoch": 0.46,
"step": 184
},
{
"epoch": 0.4625,
"grad_norm": 8.507255554199219,
"learning_rate": 2.828947368421053e-06,
"loss": 0.4024,
"step": 185
},
{
"Batch Mean": 1.7814726829528809,
"accuracy": 0.7578125,
"epoch": 0.4625,
"step": 185
},
{
"epoch": 0.465,
"grad_norm": 10.355148315429688,
"learning_rate": 2.815789473684211e-06,
"loss": 0.5292,
"step": 186
},
{
"Batch Mean": 1.888627529144287,
"accuracy": 0.828125,
"epoch": 0.465,
"step": 186
},
{
"epoch": 0.4675,
"grad_norm": 8.215280532836914,
"learning_rate": 2.8026315789473683e-06,
"loss": 0.3985,
"step": 187
},
{
"Batch Mean": 1.8559679985046387,
"accuracy": 0.828125,
"epoch": 0.4675,
"step": 187
},
{
"epoch": 0.47,
"grad_norm": 7.013787269592285,
"learning_rate": 2.789473684210526e-06,
"loss": 0.3815,
"step": 188
},
{
"Batch Mean": 1.7180118560791016,
"accuracy": 0.7578125,
"epoch": 0.47,
"step": 188
},
{
"epoch": 0.4725,
"grad_norm": 7.246432781219482,
"learning_rate": 2.776315789473684e-06,
"loss": 0.4432,
"step": 189
},
{
"Batch Mean": 1.5764282941818237,
"accuracy": 0.8046875,
"epoch": 0.4725,
"step": 189
},
{
"epoch": 0.475,
"grad_norm": 7.489908218383789,
"learning_rate": 2.7631578947368424e-06,
"loss": 0.4256,
"step": 190
},
{
"Batch Mean": 1.2490241527557373,
"accuracy": 0.796875,
"epoch": 0.475,
"step": 190
},
{
"epoch": 0.4775,
"grad_norm": 7.128645420074463,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.4488,
"step": 191
},
{
"Batch Mean": 1.0759793519973755,
"accuracy": 0.828125,
"epoch": 0.4775,
"step": 191
},
{
"epoch": 0.48,
"grad_norm": 7.570965766906738,
"learning_rate": 2.7368421052631583e-06,
"loss": 0.4073,
"step": 192
},
{
"Batch Mean": 0.8960497379302979,
"accuracy": 0.84375,
"epoch": 0.48,
"step": 192
},
{
"epoch": 0.4825,
"grad_norm": 7.548012733459473,
"learning_rate": 2.723684210526316e-06,
"loss": 0.3733,
"step": 193
},
{
"Batch Mean": 0.9645734429359436,
"accuracy": 0.765625,
"epoch": 0.4825,
"step": 193
},
{
"epoch": 0.485,
"grad_norm": 7.658840656280518,
"learning_rate": 2.710526315789474e-06,
"loss": 0.4456,
"step": 194
},
{
"Batch Mean": 0.9235124588012695,
"accuracy": 0.8515625,
"epoch": 0.485,
"step": 194
},
{
"epoch": 0.4875,
"grad_norm": 7.451051235198975,
"learning_rate": 2.697368421052632e-06,
"loss": 0.3823,
"step": 195
},
{
"Batch Mean": 1.27029550075531,
"accuracy": 0.84375,
"epoch": 0.4875,
"step": 195
},
{
"epoch": 0.49,
"grad_norm": 7.330119609832764,
"learning_rate": 2.68421052631579e-06,
"loss": 0.3983,
"step": 196
},
{
"Batch Mean": 1.3359482288360596,
"accuracy": 0.7578125,
"epoch": 0.49,
"step": 196
},
{
"epoch": 0.4925,
"grad_norm": 7.700873374938965,
"learning_rate": 2.6710526315789474e-06,
"loss": 0.4517,
"step": 197
},
{
"Batch Mean": 1.4910411834716797,
"accuracy": 0.7265625,
"epoch": 0.4925,
"step": 197
},
{
"epoch": 0.495,
"grad_norm": 8.952484130859375,
"learning_rate": 2.6578947368421053e-06,
"loss": 0.4592,
"step": 198
},
{
"Batch Mean": 1.6832847595214844,
"accuracy": 0.8359375,
"epoch": 0.495,
"step": 198
},
{
"epoch": 0.4975,
"grad_norm": 8.010570526123047,
"learning_rate": 2.644736842105263e-06,
"loss": 0.4087,
"step": 199
},
{
"Batch Mean": 1.5343718528747559,
"accuracy": 0.796875,
"epoch": 0.4975,
"step": 199
},
{
"epoch": 0.5,
"grad_norm": 8.992362976074219,
"learning_rate": 2.631578947368421e-06,
"loss": 0.4823,
"step": 200
},
{
"Batch Mean": 1.6328332424163818,
"accuracy": 0.7734375,
"epoch": 0.5,
"step": 200
},
{
"epoch": 0.5025,
"grad_norm": 9.035256385803223,
"learning_rate": 2.618421052631579e-06,
"loss": 0.4471,
"step": 201
},
{
"Batch Mean": 1.5844993591308594,
"accuracy": 0.78125,
"epoch": 0.5025,
"step": 201
},
{
"epoch": 0.505,
"grad_norm": 7.833075046539307,
"learning_rate": 2.605263157894737e-06,
"loss": 0.4245,
"step": 202
},
{
"Batch Mean": 1.411280632019043,
"accuracy": 0.7734375,
"epoch": 0.505,
"step": 202
},
{
"epoch": 0.5075,
"grad_norm": 9.432007789611816,
"learning_rate": 2.592105263157895e-06,
"loss": 0.4416,
"step": 203
},
{
"Batch Mean": 1.6097602844238281,
"accuracy": 0.7421875,
"epoch": 0.5075,
"step": 203
},
{
"epoch": 0.51,
"grad_norm": 8.289928436279297,
"learning_rate": 2.578947368421053e-06,
"loss": 0.4749,
"step": 204
},
{
"Batch Mean": 1.1805787086486816,
"accuracy": 0.734375,
"epoch": 0.51,
"step": 204
},
{
"epoch": 0.5125,
"grad_norm": 9.437891960144043,
"learning_rate": 2.565789473684211e-06,
"loss": 0.4626,
"step": 205
},
{
"Batch Mean": 1.347944736480713,
"accuracy": 0.7265625,
"epoch": 0.5125,
"step": 205
},
{
"epoch": 0.515,
"grad_norm": 8.658241271972656,
"learning_rate": 2.552631578947369e-06,
"loss": 0.4763,
"step": 206
},
{
"Batch Mean": 1.2932205200195312,
"accuracy": 0.7890625,
"epoch": 0.515,
"step": 206
},
{
"epoch": 0.5175,
"grad_norm": 8.141422271728516,
"learning_rate": 2.5394736842105265e-06,
"loss": 0.4264,
"step": 207
},
{
"Batch Mean": 1.2558472156524658,
"accuracy": 0.8046875,
"epoch": 0.5175,
"step": 207
},
{
"epoch": 0.52,
"grad_norm": 7.476047515869141,
"learning_rate": 2.5263157894736844e-06,
"loss": 0.4308,
"step": 208
},
{
"Batch Mean": 0.9620341062545776,
"accuracy": 0.7421875,
"epoch": 0.52,
"step": 208
},
{
"epoch": 0.5225,
"grad_norm": 7.889948844909668,
"learning_rate": 2.5131578947368423e-06,
"loss": 0.4499,
"step": 209
},
{
"Batch Mean": 0.7413516640663147,
"accuracy": 0.7265625,
"epoch": 0.5225,
"step": 209
},
{
"epoch": 0.525,
"grad_norm": 9.15515422821045,
"learning_rate": 2.5e-06,
"loss": 0.5032,
"step": 210
},
{
"Batch Mean": 0.9634220600128174,
"accuracy": 0.8515625,
"epoch": 0.525,
"step": 210
},
{
"epoch": 0.5275,
"grad_norm": 6.711307048797607,
"learning_rate": 2.486842105263158e-06,
"loss": 0.3679,
"step": 211
},
{
"Batch Mean": 1.191461205482483,
"accuracy": 0.734375,
"epoch": 0.5275,
"step": 211
},
{
"epoch": 0.53,
"grad_norm": 7.998767375946045,
"learning_rate": 2.473684210526316e-06,
"loss": 0.4737,
"step": 212
},
{
"Batch Mean": 0.9631187915802002,
"accuracy": 0.828125,
"epoch": 0.53,
"step": 212
},
{
"epoch": 0.5325,
"grad_norm": 5.967334747314453,
"learning_rate": 2.460526315789474e-06,
"loss": 0.3451,
"step": 213
},
{
"Batch Mean": 0.8651614189147949,
"accuracy": 0.7734375,
"epoch": 0.5325,
"step": 213
},
{
"epoch": 0.535,
"grad_norm": 7.710539817810059,
"learning_rate": 2.447368421052632e-06,
"loss": 0.4404,
"step": 214
},
{
"Batch Mean": 0.9871734380722046,
"accuracy": 0.8515625,
"epoch": 0.535,
"step": 214
},
{
"epoch": 0.5375,
"grad_norm": 7.020612716674805,
"learning_rate": 2.4342105263157898e-06,
"loss": 0.3469,
"step": 215
},
{
"Batch Mean": 0.9098066091537476,
"accuracy": 0.765625,
"epoch": 0.5375,
"step": 215
},
{
"epoch": 0.54,
"grad_norm": 8.405688285827637,
"learning_rate": 2.4210526315789477e-06,
"loss": 0.5086,
"step": 216
},
{
"Batch Mean": 0.9320878982543945,
"accuracy": 0.7734375,
"epoch": 0.54,
"step": 216
},
{
"epoch": 0.5425,
"grad_norm": 7.488485336303711,
"learning_rate": 2.4078947368421056e-06,
"loss": 0.4145,
"step": 217
},
{
"Batch Mean": 1.1252110004425049,
"accuracy": 0.7734375,
"epoch": 0.5425,
"step": 217
},
{
"epoch": 0.545,
"grad_norm": 8.458834648132324,
"learning_rate": 2.3947368421052635e-06,
"loss": 0.4336,
"step": 218
},
{
"Batch Mean": 1.0384407043457031,
"accuracy": 0.828125,
"epoch": 0.545,
"step": 218
},
{
"epoch": 0.5475,
"grad_norm": 8.343704223632812,
"learning_rate": 2.381578947368421e-06,
"loss": 0.3845,
"step": 219
},
{
"Batch Mean": 1.3833621740341187,
"accuracy": 0.7734375,
"epoch": 0.5475,
"step": 219
},
{
"epoch": 0.55,
"grad_norm": 9.676087379455566,
"learning_rate": 2.368421052631579e-06,
"loss": 0.4359,
"step": 220
},
{
"Batch Mean": 0.9786429405212402,
"accuracy": 0.7890625,
"epoch": 0.55,
"step": 220
},
{
"epoch": 0.5525,
"grad_norm": 8.946784019470215,
"learning_rate": 2.355263157894737e-06,
"loss": 0.4391,
"step": 221
},
{
"Batch Mean": 1.0276886224746704,
"accuracy": 0.796875,
"epoch": 0.5525,
"step": 221
},
{
"epoch": 0.555,
"grad_norm": 9.185587882995605,
"learning_rate": 2.342105263157895e-06,
"loss": 0.4326,
"step": 222
},
{
"Batch Mean": 1.312666416168213,
"accuracy": 0.765625,
"epoch": 0.555,
"step": 222
},
{
"epoch": 0.5575,
"grad_norm": 9.896113395690918,
"learning_rate": 2.328947368421053e-06,
"loss": 0.4861,
"step": 223
},
{
"Batch Mean": 1.1942353248596191,
"accuracy": 0.828125,
"epoch": 0.5575,
"step": 223
},
{
"epoch": 0.56,
"grad_norm": 7.770511150360107,
"learning_rate": 2.3157894736842105e-06,
"loss": 0.3452,
"step": 224
},
{
"Batch Mean": 0.8957247734069824,
"accuracy": 0.765625,
"epoch": 0.56,
"step": 224
},
{
"epoch": 0.5625,
"grad_norm": 10.309408187866211,
"learning_rate": 2.3026315789473684e-06,
"loss": 0.459,
"step": 225
},
{
"Batch Mean": 0.9890605807304382,
"accuracy": 0.7734375,
"epoch": 0.5625,
"step": 225
},
{
"epoch": 0.565,
"grad_norm": 10.081012725830078,
"learning_rate": 2.2894736842105263e-06,
"loss": 0.433,
"step": 226
},
{
"Batch Mean": 0.37744632363319397,
"accuracy": 0.7734375,
"epoch": 0.565,
"step": 226
},
{
"epoch": 0.5675,
"grad_norm": 10.793180465698242,
"learning_rate": 2.2763157894736847e-06,
"loss": 0.4462,
"step": 227
},
{
"Batch Mean": 0.5035256147384644,
"accuracy": 0.796875,
"epoch": 0.5675,
"step": 227
},
{
"epoch": 0.57,
"grad_norm": 10.840557098388672,
"learning_rate": 2.2631578947368426e-06,
"loss": 0.4301,
"step": 228
},
{
"Batch Mean": 0.3500272035598755,
"accuracy": 0.828125,
"epoch": 0.57,
"step": 228
},
{
"epoch": 0.5725,
"grad_norm": 7.773880958557129,
"learning_rate": 2.25e-06,
"loss": 0.3467,
"step": 229
},
{
"Batch Mean": 0.548717737197876,
"accuracy": 0.8125,
"epoch": 0.5725,
"step": 229
},
{
"epoch": 0.575,
"grad_norm": 9.137742042541504,
"learning_rate": 2.236842105263158e-06,
"loss": 0.4347,
"step": 230
},
{
"Batch Mean": 0.3661317825317383,
"accuracy": 0.84375,
"epoch": 0.575,
"step": 230
},
{
"epoch": 0.5775,
"grad_norm": 7.887228012084961,
"learning_rate": 2.223684210526316e-06,
"loss": 0.3833,
"step": 231
},
{
"Batch Mean": 0.12111562490463257,
"accuracy": 0.7890625,
"epoch": 0.5775,
"step": 231
},
{
"epoch": 0.58,
"grad_norm": 7.976273536682129,
"learning_rate": 2.2105263157894738e-06,
"loss": 0.4134,
"step": 232
},
{
"Batch Mean": 0.4660627841949463,
"accuracy": 0.78125,
"epoch": 0.58,
"step": 232
},
{
"epoch": 0.5825,
"grad_norm": 9.632377624511719,
"learning_rate": 2.1973684210526317e-06,
"loss": 0.4059,
"step": 233
},
{
"Batch Mean": 0.21030330657958984,
"accuracy": 0.78125,
"epoch": 0.5825,
"step": 233
},
{
"epoch": 0.585,
"grad_norm": 8.921050071716309,
"learning_rate": 2.1842105263157896e-06,
"loss": 0.4555,
"step": 234
},
{
"Batch Mean": 0.2276170253753662,
"accuracy": 0.890625,
"epoch": 0.585,
"step": 234
},
{
"epoch": 0.5875,
"grad_norm": 7.977242469787598,
"learning_rate": 2.1710526315789475e-06,
"loss": 0.3518,
"step": 235
},
{
"Batch Mean": 0.11401081085205078,
"accuracy": 0.828125,
"epoch": 0.5875,
"step": 235
},
{
"epoch": 0.59,
"grad_norm": 8.604510307312012,
"learning_rate": 2.1578947368421054e-06,
"loss": 0.3273,
"step": 236
},
{
"Batch Mean": 0.21731328964233398,
"accuracy": 0.7578125,
"epoch": 0.59,
"step": 236
},
{
"epoch": 0.5925,
"grad_norm": 11.264445304870605,
"learning_rate": 2.1447368421052633e-06,
"loss": 0.5015,
"step": 237
},
{
"Batch Mean": 0.24432829022407532,
"accuracy": 0.8125,
"epoch": 0.5925,
"step": 237
},
{
"epoch": 0.595,
"grad_norm": 11.254815101623535,
"learning_rate": 2.1315789473684212e-06,
"loss": 0.4282,
"step": 238
},
{
"Batch Mean": 0.29278555512428284,
"accuracy": 0.7734375,
"epoch": 0.595,
"step": 238
},
{
"epoch": 0.5975,
"grad_norm": 9.741961479187012,
"learning_rate": 2.118421052631579e-06,
"loss": 0.4351,
"step": 239
},
{
"Batch Mean": 0.41886043548583984,
"accuracy": 0.8046875,
"epoch": 0.5975,
"step": 239
},
{
"epoch": 0.6,
"grad_norm": 9.94571590423584,
"learning_rate": 2.105263157894737e-06,
"loss": 0.4331,
"step": 240
},
{
"Batch Mean": 0.3082854747772217,
"accuracy": 0.78125,
"epoch": 0.6,
"step": 240
},
{
"epoch": 0.6025,
"grad_norm": 10.888338088989258,
"learning_rate": 2.092105263157895e-06,
"loss": 0.4521,
"step": 241
},
{
"Batch Mean": 0.2888784408569336,
"accuracy": 0.8203125,
"epoch": 0.6025,
"step": 241
},
{
"epoch": 0.605,
"grad_norm": 9.849271774291992,
"learning_rate": 2.078947368421053e-06,
"loss": 0.4457,
"step": 242
},
{
"Batch Mean": 0.33397483825683594,
"accuracy": 0.8125,
"epoch": 0.605,
"step": 242
},
{
"epoch": 0.6075,
"grad_norm": 7.735013484954834,
"learning_rate": 2.0657894736842108e-06,
"loss": 0.3658,
"step": 243
},
{
"Batch Mean": 0.4756225347518921,
"accuracy": 0.78125,
"epoch": 0.6075,
"step": 243
},
{
"epoch": 0.61,
"grad_norm": 10.216158866882324,
"learning_rate": 2.0526315789473687e-06,
"loss": 0.4212,
"step": 244
},
{
"Batch Mean": -0.44994914531707764,
"accuracy": 0.8203125,
"epoch": 0.61,
"step": 244
},
{
"epoch": 0.6125,
"grad_norm": 10.403032302856445,
"learning_rate": 2.0394736842105266e-06,
"loss": 0.4733,
"step": 245
},
{
"Batch Mean": 0.5498628616333008,
"accuracy": 0.7890625,
"epoch": 0.6125,
"step": 245
},
{
"epoch": 0.615,
"grad_norm": 10.038982391357422,
"learning_rate": 2.026315789473684e-06,
"loss": 0.4919,
"step": 246
},
{
"Batch Mean": 0.31244945526123047,
"accuracy": 0.7421875,
"epoch": 0.615,
"step": 246
},
{
"epoch": 0.6175,
"grad_norm": 9.569629669189453,
"learning_rate": 2.013157894736842e-06,
"loss": 0.4326,
"step": 247
},
{
"Batch Mean": 0.19263625144958496,
"accuracy": 0.828125,
"epoch": 0.6175,
"step": 247
},
{
"epoch": 0.62,
"grad_norm": 8.492071151733398,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.3753,
"step": 248
},
{
"Batch Mean": -0.19965410232543945,
"accuracy": 0.796875,
"epoch": 0.62,
"step": 248
},
{
"epoch": 0.6225,
"grad_norm": 7.891626358032227,
"learning_rate": 1.9868421052631582e-06,
"loss": 0.3956,
"step": 249
},
{
"Batch Mean": -0.16769587993621826,
"accuracy": 0.7890625,
"epoch": 0.6225,
"step": 249
},
{
"epoch": 0.625,
"grad_norm": 7.962087154388428,
"learning_rate": 1.973684210526316e-06,
"loss": 0.3986,
"step": 250
},
{
"Batch Mean": 0.14302682876586914,
"accuracy": 0.7421875,
"epoch": 0.625,
"step": 250
},
{
"epoch": 0.6275,
"grad_norm": 8.172347068786621,
"learning_rate": 1.9605263157894736e-06,
"loss": 0.4544,
"step": 251
},
{
"Batch Mean": -0.0999612808227539,
"accuracy": 0.8203125,
"epoch": 0.6275,
"step": 251
},
{
"epoch": 0.63,
"grad_norm": 7.999663352966309,
"learning_rate": 1.9473684210526315e-06,
"loss": 0.3684,
"step": 252
},
{
"Batch Mean": 0.26888561248779297,
"accuracy": 0.8046875,
"epoch": 0.63,
"step": 252
},
{
"epoch": 0.6325,
"grad_norm": 7.38095235824585,
"learning_rate": 1.9342105263157895e-06,
"loss": 0.4204,
"step": 253
},
{
"Batch Mean": 0.32740533351898193,
"accuracy": 0.8359375,
"epoch": 0.6325,
"step": 253
},
{
"epoch": 0.635,
"grad_norm": 7.9545111656188965,
"learning_rate": 1.9210526315789474e-06,
"loss": 0.3764,
"step": 254
},
{
"Batch Mean": 0.7168645858764648,
"accuracy": 0.7265625,
"epoch": 0.635,
"step": 254
},
{
"epoch": 0.6375,
"grad_norm": 8.771035194396973,
"learning_rate": 1.9078947368421057e-06,
"loss": 0.4705,
"step": 255
},
{
"Batch Mean": 0.7449779510498047,
"accuracy": 0.8125,
"epoch": 0.6375,
"step": 255
},
{
"epoch": 0.64,
"grad_norm": 7.6166157722473145,
"learning_rate": 1.8947368421052634e-06,
"loss": 0.4284,
"step": 256
},
{
"Batch Mean": 0.4443552494049072,
"accuracy": 0.7890625,
"epoch": 0.64,
"step": 256
},
{
"epoch": 0.6425,
"grad_norm": 7.024027347564697,
"learning_rate": 1.8815789473684213e-06,
"loss": 0.3937,
"step": 257
},
{
"Batch Mean": 0.8168082237243652,
"accuracy": 0.734375,
"epoch": 0.6425,
"step": 257
},
{
"epoch": 0.645,
"grad_norm": 9.206171035766602,
"learning_rate": 1.868421052631579e-06,
"loss": 0.5132,
"step": 258
},
{
"Batch Mean": 0.9836267232894897,
"accuracy": 0.765625,
"epoch": 0.645,
"step": 258
},
{
"epoch": 0.6475,
"grad_norm": 8.63451862335205,
"learning_rate": 1.855263157894737e-06,
"loss": 0.4299,
"step": 259
},
{
"Batch Mean": 1.264570713043213,
"accuracy": 0.796875,
"epoch": 0.6475,
"step": 259
},
{
"epoch": 0.65,
"grad_norm": 7.692586421966553,
"learning_rate": 1.8421052631578948e-06,
"loss": 0.4257,
"step": 260
},
{
"Batch Mean": 1.4181015491485596,
"accuracy": 0.8125,
"epoch": 0.65,
"step": 260
},
{
"epoch": 0.6525,
"grad_norm": 8.58105182647705,
"learning_rate": 1.828947368421053e-06,
"loss": 0.4744,
"step": 261
},
{
"Batch Mean": 1.348689079284668,
"accuracy": 0.828125,
"epoch": 0.6525,
"step": 261
},
{
"epoch": 0.655,
"grad_norm": 8.066439628601074,
"learning_rate": 1.8157894736842109e-06,
"loss": 0.4056,
"step": 262
},
{
"Batch Mean": 1.0410746335983276,
"accuracy": 0.78125,
"epoch": 0.655,
"step": 262
},
{
"epoch": 0.6575,
"grad_norm": 7.489403247833252,
"learning_rate": 1.8026315789473685e-06,
"loss": 0.4268,
"step": 263
},
{
"Batch Mean": 1.3752142190933228,
"accuracy": 0.8515625,
"epoch": 0.6575,
"step": 263
},
{
"epoch": 0.66,
"grad_norm": 7.725265026092529,
"learning_rate": 1.7894736842105265e-06,
"loss": 0.4003,
"step": 264
},
{
"Batch Mean": 1.4269516468048096,
"accuracy": 0.8046875,
"epoch": 0.66,
"step": 264
},
{
"epoch": 0.6625,
"grad_norm": 8.406197547912598,
"learning_rate": 1.7763157894736844e-06,
"loss": 0.4057,
"step": 265
},
{
"Batch Mean": 1.553175926208496,
"accuracy": 0.8359375,
"epoch": 0.6625,
"step": 265
},
{
"epoch": 0.665,
"grad_norm": 8.426583290100098,
"learning_rate": 1.7631578947368423e-06,
"loss": 0.3518,
"step": 266
},
{
"Batch Mean": 1.6486907005310059,
"accuracy": 0.859375,
"epoch": 0.665,
"step": 266
},
{
"epoch": 0.6675,
"grad_norm": 8.128406524658203,
"learning_rate": 1.75e-06,
"loss": 0.3854,
"step": 267
},
{
"Batch Mean": 1.7819617986679077,
"accuracy": 0.796875,
"epoch": 0.6675,
"step": 267
},
{
"epoch": 0.67,
"grad_norm": 8.426019668579102,
"learning_rate": 1.736842105263158e-06,
"loss": 0.4415,
"step": 268
},
{
"Batch Mean": 1.423553705215454,
"accuracy": 0.8359375,
"epoch": 0.67,
"step": 268
},
{
"epoch": 0.6725,
"grad_norm": 7.446429252624512,
"learning_rate": 1.723684210526316e-06,
"loss": 0.3808,
"step": 269
},
{
"Batch Mean": 1.6426506042480469,
"accuracy": 0.78125,
"epoch": 0.6725,
"step": 269
},
{
"epoch": 0.675,
"grad_norm": 7.678967475891113,
"learning_rate": 1.710526315789474e-06,
"loss": 0.4357,
"step": 270
},
{
"Batch Mean": 1.4175643920898438,
"accuracy": 0.84375,
"epoch": 0.675,
"step": 270
},
{
"epoch": 0.6775,
"grad_norm": 8.308606147766113,
"learning_rate": 1.6973684210526318e-06,
"loss": 0.3834,
"step": 271
},
{
"Batch Mean": 1.5686655044555664,
"accuracy": 0.8125,
"epoch": 0.6775,
"step": 271
},
{
"epoch": 0.68,
"grad_norm": 9.372905731201172,
"learning_rate": 1.6842105263157895e-06,
"loss": 0.4161,
"step": 272
},
{
"Batch Mean": 1.3371083736419678,
"accuracy": 0.8046875,
"epoch": 0.68,
"step": 272
},
{
"epoch": 0.6825,
"grad_norm": 8.12229061126709,
"learning_rate": 1.6710526315789474e-06,
"loss": 0.4039,
"step": 273
},
{
"Batch Mean": 1.0551990270614624,
"accuracy": 0.8046875,
"epoch": 0.6825,
"step": 273
},
{
"epoch": 0.685,
"grad_norm": 7.422252178192139,
"learning_rate": 1.6578947368421053e-06,
"loss": 0.3893,
"step": 274
},
{
"Batch Mean": 1.1766936779022217,
"accuracy": 0.796875,
"epoch": 0.685,
"step": 274
},
{
"epoch": 0.6875,
"grad_norm": 8.245626449584961,
"learning_rate": 1.6447368421052635e-06,
"loss": 0.4395,
"step": 275
},
{
"Batch Mean": 1.4158720970153809,
"accuracy": 0.8515625,
"epoch": 0.6875,
"step": 275
},
{
"epoch": 0.69,
"grad_norm": 7.877889633178711,
"learning_rate": 1.6315789473684212e-06,
"loss": 0.3947,
"step": 276
},
{
"Batch Mean": 1.196899652481079,
"accuracy": 0.7890625,
"epoch": 0.69,
"step": 276
},
{
"epoch": 0.6925,
"grad_norm": 9.034087181091309,
"learning_rate": 1.618421052631579e-06,
"loss": 0.4123,
"step": 277
},
{
"Batch Mean": 1.1326861381530762,
"accuracy": 0.828125,
"epoch": 0.6925,
"step": 277
},
{
"epoch": 0.695,
"grad_norm": 7.901162624359131,
"learning_rate": 1.605263157894737e-06,
"loss": 0.3597,
"step": 278
},
{
"Batch Mean": 0.9996824264526367,
"accuracy": 0.8359375,
"epoch": 0.695,
"step": 278
},
{
"epoch": 0.6975,
"grad_norm": 8.420028686523438,
"learning_rate": 1.5921052631578949e-06,
"loss": 0.3625,
"step": 279
},
{
"Batch Mean": 1.1864922046661377,
"accuracy": 0.8046875,
"epoch": 0.6975,
"step": 279
},
{
"epoch": 0.7,
"grad_norm": 8.587984085083008,
"learning_rate": 1.5789473684210526e-06,
"loss": 0.4278,
"step": 280
},
{
"Batch Mean": 1.194761037826538,
"accuracy": 0.78125,
"epoch": 0.7,
"step": 280
},
{
"epoch": 0.7025,
"grad_norm": 9.341832160949707,
"learning_rate": 1.5657894736842105e-06,
"loss": 0.4657,
"step": 281
},
{
"Batch Mean": 1.2568970918655396,
"accuracy": 0.8515625,
"epoch": 0.7025,
"step": 281
},
{
"epoch": 0.705,
"grad_norm": 8.656058311462402,
"learning_rate": 1.5526315789473686e-06,
"loss": 0.3952,
"step": 282
},
{
"Batch Mean": 0.6381912231445312,
"accuracy": 0.78125,
"epoch": 0.705,
"step": 282
},
{
"epoch": 0.7075,
"grad_norm": 10.641399383544922,
"learning_rate": 1.5394736842105265e-06,
"loss": 0.4827,
"step": 283
},
{
"Batch Mean": 1.1566600799560547,
"accuracy": 0.8203125,
"epoch": 0.7075,
"step": 283
},
{
"epoch": 0.71,
"grad_norm": 8.96735668182373,
"learning_rate": 1.5263157894736844e-06,
"loss": 0.3853,
"step": 284
},
{
"Batch Mean": 0.9634236693382263,
"accuracy": 0.7734375,
"epoch": 0.71,
"step": 284
},
{
"epoch": 0.7125,
"grad_norm": 8.702007293701172,
"learning_rate": 1.5131578947368421e-06,
"loss": 0.3859,
"step": 285
},
{
"Batch Mean": 0.8360998630523682,
"accuracy": 0.8125,
"epoch": 0.7125,
"step": 285
},
{
"epoch": 0.715,
"grad_norm": 10.517492294311523,
"learning_rate": 1.5e-06,
"loss": 0.4453,
"step": 286
},
{
"Batch Mean": 0.8578420877456665,
"accuracy": 0.8125,
"epoch": 0.715,
"step": 286
},
{
"epoch": 0.7175,
"grad_norm": 10.013699531555176,
"learning_rate": 1.486842105263158e-06,
"loss": 0.4632,
"step": 287
},
{
"Batch Mean": 0.3317883014678955,
"accuracy": 0.8203125,
"epoch": 0.7175,
"step": 287
},
{
"epoch": 0.72,
"grad_norm": 8.058357238769531,
"learning_rate": 1.4736842105263159e-06,
"loss": 0.3923,
"step": 288
},
{
"Batch Mean": 0.6622927188873291,
"accuracy": 0.78125,
"epoch": 0.72,
"step": 288
},
{
"epoch": 0.7225,
"grad_norm": 8.682968139648438,
"learning_rate": 1.460526315789474e-06,
"loss": 0.456,
"step": 289
},
{
"Batch Mean": 0.6830942630767822,
"accuracy": 0.78125,
"epoch": 0.7225,
"step": 289
},
{
"epoch": 0.725,
"grad_norm": 10.149425506591797,
"learning_rate": 1.4473684210526317e-06,
"loss": 0.4826,
"step": 290
},
{
"Batch Mean": 0.3039674758911133,
"accuracy": 0.78125,
"epoch": 0.725,
"step": 290
},
{
"epoch": 0.7275,
"grad_norm": 8.727519035339355,
"learning_rate": 1.4342105263157896e-06,
"loss": 0.4374,
"step": 291
},
{
"Batch Mean": 0.27513647079467773,
"accuracy": 0.84375,
"epoch": 0.7275,
"step": 291
},
{
"epoch": 0.73,
"grad_norm": 7.006710529327393,
"learning_rate": 1.4210526315789475e-06,
"loss": 0.3133,
"step": 292
},
{
"Batch Mean": 0.6557639837265015,
"accuracy": 0.84375,
"epoch": 0.73,
"step": 292
},
{
"epoch": 0.7325,
"grad_norm": 8.32409954071045,
"learning_rate": 1.4078947368421054e-06,
"loss": 0.3789,
"step": 293
},
{
"Batch Mean": 0.34485912322998047,
"accuracy": 0.78125,
"epoch": 0.7325,
"step": 293
},
{
"epoch": 0.735,
"grad_norm": 9.060379028320312,
"learning_rate": 1.394736842105263e-06,
"loss": 0.4696,
"step": 294
},
{
"Batch Mean": -0.03677475452423096,
"accuracy": 0.796875,
"epoch": 0.735,
"step": 294
},
{
"epoch": 0.7375,
"grad_norm": 8.253447532653809,
"learning_rate": 1.3815789473684212e-06,
"loss": 0.4298,
"step": 295
},
{
"Batch Mean": 0.1854919195175171,
"accuracy": 0.796875,
"epoch": 0.7375,
"step": 295
},
{
"epoch": 0.74,
"grad_norm": 7.977970123291016,
"learning_rate": 1.3684210526315791e-06,
"loss": 0.3875,
"step": 296
},
{
"Batch Mean": 0.2770446538925171,
"accuracy": 0.7890625,
"epoch": 0.74,
"step": 296
},
{
"epoch": 0.7425,
"grad_norm": 9.434441566467285,
"learning_rate": 1.355263157894737e-06,
"loss": 0.4307,
"step": 297
},
{
"Batch Mean": 0.17018723487854004,
"accuracy": 0.703125,
"epoch": 0.7425,
"step": 297
},
{
"epoch": 0.745,
"grad_norm": 9.74467945098877,
"learning_rate": 1.342105263157895e-06,
"loss": 0.4805,
"step": 298
},
{
"Batch Mean": 0.7385026216506958,
"accuracy": 0.8359375,
"epoch": 0.745,
"step": 298
},
{
"epoch": 0.7475,
"grad_norm": 7.976377487182617,
"learning_rate": 1.3289473684210526e-06,
"loss": 0.3466,
"step": 299
},
{
"Batch Mean": 0.5781590938568115,
"accuracy": 0.796875,
"epoch": 0.7475,
"step": 299
},
{
"epoch": 0.75,
"grad_norm": 8.911229133605957,
"learning_rate": 1.3157894736842106e-06,
"loss": 0.4001,
"step": 300
},
{
"Batch Mean": 0.5454775094985962,
"accuracy": 0.7578125,
"epoch": 0.75,
"step": 300
},
{
"epoch": 0.7525,
"grad_norm": 9.226743698120117,
"learning_rate": 1.3026315789473685e-06,
"loss": 0.4997,
"step": 301
},
{
"Batch Mean": 0.7983531951904297,
"accuracy": 0.7890625,
"epoch": 0.7525,
"step": 301
},
{
"epoch": 0.755,
"grad_norm": 8.112704277038574,
"learning_rate": 1.2894736842105266e-06,
"loss": 0.4428,
"step": 302
},
{
"Batch Mean": 0.6288890838623047,
"accuracy": 0.8046875,
"epoch": 0.755,
"step": 302
},
{
"epoch": 0.7575,
"grad_norm": 8.986902236938477,
"learning_rate": 1.2763157894736845e-06,
"loss": 0.4304,
"step": 303
},
{
"Batch Mean": 0.42789459228515625,
"accuracy": 0.75,
"epoch": 0.7575,
"step": 303
},
{
"epoch": 0.76,
"grad_norm": 9.686219215393066,
"learning_rate": 1.2631578947368422e-06,
"loss": 0.5235,
"step": 304
},
{
"Batch Mean": 0.6602230072021484,
"accuracy": 0.890625,
"epoch": 0.76,
"step": 304
},
{
"epoch": 0.7625,
"grad_norm": 7.1107048988342285,
"learning_rate": 1.25e-06,
"loss": 0.3158,
"step": 305
},
{
"Batch Mean": 0.4711933135986328,
"accuracy": 0.8828125,
"epoch": 0.7625,
"step": 305
},
{
"epoch": 0.765,
"grad_norm": 7.437671184539795,
"learning_rate": 1.236842105263158e-06,
"loss": 0.3386,
"step": 306
},
{
"Batch Mean": 0.9208922386169434,
"accuracy": 0.796875,
"epoch": 0.765,
"step": 306
},
{
"epoch": 0.7675,
"grad_norm": 6.951391220092773,
"learning_rate": 1.223684210526316e-06,
"loss": 0.3802,
"step": 307
},
{
"Batch Mean": 0.4036996364593506,
"accuracy": 0.75,
"epoch": 0.7675,
"step": 307
},
{
"epoch": 0.77,
"grad_norm": 9.149080276489258,
"learning_rate": 1.2105263157894738e-06,
"loss": 0.4636,
"step": 308
},
{
"Batch Mean": 0.6852213144302368,
"accuracy": 0.765625,
"epoch": 0.77,
"step": 308
},
{
"epoch": 0.7725,
"grad_norm": 9.365516662597656,
"learning_rate": 1.1973684210526317e-06,
"loss": 0.4672,
"step": 309
},
{
"Batch Mean": 0.7226486802101135,
"accuracy": 0.8125,
"epoch": 0.7725,
"step": 309
},
{
"epoch": 0.775,
"grad_norm": 8.166211128234863,
"learning_rate": 1.1842105263157894e-06,
"loss": 0.4039,
"step": 310
},
{
"Batch Mean": 0.8003917336463928,
"accuracy": 0.7890625,
"epoch": 0.775,
"step": 310
},
{
"epoch": 0.7775,
"grad_norm": 8.930299758911133,
"learning_rate": 1.1710526315789476e-06,
"loss": 0.4418,
"step": 311
},
{
"Batch Mean": 0.909870982170105,
"accuracy": 0.8046875,
"epoch": 0.7775,
"step": 311
},
{
"epoch": 0.78,
"grad_norm": 8.864906311035156,
"learning_rate": 1.1578947368421053e-06,
"loss": 0.4414,
"step": 312
},
{
"Batch Mean": 0.9025444984436035,
"accuracy": 0.7578125,
"epoch": 0.78,
"step": 312
},
{
"epoch": 0.7825,
"grad_norm": 9.02528190612793,
"learning_rate": 1.1447368421052632e-06,
"loss": 0.4417,
"step": 313
},
{
"Batch Mean": 0.7603792548179626,
"accuracy": 0.78125,
"epoch": 0.7825,
"step": 313
},
{
"epoch": 0.785,
"grad_norm": 10.754321098327637,
"learning_rate": 1.1315789473684213e-06,
"loss": 0.504,
"step": 314
},
{
"Batch Mean": 0.5785622000694275,
"accuracy": 0.8125,
"epoch": 0.785,
"step": 314
},
{
"epoch": 0.7875,
"grad_norm": 7.807627201080322,
"learning_rate": 1.118421052631579e-06,
"loss": 0.3825,
"step": 315
},
{
"Batch Mean": 0.7554885149002075,
"accuracy": 0.78125,
"epoch": 0.7875,
"step": 315
},
{
"epoch": 0.79,
"grad_norm": 8.794540405273438,
"learning_rate": 1.1052631578947369e-06,
"loss": 0.4374,
"step": 316
},
{
"Batch Mean": 0.3685053586959839,
"accuracy": 0.7890625,
"epoch": 0.79,
"step": 316
},
{
"epoch": 0.7925,
"grad_norm": 7.32318639755249,
"learning_rate": 1.0921052631578948e-06,
"loss": 0.4272,
"step": 317
},
{
"Batch Mean": 0.6454563140869141,
"accuracy": 0.7890625,
"epoch": 0.7925,
"step": 317
},
{
"epoch": 0.795,
"grad_norm": 7.675666332244873,
"learning_rate": 1.0789473684210527e-06,
"loss": 0.3687,
"step": 318
},
{
"Batch Mean": 0.39517223834991455,
"accuracy": 0.8125,
"epoch": 0.795,
"step": 318
},
{
"epoch": 0.7975,
"grad_norm": 8.13251781463623,
"learning_rate": 1.0657894736842106e-06,
"loss": 0.4212,
"step": 319
},
{
"Batch Mean": 0.754143238067627,
"accuracy": 0.8203125,
"epoch": 0.7975,
"step": 319
},
{
"epoch": 0.8,
"grad_norm": 8.40969467163086,
"learning_rate": 1.0526315789473685e-06,
"loss": 0.4132,
"step": 320
},
{
"Batch Mean": 0.49180907011032104,
"accuracy": 0.828125,
"epoch": 0.8,
"step": 320
},
{
"epoch": 0.8025,
"grad_norm": 7.858648777008057,
"learning_rate": 1.0394736842105264e-06,
"loss": 0.3632,
"step": 321
},
{
"Batch Mean": 0.4354734420776367,
"accuracy": 0.8046875,
"epoch": 0.8025,
"step": 321
},
{
"epoch": 0.805,
"grad_norm": 8.511212348937988,
"learning_rate": 1.0263157894736843e-06,
"loss": 0.4278,
"step": 322
},
{
"Batch Mean": 0.6012592315673828,
"accuracy": 0.8046875,
"epoch": 0.805,
"step": 322
},
{
"epoch": 0.8075,
"grad_norm": 9.346186637878418,
"learning_rate": 1.013157894736842e-06,
"loss": 0.3769,
"step": 323
},
{
"Batch Mean": 0.6434267163276672,
"accuracy": 0.84375,
"epoch": 0.8075,
"step": 323
},
{
"epoch": 0.81,
"grad_norm": 7.211809158325195,
"learning_rate": 1.0000000000000002e-06,
"loss": 0.3286,
"step": 324
},
{
"Batch Mean": 0.9172674417495728,
"accuracy": 0.8125,
"epoch": 0.81,
"step": 324
},
{
"epoch": 0.8125,
"grad_norm": 8.20433235168457,
"learning_rate": 9.86842105263158e-07,
"loss": 0.3843,
"step": 325
},
{
"Batch Mean": 0.5754473209381104,
"accuracy": 0.75,
"epoch": 0.8125,
"step": 325
},
{
"epoch": 0.815,
"grad_norm": 9.98836612701416,
"learning_rate": 9.736842105263158e-07,
"loss": 0.4768,
"step": 326
},
{
"Batch Mean": 0.6945743560791016,
"accuracy": 0.8359375,
"epoch": 0.815,
"step": 326
},
{
"epoch": 0.8175,
"grad_norm": 8.13077163696289,
"learning_rate": 9.605263157894737e-07,
"loss": 0.3631,
"step": 327
},
{
"Batch Mean": 0.7578158974647522,
"accuracy": 0.7890625,
"epoch": 0.8175,
"step": 327
},
{
"epoch": 0.82,
"grad_norm": 8.39214038848877,
"learning_rate": 9.473684210526317e-07,
"loss": 0.3959,
"step": 328
},
{
"Batch Mean": 1.0447397232055664,
"accuracy": 0.7421875,
"epoch": 0.82,
"step": 328
},
{
"epoch": 0.8225,
"grad_norm": 10.048609733581543,
"learning_rate": 9.342105263157895e-07,
"loss": 0.4811,
"step": 329
},
{
"Batch Mean": 1.2344022989273071,
"accuracy": 0.78125,
"epoch": 0.8225,
"step": 329
},
{
"epoch": 0.825,
"grad_norm": 11.657893180847168,
"learning_rate": 9.210526315789474e-07,
"loss": 0.4619,
"step": 330
},
{
"Batch Mean": 0.6894526481628418,
"accuracy": 0.8046875,
"epoch": 0.825,
"step": 330
},
{
"epoch": 0.8275,
"grad_norm": 9.905115127563477,
"learning_rate": 9.078947368421054e-07,
"loss": 0.404,
"step": 331
},
{
"Batch Mean": 0.794218897819519,
"accuracy": 0.8359375,
"epoch": 0.8275,
"step": 331
},
{
"epoch": 0.83,
"grad_norm": 9.018577575683594,
"learning_rate": 8.947368421052632e-07,
"loss": 0.4312,
"step": 332
},
{
"Batch Mean": 1.1337227821350098,
"accuracy": 0.7578125,
"epoch": 0.83,
"step": 332
},
{
"epoch": 0.8325,
"grad_norm": 10.027582168579102,
"learning_rate": 8.815789473684211e-07,
"loss": 0.4617,
"step": 333
},
{
"Batch Mean": 0.8478026390075684,
"accuracy": 0.7265625,
"epoch": 0.8325,
"step": 333
},
{
"epoch": 0.835,
"grad_norm": 10.266874313354492,
"learning_rate": 8.68421052631579e-07,
"loss": 0.5081,
"step": 334
},
{
"Batch Mean": 0.4819040298461914,
"accuracy": 0.8203125,
"epoch": 0.835,
"step": 334
},
{
"epoch": 0.8375,
"grad_norm": 7.865448474884033,
"learning_rate": 8.55263157894737e-07,
"loss": 0.3768,
"step": 335
},
{
"Batch Mean": 0.6513547897338867,
"accuracy": 0.828125,
"epoch": 0.8375,
"step": 335
},
{
"epoch": 0.84,
"grad_norm": 7.196630477905273,
"learning_rate": 8.421052631578948e-07,
"loss": 0.3389,
"step": 336
},
{
"Batch Mean": 0.6813340783119202,
"accuracy": 0.84375,
"epoch": 0.84,
"step": 336
},
{
"epoch": 0.8425,
"grad_norm": 8.149627685546875,
"learning_rate": 8.289473684210527e-07,
"loss": 0.3965,
"step": 337
},
{
"Batch Mean": 0.36331480741500854,
"accuracy": 0.8203125,
"epoch": 0.8425,
"step": 337
},
{
"epoch": 0.845,
"grad_norm": 7.823698043823242,
"learning_rate": 8.157894736842106e-07,
"loss": 0.3635,
"step": 338
},
{
"Batch Mean": 0.8013505935668945,
"accuracy": 0.8203125,
"epoch": 0.845,
"step": 338
},
{
"epoch": 0.8475,
"grad_norm": 8.24132251739502,
"learning_rate": 8.026315789473685e-07,
"loss": 0.3956,
"step": 339
},
{
"Batch Mean": 0.19779634475708008,
"accuracy": 0.8203125,
"epoch": 0.8475,
"step": 339
},
{
"epoch": 0.85,
"grad_norm": 8.245027542114258,
"learning_rate": 7.894736842105263e-07,
"loss": 0.3901,
"step": 340
},
{
"Batch Mean": 0.5423249006271362,
"accuracy": 0.8671875,
"epoch": 0.85,
"step": 340
},
{
"epoch": 0.8525,
"grad_norm": 8.009626388549805,
"learning_rate": 7.763157894736843e-07,
"loss": 0.3838,
"step": 341
},
{
"Batch Mean": 0.7195075750350952,
"accuracy": 0.8359375,
"epoch": 0.8525,
"step": 341
},
{
"epoch": 0.855,
"grad_norm": 7.631696701049805,
"learning_rate": 7.631578947368422e-07,
"loss": 0.3575,
"step": 342
},
{
"Batch Mean": 0.6287542581558228,
"accuracy": 0.78125,
"epoch": 0.855,
"step": 342
},
{
"epoch": 0.8575,
"grad_norm": 9.780237197875977,
"learning_rate": 7.5e-07,
"loss": 0.4548,
"step": 343
},
{
"Batch Mean": 0.8380413055419922,
"accuracy": 0.75,
"epoch": 0.8575,
"step": 343
},
{
"epoch": 0.86,
"grad_norm": 9.96857738494873,
"learning_rate": 7.368421052631579e-07,
"loss": 0.4722,
"step": 344
},
{
"Batch Mean": 1.0288476943969727,
"accuracy": 0.8046875,
"epoch": 0.86,
"step": 344
},
{
"epoch": 0.8625,
"grad_norm": 9.187278747558594,
"learning_rate": 7.236842105263158e-07,
"loss": 0.4232,
"step": 345
},
{
"Batch Mean": 0.7908471822738647,
"accuracy": 0.8125,
"epoch": 0.8625,
"step": 345
},
{
"epoch": 0.865,
"grad_norm": 10.134464263916016,
"learning_rate": 7.105263157894737e-07,
"loss": 0.4181,
"step": 346
},
{
"Batch Mean": 0.3469386100769043,
"accuracy": 0.7890625,
"epoch": 0.865,
"step": 346
},
{
"epoch": 0.8675,
"grad_norm": 9.54261589050293,
"learning_rate": 6.973684210526316e-07,
"loss": 0.4419,
"step": 347
},
{
"Batch Mean": 0.7479956150054932,
"accuracy": 0.78125,
"epoch": 0.8675,
"step": 347
},
{
"epoch": 0.87,
"grad_norm": 8.943264961242676,
"learning_rate": 6.842105263157896e-07,
"loss": 0.4662,
"step": 348
},
{
"Batch Mean": 0.7768815159797668,
"accuracy": 0.8515625,
"epoch": 0.87,
"step": 348
},
{
"epoch": 0.8725,
"grad_norm": 7.518605709075928,
"learning_rate": 6.710526315789475e-07,
"loss": 0.3902,
"step": 349
},
{
"Batch Mean": 0.626093327999115,
"accuracy": 0.828125,
"epoch": 0.8725,
"step": 349
},
{
"epoch": 0.875,
"grad_norm": 8.11800479888916,
"learning_rate": 6.578947368421053e-07,
"loss": 0.4039,
"step": 350
},
{
"Batch Mean": 0.8511027097702026,
"accuracy": 0.734375,
"epoch": 0.875,
"step": 350
},
{
"epoch": 0.8775,
"grad_norm": 10.33953857421875,
"learning_rate": 6.447368421052633e-07,
"loss": 0.4745,
"step": 351
},
{
"Batch Mean": 0.7830682992935181,
"accuracy": 0.828125,
"epoch": 0.8775,
"step": 351
},
{
"epoch": 0.88,
"grad_norm": 8.232589721679688,
"learning_rate": 6.315789473684211e-07,
"loss": 0.3851,
"step": 352
},
{
"Batch Mean": 0.45355987548828125,
"accuracy": 0.7734375,
"epoch": 0.88,
"step": 352
},
{
"epoch": 0.8825,
"grad_norm": 8.459394454956055,
"learning_rate": 6.18421052631579e-07,
"loss": 0.436,
"step": 353
},
{
"Batch Mean": 0.7531223297119141,
"accuracy": 0.8515625,
"epoch": 0.8825,
"step": 353
},
{
"epoch": 0.885,
"grad_norm": 8.150020599365234,
"learning_rate": 6.052631578947369e-07,
"loss": 0.3367,
"step": 354
},
{
"Batch Mean": 0.6358146667480469,
"accuracy": 0.7734375,
"epoch": 0.885,
"step": 354
},
{
"epoch": 0.8875,
"grad_norm": 9.160893440246582,
"learning_rate": 5.921052631578947e-07,
"loss": 0.4131,
"step": 355
},
{
"Batch Mean": 1.0457651615142822,
"accuracy": 0.8046875,
"epoch": 0.8875,
"step": 355
},
{
"epoch": 0.89,
"grad_norm": 8.483601570129395,
"learning_rate": 5.789473684210526e-07,
"loss": 0.4374,
"step": 356
},
{
"Batch Mean": 0.626734733581543,
"accuracy": 0.7578125,
"epoch": 0.89,
"step": 356
},
{
"epoch": 0.8925,
"grad_norm": 9.595534324645996,
"learning_rate": 5.657894736842106e-07,
"loss": 0.4491,
"step": 357
},
{
"Batch Mean": 0.4467230439186096,
"accuracy": 0.84375,
"epoch": 0.8925,
"step": 357
},
{
"epoch": 0.895,
"grad_norm": 8.240345001220703,
"learning_rate": 5.526315789473684e-07,
"loss": 0.3564,
"step": 358
},
{
"Batch Mean": 0.5113234519958496,
"accuracy": 0.8046875,
"epoch": 0.895,
"step": 358
},
{
"epoch": 0.8975,
"grad_norm": 8.639927864074707,
"learning_rate": 5.394736842105264e-07,
"loss": 0.4065,
"step": 359
},
{
"Batch Mean": 0.7469578981399536,
"accuracy": 0.8125,
"epoch": 0.8975,
"step": 359
},
{
"epoch": 0.9,
"grad_norm": 8.386570930480957,
"learning_rate": 5.263157894736843e-07,
"loss": 0.3927,
"step": 360
},
{
"Batch Mean": 0.9620860815048218,
"accuracy": 0.796875,
"epoch": 0.9,
"step": 360
},
{
"epoch": 0.9025,
"grad_norm": 8.406227111816406,
"learning_rate": 5.131578947368422e-07,
"loss": 0.3563,
"step": 361
},
{
"Batch Mean": 0.5185819864273071,
"accuracy": 0.84375,
"epoch": 0.9025,
"step": 361
},
{
"epoch": 0.905,
"grad_norm": 9.187983512878418,
"learning_rate": 5.000000000000001e-07,
"loss": 0.368,
"step": 362
},
{
"Batch Mean": 0.5017170906066895,
"accuracy": 0.84375,
"epoch": 0.905,
"step": 362
},
{
"epoch": 0.9075,
"grad_norm": 7.493112564086914,
"learning_rate": 4.868421052631579e-07,
"loss": 0.3496,
"step": 363
},
{
"Batch Mean": 0.9849581718444824,
"accuracy": 0.828125,
"epoch": 0.9075,
"step": 363
},
{
"epoch": 0.91,
"grad_norm": 9.461167335510254,
"learning_rate": 4.7368421052631585e-07,
"loss": 0.4282,
"step": 364
},
{
"Batch Mean": 0.47400856018066406,
"accuracy": 0.8515625,
"epoch": 0.91,
"step": 364
},
{
"epoch": 0.9125,
"grad_norm": 7.981534957885742,
"learning_rate": 4.605263157894737e-07,
"loss": 0.3698,
"step": 365
},
{
"Batch Mean": 1.0054559707641602,
"accuracy": 0.7890625,
"epoch": 0.9125,
"step": 365
},
{
"epoch": 0.915,
"grad_norm": 8.598128318786621,
"learning_rate": 4.473684210526316e-07,
"loss": 0.3753,
"step": 366
},
{
"Batch Mean": 0.5245475769042969,
"accuracy": 0.8203125,
"epoch": 0.915,
"step": 366
},
{
"epoch": 0.9175,
"grad_norm": 8.880937576293945,
"learning_rate": 4.342105263157895e-07,
"loss": 0.3632,
"step": 367
},
{
"Batch Mean": 1.2113008499145508,
"accuracy": 0.8125,
"epoch": 0.9175,
"step": 367
},
{
"epoch": 0.92,
"grad_norm": 9.441286087036133,
"learning_rate": 4.210526315789474e-07,
"loss": 0.3664,
"step": 368
},
{
"Batch Mean": 1.2161517143249512,
"accuracy": 0.796875,
"epoch": 0.92,
"step": 368
},
{
"epoch": 0.9225,
"grad_norm": 9.140300750732422,
"learning_rate": 4.078947368421053e-07,
"loss": 0.394,
"step": 369
},
{
"Batch Mean": 1.5785281658172607,
"accuracy": 0.7890625,
"epoch": 0.9225,
"step": 369
},
{
"epoch": 0.925,
"grad_norm": 10.756903648376465,
"learning_rate": 3.9473684210526315e-07,
"loss": 0.4119,
"step": 370
},
{
"Batch Mean": 0.9747679233551025,
"accuracy": 0.796875,
"epoch": 0.925,
"step": 370
},
{
"epoch": 0.9275,
"grad_norm": 10.054790496826172,
"learning_rate": 3.815789473684211e-07,
"loss": 0.4407,
"step": 371
},
{
"Batch Mean": 1.2581335306167603,
"accuracy": 0.8515625,
"epoch": 0.9275,
"step": 371
},
{
"epoch": 0.93,
"grad_norm": 9.812691688537598,
"learning_rate": 3.6842105263157896e-07,
"loss": 0.3646,
"step": 372
},
{
"Batch Mean": 0.8661596775054932,
"accuracy": 0.8203125,
"epoch": 0.93,
"step": 372
},
{
"epoch": 0.9325,
"grad_norm": 9.156208992004395,
"learning_rate": 3.5526315789473687e-07,
"loss": 0.3699,
"step": 373
},
{
"Batch Mean": 1.1736958026885986,
"accuracy": 0.8203125,
"epoch": 0.9325,
"step": 373
},
{
"epoch": 0.935,
"grad_norm": 9.179408073425293,
"learning_rate": 3.421052631578948e-07,
"loss": 0.3249,
"step": 374
},
{
"Batch Mean": 1.537584662437439,
"accuracy": 0.8125,
"epoch": 0.935,
"step": 374
},
{
"epoch": 0.9375,
"grad_norm": 10.745928764343262,
"learning_rate": 3.2894736842105264e-07,
"loss": 0.4165,
"step": 375
},
{
"Batch Mean": 1.1856932640075684,
"accuracy": 0.828125,
"epoch": 0.9375,
"step": 375
},
{
"epoch": 0.94,
"grad_norm": 10.173683166503906,
"learning_rate": 3.1578947368421055e-07,
"loss": 0.42,
"step": 376
},
{
"Batch Mean": 1.2718141078948975,
"accuracy": 0.7890625,
"epoch": 0.94,
"step": 376
},
{
"epoch": 0.9425,
"grad_norm": 9.606656074523926,
"learning_rate": 3.0263157894736846e-07,
"loss": 0.41,
"step": 377
},
{
"Batch Mean": 1.3097079992294312,
"accuracy": 0.8125,
"epoch": 0.9425,
"step": 377
},
{
"epoch": 0.945,
"grad_norm": 9.43652629852295,
"learning_rate": 2.894736842105263e-07,
"loss": 0.3783,
"step": 378
},
{
"Batch Mean": 1.068852186203003,
"accuracy": 0.765625,
"epoch": 0.945,
"step": 378
},
{
"epoch": 0.9475,
"grad_norm": 11.245665550231934,
"learning_rate": 2.763157894736842e-07,
"loss": 0.4687,
"step": 379
},
{
"Batch Mean": 1.3054351806640625,
"accuracy": 0.8046875,
"epoch": 0.9475,
"step": 379
},
{
"epoch": 0.95,
"grad_norm": 9.732074737548828,
"learning_rate": 2.6315789473684213e-07,
"loss": 0.3428,
"step": 380
},
{
"Batch Mean": 1.3510671854019165,
"accuracy": 0.8515625,
"epoch": 0.95,
"step": 380
},
{
"epoch": 0.9525,
"grad_norm": 8.85507869720459,
"learning_rate": 2.5000000000000004e-07,
"loss": 0.3613,
"step": 381
},
{
"Batch Mean": 1.2882790565490723,
"accuracy": 0.7734375,
"epoch": 0.9525,
"step": 381
},
{
"epoch": 0.955,
"grad_norm": 11.334850311279297,
"learning_rate": 2.3684210526315792e-07,
"loss": 0.4325,
"step": 382
},
{
"Batch Mean": 1.6350889205932617,
"accuracy": 0.8515625,
"epoch": 0.955,
"step": 382
},
{
"epoch": 0.9575,
"grad_norm": 8.26737117767334,
"learning_rate": 2.236842105263158e-07,
"loss": 0.3203,
"step": 383
},
{
"Batch Mean": 1.1832478046417236,
"accuracy": 0.8203125,
"epoch": 0.9575,
"step": 383
},
{
"epoch": 0.96,
"grad_norm": 9.144224166870117,
"learning_rate": 2.105263157894737e-07,
"loss": 0.3829,
"step": 384
},
{
"Batch Mean": 1.2896432876586914,
"accuracy": 0.734375,
"epoch": 0.96,
"step": 384
},
{
"epoch": 0.9625,
"grad_norm": 9.615970611572266,
"learning_rate": 1.9736842105263157e-07,
"loss": 0.4124,
"step": 385
},
{
"Batch Mean": 1.0332982540130615,
"accuracy": 0.78125,
"epoch": 0.9625,
"step": 385
},
{
"epoch": 0.965,
"grad_norm": 11.348684310913086,
"learning_rate": 1.8421052631578948e-07,
"loss": 0.4438,
"step": 386
},
{
"Batch Mean": 1.2439241409301758,
"accuracy": 0.8203125,
"epoch": 0.965,
"step": 386
},
{
"epoch": 0.9675,
"grad_norm": 10.433943748474121,
"learning_rate": 1.710526315789474e-07,
"loss": 0.4151,
"step": 387
},
{
"Batch Mean": 1.1753612756729126,
"accuracy": 0.8828125,
"epoch": 0.9675,
"step": 387
},
{
"epoch": 0.97,
"grad_norm": 8.631200790405273,
"learning_rate": 1.5789473684210527e-07,
"loss": 0.3213,
"step": 388
},
{
"Batch Mean": 1.1260321140289307,
"accuracy": 0.8828125,
"epoch": 0.97,
"step": 388
},
{
"epoch": 0.9725,
"grad_norm": 8.61961555480957,
"learning_rate": 1.4473684210526316e-07,
"loss": 0.3097,
"step": 389
},
{
"Batch Mean": 1.1978437900543213,
"accuracy": 0.796875,
"epoch": 0.9725,
"step": 389
},
{
"epoch": 0.975,
"grad_norm": 9.643059730529785,
"learning_rate": 1.3157894736842107e-07,
"loss": 0.4076,
"step": 390
},
{
"Batch Mean": 1.5045323371887207,
"accuracy": 0.84375,
"epoch": 0.975,
"step": 390
},
{
"epoch": 0.9775,
"grad_norm": 8.629721641540527,
"learning_rate": 1.1842105263157896e-07,
"loss": 0.3637,
"step": 391
},
{
"Batch Mean": 1.3541901111602783,
"accuracy": 0.8515625,
"epoch": 0.9775,
"step": 391
},
{
"epoch": 0.98,
"grad_norm": 8.126119613647461,
"learning_rate": 1.0526315789473685e-07,
"loss": 0.3126,
"step": 392
},
{
"Batch Mean": 1.2378711700439453,
"accuracy": 0.78125,
"epoch": 0.98,
"step": 392
},
{
"epoch": 0.9825,
"grad_norm": 11.107322692871094,
"learning_rate": 9.210526315789474e-08,
"loss": 0.482,
"step": 393
},
{
"Batch Mean": 1.0721631050109863,
"accuracy": 0.796875,
"epoch": 0.9825,
"step": 393
},
{
"epoch": 0.985,
"grad_norm": 11.249762535095215,
"learning_rate": 7.894736842105264e-08,
"loss": 0.4666,
"step": 394
},
{
"Batch Mean": 1.3150776624679565,
"accuracy": 0.796875,
"epoch": 0.985,
"step": 394
},
{
"epoch": 0.9875,
"grad_norm": 9.827174186706543,
"learning_rate": 6.578947368421053e-08,
"loss": 0.3992,
"step": 395
},
{
"Batch Mean": 1.5467689037322998,
"accuracy": 0.8125,
"epoch": 0.9875,
"step": 395
},
{
"epoch": 0.99,
"grad_norm": 10.23254108428955,
"learning_rate": 5.263157894736842e-08,
"loss": 0.4113,
"step": 396
},
{
"Batch Mean": 1.1395454406738281,
"accuracy": 0.8125,
"epoch": 0.99,
"step": 396
},
{
"epoch": 0.9925,
"grad_norm": 10.58552074432373,
"learning_rate": 3.947368421052632e-08,
"loss": 0.4247,
"step": 397
},
{
"Batch Mean": 1.0851218700408936,
"accuracy": 0.875,
"epoch": 0.9925,
"step": 397
},
{
"epoch": 0.995,
"grad_norm": 8.150121688842773,
"learning_rate": 2.631578947368421e-08,
"loss": 0.3215,
"step": 398
},
{
"Batch Mean": 1.3310359716415405,
"accuracy": 0.8046875,
"epoch": 0.995,
"step": 398
},
{
"epoch": 0.9975,
"grad_norm": 9.32239055633545,
"learning_rate": 1.3157894736842106e-08,
"loss": 0.4072,
"step": 399
},
{
"Batch Mean": 1.4866905212402344,
"accuracy": 0.8359375,
"epoch": 0.9975,
"step": 399
},
{
"epoch": 1.0,
"grad_norm": 9.590243339538574,
"learning_rate": 0.0,
"loss": 0.4157,
"step": 400
}
],
"logging_steps": 1,
"max_steps": 400,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 80,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}