FinGEITje-7B-sft / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 3922,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"grad_norm": 1.046875,
"learning_rate": 5.089058524173028e-07,
"loss": 1.4534,
"step": 1
},
{
"epoch": 0.0,
"grad_norm": 0.9609375,
"learning_rate": 2.544529262086514e-06,
"loss": 1.2906,
"step": 5
},
{
"epoch": 0.0,
"grad_norm": 0.90625,
"learning_rate": 5.089058524173028e-06,
"loss": 1.253,
"step": 10
},
{
"epoch": 0.0,
"grad_norm": 0.78515625,
"learning_rate": 7.633587786259543e-06,
"loss": 1.2664,
"step": 15
},
{
"epoch": 0.01,
"grad_norm": 0.59375,
"learning_rate": 1.0178117048346055e-05,
"loss": 1.1842,
"step": 20
},
{
"epoch": 0.01,
"grad_norm": 0.59765625,
"learning_rate": 1.2722646310432571e-05,
"loss": 1.1703,
"step": 25
},
{
"epoch": 0.01,
"grad_norm": 0.5625,
"learning_rate": 1.5267175572519086e-05,
"loss": 1.1021,
"step": 30
},
{
"epoch": 0.01,
"grad_norm": 0.458984375,
"learning_rate": 1.78117048346056e-05,
"loss": 1.05,
"step": 35
},
{
"epoch": 0.01,
"grad_norm": 0.416015625,
"learning_rate": 2.035623409669211e-05,
"loss": 0.9612,
"step": 40
},
{
"epoch": 0.01,
"grad_norm": 0.35546875,
"learning_rate": 2.2900763358778628e-05,
"loss": 0.9693,
"step": 45
},
{
"epoch": 0.01,
"grad_norm": 0.3125,
"learning_rate": 2.5445292620865142e-05,
"loss": 0.9687,
"step": 50
},
{
"epoch": 0.01,
"grad_norm": 0.341796875,
"learning_rate": 2.7989821882951656e-05,
"loss": 0.8825,
"step": 55
},
{
"epoch": 0.02,
"grad_norm": 0.3203125,
"learning_rate": 3.053435114503817e-05,
"loss": 0.8668,
"step": 60
},
{
"epoch": 0.02,
"grad_norm": 0.29296875,
"learning_rate": 3.307888040712468e-05,
"loss": 0.8335,
"step": 65
},
{
"epoch": 0.02,
"grad_norm": 0.318359375,
"learning_rate": 3.56234096692112e-05,
"loss": 0.8146,
"step": 70
},
{
"epoch": 0.02,
"grad_norm": 0.3046875,
"learning_rate": 3.816793893129771e-05,
"loss": 0.8094,
"step": 75
},
{
"epoch": 0.02,
"grad_norm": 0.33984375,
"learning_rate": 4.071246819338422e-05,
"loss": 0.7349,
"step": 80
},
{
"epoch": 0.02,
"grad_norm": 0.3828125,
"learning_rate": 4.325699745547074e-05,
"loss": 0.7013,
"step": 85
},
{
"epoch": 0.02,
"grad_norm": 0.375,
"learning_rate": 4.5801526717557256e-05,
"loss": 0.7324,
"step": 90
},
{
"epoch": 0.02,
"grad_norm": 0.30859375,
"learning_rate": 4.8346055979643766e-05,
"loss": 0.7071,
"step": 95
},
{
"epoch": 0.03,
"grad_norm": 0.306640625,
"learning_rate": 5.0890585241730283e-05,
"loss": 0.7077,
"step": 100
},
{
"epoch": 0.03,
"grad_norm": 0.287109375,
"learning_rate": 5.3435114503816794e-05,
"loss": 0.7136,
"step": 105
},
{
"epoch": 0.03,
"grad_norm": 0.28125,
"learning_rate": 5.597964376590331e-05,
"loss": 0.6851,
"step": 110
},
{
"epoch": 0.03,
"grad_norm": 0.30078125,
"learning_rate": 5.852417302798983e-05,
"loss": 0.6655,
"step": 115
},
{
"epoch": 0.03,
"grad_norm": 0.30859375,
"learning_rate": 6.106870229007635e-05,
"loss": 0.7056,
"step": 120
},
{
"epoch": 0.03,
"grad_norm": 0.271484375,
"learning_rate": 6.361323155216285e-05,
"loss": 0.6643,
"step": 125
},
{
"epoch": 0.03,
"grad_norm": 0.2890625,
"learning_rate": 6.615776081424937e-05,
"loss": 0.6525,
"step": 130
},
{
"epoch": 0.03,
"grad_norm": 0.26953125,
"learning_rate": 6.870229007633588e-05,
"loss": 0.6733,
"step": 135
},
{
"epoch": 0.04,
"grad_norm": 0.287109375,
"learning_rate": 7.12468193384224e-05,
"loss": 0.6742,
"step": 140
},
{
"epoch": 0.04,
"grad_norm": 0.310546875,
"learning_rate": 7.379134860050892e-05,
"loss": 0.6985,
"step": 145
},
{
"epoch": 0.04,
"grad_norm": 0.3125,
"learning_rate": 7.633587786259542e-05,
"loss": 0.61,
"step": 150
},
{
"epoch": 0.04,
"grad_norm": 0.326171875,
"learning_rate": 7.888040712468194e-05,
"loss": 0.6655,
"step": 155
},
{
"epoch": 0.04,
"grad_norm": 0.271484375,
"learning_rate": 8.142493638676844e-05,
"loss": 0.6217,
"step": 160
},
{
"epoch": 0.04,
"grad_norm": 0.2890625,
"learning_rate": 8.396946564885496e-05,
"loss": 0.5982,
"step": 165
},
{
"epoch": 0.04,
"grad_norm": 0.306640625,
"learning_rate": 8.651399491094148e-05,
"loss": 0.6715,
"step": 170
},
{
"epoch": 0.04,
"grad_norm": 0.314453125,
"learning_rate": 8.9058524173028e-05,
"loss": 0.6818,
"step": 175
},
{
"epoch": 0.05,
"grad_norm": 0.296875,
"learning_rate": 9.160305343511451e-05,
"loss": 0.7241,
"step": 180
},
{
"epoch": 0.05,
"grad_norm": 0.302734375,
"learning_rate": 9.414758269720102e-05,
"loss": 0.6986,
"step": 185
},
{
"epoch": 0.05,
"grad_norm": 0.330078125,
"learning_rate": 9.669211195928753e-05,
"loss": 0.6512,
"step": 190
},
{
"epoch": 0.05,
"grad_norm": 0.259765625,
"learning_rate": 9.923664122137405e-05,
"loss": 0.6016,
"step": 195
},
{
"epoch": 0.05,
"grad_norm": 0.2734375,
"learning_rate": 0.00010178117048346057,
"loss": 0.6165,
"step": 200
},
{
"epoch": 0.05,
"grad_norm": 0.296875,
"learning_rate": 0.00010432569974554708,
"loss": 0.6832,
"step": 205
},
{
"epoch": 0.05,
"grad_norm": 0.294921875,
"learning_rate": 0.00010687022900763359,
"loss": 0.6087,
"step": 210
},
{
"epoch": 0.05,
"grad_norm": 0.302734375,
"learning_rate": 0.00010941475826972009,
"loss": 0.6266,
"step": 215
},
{
"epoch": 0.06,
"grad_norm": 0.310546875,
"learning_rate": 0.00011195928753180662,
"loss": 0.6328,
"step": 220
},
{
"epoch": 0.06,
"grad_norm": 0.294921875,
"learning_rate": 0.00011450381679389313,
"loss": 0.6222,
"step": 225
},
{
"epoch": 0.06,
"grad_norm": 0.32421875,
"learning_rate": 0.00011704834605597966,
"loss": 0.6983,
"step": 230
},
{
"epoch": 0.06,
"grad_norm": 0.318359375,
"learning_rate": 0.00011959287531806616,
"loss": 0.5978,
"step": 235
},
{
"epoch": 0.06,
"grad_norm": 0.259765625,
"learning_rate": 0.0001221374045801527,
"loss": 0.6174,
"step": 240
},
{
"epoch": 0.06,
"grad_norm": 0.28515625,
"learning_rate": 0.00012468193384223918,
"loss": 0.6544,
"step": 245
},
{
"epoch": 0.06,
"grad_norm": 0.314453125,
"learning_rate": 0.0001272264631043257,
"loss": 0.5861,
"step": 250
},
{
"epoch": 0.07,
"grad_norm": 0.318359375,
"learning_rate": 0.00012977099236641222,
"loss": 0.6288,
"step": 255
},
{
"epoch": 0.07,
"grad_norm": 0.326171875,
"learning_rate": 0.00013231552162849873,
"loss": 0.6122,
"step": 260
},
{
"epoch": 0.07,
"grad_norm": 0.294921875,
"learning_rate": 0.00013486005089058525,
"loss": 0.6029,
"step": 265
},
{
"epoch": 0.07,
"grad_norm": 0.259765625,
"learning_rate": 0.00013740458015267177,
"loss": 0.5742,
"step": 270
},
{
"epoch": 0.07,
"grad_norm": 0.3359375,
"learning_rate": 0.00013994910941475828,
"loss": 0.6598,
"step": 275
},
{
"epoch": 0.07,
"grad_norm": 0.31640625,
"learning_rate": 0.0001424936386768448,
"loss": 0.6627,
"step": 280
},
{
"epoch": 0.07,
"grad_norm": 0.3359375,
"learning_rate": 0.0001450381679389313,
"loss": 0.6372,
"step": 285
},
{
"epoch": 0.07,
"grad_norm": 0.2578125,
"learning_rate": 0.00014758269720101784,
"loss": 0.6057,
"step": 290
},
{
"epoch": 0.08,
"grad_norm": 0.263671875,
"learning_rate": 0.00015012722646310433,
"loss": 0.5944,
"step": 295
},
{
"epoch": 0.08,
"grad_norm": 0.2578125,
"learning_rate": 0.00015267175572519084,
"loss": 0.5982,
"step": 300
},
{
"epoch": 0.08,
"grad_norm": 0.2890625,
"learning_rate": 0.00015521628498727736,
"loss": 0.5889,
"step": 305
},
{
"epoch": 0.08,
"grad_norm": 0.255859375,
"learning_rate": 0.00015776081424936388,
"loss": 0.6026,
"step": 310
},
{
"epoch": 0.08,
"grad_norm": 0.2890625,
"learning_rate": 0.0001603053435114504,
"loss": 0.5539,
"step": 315
},
{
"epoch": 0.08,
"grad_norm": 0.255859375,
"learning_rate": 0.00016284987277353689,
"loss": 0.6334,
"step": 320
},
{
"epoch": 0.08,
"grad_norm": 0.310546875,
"learning_rate": 0.00016539440203562343,
"loss": 0.612,
"step": 325
},
{
"epoch": 0.08,
"grad_norm": 0.212890625,
"learning_rate": 0.00016793893129770992,
"loss": 0.6216,
"step": 330
},
{
"epoch": 0.09,
"grad_norm": 0.25390625,
"learning_rate": 0.00017048346055979644,
"loss": 0.6328,
"step": 335
},
{
"epoch": 0.09,
"grad_norm": 0.2412109375,
"learning_rate": 0.00017302798982188295,
"loss": 0.5892,
"step": 340
},
{
"epoch": 0.09,
"grad_norm": 0.265625,
"learning_rate": 0.00017557251908396947,
"loss": 0.5984,
"step": 345
},
{
"epoch": 0.09,
"grad_norm": 0.275390625,
"learning_rate": 0.000178117048346056,
"loss": 0.6217,
"step": 350
},
{
"epoch": 0.09,
"grad_norm": 0.2734375,
"learning_rate": 0.0001806615776081425,
"loss": 0.5835,
"step": 355
},
{
"epoch": 0.09,
"grad_norm": 0.2890625,
"learning_rate": 0.00018320610687022902,
"loss": 0.6361,
"step": 360
},
{
"epoch": 0.09,
"grad_norm": 0.28515625,
"learning_rate": 0.00018575063613231554,
"loss": 0.5791,
"step": 365
},
{
"epoch": 0.09,
"grad_norm": 0.2734375,
"learning_rate": 0.00018829516539440203,
"loss": 0.6529,
"step": 370
},
{
"epoch": 0.1,
"grad_norm": 0.240234375,
"learning_rate": 0.00019083969465648857,
"loss": 0.6094,
"step": 375
},
{
"epoch": 0.1,
"grad_norm": 0.23828125,
"learning_rate": 0.00019338422391857506,
"loss": 0.5728,
"step": 380
},
{
"epoch": 0.1,
"grad_norm": 0.259765625,
"learning_rate": 0.00019592875318066158,
"loss": 0.6022,
"step": 385
},
{
"epoch": 0.1,
"grad_norm": 0.279296875,
"learning_rate": 0.0001984732824427481,
"loss": 0.5601,
"step": 390
},
{
"epoch": 0.1,
"grad_norm": 0.27734375,
"learning_rate": 0.00019999984150107918,
"loss": 0.6034,
"step": 395
},
{
"epoch": 0.1,
"grad_norm": 0.271484375,
"learning_rate": 0.00019999805839398995,
"loss": 0.6251,
"step": 400
},
{
"epoch": 0.1,
"grad_norm": 0.283203125,
"learning_rate": 0.0001999942940916057,
"loss": 0.6018,
"step": 405
},
{
"epoch": 0.1,
"grad_norm": 0.197265625,
"learning_rate": 0.0001999885486685061,
"loss": 0.5812,
"step": 410
},
{
"epoch": 0.11,
"grad_norm": 0.259765625,
"learning_rate": 0.00019998082223852138,
"loss": 0.6252,
"step": 415
},
{
"epoch": 0.11,
"grad_norm": 0.255859375,
"learning_rate": 0.00019997111495473018,
"loss": 0.592,
"step": 420
},
{
"epoch": 0.11,
"grad_norm": 0.28125,
"learning_rate": 0.0001999594270094565,
"loss": 0.5532,
"step": 425
},
{
"epoch": 0.11,
"grad_norm": 0.314453125,
"learning_rate": 0.00019994575863426585,
"loss": 0.5884,
"step": 430
},
{
"epoch": 0.11,
"grad_norm": 0.267578125,
"learning_rate": 0.0001999301100999607,
"loss": 0.634,
"step": 435
},
{
"epoch": 0.11,
"grad_norm": 0.251953125,
"learning_rate": 0.00019991248171657508,
"loss": 0.6146,
"step": 440
},
{
"epoch": 0.11,
"grad_norm": 0.26953125,
"learning_rate": 0.00019989287383336853,
"loss": 0.5627,
"step": 445
},
{
"epoch": 0.11,
"grad_norm": 0.228515625,
"learning_rate": 0.00019987128683881896,
"loss": 0.529,
"step": 450
},
{
"epoch": 0.12,
"grad_norm": 0.267578125,
"learning_rate": 0.00019984772116061523,
"loss": 0.6148,
"step": 455
},
{
"epoch": 0.12,
"grad_norm": 0.255859375,
"learning_rate": 0.00019982217726564856,
"loss": 0.5908,
"step": 460
},
{
"epoch": 0.12,
"grad_norm": 0.27734375,
"learning_rate": 0.00019979465566000317,
"loss": 0.5796,
"step": 465
},
{
"epoch": 0.12,
"grad_norm": 0.2109375,
"learning_rate": 0.00019976515688894638,
"loss": 0.6234,
"step": 470
},
{
"epoch": 0.12,
"grad_norm": 0.2578125,
"learning_rate": 0.0001997336815369179,
"loss": 0.6051,
"step": 475
},
{
"epoch": 0.12,
"grad_norm": 0.279296875,
"learning_rate": 0.00019970023022751793,
"loss": 0.5882,
"step": 480
},
{
"epoch": 0.12,
"grad_norm": 0.25390625,
"learning_rate": 0.00019966480362349515,
"loss": 0.5858,
"step": 485
},
{
"epoch": 0.12,
"grad_norm": 0.2578125,
"learning_rate": 0.00019962740242673342,
"loss": 0.5626,
"step": 490
},
{
"epoch": 0.13,
"grad_norm": 0.265625,
"learning_rate": 0.00019958802737823786,
"loss": 0.5921,
"step": 495
},
{
"epoch": 0.13,
"grad_norm": 0.251953125,
"learning_rate": 0.0001995466792581202,
"loss": 0.5472,
"step": 500
},
{
"epoch": 0.13,
"grad_norm": 0.255859375,
"learning_rate": 0.00019950335888558338,
"loss": 0.5575,
"step": 505
},
{
"epoch": 0.13,
"grad_norm": 0.255859375,
"learning_rate": 0.00019945806711890526,
"loss": 0.5166,
"step": 510
},
{
"epoch": 0.13,
"grad_norm": 0.302734375,
"learning_rate": 0.00019941080485542156,
"loss": 0.6087,
"step": 515
},
{
"epoch": 0.13,
"grad_norm": 0.2734375,
"learning_rate": 0.00019936157303150823,
"loss": 0.562,
"step": 520
},
{
"epoch": 0.13,
"grad_norm": 0.318359375,
"learning_rate": 0.0001993103726225628,
"loss": 0.6184,
"step": 525
},
{
"epoch": 0.14,
"grad_norm": 0.25390625,
"learning_rate": 0.00019925720464298511,
"loss": 0.6244,
"step": 530
},
{
"epoch": 0.14,
"grad_norm": 0.2392578125,
"learning_rate": 0.00019920207014615703,
"loss": 0.5759,
"step": 535
},
{
"epoch": 0.14,
"grad_norm": 0.30078125,
"learning_rate": 0.00019914497022442186,
"loss": 0.5807,
"step": 540
},
{
"epoch": 0.14,
"grad_norm": 0.267578125,
"learning_rate": 0.0001990859060090625,
"loss": 0.5753,
"step": 545
},
{
"epoch": 0.14,
"grad_norm": 0.287109375,
"learning_rate": 0.00019902487867027918,
"loss": 0.562,
"step": 550
},
{
"epoch": 0.14,
"grad_norm": 0.306640625,
"learning_rate": 0.00019896188941716613,
"loss": 0.55,
"step": 555
},
{
"epoch": 0.14,
"grad_norm": 0.2490234375,
"learning_rate": 0.00019889693949768766,
"loss": 0.564,
"step": 560
},
{
"epoch": 0.14,
"grad_norm": 0.2236328125,
"learning_rate": 0.00019883003019865357,
"loss": 0.5283,
"step": 565
},
{
"epoch": 0.15,
"grad_norm": 0.1982421875,
"learning_rate": 0.00019876116284569338,
"loss": 0.5591,
"step": 570
},
{
"epoch": 0.15,
"grad_norm": 0.240234375,
"learning_rate": 0.00019869033880323044,
"loss": 0.5353,
"step": 575
},
{
"epoch": 0.15,
"grad_norm": 0.27734375,
"learning_rate": 0.00019861755947445456,
"loss": 0.5084,
"step": 580
},
{
"epoch": 0.15,
"grad_norm": 0.3125,
"learning_rate": 0.00019854282630129438,
"loss": 0.5518,
"step": 585
},
{
"epoch": 0.15,
"grad_norm": 0.23046875,
"learning_rate": 0.0001984661407643887,
"loss": 0.5609,
"step": 590
},
{
"epoch": 0.15,
"grad_norm": 0.2890625,
"learning_rate": 0.00019838750438305733,
"loss": 0.5606,
"step": 595
},
{
"epoch": 0.15,
"grad_norm": 0.306640625,
"learning_rate": 0.00019830691871527078,
"loss": 0.5606,
"step": 600
},
{
"epoch": 0.15,
"grad_norm": 0.234375,
"learning_rate": 0.00019822438535761953,
"loss": 0.5632,
"step": 605
},
{
"epoch": 0.16,
"grad_norm": 0.283203125,
"learning_rate": 0.00019813990594528234,
"loss": 0.5923,
"step": 610
},
{
"epoch": 0.16,
"grad_norm": 0.3125,
"learning_rate": 0.00019805348215199377,
"loss": 0.5813,
"step": 615
},
{
"epoch": 0.16,
"grad_norm": 0.275390625,
"learning_rate": 0.0001979651156900113,
"loss": 0.5534,
"step": 620
},
{
"epoch": 0.16,
"grad_norm": 0.28515625,
"learning_rate": 0.00019787480831008102,
"loss": 0.4909,
"step": 625
},
{
"epoch": 0.16,
"grad_norm": 0.234375,
"learning_rate": 0.00019778256180140327,
"loss": 0.5642,
"step": 630
},
{
"epoch": 0.16,
"grad_norm": 0.271484375,
"learning_rate": 0.00019768837799159704,
"loss": 0.6093,
"step": 635
},
{
"epoch": 0.16,
"grad_norm": 0.2470703125,
"learning_rate": 0.00019759225874666373,
"loss": 0.4626,
"step": 640
},
{
"epoch": 0.16,
"grad_norm": 0.259765625,
"learning_rate": 0.00019749420597095033,
"loss": 0.5873,
"step": 645
},
{
"epoch": 0.17,
"grad_norm": 0.2734375,
"learning_rate": 0.00019739422160711147,
"loss": 0.5864,
"step": 650
},
{
"epoch": 0.17,
"grad_norm": 0.2890625,
"learning_rate": 0.0001972923076360712,
"loss": 0.5286,
"step": 655
},
{
"epoch": 0.17,
"grad_norm": 0.359375,
"learning_rate": 0.00019718846607698343,
"loss": 0.5755,
"step": 660
},
{
"epoch": 0.17,
"grad_norm": 0.2490234375,
"learning_rate": 0.00019708269898719225,
"loss": 0.53,
"step": 665
},
{
"epoch": 0.17,
"grad_norm": 0.30859375,
"learning_rate": 0.00019697500846219094,
"loss": 0.5735,
"step": 670
},
{
"epoch": 0.17,
"grad_norm": 0.23828125,
"learning_rate": 0.00019686539663558053,
"loss": 0.5132,
"step": 675
},
{
"epoch": 0.17,
"grad_norm": 0.271484375,
"learning_rate": 0.00019675386567902757,
"loss": 0.5416,
"step": 680
},
{
"epoch": 0.17,
"grad_norm": 0.30078125,
"learning_rate": 0.00019664041780222103,
"loss": 0.6035,
"step": 685
},
{
"epoch": 0.18,
"grad_norm": 0.265625,
"learning_rate": 0.00019652505525282848,
"loss": 0.5411,
"step": 690
},
{
"epoch": 0.18,
"grad_norm": 0.2578125,
"learning_rate": 0.00019640778031645176,
"loss": 0.5178,
"step": 695
},
{
"epoch": 0.18,
"grad_norm": 0.287109375,
"learning_rate": 0.0001962885953165815,
"loss": 0.5137,
"step": 700
},
{
"epoch": 0.18,
"grad_norm": 0.271484375,
"learning_rate": 0.0001961675026145511,
"loss": 0.5723,
"step": 705
},
{
"epoch": 0.18,
"grad_norm": 0.271484375,
"learning_rate": 0.00019604450460949013,
"loss": 0.5401,
"step": 710
},
{
"epoch": 0.18,
"grad_norm": 0.26171875,
"learning_rate": 0.00019591960373827657,
"loss": 0.5215,
"step": 715
},
{
"epoch": 0.18,
"grad_norm": 0.29296875,
"learning_rate": 0.00019579280247548865,
"loss": 0.5151,
"step": 720
},
{
"epoch": 0.18,
"grad_norm": 0.279296875,
"learning_rate": 0.00019566410333335578,
"loss": 0.5204,
"step": 725
},
{
"epoch": 0.19,
"grad_norm": 0.2890625,
"learning_rate": 0.00019553350886170883,
"loss": 0.5436,
"step": 730
},
{
"epoch": 0.19,
"grad_norm": 0.2197265625,
"learning_rate": 0.00019540102164792956,
"loss": 0.5291,
"step": 735
},
{
"epoch": 0.19,
"grad_norm": 0.279296875,
"learning_rate": 0.0001952666443168994,
"loss": 0.5917,
"step": 740
},
{
"epoch": 0.19,
"grad_norm": 0.298828125,
"learning_rate": 0.0001951303795309474,
"loss": 0.5848,
"step": 745
},
{
"epoch": 0.19,
"grad_norm": 0.3046875,
"learning_rate": 0.0001949922299897975,
"loss": 0.5621,
"step": 750
},
{
"epoch": 0.19,
"grad_norm": 0.20703125,
"learning_rate": 0.00019485219843051502,
"loss": 0.5872,
"step": 755
},
{
"epoch": 0.19,
"grad_norm": 0.28515625,
"learning_rate": 0.00019471028762745252,
"loss": 0.5723,
"step": 760
},
{
"epoch": 0.2,
"grad_norm": 0.259765625,
"learning_rate": 0.00019456650039219474,
"loss": 0.6229,
"step": 765
},
{
"epoch": 0.2,
"grad_norm": 0.310546875,
"learning_rate": 0.00019442083957350295,
"loss": 0.5566,
"step": 770
},
{
"epoch": 0.2,
"grad_norm": 0.25390625,
"learning_rate": 0.00019427330805725843,
"loss": 0.4698,
"step": 775
},
{
"epoch": 0.2,
"grad_norm": 0.302734375,
"learning_rate": 0.00019412390876640543,
"loss": 0.52,
"step": 780
},
{
"epoch": 0.2,
"grad_norm": 0.318359375,
"learning_rate": 0.00019397264466089313,
"loss": 0.4791,
"step": 785
},
{
"epoch": 0.2,
"grad_norm": 0.359375,
"learning_rate": 0.00019381951873761707,
"loss": 0.5702,
"step": 790
},
{
"epoch": 0.2,
"grad_norm": 0.259765625,
"learning_rate": 0.0001936645340303597,
"loss": 0.5061,
"step": 795
},
{
"epoch": 0.2,
"grad_norm": 0.294921875,
"learning_rate": 0.00019350769360973038,
"loss": 0.5498,
"step": 800
},
{
"epoch": 0.21,
"grad_norm": 0.2470703125,
"learning_rate": 0.0001933490005831045,
"loss": 0.4755,
"step": 805
},
{
"epoch": 0.21,
"grad_norm": 0.30859375,
"learning_rate": 0.00019318845809456186,
"loss": 0.5354,
"step": 810
},
{
"epoch": 0.21,
"grad_norm": 0.3125,
"learning_rate": 0.00019302606932482448,
"loss": 0.5161,
"step": 815
},
{
"epoch": 0.21,
"grad_norm": 0.263671875,
"learning_rate": 0.00019286183749119346,
"loss": 0.4965,
"step": 820
},
{
"epoch": 0.21,
"grad_norm": 0.24609375,
"learning_rate": 0.00019269576584748536,
"loss": 0.5485,
"step": 825
},
{
"epoch": 0.21,
"grad_norm": 0.291015625,
"learning_rate": 0.0001925278576839676,
"loss": 0.4787,
"step": 830
},
{
"epoch": 0.21,
"grad_norm": 0.291015625,
"learning_rate": 0.00019235811632729353,
"loss": 0.5091,
"step": 835
},
{
"epoch": 0.21,
"grad_norm": 0.267578125,
"learning_rate": 0.00019218654514043608,
"loss": 0.5315,
"step": 840
},
{
"epoch": 0.22,
"grad_norm": 0.275390625,
"learning_rate": 0.0001920131475226216,
"loss": 0.5157,
"step": 845
},
{
"epoch": 0.22,
"grad_norm": 0.28515625,
"learning_rate": 0.00019183792690926222,
"loss": 0.5745,
"step": 850
},
{
"epoch": 0.22,
"grad_norm": 0.33984375,
"learning_rate": 0.00019166088677188793,
"loss": 0.5265,
"step": 855
},
{
"epoch": 0.22,
"grad_norm": 0.29296875,
"learning_rate": 0.00019148203061807766,
"loss": 0.5473,
"step": 860
},
{
"epoch": 0.22,
"grad_norm": 0.28125,
"learning_rate": 0.00019130136199138994,
"loss": 0.4799,
"step": 865
},
{
"epoch": 0.22,
"grad_norm": 0.2734375,
"learning_rate": 0.00019111888447129256,
"loss": 0.552,
"step": 870
},
{
"epoch": 0.22,
"grad_norm": 0.2412109375,
"learning_rate": 0.00019093460167309177,
"loss": 0.4779,
"step": 875
},
{
"epoch": 0.22,
"grad_norm": 0.34765625,
"learning_rate": 0.00019074851724786064,
"loss": 0.5315,
"step": 880
},
{
"epoch": 0.23,
"grad_norm": 0.302734375,
"learning_rate": 0.00019056063488236661,
"loss": 0.4807,
"step": 885
},
{
"epoch": 0.23,
"grad_norm": 0.322265625,
"learning_rate": 0.00019037095829899849,
"loss": 0.5849,
"step": 890
},
{
"epoch": 0.23,
"grad_norm": 0.255859375,
"learning_rate": 0.00019017949125569285,
"loss": 0.5343,
"step": 895
},
{
"epoch": 0.23,
"grad_norm": 0.2578125,
"learning_rate": 0.00018998623754585935,
"loss": 0.5233,
"step": 900
},
{
"epoch": 0.23,
"grad_norm": 0.2490234375,
"learning_rate": 0.00018979120099830578,
"loss": 0.4892,
"step": 905
},
{
"epoch": 0.23,
"grad_norm": 0.287109375,
"learning_rate": 0.00018959438547716203,
"loss": 0.5311,
"step": 910
},
{
"epoch": 0.23,
"grad_norm": 0.2490234375,
"learning_rate": 0.0001893957948818037,
"loss": 0.5872,
"step": 915
},
{
"epoch": 0.23,
"grad_norm": 0.30078125,
"learning_rate": 0.00018919543314677463,
"loss": 0.5303,
"step": 920
},
{
"epoch": 0.24,
"grad_norm": 0.224609375,
"learning_rate": 0.00018899330424170926,
"loss": 0.5294,
"step": 925
},
{
"epoch": 0.24,
"grad_norm": 0.26171875,
"learning_rate": 0.00018878941217125367,
"loss": 0.5178,
"step": 930
},
{
"epoch": 0.24,
"grad_norm": 0.2451171875,
"learning_rate": 0.00018858376097498637,
"loss": 0.4638,
"step": 935
},
{
"epoch": 0.24,
"grad_norm": 0.365234375,
"learning_rate": 0.00018837635472733832,
"loss": 0.5604,
"step": 940
},
{
"epoch": 0.24,
"grad_norm": 0.3125,
"learning_rate": 0.00018816719753751213,
"loss": 0.5109,
"step": 945
},
{
"epoch": 0.24,
"grad_norm": 0.2578125,
"learning_rate": 0.00018795629354940064,
"loss": 0.5107,
"step": 950
},
{
"epoch": 0.24,
"grad_norm": 0.310546875,
"learning_rate": 0.00018774364694150488,
"loss": 0.5133,
"step": 955
},
{
"epoch": 0.24,
"grad_norm": 0.2578125,
"learning_rate": 0.00018752926192685125,
"loss": 0.4816,
"step": 960
},
{
"epoch": 0.25,
"grad_norm": 0.28125,
"learning_rate": 0.000187313142752908,
"loss": 0.5037,
"step": 965
},
{
"epoch": 0.25,
"grad_norm": 0.294921875,
"learning_rate": 0.00018709529370150124,
"loss": 0.5207,
"step": 970
},
{
"epoch": 0.25,
"grad_norm": 0.275390625,
"learning_rate": 0.0001868757190887299,
"loss": 0.4827,
"step": 975
},
{
"epoch": 0.25,
"grad_norm": 0.361328125,
"learning_rate": 0.00018665442326488033,
"loss": 0.5168,
"step": 980
},
{
"epoch": 0.25,
"grad_norm": 0.326171875,
"learning_rate": 0.0001864314106143401,
"loss": 0.5157,
"step": 985
},
{
"epoch": 0.25,
"grad_norm": 0.240234375,
"learning_rate": 0.00018620668555551116,
"loss": 0.5182,
"step": 990
},
{
"epoch": 0.25,
"grad_norm": 0.314453125,
"learning_rate": 0.00018598025254072225,
"loss": 0.5141,
"step": 995
},
{
"epoch": 0.25,
"grad_norm": 0.298828125,
"learning_rate": 0.00018575211605614066,
"loss": 0.4837,
"step": 1000
},
{
"epoch": 0.26,
"grad_norm": 0.330078125,
"learning_rate": 0.00018552228062168355,
"loss": 0.4921,
"step": 1005
},
{
"epoch": 0.26,
"grad_norm": 0.263671875,
"learning_rate": 0.00018529075079092803,
"loss": 0.4074,
"step": 1010
},
{
"epoch": 0.26,
"grad_norm": 0.2578125,
"learning_rate": 0.0001850575311510214,
"loss": 0.5471,
"step": 1015
},
{
"epoch": 0.26,
"grad_norm": 0.375,
"learning_rate": 0.00018482262632258975,
"loss": 0.552,
"step": 1020
},
{
"epoch": 0.26,
"grad_norm": 0.302734375,
"learning_rate": 0.000184586040959647,
"loss": 0.4955,
"step": 1025
},
{
"epoch": 0.26,
"grad_norm": 0.31640625,
"learning_rate": 0.00018434777974950218,
"loss": 0.5257,
"step": 1030
},
{
"epoch": 0.26,
"grad_norm": 0.318359375,
"learning_rate": 0.0001841078474126668,
"loss": 0.4816,
"step": 1035
},
{
"epoch": 0.27,
"grad_norm": 0.294921875,
"learning_rate": 0.00018386624870276138,
"loss": 0.474,
"step": 1040
},
{
"epoch": 0.27,
"grad_norm": 0.271484375,
"learning_rate": 0.00018362298840642107,
"loss": 0.4893,
"step": 1045
},
{
"epoch": 0.27,
"grad_norm": 0.2421875,
"learning_rate": 0.00018337807134320103,
"loss": 0.4894,
"step": 1050
},
{
"epoch": 0.27,
"grad_norm": 0.30859375,
"learning_rate": 0.00018313150236548082,
"loss": 0.5137,
"step": 1055
},
{
"epoch": 0.27,
"grad_norm": 0.359375,
"learning_rate": 0.00018288328635836826,
"loss": 0.4951,
"step": 1060
},
{
"epoch": 0.27,
"grad_norm": 0.28125,
"learning_rate": 0.00018263342823960269,
"loss": 0.514,
"step": 1065
},
{
"epoch": 0.27,
"grad_norm": 0.36328125,
"learning_rate": 0.0001823819329594575,
"loss": 0.5667,
"step": 1070
},
{
"epoch": 0.27,
"grad_norm": 0.298828125,
"learning_rate": 0.00018212880550064214,
"loss": 0.5274,
"step": 1075
},
{
"epoch": 0.28,
"grad_norm": 0.25,
"learning_rate": 0.0001818740508782032,
"loss": 0.5278,
"step": 1080
},
{
"epoch": 0.28,
"grad_norm": 0.34375,
"learning_rate": 0.00018161767413942537,
"loss": 0.4962,
"step": 1085
},
{
"epoch": 0.28,
"grad_norm": 0.232421875,
"learning_rate": 0.0001813596803637311,
"loss": 0.4785,
"step": 1090
},
{
"epoch": 0.28,
"grad_norm": 0.34765625,
"learning_rate": 0.00018110007466258017,
"loss": 0.5365,
"step": 1095
},
{
"epoch": 0.28,
"grad_norm": 0.298828125,
"learning_rate": 0.0001808388621793684,
"loss": 0.4773,
"step": 1100
},
{
"epoch": 0.28,
"grad_norm": 0.255859375,
"learning_rate": 0.00018057604808932576,
"loss": 0.5222,
"step": 1105
},
{
"epoch": 0.28,
"grad_norm": 0.259765625,
"learning_rate": 0.00018031163759941362,
"loss": 0.5292,
"step": 1110
},
{
"epoch": 0.28,
"grad_norm": 0.326171875,
"learning_rate": 0.00018004563594822197,
"loss": 0.5277,
"step": 1115
},
{
"epoch": 0.29,
"grad_norm": 0.263671875,
"learning_rate": 0.0001797780484058653,
"loss": 0.4917,
"step": 1120
},
{
"epoch": 0.29,
"grad_norm": 0.296875,
"learning_rate": 0.00017950888027387833,
"loss": 0.501,
"step": 1125
},
{
"epoch": 0.29,
"grad_norm": 0.33984375,
"learning_rate": 0.000179238136885111,
"loss": 0.4991,
"step": 1130
},
{
"epoch": 0.29,
"grad_norm": 0.28125,
"learning_rate": 0.00017896582360362273,
"loss": 0.4903,
"step": 1135
},
{
"epoch": 0.29,
"grad_norm": 0.251953125,
"learning_rate": 0.00017869194582457616,
"loss": 0.5216,
"step": 1140
},
{
"epoch": 0.29,
"grad_norm": 0.251953125,
"learning_rate": 0.00017841650897413035,
"loss": 0.5009,
"step": 1145
},
{
"epoch": 0.29,
"grad_norm": 0.40625,
"learning_rate": 0.0001781395185093332,
"loss": 0.56,
"step": 1150
},
{
"epoch": 0.29,
"grad_norm": 0.318359375,
"learning_rate": 0.00017786097991801328,
"loss": 0.5148,
"step": 1155
},
{
"epoch": 0.3,
"grad_norm": 0.283203125,
"learning_rate": 0.00017758089871867117,
"loss": 0.5133,
"step": 1160
},
{
"epoch": 0.3,
"grad_norm": 0.2890625,
"learning_rate": 0.00017729928046037024,
"loss": 0.5191,
"step": 1165
},
{
"epoch": 0.3,
"grad_norm": 0.279296875,
"learning_rate": 0.00017701613072262644,
"loss": 0.4322,
"step": 1170
},
{
"epoch": 0.3,
"grad_norm": 0.3125,
"learning_rate": 0.000176731455115298,
"loss": 0.5283,
"step": 1175
},
{
"epoch": 0.3,
"grad_norm": 0.392578125,
"learning_rate": 0.00017644525927847416,
"loss": 0.5175,
"step": 1180
},
{
"epoch": 0.3,
"grad_norm": 0.3125,
"learning_rate": 0.00017615754888236347,
"loss": 0.4731,
"step": 1185
},
{
"epoch": 0.3,
"grad_norm": 0.314453125,
"learning_rate": 0.00017586832962718143,
"loss": 0.4347,
"step": 1190
},
{
"epoch": 0.3,
"grad_norm": 0.337890625,
"learning_rate": 0.0001755776072430376,
"loss": 0.5024,
"step": 1195
},
{
"epoch": 0.31,
"grad_norm": 0.30078125,
"learning_rate": 0.00017528538748982198,
"loss": 0.4423,
"step": 1200
},
{
"epoch": 0.31,
"grad_norm": 0.287109375,
"learning_rate": 0.00017499167615709095,
"loss": 0.4574,
"step": 1205
},
{
"epoch": 0.31,
"grad_norm": 0.357421875,
"learning_rate": 0.0001746964790639527,
"loss": 0.4691,
"step": 1210
},
{
"epoch": 0.31,
"grad_norm": 0.259765625,
"learning_rate": 0.00017439980205895158,
"loss": 0.4838,
"step": 1215
},
{
"epoch": 0.31,
"grad_norm": 0.30078125,
"learning_rate": 0.00017410165101995267,
"loss": 0.4605,
"step": 1220
},
{
"epoch": 0.31,
"grad_norm": 0.3046875,
"learning_rate": 0.00017380203185402503,
"loss": 0.4571,
"step": 1225
},
{
"epoch": 0.31,
"grad_norm": 0.322265625,
"learning_rate": 0.00017350095049732466,
"loss": 0.5028,
"step": 1230
},
{
"epoch": 0.31,
"grad_norm": 0.263671875,
"learning_rate": 0.0001731984129149771,
"loss": 0.4862,
"step": 1235
},
{
"epoch": 0.32,
"grad_norm": 0.26953125,
"learning_rate": 0.00017289442510095906,
"loss": 0.4735,
"step": 1240
},
{
"epoch": 0.32,
"grad_norm": 0.322265625,
"learning_rate": 0.00017258899307797976,
"loss": 0.4943,
"step": 1245
},
{
"epoch": 0.32,
"grad_norm": 0.29296875,
"learning_rate": 0.00017228212289736153,
"loss": 0.4617,
"step": 1250
},
{
"epoch": 0.32,
"grad_norm": 0.26953125,
"learning_rate": 0.00017197382063892006,
"loss": 0.4763,
"step": 1255
},
{
"epoch": 0.32,
"grad_norm": 0.240234375,
"learning_rate": 0.00017166409241084374,
"loss": 0.4584,
"step": 1260
},
{
"epoch": 0.32,
"grad_norm": 0.298828125,
"learning_rate": 0.00017135294434957282,
"loss": 0.4857,
"step": 1265
},
{
"epoch": 0.32,
"grad_norm": 0.33203125,
"learning_rate": 0.00017104038261967768,
"loss": 0.4834,
"step": 1270
},
{
"epoch": 0.33,
"grad_norm": 0.33984375,
"learning_rate": 0.00017072641341373692,
"loss": 0.5171,
"step": 1275
},
{
"epoch": 0.33,
"grad_norm": 0.392578125,
"learning_rate": 0.00017041104295221438,
"loss": 0.4695,
"step": 1280
},
{
"epoch": 0.33,
"grad_norm": 0.341796875,
"learning_rate": 0.0001700942774833362,
"loss": 0.5051,
"step": 1285
},
{
"epoch": 0.33,
"grad_norm": 0.283203125,
"learning_rate": 0.00016977612328296674,
"loss": 0.4571,
"step": 1290
},
{
"epoch": 0.33,
"grad_norm": 0.2890625,
"learning_rate": 0.00016945658665448447,
"loss": 0.467,
"step": 1295
},
{
"epoch": 0.33,
"grad_norm": 0.31640625,
"learning_rate": 0.00016913567392865697,
"loss": 0.5049,
"step": 1300
},
{
"epoch": 0.33,
"grad_norm": 0.255859375,
"learning_rate": 0.00016881339146351544,
"loss": 0.4694,
"step": 1305
},
{
"epoch": 0.33,
"grad_norm": 0.27734375,
"learning_rate": 0.00016848974564422894,
"loss": 0.4315,
"step": 1310
},
{
"epoch": 0.34,
"grad_norm": 0.396484375,
"learning_rate": 0.00016816474288297773,
"loss": 0.4595,
"step": 1315
},
{
"epoch": 0.34,
"grad_norm": 0.3984375,
"learning_rate": 0.0001678383896188262,
"loss": 0.4894,
"step": 1320
},
{
"epoch": 0.34,
"grad_norm": 0.259765625,
"learning_rate": 0.00016751069231759548,
"loss": 0.471,
"step": 1325
},
{
"epoch": 0.34,
"grad_norm": 0.33984375,
"learning_rate": 0.00016718165747173507,
"loss": 0.4694,
"step": 1330
},
{
"epoch": 0.34,
"grad_norm": 0.373046875,
"learning_rate": 0.00016685129160019444,
"loss": 0.4806,
"step": 1335
},
{
"epoch": 0.34,
"grad_norm": 0.40625,
"learning_rate": 0.00016651960124829378,
"loss": 0.4655,
"step": 1340
},
{
"epoch": 0.34,
"grad_norm": 0.3359375,
"learning_rate": 0.00016618659298759436,
"loss": 0.4996,
"step": 1345
},
{
"epoch": 0.34,
"grad_norm": 0.31640625,
"learning_rate": 0.00016585227341576824,
"loss": 0.4961,
"step": 1350
},
{
"epoch": 0.35,
"grad_norm": 0.27734375,
"learning_rate": 0.00016551664915646767,
"loss": 0.4674,
"step": 1355
},
{
"epoch": 0.35,
"grad_norm": 0.26171875,
"learning_rate": 0.0001651797268591938,
"loss": 0.5274,
"step": 1360
},
{
"epoch": 0.35,
"grad_norm": 0.28125,
"learning_rate": 0.00016484151319916488,
"loss": 0.447,
"step": 1365
},
{
"epoch": 0.35,
"grad_norm": 0.345703125,
"learning_rate": 0.00016450201487718422,
"loss": 0.4504,
"step": 1370
},
{
"epoch": 0.35,
"grad_norm": 0.333984375,
"learning_rate": 0.00016416123861950714,
"loss": 0.4656,
"step": 1375
},
{
"epoch": 0.35,
"grad_norm": 0.35546875,
"learning_rate": 0.0001638191911777079,
"loss": 0.5016,
"step": 1380
},
{
"epoch": 0.35,
"grad_norm": 0.36328125,
"learning_rate": 0.000163475879328546,
"loss": 0.4829,
"step": 1385
},
{
"epoch": 0.35,
"grad_norm": 0.263671875,
"learning_rate": 0.00016313130987383157,
"loss": 0.4662,
"step": 1390
},
{
"epoch": 0.36,
"grad_norm": 0.251953125,
"learning_rate": 0.00016278548964029108,
"loss": 0.4765,
"step": 1395
},
{
"epoch": 0.36,
"grad_norm": 0.34375,
"learning_rate": 0.0001624384254794317,
"loss": 0.4522,
"step": 1400
},
{
"epoch": 0.36,
"grad_norm": 0.279296875,
"learning_rate": 0.00016209012426740582,
"loss": 0.4363,
"step": 1405
},
{
"epoch": 0.36,
"grad_norm": 0.369140625,
"learning_rate": 0.0001617405929048746,
"loss": 0.4888,
"step": 1410
},
{
"epoch": 0.36,
"grad_norm": 0.357421875,
"learning_rate": 0.00016138983831687142,
"loss": 0.4398,
"step": 1415
},
{
"epoch": 0.36,
"grad_norm": 0.369140625,
"learning_rate": 0.0001610378674526646,
"loss": 0.5025,
"step": 1420
},
{
"epoch": 0.36,
"grad_norm": 0.34375,
"learning_rate": 0.00016068468728561977,
"loss": 0.4561,
"step": 1425
},
{
"epoch": 0.36,
"grad_norm": 0.326171875,
"learning_rate": 0.0001603303048130616,
"loss": 0.4637,
"step": 1430
},
{
"epoch": 0.37,
"grad_norm": 0.35546875,
"learning_rate": 0.0001599747270561353,
"loss": 0.5011,
"step": 1435
},
{
"epoch": 0.37,
"grad_norm": 0.2578125,
"learning_rate": 0.00015961796105966745,
"loss": 0.4757,
"step": 1440
},
{
"epoch": 0.37,
"grad_norm": 0.35546875,
"learning_rate": 0.00015926001389202638,
"loss": 0.4707,
"step": 1445
},
{
"epoch": 0.37,
"grad_norm": 0.35546875,
"learning_rate": 0.0001589008926449823,
"loss": 0.4305,
"step": 1450
},
{
"epoch": 0.37,
"grad_norm": 0.33984375,
"learning_rate": 0.00015854060443356658,
"loss": 0.4506,
"step": 1455
},
{
"epoch": 0.37,
"grad_norm": 0.3671875,
"learning_rate": 0.0001581791563959309,
"loss": 0.4594,
"step": 1460
},
{
"epoch": 0.37,
"grad_norm": 0.30859375,
"learning_rate": 0.00015781655569320577,
"loss": 0.4565,
"step": 1465
},
{
"epoch": 0.37,
"grad_norm": 0.33203125,
"learning_rate": 0.00015745280950935883,
"loss": 0.4848,
"step": 1470
},
{
"epoch": 0.38,
"grad_norm": 0.392578125,
"learning_rate": 0.00015708792505105223,
"loss": 0.5282,
"step": 1475
},
{
"epoch": 0.38,
"grad_norm": 0.37109375,
"learning_rate": 0.00015672190954750005,
"loss": 0.4261,
"step": 1480
},
{
"epoch": 0.38,
"grad_norm": 0.2734375,
"learning_rate": 0.00015635477025032504,
"loss": 0.486,
"step": 1485
},
{
"epoch": 0.38,
"grad_norm": 0.314453125,
"learning_rate": 0.00015598651443341491,
"loss": 0.462,
"step": 1490
},
{
"epoch": 0.38,
"grad_norm": 0.291015625,
"learning_rate": 0.00015561714939277822,
"loss": 0.4475,
"step": 1495
},
{
"epoch": 0.38,
"grad_norm": 0.44140625,
"learning_rate": 0.00015524668244639993,
"loss": 0.4765,
"step": 1500
},
{
"epoch": 0.38,
"grad_norm": 0.390625,
"learning_rate": 0.00015487512093409616,
"loss": 0.4819,
"step": 1505
},
{
"epoch": 0.39,
"grad_norm": 0.3828125,
"learning_rate": 0.0001545024722173691,
"loss": 0.4338,
"step": 1510
},
{
"epoch": 0.39,
"grad_norm": 0.29296875,
"learning_rate": 0.00015412874367926093,
"loss": 0.4556,
"step": 1515
},
{
"epoch": 0.39,
"grad_norm": 0.3828125,
"learning_rate": 0.00015375394272420763,
"loss": 0.4704,
"step": 1520
},
{
"epoch": 0.39,
"grad_norm": 0.3046875,
"learning_rate": 0.00015337807677789228,
"loss": 0.4999,
"step": 1525
},
{
"epoch": 0.39,
"grad_norm": 0.34375,
"learning_rate": 0.0001530011532870979,
"loss": 0.4351,
"step": 1530
},
{
"epoch": 0.39,
"grad_norm": 0.2333984375,
"learning_rate": 0.00015262317971955993,
"loss": 0.444,
"step": 1535
},
{
"epoch": 0.39,
"grad_norm": 0.3828125,
"learning_rate": 0.00015224416356381833,
"loss": 0.4488,
"step": 1540
},
{
"epoch": 0.39,
"grad_norm": 0.34375,
"learning_rate": 0.00015186411232906912,
"loss": 0.4704,
"step": 1545
},
{
"epoch": 0.4,
"grad_norm": 0.353515625,
"learning_rate": 0.0001514830335450157,
"loss": 0.4406,
"step": 1550
},
{
"epoch": 0.4,
"grad_norm": 0.390625,
"learning_rate": 0.0001511009347617196,
"loss": 0.458,
"step": 1555
},
{
"epoch": 0.4,
"grad_norm": 0.314453125,
"learning_rate": 0.00015071782354945094,
"loss": 0.4432,
"step": 1560
},
{
"epoch": 0.4,
"grad_norm": 0.330078125,
"learning_rate": 0.00015033370749853835,
"loss": 0.4713,
"step": 1565
},
{
"epoch": 0.4,
"grad_norm": 0.353515625,
"learning_rate": 0.00014994859421921876,
"loss": 0.4574,
"step": 1570
},
{
"epoch": 0.4,
"grad_norm": 0.271484375,
"learning_rate": 0.00014956249134148646,
"loss": 0.4234,
"step": 1575
},
{
"epoch": 0.4,
"grad_norm": 0.287109375,
"learning_rate": 0.00014917540651494205,
"loss": 0.471,
"step": 1580
},
{
"epoch": 0.4,
"grad_norm": 0.3515625,
"learning_rate": 0.0001487873474086408,
"loss": 0.4152,
"step": 1585
},
{
"epoch": 0.41,
"grad_norm": 0.30078125,
"learning_rate": 0.00014839832171094074,
"loss": 0.4751,
"step": 1590
},
{
"epoch": 0.41,
"grad_norm": 0.306640625,
"learning_rate": 0.00014800833712935033,
"loss": 0.5005,
"step": 1595
},
{
"epoch": 0.41,
"grad_norm": 0.35546875,
"learning_rate": 0.00014761740139037578,
"loss": 0.4411,
"step": 1600
},
{
"epoch": 0.41,
"grad_norm": 0.37890625,
"learning_rate": 0.00014722552223936792,
"loss": 0.4463,
"step": 1605
},
{
"epoch": 0.41,
"grad_norm": 0.380859375,
"learning_rate": 0.00014683270744036882,
"loss": 0.4677,
"step": 1610
},
{
"epoch": 0.41,
"grad_norm": 0.376953125,
"learning_rate": 0.00014643896477595792,
"loss": 0.4811,
"step": 1615
},
{
"epoch": 0.41,
"grad_norm": 0.443359375,
"learning_rate": 0.0001460443020470978,
"loss": 0.4741,
"step": 1620
},
{
"epoch": 0.41,
"grad_norm": 0.392578125,
"learning_rate": 0.00014564872707297966,
"loss": 0.4751,
"step": 1625
},
{
"epoch": 0.42,
"grad_norm": 0.376953125,
"learning_rate": 0.00014525224769086853,
"loss": 0.4897,
"step": 1630
},
{
"epoch": 0.42,
"grad_norm": 0.341796875,
"learning_rate": 0.0001448548717559477,
"loss": 0.4843,
"step": 1635
},
{
"epoch": 0.42,
"grad_norm": 0.494140625,
"learning_rate": 0.00014445660714116344,
"loss": 0.4625,
"step": 1640
},
{
"epoch": 0.42,
"grad_norm": 0.3984375,
"learning_rate": 0.0001440574617370687,
"loss": 0.4661,
"step": 1645
},
{
"epoch": 0.42,
"grad_norm": 0.265625,
"learning_rate": 0.00014365744345166704,
"loss": 0.4235,
"step": 1650
},
{
"epoch": 0.42,
"grad_norm": 0.357421875,
"learning_rate": 0.00014325656021025573,
"loss": 0.4567,
"step": 1655
},
{
"epoch": 0.42,
"grad_norm": 0.353515625,
"learning_rate": 0.00014285481995526892,
"loss": 0.4371,
"step": 1660
},
{
"epoch": 0.42,
"grad_norm": 0.357421875,
"learning_rate": 0.0001424522306461202,
"loss": 0.456,
"step": 1665
},
{
"epoch": 0.43,
"grad_norm": 0.328125,
"learning_rate": 0.0001420488002590449,
"loss": 0.4579,
"step": 1670
},
{
"epoch": 0.43,
"grad_norm": 0.35546875,
"learning_rate": 0.000141644536786942,
"loss": 0.5085,
"step": 1675
},
{
"epoch": 0.43,
"grad_norm": 0.333984375,
"learning_rate": 0.00014123944823921596,
"loss": 0.4936,
"step": 1680
},
{
"epoch": 0.43,
"grad_norm": 0.455078125,
"learning_rate": 0.00014083354264161782,
"loss": 0.4667,
"step": 1685
},
{
"epoch": 0.43,
"grad_norm": 0.341796875,
"learning_rate": 0.00014042682803608637,
"loss": 0.4848,
"step": 1690
},
{
"epoch": 0.43,
"grad_norm": 0.322265625,
"learning_rate": 0.0001400193124805886,
"loss": 0.432,
"step": 1695
},
{
"epoch": 0.43,
"grad_norm": 0.4375,
"learning_rate": 0.00013961100404896035,
"loss": 0.4491,
"step": 1700
},
{
"epoch": 0.43,
"grad_norm": 0.3515625,
"learning_rate": 0.00013920191083074612,
"loss": 0.4099,
"step": 1705
},
{
"epoch": 0.44,
"grad_norm": 0.408203125,
"learning_rate": 0.00013879204093103885,
"loss": 0.4092,
"step": 1710
},
{
"epoch": 0.44,
"grad_norm": 0.392578125,
"learning_rate": 0.00013838140247031944,
"loss": 0.4587,
"step": 1715
},
{
"epoch": 0.44,
"grad_norm": 0.3203125,
"learning_rate": 0.00013797000358429563,
"loss": 0.4563,
"step": 1720
},
{
"epoch": 0.44,
"grad_norm": 0.416015625,
"learning_rate": 0.00013755785242374112,
"loss": 0.5002,
"step": 1725
},
{
"epoch": 0.44,
"grad_norm": 0.31640625,
"learning_rate": 0.0001371449571543338,
"loss": 0.4465,
"step": 1730
},
{
"epoch": 0.44,
"grad_norm": 0.314453125,
"learning_rate": 0.00013673132595649418,
"loss": 0.4596,
"step": 1735
},
{
"epoch": 0.44,
"grad_norm": 0.33984375,
"learning_rate": 0.0001363169670252232,
"loss": 0.484,
"step": 1740
},
{
"epoch": 0.44,
"grad_norm": 0.369140625,
"learning_rate": 0.00013590188856993992,
"loss": 0.4489,
"step": 1745
},
{
"epoch": 0.45,
"grad_norm": 0.359375,
"learning_rate": 0.00013548609881431883,
"loss": 0.4362,
"step": 1750
},
{
"epoch": 0.45,
"grad_norm": 0.369140625,
"learning_rate": 0.0001350696059961269,
"loss": 0.4953,
"step": 1755
},
{
"epoch": 0.45,
"grad_norm": 0.36328125,
"learning_rate": 0.00013465241836706056,
"loss": 0.377,
"step": 1760
},
{
"epoch": 0.45,
"grad_norm": 0.4296875,
"learning_rate": 0.0001342345441925819,
"loss": 0.3919,
"step": 1765
},
{
"epoch": 0.45,
"grad_norm": 0.291015625,
"learning_rate": 0.00013381599175175517,
"loss": 0.4668,
"step": 1770
},
{
"epoch": 0.45,
"grad_norm": 0.271484375,
"learning_rate": 0.00013339676933708266,
"loss": 0.4401,
"step": 1775
},
{
"epoch": 0.45,
"grad_norm": 0.2412109375,
"learning_rate": 0.00013297688525434035,
"loss": 0.4791,
"step": 1780
},
{
"epoch": 0.46,
"grad_norm": 0.3046875,
"learning_rate": 0.00013255634782241349,
"loss": 0.3973,
"step": 1785
},
{
"epoch": 0.46,
"grad_norm": 0.404296875,
"learning_rate": 0.00013213516537313163,
"loss": 0.4692,
"step": 1790
},
{
"epoch": 0.46,
"grad_norm": 0.306640625,
"learning_rate": 0.0001317133462511037,
"loss": 0.456,
"step": 1795
},
{
"epoch": 0.46,
"grad_norm": 0.35546875,
"learning_rate": 0.00013129089881355256,
"loss": 0.4319,
"step": 1800
},
{
"epoch": 0.46,
"grad_norm": 0.32421875,
"learning_rate": 0.00013086783143014942,
"loss": 0.3933,
"step": 1805
},
{
"epoch": 0.46,
"grad_norm": 0.375,
"learning_rate": 0.0001304441524828482,
"loss": 0.427,
"step": 1810
},
{
"epoch": 0.46,
"grad_norm": 0.3984375,
"learning_rate": 0.00013001987036571916,
"loss": 0.4683,
"step": 1815
},
{
"epoch": 0.46,
"grad_norm": 0.365234375,
"learning_rate": 0.00012959499348478293,
"loss": 0.4522,
"step": 1820
},
{
"epoch": 0.47,
"grad_norm": 0.2314453125,
"learning_rate": 0.00012916953025784368,
"loss": 0.4255,
"step": 1825
},
{
"epoch": 0.47,
"grad_norm": 0.37109375,
"learning_rate": 0.00012874348911432251,
"loss": 0.4362,
"step": 1830
},
{
"epoch": 0.47,
"grad_norm": 0.37890625,
"learning_rate": 0.00012831687849509043,
"loss": 0.4532,
"step": 1835
},
{
"epoch": 0.47,
"grad_norm": 0.345703125,
"learning_rate": 0.00012788970685230106,
"loss": 0.4177,
"step": 1840
},
{
"epoch": 0.47,
"grad_norm": 0.466796875,
"learning_rate": 0.00012746198264922319,
"loss": 0.4137,
"step": 1845
},
{
"epoch": 0.47,
"grad_norm": 0.35546875,
"learning_rate": 0.00012703371436007313,
"loss": 0.4552,
"step": 1850
},
{
"epoch": 0.47,
"grad_norm": 0.37890625,
"learning_rate": 0.00012660491046984686,
"loss": 0.4682,
"step": 1855
},
{
"epoch": 0.47,
"grad_norm": 0.390625,
"learning_rate": 0.00012617557947415179,
"loss": 0.4947,
"step": 1860
},
{
"epoch": 0.48,
"grad_norm": 0.37109375,
"learning_rate": 0.0001257457298790386,
"loss": 0.4205,
"step": 1865
},
{
"epoch": 0.48,
"grad_norm": 0.322265625,
"learning_rate": 0.00012531537020083258,
"loss": 0.4037,
"step": 1870
},
{
"epoch": 0.48,
"grad_norm": 0.3515625,
"learning_rate": 0.000124884508965965,
"loss": 0.4267,
"step": 1875
},
{
"epoch": 0.48,
"grad_norm": 0.291015625,
"learning_rate": 0.00012445315471080402,
"loss": 0.3942,
"step": 1880
},
{
"epoch": 0.48,
"grad_norm": 0.314453125,
"learning_rate": 0.0001240213159814859,
"loss": 0.4453,
"step": 1885
},
{
"epoch": 0.48,
"grad_norm": 0.38671875,
"learning_rate": 0.0001235890013337453,
"loss": 0.4269,
"step": 1890
},
{
"epoch": 0.48,
"grad_norm": 0.328125,
"learning_rate": 0.00012315621933274597,
"loss": 0.4226,
"step": 1895
},
{
"epoch": 0.48,
"grad_norm": 0.416015625,
"learning_rate": 0.00012272297855291103,
"loss": 0.452,
"step": 1900
},
{
"epoch": 0.49,
"grad_norm": 0.3828125,
"learning_rate": 0.0001222892875777531,
"loss": 0.4421,
"step": 1905
},
{
"epoch": 0.49,
"grad_norm": 0.3984375,
"learning_rate": 0.00012185515499970421,
"loss": 0.4633,
"step": 1910
},
{
"epoch": 0.49,
"grad_norm": 0.365234375,
"learning_rate": 0.00012142058941994556,
"loss": 0.4258,
"step": 1915
},
{
"epoch": 0.49,
"grad_norm": 0.318359375,
"learning_rate": 0.00012098559944823714,
"loss": 0.4136,
"step": 1920
},
{
"epoch": 0.49,
"grad_norm": 0.53125,
"learning_rate": 0.00012055019370274714,
"loss": 0.3866,
"step": 1925
},
{
"epoch": 0.49,
"grad_norm": 0.482421875,
"learning_rate": 0.00012011438080988118,
"loss": 0.4743,
"step": 1930
},
{
"epoch": 0.49,
"grad_norm": 0.373046875,
"learning_rate": 0.00011967816940411145,
"loss": 0.4307,
"step": 1935
},
{
"epoch": 0.49,
"grad_norm": 0.326171875,
"learning_rate": 0.00011924156812780558,
"loss": 0.4401,
"step": 1940
},
{
"epoch": 0.5,
"grad_norm": 0.3359375,
"learning_rate": 0.0001188045856310555,
"loss": 0.4417,
"step": 1945
},
{
"epoch": 0.5,
"grad_norm": 0.3984375,
"learning_rate": 0.00011836723057150594,
"loss": 0.4071,
"step": 1950
},
{
"epoch": 0.5,
"grad_norm": 0.384765625,
"learning_rate": 0.00011792951161418301,
"loss": 0.4008,
"step": 1955
},
{
"epoch": 0.5,
"grad_norm": 0.46875,
"learning_rate": 0.00011749143743132249,
"loss": 0.442,
"step": 1960
},
{
"epoch": 0.5,
"grad_norm": 0.31640625,
"learning_rate": 0.000117053016702198,
"loss": 0.4082,
"step": 1965
},
{
"epoch": 0.5,
"grad_norm": 0.306640625,
"learning_rate": 0.00011661425811294902,
"loss": 0.4163,
"step": 1970
},
{
"epoch": 0.5,
"grad_norm": 0.423828125,
"learning_rate": 0.00011617517035640888,
"loss": 0.4438,
"step": 1975
},
{
"epoch": 0.5,
"grad_norm": 0.443359375,
"learning_rate": 0.00011573576213193245,
"loss": 0.46,
"step": 1980
},
{
"epoch": 0.51,
"grad_norm": 0.33203125,
"learning_rate": 0.00011529604214522385,
"loss": 0.5053,
"step": 1985
},
{
"epoch": 0.51,
"grad_norm": 0.326171875,
"learning_rate": 0.00011485601910816388,
"loss": 0.4295,
"step": 1990
},
{
"epoch": 0.51,
"grad_norm": 0.345703125,
"learning_rate": 0.00011441570173863756,
"loss": 0.4254,
"step": 1995
},
{
"epoch": 0.51,
"grad_norm": 0.341796875,
"learning_rate": 0.00011397509876036125,
"loss": 0.4208,
"step": 2000
},
{
"epoch": 0.51,
"grad_norm": 0.310546875,
"learning_rate": 0.00011353421890270992,
"loss": 0.4441,
"step": 2005
},
{
"epoch": 0.51,
"grad_norm": 0.419921875,
"learning_rate": 0.00011309307090054415,
"loss": 0.4299,
"step": 2010
},
{
"epoch": 0.51,
"grad_norm": 0.380859375,
"learning_rate": 0.00011265166349403713,
"loss": 0.4684,
"step": 2015
},
{
"epoch": 0.52,
"grad_norm": 0.287109375,
"learning_rate": 0.00011221000542850142,
"loss": 0.4475,
"step": 2020
},
{
"epoch": 0.52,
"grad_norm": 0.259765625,
"learning_rate": 0.00011176810545421572,
"loss": 0.4259,
"step": 2025
},
{
"epoch": 0.52,
"grad_norm": 0.287109375,
"learning_rate": 0.00011132597232625153,
"loss": 0.4661,
"step": 2030
},
{
"epoch": 0.52,
"grad_norm": 0.37890625,
"learning_rate": 0.00011088361480429965,
"loss": 0.4305,
"step": 2035
},
{
"epoch": 0.52,
"grad_norm": 0.2470703125,
"learning_rate": 0.00011044104165249665,
"loss": 0.4598,
"step": 2040
},
{
"epoch": 0.52,
"grad_norm": 0.31640625,
"learning_rate": 0.00010999826163925127,
"loss": 0.4168,
"step": 2045
},
{
"epoch": 0.52,
"grad_norm": 0.29296875,
"learning_rate": 0.00010955528353707059,
"loss": 0.4045,
"step": 2050
},
{
"epoch": 0.52,
"grad_norm": 0.3984375,
"learning_rate": 0.00010911211612238632,
"loss": 0.4082,
"step": 2055
},
{
"epoch": 0.53,
"grad_norm": 0.28515625,
"learning_rate": 0.00010866876817538097,
"loss": 0.4321,
"step": 2060
},
{
"epoch": 0.53,
"grad_norm": 0.28125,
"learning_rate": 0.0001082252484798137,
"loss": 0.4071,
"step": 2065
},
{
"epoch": 0.53,
"grad_norm": 0.306640625,
"learning_rate": 0.00010778156582284649,
"loss": 0.4588,
"step": 2070
},
{
"epoch": 0.53,
"grad_norm": 0.234375,
"learning_rate": 0.00010733772899486991,
"loss": 0.4414,
"step": 2075
},
{
"epoch": 0.53,
"grad_norm": 0.302734375,
"learning_rate": 0.00010689374678932906,
"loss": 0.393,
"step": 2080
},
{
"epoch": 0.53,
"grad_norm": 0.451171875,
"learning_rate": 0.00010644962800254932,
"loss": 0.4724,
"step": 2085
},
{
"epoch": 0.53,
"grad_norm": 0.462890625,
"learning_rate": 0.00010600538143356201,
"loss": 0.4428,
"step": 2090
},
{
"epoch": 0.53,
"grad_norm": 0.3828125,
"learning_rate": 0.00010556101588393015,
"loss": 0.391,
"step": 2095
},
{
"epoch": 0.54,
"grad_norm": 0.333984375,
"learning_rate": 0.00010511654015757407,
"loss": 0.3976,
"step": 2100
},
{
"epoch": 0.54,
"grad_norm": 0.3515625,
"learning_rate": 0.00010467196306059687,
"loss": 0.4012,
"step": 2105
},
{
"epoch": 0.54,
"grad_norm": 0.2890625,
"learning_rate": 0.00010422729340111015,
"loss": 0.468,
"step": 2110
},
{
"epoch": 0.54,
"grad_norm": 0.37109375,
"learning_rate": 0.00010378253998905932,
"loss": 0.431,
"step": 2115
},
{
"epoch": 0.54,
"grad_norm": 0.33984375,
"learning_rate": 0.00010333771163604911,
"loss": 0.4248,
"step": 2120
},
{
"epoch": 0.54,
"grad_norm": 0.337890625,
"learning_rate": 0.0001028928171551691,
"loss": 0.389,
"step": 2125
},
{
"epoch": 0.54,
"grad_norm": 0.388671875,
"learning_rate": 0.0001024478653608189,
"loss": 0.418,
"step": 2130
},
{
"epoch": 0.54,
"grad_norm": 0.337890625,
"learning_rate": 0.0001020028650685337,
"loss": 0.4409,
"step": 2135
},
{
"epoch": 0.55,
"grad_norm": 0.259765625,
"learning_rate": 0.00010155782509480954,
"loss": 0.4182,
"step": 2140
},
{
"epoch": 0.55,
"grad_norm": 0.3515625,
"learning_rate": 0.00010111275425692863,
"loss": 0.3616,
"step": 2145
},
{
"epoch": 0.55,
"grad_norm": 0.3671875,
"learning_rate": 0.0001006676613727847,
"loss": 0.384,
"step": 2150
},
{
"epoch": 0.55,
"grad_norm": 0.255859375,
"learning_rate": 0.00010022255526070823,
"loss": 0.4133,
"step": 2155
},
{
"epoch": 0.55,
"grad_norm": 0.36328125,
"learning_rate": 9.97774447392918e-05,
"loss": 0.4635,
"step": 2160
},
{
"epoch": 0.55,
"grad_norm": 0.259765625,
"learning_rate": 9.933233862721532e-05,
"loss": 0.4389,
"step": 2165
},
{
"epoch": 0.55,
"grad_norm": 0.29296875,
"learning_rate": 9.888724574307139e-05,
"loss": 0.4488,
"step": 2170
},
{
"epoch": 0.55,
"grad_norm": 0.267578125,
"learning_rate": 9.844217490519049e-05,
"loss": 0.4116,
"step": 2175
},
{
"epoch": 0.56,
"grad_norm": 0.37109375,
"learning_rate": 9.799713493146633e-05,
"loss": 0.3794,
"step": 2180
},
{
"epoch": 0.56,
"grad_norm": 0.34375,
"learning_rate": 9.755213463918114e-05,
"loss": 0.3887,
"step": 2185
},
{
"epoch": 0.56,
"grad_norm": 0.30859375,
"learning_rate": 9.710718284483094e-05,
"loss": 0.4222,
"step": 2190
},
{
"epoch": 0.56,
"grad_norm": 0.4140625,
"learning_rate": 9.66622883639509e-05,
"loss": 0.4451,
"step": 2195
},
{
"epoch": 0.56,
"grad_norm": 0.255859375,
"learning_rate": 9.621746001094072e-05,
"loss": 0.4213,
"step": 2200
},
{
"epoch": 0.56,
"grad_norm": 0.32421875,
"learning_rate": 9.577270659888988e-05,
"loss": 0.4378,
"step": 2205
},
{
"epoch": 0.56,
"grad_norm": 0.3203125,
"learning_rate": 9.532803693940315e-05,
"loss": 0.4315,
"step": 2210
},
{
"epoch": 0.56,
"grad_norm": 0.4296875,
"learning_rate": 9.488345984242597e-05,
"loss": 0.4118,
"step": 2215
},
{
"epoch": 0.57,
"grad_norm": 0.3125,
"learning_rate": 9.443898411606988e-05,
"loss": 0.4118,
"step": 2220
},
{
"epoch": 0.57,
"grad_norm": 0.3515625,
"learning_rate": 9.399461856643802e-05,
"loss": 0.5094,
"step": 2225
},
{
"epoch": 0.57,
"grad_norm": 0.291015625,
"learning_rate": 9.355037199745072e-05,
"loss": 0.391,
"step": 2230
},
{
"epoch": 0.57,
"grad_norm": 0.33203125,
"learning_rate": 9.310625321067096e-05,
"loss": 0.3856,
"step": 2235
},
{
"epoch": 0.57,
"grad_norm": 0.35546875,
"learning_rate": 9.26622710051301e-05,
"loss": 0.4223,
"step": 2240
},
{
"epoch": 0.57,
"grad_norm": 0.5078125,
"learning_rate": 9.221843417715352e-05,
"loss": 0.3672,
"step": 2245
},
{
"epoch": 0.57,
"grad_norm": 0.51171875,
"learning_rate": 9.17747515201863e-05,
"loss": 0.433,
"step": 2250
},
{
"epoch": 0.57,
"grad_norm": 0.412109375,
"learning_rate": 9.133123182461904e-05,
"loss": 0.3983,
"step": 2255
},
{
"epoch": 0.58,
"grad_norm": 0.32421875,
"learning_rate": 9.088788387761366e-05,
"loss": 0.4376,
"step": 2260
},
{
"epoch": 0.58,
"grad_norm": 0.3828125,
"learning_rate": 9.044471646292942e-05,
"loss": 0.4773,
"step": 2265
},
{
"epoch": 0.58,
"grad_norm": 0.341796875,
"learning_rate": 9.000173836074874e-05,
"loss": 0.461,
"step": 2270
},
{
"epoch": 0.58,
"grad_norm": 0.3046875,
"learning_rate": 8.955895834750334e-05,
"loss": 0.4507,
"step": 2275
},
{
"epoch": 0.58,
"grad_norm": 0.8671875,
"learning_rate": 8.911638519570036e-05,
"loss": 0.3797,
"step": 2280
},
{
"epoch": 0.58,
"grad_norm": 0.365234375,
"learning_rate": 8.867402767374848e-05,
"loss": 0.3717,
"step": 2285
},
{
"epoch": 0.58,
"grad_norm": 0.4140625,
"learning_rate": 8.823189454578428e-05,
"loss": 0.3888,
"step": 2290
},
{
"epoch": 0.59,
"grad_norm": 0.3125,
"learning_rate": 8.778999457149863e-05,
"loss": 0.4496,
"step": 2295
},
{
"epoch": 0.59,
"grad_norm": 0.4296875,
"learning_rate": 8.73483365059629e-05,
"loss": 0.3736,
"step": 2300
},
{
"epoch": 0.59,
"grad_norm": 0.384765625,
"learning_rate": 8.69069290994559e-05,
"loss": 0.4593,
"step": 2305
},
{
"epoch": 0.59,
"grad_norm": 0.361328125,
"learning_rate": 8.646578109729014e-05,
"loss": 0.4097,
"step": 2310
},
{
"epoch": 0.59,
"grad_norm": 0.31640625,
"learning_rate": 8.60249012396388e-05,
"loss": 0.4032,
"step": 2315
},
{
"epoch": 0.59,
"grad_norm": 0.298828125,
"learning_rate": 8.558429826136248e-05,
"loss": 0.3813,
"step": 2320
},
{
"epoch": 0.59,
"grad_norm": 0.291015625,
"learning_rate": 8.514398089183616e-05,
"loss": 0.4135,
"step": 2325
},
{
"epoch": 0.59,
"grad_norm": 0.37109375,
"learning_rate": 8.470395785477619e-05,
"loss": 0.3782,
"step": 2330
},
{
"epoch": 0.6,
"grad_norm": 0.345703125,
"learning_rate": 8.426423786806756e-05,
"loss": 0.4147,
"step": 2335
},
{
"epoch": 0.6,
"grad_norm": 0.40234375,
"learning_rate": 8.382482964359115e-05,
"loss": 0.4261,
"step": 2340
},
{
"epoch": 0.6,
"grad_norm": 0.376953125,
"learning_rate": 8.338574188705101e-05,
"loss": 0.4219,
"step": 2345
},
{
"epoch": 0.6,
"grad_norm": 0.34375,
"learning_rate": 8.294698329780203e-05,
"loss": 0.3911,
"step": 2350
},
{
"epoch": 0.6,
"grad_norm": 0.318359375,
"learning_rate": 8.250856256867753e-05,
"loss": 0.376,
"step": 2355
},
{
"epoch": 0.6,
"grad_norm": 0.306640625,
"learning_rate": 8.207048838581701e-05,
"loss": 0.3934,
"step": 2360
},
{
"epoch": 0.6,
"grad_norm": 0.33984375,
"learning_rate": 8.163276942849408e-05,
"loss": 0.3958,
"step": 2365
},
{
"epoch": 0.6,
"grad_norm": 0.37109375,
"learning_rate": 8.119541436894452e-05,
"loss": 0.396,
"step": 2370
},
{
"epoch": 0.61,
"grad_norm": 0.23828125,
"learning_rate": 8.075843187219443e-05,
"loss": 0.4779,
"step": 2375
},
{
"epoch": 0.61,
"grad_norm": 0.36328125,
"learning_rate": 8.032183059588858e-05,
"loss": 0.4172,
"step": 2380
},
{
"epoch": 0.61,
"grad_norm": 0.3828125,
"learning_rate": 7.988561919011884e-05,
"loss": 0.3797,
"step": 2385
},
{
"epoch": 0.61,
"grad_norm": 0.25390625,
"learning_rate": 7.94498062972529e-05,
"loss": 0.3962,
"step": 2390
},
{
"epoch": 0.61,
"grad_norm": 0.4140625,
"learning_rate": 7.901440055176287e-05,
"loss": 0.3965,
"step": 2395
},
{
"epoch": 0.61,
"grad_norm": 0.3359375,
"learning_rate": 7.857941058005447e-05,
"loss": 0.444,
"step": 2400
},
{
"epoch": 0.61,
"grad_norm": 0.375,
"learning_rate": 7.814484500029581e-05,
"loss": 0.3988,
"step": 2405
},
{
"epoch": 0.61,
"grad_norm": 0.35546875,
"learning_rate": 7.771071242224693e-05,
"loss": 0.4253,
"step": 2410
},
{
"epoch": 0.62,
"grad_norm": 0.390625,
"learning_rate": 7.727702144708899e-05,
"loss": 0.445,
"step": 2415
},
{
"epoch": 0.62,
"grad_norm": 0.265625,
"learning_rate": 7.684378066725406e-05,
"loss": 0.4292,
"step": 2420
},
{
"epoch": 0.62,
"grad_norm": 0.341796875,
"learning_rate": 7.641099866625472e-05,
"loss": 0.4002,
"step": 2425
},
{
"epoch": 0.62,
"grad_norm": 0.30859375,
"learning_rate": 7.597868401851411e-05,
"loss": 0.388,
"step": 2430
},
{
"epoch": 0.62,
"grad_norm": 0.33203125,
"learning_rate": 7.554684528919598e-05,
"loss": 0.4417,
"step": 2435
},
{
"epoch": 0.62,
"grad_norm": 0.341796875,
"learning_rate": 7.511549103403505e-05,
"loss": 0.3966,
"step": 2440
},
{
"epoch": 0.62,
"grad_norm": 0.265625,
"learning_rate": 7.468462979916744e-05,
"loss": 0.3563,
"step": 2445
},
{
"epoch": 0.62,
"grad_norm": 0.423828125,
"learning_rate": 7.425427012096142e-05,
"loss": 0.3639,
"step": 2450
},
{
"epoch": 0.63,
"grad_norm": 0.291015625,
"learning_rate": 7.382442052584821e-05,
"loss": 0.4633,
"step": 2455
},
{
"epoch": 0.63,
"grad_norm": 0.361328125,
"learning_rate": 7.339508953015316e-05,
"loss": 0.3974,
"step": 2460
},
{
"epoch": 0.63,
"grad_norm": 0.353515625,
"learning_rate": 7.296628563992689e-05,
"loss": 0.4446,
"step": 2465
},
{
"epoch": 0.63,
"grad_norm": 0.37890625,
"learning_rate": 7.253801735077684e-05,
"loss": 0.3914,
"step": 2470
},
{
"epoch": 0.63,
"grad_norm": 0.337890625,
"learning_rate": 7.211029314769897e-05,
"loss": 0.4249,
"step": 2475
},
{
"epoch": 0.63,
"grad_norm": 0.36328125,
"learning_rate": 7.168312150490959e-05,
"loss": 0.41,
"step": 2480
},
{
"epoch": 0.63,
"grad_norm": 0.34765625,
"learning_rate": 7.125651088567751e-05,
"loss": 0.4528,
"step": 2485
},
{
"epoch": 0.63,
"grad_norm": 0.28515625,
"learning_rate": 7.083046974215634e-05,
"loss": 0.38,
"step": 2490
},
{
"epoch": 0.64,
"grad_norm": 0.333984375,
"learning_rate": 7.040500651521708e-05,
"loss": 0.3793,
"step": 2495
},
{
"epoch": 0.64,
"grad_norm": 0.291015625,
"learning_rate": 6.998012963428084e-05,
"loss": 0.3859,
"step": 2500
},
{
"epoch": 0.64,
"grad_norm": 0.330078125,
"learning_rate": 6.95558475171518e-05,
"loss": 0.3929,
"step": 2505
},
{
"epoch": 0.64,
"grad_norm": 0.40234375,
"learning_rate": 6.913216856985056e-05,
"loss": 0.3616,
"step": 2510
},
{
"epoch": 0.64,
"grad_norm": 0.38671875,
"learning_rate": 6.870910118644744e-05,
"loss": 0.3599,
"step": 2515
},
{
"epoch": 0.64,
"grad_norm": 0.37890625,
"learning_rate": 6.828665374889629e-05,
"loss": 0.4315,
"step": 2520
},
{
"epoch": 0.64,
"grad_norm": 0.32421875,
"learning_rate": 6.786483462686835e-05,
"loss": 0.5134,
"step": 2525
},
{
"epoch": 0.65,
"grad_norm": 0.333984375,
"learning_rate": 6.744365217758651e-05,
"loss": 0.3792,
"step": 2530
},
{
"epoch": 0.65,
"grad_norm": 0.41796875,
"learning_rate": 6.702311474565965e-05,
"loss": 0.419,
"step": 2535
},
{
"epoch": 0.65,
"grad_norm": 0.314453125,
"learning_rate": 6.660323066291735e-05,
"loss": 0.4255,
"step": 2540
},
{
"epoch": 0.65,
"grad_norm": 0.3046875,
"learning_rate": 6.618400824824482e-05,
"loss": 0.4608,
"step": 2545
},
{
"epoch": 0.65,
"grad_norm": 0.30859375,
"learning_rate": 6.576545580741811e-05,
"loss": 0.4734,
"step": 2550
},
{
"epoch": 0.65,
"grad_norm": 0.44921875,
"learning_rate": 6.534758163293944e-05,
"loss": 0.4022,
"step": 2555
},
{
"epoch": 0.65,
"grad_norm": 0.375,
"learning_rate": 6.493039400387312e-05,
"loss": 0.393,
"step": 2560
},
{
"epoch": 0.65,
"grad_norm": 0.39453125,
"learning_rate": 6.451390118568124e-05,
"loss": 0.4285,
"step": 2565
},
{
"epoch": 0.66,
"grad_norm": 0.37890625,
"learning_rate": 6.409811143006012e-05,
"loss": 0.3768,
"step": 2570
},
{
"epoch": 0.66,
"grad_norm": 0.33203125,
"learning_rate": 6.368303297477685e-05,
"loss": 0.4258,
"step": 2575
},
{
"epoch": 0.66,
"grad_norm": 0.3671875,
"learning_rate": 6.326867404350587e-05,
"loss": 0.4105,
"step": 2580
},
{
"epoch": 0.66,
"grad_norm": 0.47265625,
"learning_rate": 6.285504284566623e-05,
"loss": 0.3375,
"step": 2585
},
{
"epoch": 0.66,
"grad_norm": 0.357421875,
"learning_rate": 6.244214757625891e-05,
"loss": 0.4004,
"step": 2590
},
{
"epoch": 0.66,
"grad_norm": 0.388671875,
"learning_rate": 6.202999641570438e-05,
"loss": 0.4723,
"step": 2595
},
{
"epoch": 0.66,
"grad_norm": 0.375,
"learning_rate": 6.161859752968059e-05,
"loss": 0.3726,
"step": 2600
},
{
"epoch": 0.66,
"grad_norm": 0.31640625,
"learning_rate": 6.120795906896116e-05,
"loss": 0.3782,
"step": 2605
},
{
"epoch": 0.67,
"grad_norm": 0.376953125,
"learning_rate": 6.079808916925391e-05,
"loss": 0.3922,
"step": 2610
},
{
"epoch": 0.67,
"grad_norm": 0.33203125,
"learning_rate": 6.038899595103968e-05,
"loss": 0.3924,
"step": 2615
},
{
"epoch": 0.67,
"grad_norm": 0.388671875,
"learning_rate": 5.9980687519411436e-05,
"loss": 0.4182,
"step": 2620
},
{
"epoch": 0.67,
"grad_norm": 0.283203125,
"learning_rate": 5.95731719639137e-05,
"loss": 0.483,
"step": 2625
},
{
"epoch": 0.67,
"grad_norm": 0.373046875,
"learning_rate": 5.91664573583822e-05,
"loss": 0.4156,
"step": 2630
},
{
"epoch": 0.67,
"grad_norm": 0.35546875,
"learning_rate": 5.876055176078407e-05,
"loss": 0.4438,
"step": 2635
},
{
"epoch": 0.67,
"grad_norm": 0.298828125,
"learning_rate": 5.8355463213058025e-05,
"loss": 0.4392,
"step": 2640
},
{
"epoch": 0.67,
"grad_norm": 0.33984375,
"learning_rate": 5.795119974095514e-05,
"loss": 0.4561,
"step": 2645
},
{
"epoch": 0.68,
"grad_norm": 0.302734375,
"learning_rate": 5.754776935387982e-05,
"loss": 0.3761,
"step": 2650
},
{
"epoch": 0.68,
"grad_norm": 0.388671875,
"learning_rate": 5.714518004473111e-05,
"loss": 0.3753,
"step": 2655
},
{
"epoch": 0.68,
"grad_norm": 0.345703125,
"learning_rate": 5.674343978974431e-05,
"loss": 0.3991,
"step": 2660
},
{
"epoch": 0.68,
"grad_norm": 0.326171875,
"learning_rate": 5.634255654833297e-05,
"loss": 0.3749,
"step": 2665
},
{
"epoch": 0.68,
"grad_norm": 0.48828125,
"learning_rate": 5.594253826293129e-05,
"loss": 0.3925,
"step": 2670
},
{
"epoch": 0.68,
"grad_norm": 0.33984375,
"learning_rate": 5.554339285883656e-05,
"loss": 0.3903,
"step": 2675
},
{
"epoch": 0.68,
"grad_norm": 0.48046875,
"learning_rate": 5.514512824405228e-05,
"loss": 0.3997,
"step": 2680
},
{
"epoch": 0.68,
"grad_norm": 0.42578125,
"learning_rate": 5.474775230913147e-05,
"loss": 0.4459,
"step": 2685
},
{
"epoch": 0.69,
"grad_norm": 0.453125,
"learning_rate": 5.435127292702033e-05,
"loss": 0.3924,
"step": 2690
},
{
"epoch": 0.69,
"grad_norm": 0.455078125,
"learning_rate": 5.395569795290221e-05,
"loss": 0.4301,
"step": 2695
},
{
"epoch": 0.69,
"grad_norm": 0.314453125,
"learning_rate": 5.356103522404208e-05,
"loss": 0.4262,
"step": 2700
},
{
"epoch": 0.69,
"grad_norm": 0.4140625,
"learning_rate": 5.316729255963118e-05,
"loss": 0.4261,
"step": 2705
},
{
"epoch": 0.69,
"grad_norm": 0.3984375,
"learning_rate": 5.277447776063208e-05,
"loss": 0.3991,
"step": 2710
},
{
"epoch": 0.69,
"grad_norm": 0.384765625,
"learning_rate": 5.238259860962423e-05,
"loss": 0.3679,
"step": 2715
},
{
"epoch": 0.69,
"grad_norm": 0.369140625,
"learning_rate": 5.199166287064967e-05,
"loss": 0.3751,
"step": 2720
},
{
"epoch": 0.69,
"grad_norm": 0.302734375,
"learning_rate": 5.160167828905926e-05,
"loss": 0.4476,
"step": 2725
},
{
"epoch": 0.7,
"grad_norm": 0.359375,
"learning_rate": 5.121265259135921e-05,
"loss": 0.3787,
"step": 2730
},
{
"epoch": 0.7,
"grad_norm": 0.400390625,
"learning_rate": 5.0824593485057946e-05,
"loss": 0.362,
"step": 2735
},
{
"epoch": 0.7,
"grad_norm": 0.38671875,
"learning_rate": 5.043750865851354e-05,
"loss": 0.3852,
"step": 2740
},
{
"epoch": 0.7,
"grad_norm": 0.421875,
"learning_rate": 5.005140578078127e-05,
"loss": 0.3985,
"step": 2745
},
{
"epoch": 0.7,
"grad_norm": 0.353515625,
"learning_rate": 4.966629250146167e-05,
"loss": 0.3807,
"step": 2750
},
{
"epoch": 0.7,
"grad_norm": 0.279296875,
"learning_rate": 4.928217645054909e-05,
"loss": 0.3646,
"step": 2755
},
{
"epoch": 0.7,
"grad_norm": 0.287109375,
"learning_rate": 4.889906523828041e-05,
"loss": 0.3996,
"step": 2760
},
{
"epoch": 0.7,
"grad_norm": 0.3671875,
"learning_rate": 4.851696645498429e-05,
"loss": 0.4218,
"step": 2765
},
{
"epoch": 0.71,
"grad_norm": 0.41015625,
"learning_rate": 4.813588767093088e-05,
"loss": 0.4394,
"step": 2770
},
{
"epoch": 0.71,
"grad_norm": 0.2890625,
"learning_rate": 4.7755836436181676e-05,
"loss": 0.3549,
"step": 2775
},
{
"epoch": 0.71,
"grad_norm": 0.384765625,
"learning_rate": 4.737682028044007e-05,
"loss": 0.4035,
"step": 2780
},
{
"epoch": 0.71,
"grad_norm": 0.322265625,
"learning_rate": 4.699884671290211e-05,
"loss": 0.3727,
"step": 2785
},
{
"epoch": 0.71,
"grad_norm": 0.330078125,
"learning_rate": 4.662192322210771e-05,
"loss": 0.3926,
"step": 2790
},
{
"epoch": 0.71,
"grad_norm": 0.439453125,
"learning_rate": 4.6246057275792356e-05,
"loss": 0.3738,
"step": 2795
},
{
"epoch": 0.71,
"grad_norm": 0.32421875,
"learning_rate": 4.587125632073906e-05,
"loss": 0.4018,
"step": 2800
},
{
"epoch": 0.72,
"grad_norm": 0.416015625,
"learning_rate": 4.54975277826309e-05,
"loss": 0.4074,
"step": 2805
},
{
"epoch": 0.72,
"grad_norm": 0.3046875,
"learning_rate": 4.512487906590385e-05,
"loss": 0.4087,
"step": 2810
},
{
"epoch": 0.72,
"grad_norm": 0.349609375,
"learning_rate": 4.4753317553600084e-05,
"loss": 0.3621,
"step": 2815
},
{
"epoch": 0.72,
"grad_norm": 0.46484375,
"learning_rate": 4.438285060722176e-05,
"loss": 0.4324,
"step": 2820
},
{
"epoch": 0.72,
"grad_norm": 0.37890625,
"learning_rate": 4.401348556658509e-05,
"loss": 0.4352,
"step": 2825
},
{
"epoch": 0.72,
"grad_norm": 0.3046875,
"learning_rate": 4.3645229749674987e-05,
"loss": 0.4221,
"step": 2830
},
{
"epoch": 0.72,
"grad_norm": 0.5078125,
"learning_rate": 4.3278090452499984e-05,
"loss": 0.3938,
"step": 2835
},
{
"epoch": 0.72,
"grad_norm": 0.40625,
"learning_rate": 4.291207494894781e-05,
"loss": 0.4412,
"step": 2840
},
{
"epoch": 0.73,
"grad_norm": 0.34375,
"learning_rate": 4.254719049064121e-05,
"loss": 0.3785,
"step": 2845
},
{
"epoch": 0.73,
"grad_norm": 0.298828125,
"learning_rate": 4.218344430679426e-05,
"loss": 0.4147,
"step": 2850
},
{
"epoch": 0.73,
"grad_norm": 0.435546875,
"learning_rate": 4.182084360406917e-05,
"loss": 0.3998,
"step": 2855
},
{
"epoch": 0.73,
"grad_norm": 0.33984375,
"learning_rate": 4.145939556643348e-05,
"loss": 0.3945,
"step": 2860
},
{
"epoch": 0.73,
"grad_norm": 0.421875,
"learning_rate": 4.109910735501774e-05,
"loss": 0.422,
"step": 2865
},
{
"epoch": 0.73,
"grad_norm": 0.3359375,
"learning_rate": 4.073998610797366e-05,
"loss": 0.4348,
"step": 2870
},
{
"epoch": 0.73,
"grad_norm": 0.2734375,
"learning_rate": 4.0382038940332615e-05,
"loss": 0.408,
"step": 2875
},
{
"epoch": 0.73,
"grad_norm": 0.3125,
"learning_rate": 4.002527294386476e-05,
"loss": 0.3963,
"step": 2880
},
{
"epoch": 0.74,
"grad_norm": 0.357421875,
"learning_rate": 3.966969518693845e-05,
"loss": 0.3804,
"step": 2885
},
{
"epoch": 0.74,
"grad_norm": 0.416015625,
"learning_rate": 3.931531271438027e-05,
"loss": 0.4101,
"step": 2890
},
{
"epoch": 0.74,
"grad_norm": 0.380859375,
"learning_rate": 3.896213254733543e-05,
"loss": 0.3588,
"step": 2895
},
{
"epoch": 0.74,
"grad_norm": 0.271484375,
"learning_rate": 3.861016168312861e-05,
"loss": 0.3458,
"step": 2900
},
{
"epoch": 0.74,
"grad_norm": 0.333984375,
"learning_rate": 3.8259407095125446e-05,
"loss": 0.4511,
"step": 2905
},
{
"epoch": 0.74,
"grad_norm": 0.40234375,
"learning_rate": 3.790987573259422e-05,
"loss": 0.44,
"step": 2910
},
{
"epoch": 0.74,
"grad_norm": 0.37890625,
"learning_rate": 3.756157452056832e-05,
"loss": 0.4312,
"step": 2915
},
{
"epoch": 0.74,
"grad_norm": 0.306640625,
"learning_rate": 3.7214510359708934e-05,
"loss": 0.4258,
"step": 2920
},
{
"epoch": 0.75,
"grad_norm": 0.44140625,
"learning_rate": 3.6868690126168425e-05,
"loss": 0.4092,
"step": 2925
},
{
"epoch": 0.75,
"grad_norm": 0.2890625,
"learning_rate": 3.652412067145402e-05,
"loss": 0.4247,
"step": 2930
},
{
"epoch": 0.75,
"grad_norm": 0.416015625,
"learning_rate": 3.6180808822292077e-05,
"loss": 0.3827,
"step": 2935
},
{
"epoch": 0.75,
"grad_norm": 0.35546875,
"learning_rate": 3.583876138049287e-05,
"loss": 0.4062,
"step": 2940
},
{
"epoch": 0.75,
"grad_norm": 0.306640625,
"learning_rate": 3.549798512281579e-05,
"loss": 0.392,
"step": 2945
},
{
"epoch": 0.75,
"grad_norm": 0.380859375,
"learning_rate": 3.5158486800835114e-05,
"loss": 0.3869,
"step": 2950
},
{
"epoch": 0.75,
"grad_norm": 0.328125,
"learning_rate": 3.482027314080621e-05,
"loss": 0.4215,
"step": 2955
},
{
"epoch": 0.75,
"grad_norm": 0.3515625,
"learning_rate": 3.4483350843532335e-05,
"loss": 0.3947,
"step": 2960
},
{
"epoch": 0.76,
"grad_norm": 0.423828125,
"learning_rate": 3.414772658423176e-05,
"loss": 0.4174,
"step": 2965
},
{
"epoch": 0.76,
"grad_norm": 0.31640625,
"learning_rate": 3.381340701240564e-05,
"loss": 0.4834,
"step": 2970
},
{
"epoch": 0.76,
"grad_norm": 0.3828125,
"learning_rate": 3.348039875170622e-05,
"loss": 0.3834,
"step": 2975
},
{
"epoch": 0.76,
"grad_norm": 0.416015625,
"learning_rate": 3.3148708399805574e-05,
"loss": 0.4661,
"step": 2980
},
{
"epoch": 0.76,
"grad_norm": 0.333984375,
"learning_rate": 3.2818342528264944e-05,
"loss": 0.3878,
"step": 2985
},
{
"epoch": 0.76,
"grad_norm": 0.375,
"learning_rate": 3.248930768240454e-05,
"loss": 0.4531,
"step": 2990
},
{
"epoch": 0.76,
"grad_norm": 0.345703125,
"learning_rate": 3.216161038117379e-05,
"loss": 0.388,
"step": 2995
},
{
"epoch": 0.76,
"grad_norm": 0.396484375,
"learning_rate": 3.1835257117022276e-05,
"loss": 0.4092,
"step": 3000
},
{
"epoch": 0.77,
"grad_norm": 0.431640625,
"learning_rate": 3.151025435577106e-05,
"loss": 0.4103,
"step": 3005
},
{
"epoch": 0.77,
"grad_norm": 0.3125,
"learning_rate": 3.118660853648457e-05,
"loss": 0.4309,
"step": 3010
},
{
"epoch": 0.77,
"grad_norm": 0.490234375,
"learning_rate": 3.0864326071343064e-05,
"loss": 0.361,
"step": 3015
},
{
"epoch": 0.77,
"grad_norm": 0.322265625,
"learning_rate": 3.0543413345515526e-05,
"loss": 0.3627,
"step": 3020
},
{
"epoch": 0.77,
"grad_norm": 0.37109375,
"learning_rate": 3.022387671703325e-05,
"loss": 0.444,
"step": 3025
},
{
"epoch": 0.77,
"grad_norm": 0.322265625,
"learning_rate": 2.9905722516663803e-05,
"loss": 0.44,
"step": 3030
},
{
"epoch": 0.77,
"grad_norm": 0.314453125,
"learning_rate": 2.9588957047785614e-05,
"loss": 0.3949,
"step": 3035
},
{
"epoch": 0.78,
"grad_norm": 0.265625,
"learning_rate": 2.9273586586263103e-05,
"loss": 0.4309,
"step": 3040
},
{
"epoch": 0.78,
"grad_norm": 0.3671875,
"learning_rate": 2.8959617380322336e-05,
"loss": 0.4052,
"step": 3045
},
{
"epoch": 0.78,
"grad_norm": 0.38671875,
"learning_rate": 2.8647055650427214e-05,
"loss": 0.4693,
"step": 3050
},
{
"epoch": 0.78,
"grad_norm": 0.2578125,
"learning_rate": 2.8335907589156264e-05,
"loss": 0.3854,
"step": 3055
},
{
"epoch": 0.78,
"grad_norm": 0.4140625,
"learning_rate": 2.802617936107993e-05,
"loss": 0.3751,
"step": 3060
},
{
"epoch": 0.78,
"grad_norm": 0.302734375,
"learning_rate": 2.7717877102638446e-05,
"loss": 0.3518,
"step": 3065
},
{
"epoch": 0.78,
"grad_norm": 0.353515625,
"learning_rate": 2.7411006922020244e-05,
"loss": 0.3935,
"step": 3070
},
{
"epoch": 0.78,
"grad_norm": 0.40625,
"learning_rate": 2.7105574899040942e-05,
"loss": 0.3661,
"step": 3075
},
{
"epoch": 0.79,
"grad_norm": 0.3359375,
"learning_rate": 2.6801587085022906e-05,
"loss": 0.3446,
"step": 3080
},
{
"epoch": 0.79,
"grad_norm": 0.404296875,
"learning_rate": 2.6499049502675344e-05,
"loss": 0.4103,
"step": 3085
},
{
"epoch": 0.79,
"grad_norm": 0.32421875,
"learning_rate": 2.6197968145974993e-05,
"loss": 0.4104,
"step": 3090
},
{
"epoch": 0.79,
"grad_norm": 0.36328125,
"learning_rate": 2.589834898004735e-05,
"loss": 0.3658,
"step": 3095
},
{
"epoch": 0.79,
"grad_norm": 0.392578125,
"learning_rate": 2.5600197941048442e-05,
"loss": 0.4091,
"step": 3100
},
{
"epoch": 0.79,
"grad_norm": 0.26171875,
"learning_rate": 2.5303520936047366e-05,
"loss": 0.4013,
"step": 3105
},
{
"epoch": 0.79,
"grad_norm": 0.3828125,
"learning_rate": 2.5008323842909075e-05,
"loss": 0.3581,
"step": 3110
},
{
"epoch": 0.79,
"grad_norm": 0.296875,
"learning_rate": 2.471461251017808e-05,
"loss": 0.3613,
"step": 3115
},
{
"epoch": 0.8,
"grad_norm": 0.4375,
"learning_rate": 2.4422392756962452e-05,
"loss": 0.3939,
"step": 3120
},
{
"epoch": 0.8,
"grad_norm": 0.42578125,
"learning_rate": 2.4131670372818603e-05,
"loss": 0.402,
"step": 3125
},
{
"epoch": 0.8,
"grad_norm": 0.34765625,
"learning_rate": 2.3842451117636566e-05,
"loss": 0.4436,
"step": 3130
},
{
"epoch": 0.8,
"grad_norm": 0.31640625,
"learning_rate": 2.355474072152588e-05,
"loss": 0.3995,
"step": 3135
},
{
"epoch": 0.8,
"grad_norm": 0.291015625,
"learning_rate": 2.3268544884702037e-05,
"loss": 0.3673,
"step": 3140
},
{
"epoch": 0.8,
"grad_norm": 0.345703125,
"learning_rate": 2.2983869277373604e-05,
"loss": 0.3658,
"step": 3145
},
{
"epoch": 0.8,
"grad_norm": 0.330078125,
"learning_rate": 2.270071953962981e-05,
"loss": 0.3789,
"step": 3150
},
{
"epoch": 0.8,
"grad_norm": 0.34375,
"learning_rate": 2.2419101281328857e-05,
"loss": 0.442,
"step": 3155
},
{
"epoch": 0.81,
"grad_norm": 0.45703125,
"learning_rate": 2.2139020081986783e-05,
"loss": 0.3913,
"step": 3160
},
{
"epoch": 0.81,
"grad_norm": 0.4140625,
"learning_rate": 2.186048149066684e-05,
"loss": 0.3532,
"step": 3165
},
{
"epoch": 0.81,
"grad_norm": 0.255859375,
"learning_rate": 2.158349102586964e-05,
"loss": 0.4629,
"step": 3170
},
{
"epoch": 0.81,
"grad_norm": 0.326171875,
"learning_rate": 2.130805417542384e-05,
"loss": 0.3224,
"step": 3175
},
{
"epoch": 0.81,
"grad_norm": 0.326171875,
"learning_rate": 2.103417639637729e-05,
"loss": 0.4609,
"step": 3180
},
{
"epoch": 0.81,
"grad_norm": 0.3515625,
"learning_rate": 2.0761863114889002e-05,
"loss": 0.4257,
"step": 3185
},
{
"epoch": 0.81,
"grad_norm": 0.4140625,
"learning_rate": 2.0491119726121667e-05,
"loss": 0.4391,
"step": 3190
},
{
"epoch": 0.81,
"grad_norm": 0.3359375,
"learning_rate": 2.0221951594134713e-05,
"loss": 0.3992,
"step": 3195
},
{
"epoch": 0.82,
"grad_norm": 0.353515625,
"learning_rate": 1.995436405177804e-05,
"loss": 0.3755,
"step": 3200
},
{
"epoch": 0.82,
"grad_norm": 0.345703125,
"learning_rate": 1.968836240058638e-05,
"loss": 0.3586,
"step": 3205
},
{
"epoch": 0.82,
"grad_norm": 0.4296875,
"learning_rate": 1.9423951910674265e-05,
"loss": 0.4388,
"step": 3210
},
{
"epoch": 0.82,
"grad_norm": 0.37890625,
"learning_rate": 1.916113782063159e-05,
"loss": 0.422,
"step": 3215
},
{
"epoch": 0.82,
"grad_norm": 0.35546875,
"learning_rate": 1.8899925337419844e-05,
"loss": 0.4077,
"step": 3220
},
{
"epoch": 0.82,
"grad_norm": 0.3125,
"learning_rate": 1.864031963626893e-05,
"loss": 0.3461,
"step": 3225
},
{
"epoch": 0.82,
"grad_norm": 0.384765625,
"learning_rate": 1.8382325860574656e-05,
"loss": 0.3394,
"step": 3230
},
{
"epoch": 0.82,
"grad_norm": 0.375,
"learning_rate": 1.8125949121796805e-05,
"loss": 0.3918,
"step": 3235
},
{
"epoch": 0.83,
"grad_norm": 0.40625,
"learning_rate": 1.787119449935789e-05,
"loss": 0.4278,
"step": 3240
},
{
"epoch": 0.83,
"grad_norm": 0.41796875,
"learning_rate": 1.7618067040542507e-05,
"loss": 0.4128,
"step": 3245
},
{
"epoch": 0.83,
"grad_norm": 0.326171875,
"learning_rate": 1.736657176039732e-05,
"loss": 0.3997,
"step": 3250
},
{
"epoch": 0.83,
"grad_norm": 0.291015625,
"learning_rate": 1.7116713641631744e-05,
"loss": 0.3856,
"step": 3255
},
{
"epoch": 0.83,
"grad_norm": 0.328125,
"learning_rate": 1.6868497634519185e-05,
"loss": 0.3838,
"step": 3260
},
{
"epoch": 0.83,
"grad_norm": 0.357421875,
"learning_rate": 1.6621928656798967e-05,
"loss": 0.4358,
"step": 3265
},
{
"epoch": 0.83,
"grad_norm": 0.47265625,
"learning_rate": 1.637701159357895e-05,
"loss": 0.4258,
"step": 3270
},
{
"epoch": 0.84,
"grad_norm": 0.2734375,
"learning_rate": 1.6133751297238654e-05,
"loss": 0.4668,
"step": 3275
},
{
"epoch": 0.84,
"grad_norm": 0.31640625,
"learning_rate": 1.589215258733322e-05,
"loss": 0.4056,
"step": 3280
},
{
"epoch": 0.84,
"grad_norm": 0.310546875,
"learning_rate": 1.565222025049785e-05,
"loss": 0.3928,
"step": 3285
},
{
"epoch": 0.84,
"grad_norm": 0.30078125,
"learning_rate": 1.541395904035301e-05,
"loss": 0.3993,
"step": 3290
},
{
"epoch": 0.84,
"grad_norm": 0.412109375,
"learning_rate": 1.5177373677410245e-05,
"loss": 0.4256,
"step": 3295
},
{
"epoch": 0.84,
"grad_norm": 0.447265625,
"learning_rate": 1.4942468848978642e-05,
"loss": 0.3931,
"step": 3300
},
{
"epoch": 0.84,
"grad_norm": 0.42578125,
"learning_rate": 1.4709249209071974e-05,
"loss": 0.4088,
"step": 3305
},
{
"epoch": 0.84,
"grad_norm": 0.423828125,
"learning_rate": 1.4477719378316469e-05,
"loss": 0.376,
"step": 3310
},
{
"epoch": 0.85,
"grad_norm": 0.5,
"learning_rate": 1.424788394385933e-05,
"loss": 0.4155,
"step": 3315
},
{
"epoch": 0.85,
"grad_norm": 0.33203125,
"learning_rate": 1.4019747459277777e-05,
"loss": 0.4336,
"step": 3320
},
{
"epoch": 0.85,
"grad_norm": 0.427734375,
"learning_rate": 1.379331444448886e-05,
"loss": 0.4389,
"step": 3325
},
{
"epoch": 0.85,
"grad_norm": 0.369140625,
"learning_rate": 1.3568589385659914e-05,
"loss": 0.3889,
"step": 3330
},
{
"epoch": 0.85,
"grad_norm": 0.369140625,
"learning_rate": 1.334557673511969e-05,
"loss": 0.3689,
"step": 3335
},
{
"epoch": 0.85,
"grad_norm": 0.27734375,
"learning_rate": 1.3124280911270114e-05,
"loss": 0.4072,
"step": 3340
},
{
"epoch": 0.85,
"grad_norm": 0.29296875,
"learning_rate": 1.290470629849876e-05,
"loss": 0.3905,
"step": 3345
},
{
"epoch": 0.85,
"grad_norm": 0.337890625,
"learning_rate": 1.2686857247091987e-05,
"loss": 0.3706,
"step": 3350
},
{
"epoch": 0.86,
"grad_norm": 0.4609375,
"learning_rate": 1.2470738073148769e-05,
"loss": 0.3561,
"step": 3355
},
{
"epoch": 0.86,
"grad_norm": 0.37109375,
"learning_rate": 1.225635305849513e-05,
"loss": 0.3786,
"step": 3360
},
{
"epoch": 0.86,
"grad_norm": 0.416015625,
"learning_rate": 1.2043706450599391e-05,
"loss": 0.3541,
"step": 3365
},
{
"epoch": 0.86,
"grad_norm": 0.349609375,
"learning_rate": 1.1832802462487902e-05,
"loss": 0.3837,
"step": 3370
},
{
"epoch": 0.86,
"grad_norm": 0.359375,
"learning_rate": 1.1623645272661698e-05,
"loss": 0.3629,
"step": 3375
},
{
"epoch": 0.86,
"grad_norm": 0.265625,
"learning_rate": 1.1416239025013653e-05,
"loss": 0.371,
"step": 3380
},
{
"epoch": 0.86,
"grad_norm": 0.353515625,
"learning_rate": 1.1210587828746355e-05,
"loss": 0.3975,
"step": 3385
},
{
"epoch": 0.86,
"grad_norm": 0.296875,
"learning_rate": 1.1006695758290753e-05,
"loss": 0.3682,
"step": 3390
},
{
"epoch": 0.87,
"grad_norm": 0.3046875,
"learning_rate": 1.0804566853225383e-05,
"loss": 0.4048,
"step": 3395
},
{
"epoch": 0.87,
"grad_norm": 0.369140625,
"learning_rate": 1.0604205118196342e-05,
"loss": 0.3566,
"step": 3400
},
{
"epoch": 0.87,
"grad_norm": 0.42578125,
"learning_rate": 1.0405614522837992e-05,
"loss": 0.3956,
"step": 3405
},
{
"epoch": 0.87,
"grad_norm": 0.34765625,
"learning_rate": 1.0208799001694247e-05,
"loss": 0.4019,
"step": 3410
},
{
"epoch": 0.87,
"grad_norm": 0.421875,
"learning_rate": 1.0013762454140663e-05,
"loss": 0.4616,
"step": 3415
},
{
"epoch": 0.87,
"grad_norm": 0.29296875,
"learning_rate": 9.820508744307177e-06,
"loss": 0.3845,
"step": 3420
},
{
"epoch": 0.87,
"grad_norm": 0.443359375,
"learning_rate": 9.629041701001507e-06,
"loss": 0.4296,
"step": 3425
},
{
"epoch": 0.87,
"grad_norm": 0.3203125,
"learning_rate": 9.439365117633404e-06,
"loss": 0.3861,
"step": 3430
},
{
"epoch": 0.88,
"grad_norm": 0.47265625,
"learning_rate": 9.251482752139352e-06,
"loss": 0.3431,
"step": 3435
},
{
"epoch": 0.88,
"grad_norm": 0.447265625,
"learning_rate": 9.065398326908215e-06,
"loss": 0.3941,
"step": 3440
},
{
"epoch": 0.88,
"grad_norm": 0.39453125,
"learning_rate": 8.881115528707462e-06,
"loss": 0.3596,
"step": 3445
},
{
"epoch": 0.88,
"grad_norm": 0.333984375,
"learning_rate": 8.698638008610094e-06,
"loss": 0.4148,
"step": 3450
},
{
"epoch": 0.88,
"grad_norm": 0.43359375,
"learning_rate": 8.517969381922364e-06,
"loss": 0.3717,
"step": 3455
},
{
"epoch": 0.88,
"grad_norm": 0.416015625,
"learning_rate": 8.339113228112083e-06,
"loss": 0.4585,
"step": 3460
},
{
"epoch": 0.88,
"grad_norm": 0.306640625,
"learning_rate": 8.162073090737776e-06,
"loss": 0.4115,
"step": 3465
},
{
"epoch": 0.88,
"grad_norm": 0.34375,
"learning_rate": 7.986852477378404e-06,
"loss": 0.3699,
"step": 3470
},
{
"epoch": 0.89,
"grad_norm": 0.322265625,
"learning_rate": 7.813454859563929e-06,
"loss": 0.4215,
"step": 3475
},
{
"epoch": 0.89,
"grad_norm": 0.330078125,
"learning_rate": 7.641883672706496e-06,
"loss": 0.3779,
"step": 3480
},
{
"epoch": 0.89,
"grad_norm": 0.375,
"learning_rate": 7.472142316032382e-06,
"loss": 0.3675,
"step": 3485
},
{
"epoch": 0.89,
"grad_norm": 0.349609375,
"learning_rate": 7.304234152514655e-06,
"loss": 0.4172,
"step": 3490
},
{
"epoch": 0.89,
"grad_norm": 0.328125,
"learning_rate": 7.138162508806556e-06,
"loss": 0.423,
"step": 3495
},
{
"epoch": 0.89,
"grad_norm": 0.310546875,
"learning_rate": 6.973930675175533e-06,
"loss": 0.368,
"step": 3500
},
{
"epoch": 0.89,
"grad_norm": 0.291015625,
"learning_rate": 6.811541905438145e-06,
"loss": 0.4112,
"step": 3505
},
{
"epoch": 0.89,
"grad_norm": 0.37109375,
"learning_rate": 6.6509994168955204e-06,
"loss": 0.4059,
"step": 3510
},
{
"epoch": 0.9,
"grad_norm": 0.46484375,
"learning_rate": 6.492306390269642e-06,
"loss": 0.3527,
"step": 3515
},
{
"epoch": 0.9,
"grad_norm": 0.279296875,
"learning_rate": 6.335465969640331e-06,
"loss": 0.3475,
"step": 3520
},
{
"epoch": 0.9,
"grad_norm": 0.455078125,
"learning_rate": 6.180481262382964e-06,
"loss": 0.3912,
"step": 3525
},
{
"epoch": 0.9,
"grad_norm": 0.41015625,
"learning_rate": 6.027355339106877e-06,
"loss": 0.428,
"step": 3530
},
{
"epoch": 0.9,
"grad_norm": 0.369140625,
"learning_rate": 5.876091233594572e-06,
"loss": 0.4253,
"step": 3535
},
{
"epoch": 0.9,
"grad_norm": 0.369140625,
"learning_rate": 5.726691942741569e-06,
"loss": 0.4701,
"step": 3540
},
{
"epoch": 0.9,
"grad_norm": 0.400390625,
"learning_rate": 5.579160426497065e-06,
"loss": 0.3716,
"step": 3545
},
{
"epoch": 0.91,
"grad_norm": 0.51171875,
"learning_rate": 5.4334996078052705e-06,
"loss": 0.3864,
"step": 3550
},
{
"epoch": 0.91,
"grad_norm": 0.400390625,
"learning_rate": 5.289712372547495e-06,
"loss": 0.4586,
"step": 3555
},
{
"epoch": 0.91,
"grad_norm": 0.310546875,
"learning_rate": 5.147801569485e-06,
"loss": 0.4011,
"step": 3560
},
{
"epoch": 0.91,
"grad_norm": 0.353515625,
"learning_rate": 5.007770010202528e-06,
"loss": 0.3688,
"step": 3565
},
{
"epoch": 0.91,
"grad_norm": 0.357421875,
"learning_rate": 4.8696204690526155e-06,
"loss": 0.3555,
"step": 3570
},
{
"epoch": 0.91,
"grad_norm": 0.427734375,
"learning_rate": 4.733355683100604e-06,
"loss": 0.3783,
"step": 3575
},
{
"epoch": 0.91,
"grad_norm": 0.42578125,
"learning_rate": 4.598978352070437e-06,
"loss": 0.386,
"step": 3580
},
{
"epoch": 0.91,
"grad_norm": 0.423828125,
"learning_rate": 4.466491138291185e-06,
"loss": 0.3876,
"step": 3585
},
{
"epoch": 0.92,
"grad_norm": 0.3984375,
"learning_rate": 4.3358966666442455e-06,
"loss": 0.4083,
"step": 3590
},
{
"epoch": 0.92,
"grad_norm": 0.369140625,
"learning_rate": 4.207197524511375e-06,
"loss": 0.3974,
"step": 3595
},
{
"epoch": 0.92,
"grad_norm": 0.4296875,
"learning_rate": 4.0803962617234405e-06,
"loss": 0.3811,
"step": 3600
},
{
"epoch": 0.92,
"grad_norm": 0.34375,
"learning_rate": 3.955495390509878e-06,
"loss": 0.3775,
"step": 3605
},
{
"epoch": 0.92,
"grad_norm": 0.45703125,
"learning_rate": 3.8324973854489074e-06,
"loss": 0.446,
"step": 3610
},
{
"epoch": 0.92,
"grad_norm": 0.412109375,
"learning_rate": 3.71140468341854e-06,
"loss": 0.4419,
"step": 3615
},
{
"epoch": 0.92,
"grad_norm": 0.41015625,
"learning_rate": 3.592219683548259e-06,
"loss": 0.3783,
"step": 3620
},
{
"epoch": 0.92,
"grad_norm": 0.388671875,
"learning_rate": 3.474944747171538e-06,
"loss": 0.4398,
"step": 3625
},
{
"epoch": 0.93,
"grad_norm": 0.357421875,
"learning_rate": 3.35958219777901e-06,
"loss": 0.4204,
"step": 3630
},
{
"epoch": 0.93,
"grad_norm": 0.431640625,
"learning_rate": 3.246134320972438e-06,
"loss": 0.4019,
"step": 3635
},
{
"epoch": 0.93,
"grad_norm": 0.34765625,
"learning_rate": 3.134603364419475e-06,
"loss": 0.3955,
"step": 3640
},
{
"epoch": 0.93,
"grad_norm": 0.275390625,
"learning_rate": 3.0249915378090854e-06,
"loss": 0.414,
"step": 3645
},
{
"epoch": 0.93,
"grad_norm": 0.341796875,
"learning_rate": 2.9173010128077826e-06,
"loss": 0.3865,
"step": 3650
},
{
"epoch": 0.93,
"grad_norm": 0.294921875,
"learning_rate": 2.811533923016607e-06,
"loss": 0.4735,
"step": 3655
},
{
"epoch": 0.93,
"grad_norm": 0.310546875,
"learning_rate": 2.7076923639288486e-06,
"loss": 0.3737,
"step": 3660
},
{
"epoch": 0.93,
"grad_norm": 0.345703125,
"learning_rate": 2.605778392888547e-06,
"loss": 0.3935,
"step": 3665
},
{
"epoch": 0.94,
"grad_norm": 0.353515625,
"learning_rate": 2.505794029049702e-06,
"loss": 0.3963,
"step": 3670
},
{
"epoch": 0.94,
"grad_norm": 0.353515625,
"learning_rate": 2.4077412533362707e-06,
"loss": 0.4122,
"step": 3675
},
{
"epoch": 0.94,
"grad_norm": 0.36328125,
"learning_rate": 2.3116220084029694e-06,
"loss": 0.3266,
"step": 3680
},
{
"epoch": 0.94,
"grad_norm": 0.34765625,
"learning_rate": 2.217438198596733e-06,
"loss": 0.4134,
"step": 3685
},
{
"epoch": 0.94,
"grad_norm": 0.388671875,
"learning_rate": 2.125191689918993e-06,
"loss": 0.4304,
"step": 3690
},
{
"epoch": 0.94,
"grad_norm": 0.3828125,
"learning_rate": 2.034884309988727e-06,
"loss": 0.4402,
"step": 3695
},
{
"epoch": 0.94,
"grad_norm": 0.3046875,
"learning_rate": 1.9465178480062353e-06,
"loss": 0.4104,
"step": 3700
},
{
"epoch": 0.94,
"grad_norm": 0.298828125,
"learning_rate": 1.860094054717687e-06,
"loss": 0.4293,
"step": 3705
},
{
"epoch": 0.95,
"grad_norm": 0.287109375,
"learning_rate": 1.7756146423804742e-06,
"loss": 0.317,
"step": 3710
},
{
"epoch": 0.95,
"grad_norm": 0.388671875,
"learning_rate": 1.6930812847292255e-06,
"loss": 0.3738,
"step": 3715
},
{
"epoch": 0.95,
"grad_norm": 0.3515625,
"learning_rate": 1.6124956169426886e-06,
"loss": 0.4004,
"step": 3720
},
{
"epoch": 0.95,
"grad_norm": 0.39453125,
"learning_rate": 1.5338592356113124e-06,
"loss": 0.4346,
"step": 3725
},
{
"epoch": 0.95,
"grad_norm": 0.24609375,
"learning_rate": 1.4571736987056495e-06,
"loss": 0.4255,
"step": 3730
},
{
"epoch": 0.95,
"grad_norm": 0.341796875,
"learning_rate": 1.3824405255454475e-06,
"loss": 0.3787,
"step": 3735
},
{
"epoch": 0.95,
"grad_norm": 0.390625,
"learning_rate": 1.3096611967695517e-06,
"loss": 0.3445,
"step": 3740
},
{
"epoch": 0.95,
"grad_norm": 0.431640625,
"learning_rate": 1.2388371543066045e-06,
"loss": 0.4093,
"step": 3745
},
{
"epoch": 0.96,
"grad_norm": 0.3671875,
"learning_rate": 1.1699698013464488e-06,
"loss": 0.3977,
"step": 3750
},
{
"epoch": 0.96,
"grad_norm": 0.380859375,
"learning_rate": 1.1030605023123364e-06,
"loss": 0.373,
"step": 3755
},
{
"epoch": 0.96,
"grad_norm": 0.34765625,
"learning_rate": 1.0381105828338844e-06,
"loss": 0.3833,
"step": 3760
},
{
"epoch": 0.96,
"grad_norm": 0.275390625,
"learning_rate": 9.751213297208184e-07,
"loss": 0.434,
"step": 3765
},
{
"epoch": 0.96,
"grad_norm": 0.35546875,
"learning_rate": 9.140939909375034e-07,
"loss": 0.3993,
"step": 3770
},
{
"epoch": 0.96,
"grad_norm": 0.275390625,
"learning_rate": 8.550297755781644e-07,
"loss": 0.3842,
"step": 3775
},
{
"epoch": 0.96,
"grad_norm": 0.345703125,
"learning_rate": 7.979298538429935e-07,
"loss": 0.462,
"step": 3780
},
{
"epoch": 0.97,
"grad_norm": 0.408203125,
"learning_rate": 7.427953570149027e-07,
"loss": 0.3928,
"step": 3785
},
{
"epoch": 0.97,
"grad_norm": 0.3359375,
"learning_rate": 6.896273774371742e-07,
"loss": 0.3982,
"step": 3790
},
{
"epoch": 0.97,
"grad_norm": 0.314453125,
"learning_rate": 6.384269684917676e-07,
"loss": 0.4198,
"step": 3795
},
{
"epoch": 0.97,
"grad_norm": 0.380859375,
"learning_rate": 5.891951445784583e-07,
"loss": 0.3316,
"step": 3800
},
{
"epoch": 0.97,
"grad_norm": 0.279296875,
"learning_rate": 5.419328810947755e-07,
"loss": 0.4272,
"step": 3805
},
{
"epoch": 0.97,
"grad_norm": 0.34375,
"learning_rate": 4.966411144166405e-07,
"loss": 0.3763,
"step": 3810
},
{
"epoch": 0.97,
"grad_norm": 0.326171875,
"learning_rate": 4.533207418798147e-07,
"loss": 0.3747,
"step": 3815
},
{
"epoch": 0.97,
"grad_norm": 0.28125,
"learning_rate": 4.119726217621578e-07,
"loss": 0.4468,
"step": 3820
},
{
"epoch": 0.98,
"grad_norm": 0.41015625,
"learning_rate": 3.7259757326657543e-07,
"loss": 0.3989,
"step": 3825
},
{
"epoch": 0.98,
"grad_norm": 0.306640625,
"learning_rate": 3.351963765048427e-07,
"loss": 0.3995,
"step": 3830
},
{
"epoch": 0.98,
"grad_norm": 0.43359375,
"learning_rate": 2.997697724820725e-07,
"loss": 0.4069,
"step": 3835
},
{
"epoch": 0.98,
"grad_norm": 0.384765625,
"learning_rate": 2.663184630821158e-07,
"loss": 0.397,
"step": 3840
},
{
"epoch": 0.98,
"grad_norm": 0.287109375,
"learning_rate": 2.3484311105360645e-07,
"loss": 0.383,
"step": 3845
},
{
"epoch": 0.98,
"grad_norm": 0.333984375,
"learning_rate": 2.053443399968602e-07,
"loss": 0.3835,
"step": 3850
},
{
"epoch": 0.98,
"grad_norm": 0.337890625,
"learning_rate": 1.778227343514627e-07,
"loss": 0.4508,
"step": 3855
},
{
"epoch": 0.98,
"grad_norm": 0.3203125,
"learning_rate": 1.522788393847785e-07,
"loss": 0.4136,
"step": 3860
},
{
"epoch": 0.99,
"grad_norm": 0.375,
"learning_rate": 1.287131611810599e-07,
"loss": 0.3933,
"step": 3865
},
{
"epoch": 0.99,
"grad_norm": 0.365234375,
"learning_rate": 1.0712616663149932e-07,
"loss": 0.3702,
"step": 3870
},
{
"epoch": 0.99,
"grad_norm": 0.302734375,
"learning_rate": 8.751828342491442e-08,
"loss": 0.3742,
"step": 3875
},
{
"epoch": 0.99,
"grad_norm": 0.349609375,
"learning_rate": 6.988990003929941e-08,
"loss": 0.4224,
"step": 3880
},
{
"epoch": 0.99,
"grad_norm": 0.376953125,
"learning_rate": 5.4241365734153396e-08,
"loss": 0.4584,
"step": 3885
},
{
"epoch": 0.99,
"grad_norm": 0.380859375,
"learning_rate": 4.057299054350816e-08,
"loss": 0.4101,
"step": 3890
},
{
"epoch": 0.99,
"grad_norm": 0.33984375,
"learning_rate": 2.8885045269833044e-08,
"loss": 0.4152,
"step": 3895
},
{
"epoch": 0.99,
"grad_norm": 0.294921875,
"learning_rate": 1.9177761478639255e-08,
"loss": 0.4205,
"step": 3900
},
{
"epoch": 1.0,
"grad_norm": 0.41015625,
"learning_rate": 1.1451331493916862e-08,
"loss": 0.3096,
"step": 3905
},
{
"epoch": 1.0,
"grad_norm": 0.3125,
"learning_rate": 5.7059083943045154e-09,
"loss": 0.3654,
"step": 3910
},
{
"epoch": 1.0,
"grad_norm": 0.4921875,
"learning_rate": 1.941606010069652e-09,
"loss": 0.4881,
"step": 3915
},
{
"epoch": 1.0,
"grad_norm": 0.32421875,
"learning_rate": 1.5849892083252828e-10,
"loss": 0.406,
"step": 3920
},
{
"epoch": 1.0,
"eval_loss": 0.39284008741378784,
"eval_runtime": 450.3981,
"eval_samples_per_second": 3.71,
"eval_steps_per_second": 0.464,
"step": 3922
},
{
"epoch": 1.0,
"step": 3922,
"total_flos": 2.757410325375484e+18,
"train_loss": 0.47386226463779874,
"train_runtime": 32073.4099,
"train_samples_per_second": 0.978,
"train_steps_per_second": 0.122
}
],
"logging_steps": 5,
"max_steps": 3922,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 2.757410325375484e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}