bart-indo-small / trainer_state.json (Gaduh Hartawan, first commit, 6a48c5e)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9902828495388996,
"eval_steps": 500,
"global_step": 48000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0,
"grad_norm": 13.376683235168457,
"learning_rate": 0.00001,
"loss": 4.2362,
"step": 100
},
{
"epoch": 0,
"grad_norm": 2.2011091709136963,
"learning_rate": 0.00002,
"loss": 0.6427,
"step": 200
},
{
"epoch": 0.01,
"grad_norm": 2.1116416454315186,
"learning_rate": 0.00003,
"loss": 0.5047,
"step": 300
},
{
"epoch": 0.01,
"grad_norm": 2.325719118118286,
"learning_rate": 0.00004,
"loss": 0.4955,
"step": 400
},
{
"epoch": 0.01,
"grad_norm": 1.2606059312820435,
"learning_rate": 0.00005,
"loss": 0.5104,
"step": 500
},
{
"epoch": 0.01,
"grad_norm": 1.9306057691574097,
"learning_rate": 0.000049895770361259934,
"loss": 0.5024,
"step": 600
},
{
"epoch": 0.01,
"grad_norm": 1.3757009506225586,
"learning_rate": 0.00004979154072251986,
"loss": 0.4814,
"step": 700
},
{
"epoch": 0.02,
"grad_norm": 1.6575660705566406,
"learning_rate": 0.00004968731108377979,
"loss": 0.4675,
"step": 800
},
{
"epoch": 0.02,
"grad_norm": 1.6257671117782593,
"learning_rate": 0.000049583081445039714,
"loss": 0.4673,
"step": 900
},
{
"epoch": 0.02,
"grad_norm": 1.1650177240371704,
"learning_rate": 0.000049478851806299645,
"loss": 0.4635,
"step": 1000
},
{
"epoch": 0.02,
"grad_norm": 1.2291123867034912,
"learning_rate": 0.00004937462216755957,
"loss": 0.4645,
"step": 1100
},
{
"epoch": 0.02,
"grad_norm": 1.094923973083496,
"learning_rate": 0.0000492703925288195,
"loss": 0.463,
"step": 1200
},
{
"epoch": 0.03,
"grad_norm": 1.4243600368499756,
"learning_rate": 0.000049166162890079426,
"loss": 0.4305,
"step": 1300
},
{
"epoch": 0.03,
"grad_norm": 1.5323872566223145,
"learning_rate": 0.00004906193325133936,
"loss": 0.4431,
"step": 1400
},
{
"epoch": 0.03,
"grad_norm": 1.4820982217788696,
"learning_rate": 0.00004895770361259928,
"loss": 0.4301,
"step": 1500
},
{
"epoch": 0.03,
"grad_norm": 1.6124228239059448,
"learning_rate": 0.00004885347397385921,
"loss": 0.4537,
"step": 1600
},
{
"epoch": 0.04,
"grad_norm": 1.5138214826583862,
"learning_rate": 0.00004874924433511914,
"loss": 0.4101,
"step": 1700
},
{
"epoch": 0.04,
"grad_norm": 2.3704726696014404,
"learning_rate": 0.00004864501469637907,
"loss": 0.4113,
"step": 1800
},
{
"epoch": 0.04,
"grad_norm": 1.4032459259033203,
"learning_rate": 0.00004854078505763899,
"loss": 0.4297,
"step": 1900
},
{
"epoch": 0.04,
"grad_norm": 1.3240689039230347,
"learning_rate": 0.000048436555418898924,
"loss": 0.4193,
"step": 2000
},
{
"epoch": 0.04,
"grad_norm": 1.411370038986206,
"learning_rate": 0.00004833232578015885,
"loss": 0.4118,
"step": 2100
},
{
"epoch": 0.05,
"grad_norm": 1.4880633354187012,
"learning_rate": 0.00004822809614141878,
"loss": 0.4284,
"step": 2200
},
{
"epoch": 0.05,
"grad_norm": 1.3598105907440186,
"learning_rate": 0.000048123866502678705,
"loss": 0.4119,
"step": 2300
},
{
"epoch": 0.05,
"grad_norm": 1.2027459144592285,
"learning_rate": 0.000048019636863938636,
"loss": 0.4073,
"step": 2400
},
{
"epoch": 0.05,
"grad_norm": 1.35155189037323,
"learning_rate": 0.00004791540722519856,
"loss": 0.4134,
"step": 2500
},
{
"epoch": 0.05,
"grad_norm": 1.8489034175872803,
"learning_rate": 0.00004781117758645849,
"loss": 0.4069,
"step": 2600
},
{
"epoch": 0.06,
"grad_norm": 1.2636560201644897,
"learning_rate": 0.000047706947947718416,
"loss": 0.4224,
"step": 2700
},
{
"epoch": 0.06,
"grad_norm": 1.4396322965621948,
"learning_rate": 0.00004760271830897835,
"loss": 0.3951,
"step": 2800
},
{
"epoch": 0.06,
"grad_norm": 1.3303143978118896,
"learning_rate": 0.00004749848867023827,
"loss": 0.4111,
"step": 2900
},
{
"epoch": 0.06,
"grad_norm": 1.2054022550582886,
"learning_rate": 0.0000473942590314982,
"loss": 0.4155,
"step": 3000
},
{
"epoch": 0.06,
"grad_norm": 1.1086294651031494,
"learning_rate": 0.00004729002939275813,
"loss": 0.3912,
"step": 3100
},
{
"epoch": 0.07,
"grad_norm": 1.0423736572265625,
"learning_rate": 0.00004718579975401806,
"loss": 0.4174,
"step": 3200
},
{
"epoch": 0.07,
"grad_norm": 1.1115955114364624,
"learning_rate": 0.000047081570115277984,
"loss": 0.4293,
"step": 3300
},
{
"epoch": 0.07,
"grad_norm": 1.1089532375335693,
"learning_rate": 0.000046977340476537915,
"loss": 0.3957,
"step": 3400
},
{
"epoch": 0.07,
"grad_norm": 1.1033278703689575,
"learning_rate": 0.00004687311083779784,
"loss": 0.3913,
"step": 3500
},
{
"epoch": 0.07,
"grad_norm": 1.0068538188934326,
"learning_rate": 0.00004676888119905777,
"loss": 0.3931,
"step": 3600
},
{
"epoch": 0.08,
"grad_norm": 1.0825397968292236,
"learning_rate": 0.000046664651560317695,
"loss": 0.4043,
"step": 3700
},
{
"epoch": 0.08,
"grad_norm": 1.1124091148376465,
"learning_rate": 0.000046560421921577626,
"loss": 0.3892,
"step": 3800
},
{
"epoch": 0.08,
"grad_norm": 0.8287498354911804,
"learning_rate": 0.00004645619228283755,
"loss": 0.3902,
"step": 3900
},
{
"epoch": 0.08,
"grad_norm": 1.3230834007263184,
"learning_rate": 0.00004635196264409748,
"loss": 0.3946,
"step": 4000
},
{
"epoch": 0.08,
"grad_norm": 1.1308443546295166,
"learning_rate": 0.00004624773300535741,
"loss": 0.388,
"step": 4100
},
{
"epoch": 0.09,
"grad_norm": 1.016345739364624,
"learning_rate": 0.00004614350336661734,
"loss": 0.3885,
"step": 4200
},
{
"epoch": 0.09,
"grad_norm": 0.7567281723022461,
"learning_rate": 0.00004603927372787726,
"loss": 0.3733,
"step": 4300
},
{
"epoch": 0.09,
"grad_norm": 1.0191072225570679,
"learning_rate": 0.000045935044089137194,
"loss": 0.3755,
"step": 4400
},
{
"epoch": 0.09,
"grad_norm": 0.9752495288848877,
"learning_rate": 0.00004583081445039712,
"loss": 0.3891,
"step": 4500
},
{
"epoch": 0.09,
"grad_norm": 1.063784122467041,
"learning_rate": 0.00004572658481165705,
"loss": 0.3502,
"step": 4600
},
{
"epoch": 0.1,
"grad_norm": 1.4277328252792358,
"learning_rate": 0.000045622355172916974,
"loss": 0.3832,
"step": 4700
},
{
"epoch": 0.1,
"grad_norm": 1.051148533821106,
"learning_rate": 0.000045518125534176906,
"loss": 0.3844,
"step": 4800
},
{
"epoch": 0.1,
"grad_norm": 1.0719451904296875,
"learning_rate": 0.00004541389589543683,
"loss": 0.3591,
"step": 4900
},
{
"epoch": 0.1,
"grad_norm": 1.0701349973678589,
"learning_rate": 0.00004530966625669676,
"loss": 0.3549,
"step": 5000
},
{
"epoch": 0.11,
"grad_norm": 0.9545413255691528,
"learning_rate": 0.000045205436617956686,
"loss": 0.3791,
"step": 5100
},
{
"epoch": 0.11,
"grad_norm": 1.2077405452728271,
"learning_rate": 0.00004510120697921662,
"loss": 0.3794,
"step": 5200
},
{
"epoch": 0.11,
"grad_norm": 1.0325422286987305,
"learning_rate": 0.00004499697734047654,
"loss": 0.3709,
"step": 5300
},
{
"epoch": 0.11,
"grad_norm": 1.231856107711792,
"learning_rate": 0.00004489274770173647,
"loss": 0.375,
"step": 5400
},
{
"epoch": 0.11,
"grad_norm": 1.0496745109558105,
"learning_rate": 0.0000447885180629964,
"loss": 0.3633,
"step": 5500
},
{
"epoch": 0.12,
"grad_norm": 0.9745790362358093,
"learning_rate": 0.00004468428842425633,
"loss": 0.3541,
"step": 5600
},
{
"epoch": 0.12,
"grad_norm": 0.8768660426139832,
"learning_rate": 0.000044580058785516246,
"loss": 0.354,
"step": 5700
},
{
"epoch": 0.12,
"grad_norm": 1.2748613357543945,
"learning_rate": 0.00004447582914677618,
"loss": 0.3463,
"step": 5800
},
{
"epoch": 0.12,
"grad_norm": 1.6291444301605225,
"learning_rate": 0.0000443715995080361,
"loss": 0.3483,
"step": 5900
},
{
"epoch": 0.12,
"grad_norm": 1.1184431314468384,
"learning_rate": 0.000044267369869296034,
"loss": 0.3616,
"step": 6000
},
{
"epoch": 0.13,
"grad_norm": 1.042864441871643,
"learning_rate": 0.00004416314023055596,
"loss": 0.3659,
"step": 6100
},
{
"epoch": 0.13,
"grad_norm": 1.2588552236557007,
"learning_rate": 0.00004405891059181589,
"loss": 0.3685,
"step": 6200
},
{
"epoch": 0.13,
"grad_norm": 0.9592663645744324,
"learning_rate": 0.000043954680953075814,
"loss": 0.3346,
"step": 6300
},
{
"epoch": 0.13,
"grad_norm": 1.0394409894943237,
"learning_rate": 0.000043850451314335745,
"loss": 0.3543,
"step": 6400
},
{
"epoch": 0.13,
"grad_norm": 1.0655689239501953,
"learning_rate": 0.00004374622167559567,
"loss": 0.3608,
"step": 6500
},
{
"epoch": 0.14,
"grad_norm": 1.0429097414016724,
"learning_rate": 0.0000436419920368556,
"loss": 0.3752,
"step": 6600
},
{
"epoch": 0.14,
"grad_norm": 1.105862021446228,
"learning_rate": 0.000043537762398115525,
"loss": 0.3656,
"step": 6700
},
{
"epoch": 0.14,
"grad_norm": 1.396264672279358,
"learning_rate": 0.00004343353275937546,
"loss": 0.3649,
"step": 6800
},
{
"epoch": 0.14,
"grad_norm": 1.1206328868865967,
"learning_rate": 0.00004332930312063538,
"loss": 0.3658,
"step": 6900
},
{
"epoch": 0.14,
"grad_norm": 1.1256790161132812,
"learning_rate": 0.00004322507348189531,
"loss": 0.3604,
"step": 7000
},
{
"epoch": 0.15,
"grad_norm": 1.0484185218811035,
"learning_rate": 0.00004312084384315524,
"loss": 0.3538,
"step": 7100
},
{
"epoch": 0.15,
"grad_norm": 1.1310747861862183,
"learning_rate": 0.00004301661420441517,
"loss": 0.3454,
"step": 7200
},
{
"epoch": 0.15,
"grad_norm": 1.3282368183135986,
"learning_rate": 0.00004291238456567509,
"loss": 0.3531,
"step": 7300
},
{
"epoch": 0.15,
"grad_norm": 1.1081411838531494,
"learning_rate": 0.000042808154926935024,
"loss": 0.3572,
"step": 7400
},
{
"epoch": 0.15,
"grad_norm": 1.0838216543197632,
"learning_rate": 0.00004270392528819495,
"loss": 0.3609,
"step": 7500
},
{
"epoch": 0.16,
"grad_norm": 1.0632649660110474,
"learning_rate": 0.00004259969564945488,
"loss": 0.3555,
"step": 7600
},
{
"epoch": 0.16,
"grad_norm": 1.2421514987945557,
"learning_rate": 0.000042495466010714804,
"loss": 0.3504,
"step": 7700
},
{
"epoch": 0.16,
"grad_norm": 1.0846999883651733,
"learning_rate": 0.000042391236371974736,
"loss": 0.3697,
"step": 7800
},
{
"epoch": 0.16,
"grad_norm": 1.0196987390518188,
"learning_rate": 0.00004228700673323466,
"loss": 0.331,
"step": 7900
},
{
"epoch": 0.17,
"grad_norm": 0.896594226360321,
"learning_rate": 0.00004218277709449459,
"loss": 0.3501,
"step": 8000
},
{
"epoch": 0.17,
"grad_norm": 1.0059070587158203,
"learning_rate": 0.000042078547455754516,
"loss": 0.3414,
"step": 8100
},
{
"epoch": 0.17,
"grad_norm": 0.9597504734992981,
"learning_rate": 0.00004197431781701445,
"loss": 0.3405,
"step": 8200
},
{
"epoch": 0.17,
"grad_norm": 0.9826390743255615,
"learning_rate": 0.00004187008817827437,
"loss": 0.3444,
"step": 8300
},
{
"epoch": 0.17,
"grad_norm": 1.1618746519088745,
"learning_rate": 0.0000417658585395343,
"loss": 0.3428,
"step": 8400
},
{
"epoch": 0.18,
"grad_norm": 1.3454729318618774,
"learning_rate": 0.00004166162890079423,
"loss": 0.3546,
"step": 8500
},
{
"epoch": 0.18,
"grad_norm": 1.0757421255111694,
"learning_rate": 0.00004155739926205416,
"loss": 0.3409,
"step": 8600
},
{
"epoch": 0.18,
"grad_norm": 1.1405227184295654,
"learning_rate": 0.000041453169623314083,
"loss": 0.367,
"step": 8700
},
{
"epoch": 0.18,
"grad_norm": 1.336688756942749,
"learning_rate": 0.000041348939984574015,
"loss": 0.353,
"step": 8800
},
{
"epoch": 0.18,
"grad_norm": 1.0525643825531006,
"learning_rate": 0.00004124471034583394,
"loss": 0.3394,
"step": 8900
},
{
"epoch": 0.19,
"grad_norm": 0.9103542566299438,
"learning_rate": 0.00004114048070709387,
"loss": 0.3542,
"step": 9000
},
{
"epoch": 0.19,
"grad_norm": 1.1108696460723877,
"learning_rate": 0.000041036251068353795,
"loss": 0.3468,
"step": 9100
},
{
"epoch": 0.19,
"grad_norm": 0.9561033248901367,
"learning_rate": 0.000040932021429613726,
"loss": 0.3234,
"step": 9200
},
{
"epoch": 0.19,
"grad_norm": 1.0222822427749634,
"learning_rate": 0.00004082779179087365,
"loss": 0.3557,
"step": 9300
},
{
"epoch": 0.19,
"grad_norm": 0.7020508050918579,
"learning_rate": 0.00004072356215213358,
"loss": 0.3387,
"step": 9400
},
{
"epoch": 0.2,
"grad_norm": 1.1984186172485352,
"learning_rate": 0.00004061933251339351,
"loss": 0.3396,
"step": 9500
},
{
"epoch": 0.2,
"grad_norm": 1.0309916734695435,
"learning_rate": 0.00004051510287465344,
"loss": 0.3361,
"step": 9600
},
{
"epoch": 0.2,
"grad_norm": 0.9544962048530579,
"learning_rate": 0.00004041087323591336,
"loss": 0.3198,
"step": 9700
},
{
"epoch": 0.2,
"grad_norm": 0.924387514591217,
"learning_rate": 0.000040306643597173294,
"loss": 0.346,
"step": 9800
},
{
"epoch": 0.2,
"grad_norm": 0.9332679510116577,
"learning_rate": 0.00004020241395843322,
"loss": 0.3583,
"step": 9900
},
{
"epoch": 0.21,
"grad_norm": 0.8733212351799011,
"learning_rate": 0.00004009818431969315,
"loss": 0.3258,
"step": 10000
},
{
"epoch": 0.21,
"grad_norm": 1.1735975742340088,
"learning_rate": 0.000039993954680953074,
"loss": 0.3319,
"step": 10100
},
{
"epoch": 0.21,
"grad_norm": 1.1871587038040161,
"learning_rate": 0.000039889725042213005,
"loss": 0.3323,
"step": 10200
},
{
"epoch": 0.21,
"grad_norm": 1.0501062870025635,
"learning_rate": 0.00003978549540347294,
"loss": 0.3501,
"step": 10300
},
{
"epoch": 0.21,
"grad_norm": 1.6133193969726562,
"learning_rate": 0.00003968126576473286,
"loss": 0.3353,
"step": 10400
},
{
"epoch": 0.22,
"grad_norm": 1.0860315561294556,
"learning_rate": 0.00003957703612599279,
"loss": 0.3247,
"step": 10500
},
{
"epoch": 0.22,
"grad_norm": 0.9817527532577515,
"learning_rate": 0.00003947280648725272,
"loss": 0.3486,
"step": 10600
},
{
"epoch": 0.22,
"grad_norm": 1.185621738433838,
"learning_rate": 0.00003936857684851265,
"loss": 0.3344,
"step": 10700
},
{
"epoch": 0.22,
"grad_norm": 1.211653709411621,
"learning_rate": 0.00003926434720977257,
"loss": 0.3308,
"step": 10800
},
{
"epoch": 0.22,
"grad_norm": 1.1859573125839233,
"learning_rate": 0.000039160117571032504,
"loss": 0.3462,
"step": 10900
},
{
"epoch": 0.23,
"grad_norm": 0.9363047480583191,
"learning_rate": 0.00003905588793229243,
"loss": 0.3331,
"step": 11000
},
{
"epoch": 0.23,
"grad_norm": 0.8858780860900879,
"learning_rate": 0.00003895165829355236,
"loss": 0.3501,
"step": 11100
},
{
"epoch": 0.23,
"grad_norm": 1.187888264656067,
"learning_rate": 0.000038847428654812284,
"loss": 0.3202,
"step": 11200
},
{
"epoch": 0.23,
"grad_norm": 1.33687424659729,
"learning_rate": 0.000038743199016072216,
"loss": 0.3488,
"step": 11300
},
{
"epoch": 0.24,
"grad_norm": 1.230508804321289,
"learning_rate": 0.00003863896937733214,
"loss": 0.322,
"step": 11400
},
{
"epoch": 0.24,
"grad_norm": 1.2062541246414185,
"learning_rate": 0.00003853473973859207,
"loss": 0.3446,
"step": 11500
},
{
"epoch": 0.24,
"grad_norm": 0.9526596069335938,
"learning_rate": 0.000038430510099851996,
"loss": 0.3167,
"step": 11600
},
{
"epoch": 0.24,
"grad_norm": 0.8020007014274597,
"learning_rate": 0.00003832628046111193,
"loss": 0.3289,
"step": 11700
},
{
"epoch": 0.24,
"grad_norm": 0.8741321563720703,
"learning_rate": 0.00003822205082237185,
"loss": 0.3307,
"step": 11800
},
{
"epoch": 0.25,
"grad_norm": 0.8475280404090881,
"learning_rate": 0.00003811782118363178,
"loss": 0.3116,
"step": 11900
},
{
"epoch": 0.25,
"grad_norm": 0.8570387959480286,
"learning_rate": 0.00003801359154489171,
"loss": 0.3345,
"step": 12000
},
{
"epoch": 0.25,
"grad_norm": 1.237730860710144,
"learning_rate": 0.00003790936190615164,
"loss": 0.3457,
"step": 12100
},
{
"epoch": 0.25,
"grad_norm": 1.1261372566223145,
"learning_rate": 0.00003780513226741156,
"loss": 0.3261,
"step": 12200
},
{
"epoch": 0.25,
"grad_norm": 0.7940207719802856,
"learning_rate": 0.000037700902628671495,
"loss": 0.3272,
"step": 12300
},
{
"epoch": 0.26,
"grad_norm": 1.0150914192199707,
"learning_rate": 0.00003759667298993142,
"loss": 0.3338,
"step": 12400
},
{
"epoch": 0.26,
"grad_norm": 0.8289426565170288,
"learning_rate": 0.00003749244335119135,
"loss": 0.3485,
"step": 12500
},
{
"epoch": 0.26,
"grad_norm": 1.11201012134552,
"learning_rate": 0.000037388213712451275,
"loss": 0.321,
"step": 12600
},
{
"epoch": 0.26,
"grad_norm": 0.7759777903556824,
"learning_rate": 0.000037283984073711206,
"loss": 0.336,
"step": 12700
},
{
"epoch": 0.26,
"grad_norm": 1.2184884548187256,
"learning_rate": 0.00003717975443497113,
"loss": 0.3029,
"step": 12800
},
{
"epoch": 0.27,
"grad_norm": 0.7660321593284607,
"learning_rate": 0.00003707552479623106,
"loss": 0.336,
"step": 12900
},
{
"epoch": 0.27,
"grad_norm": 0.8834217190742493,
"learning_rate": 0.000036971295157490987,
"loss": 0.3184,
"step": 13000
},
{
"epoch": 0.27,
"grad_norm": 1.1337840557098389,
"learning_rate": 0.00003686706551875092,
"loss": 0.3216,
"step": 13100
},
{
"epoch": 0.27,
"grad_norm": 1.0339350700378418,
"learning_rate": 0.00003676283588001084,
"loss": 0.3162,
"step": 13200
},
{
"epoch": 0.27,
"grad_norm": 1.222301721572876,
"learning_rate": 0.000036658606241270774,
"loss": 0.3344,
"step": 13300
},
{
"epoch": 0.28,
"grad_norm": 1.290209412574768,
"learning_rate": 0.0000365543766025307,
"loss": 0.3212,
"step": 13400
},
{
"epoch": 0.28,
"grad_norm": 1.372733235359192,
"learning_rate": 0.00003645014696379063,
"loss": 0.3375,
"step": 13500
},
{
"epoch": 0.28,
"grad_norm": 0.7824010252952576,
"learning_rate": 0.000036345917325050554,
"loss": 0.3393,
"step": 13600
},
{
"epoch": 0.28,
"grad_norm": 1.637102484703064,
"learning_rate": 0.000036241687686310485,
"loss": 0.3318,
"step": 13700
},
{
"epoch": 0.28,
"grad_norm": 0.9548413753509521,
"learning_rate": 0.00003613745804757041,
"loss": 0.3041,
"step": 13800
},
{
"epoch": 0.29,
"grad_norm": 1.3793456554412842,
"learning_rate": 0.00003603322840883034,
"loss": 0.3208,
"step": 13900
},
{
"epoch": 0.29,
"grad_norm": 1.323983073234558,
"learning_rate": 0.000035928998770090266,
"loss": 0.3077,
"step": 14000
},
{
"epoch": 0.29,
"grad_norm": 0.8947316408157349,
"learning_rate": 0.0000358247691313502,
"loss": 0.3081,
"step": 14100
},
{
"epoch": 0.29,
"grad_norm": 1.2341340780258179,
"learning_rate": 0.00003572053949261012,
"loss": 0.3243,
"step": 14200
},
{
"epoch": 0.3,
"grad_norm": 1.1560657024383545,
"learning_rate": 0.00003561630985387005,
"loss": 0.3045,
"step": 14300
},
{
"epoch": 0.3,
"grad_norm": 1.0063599348068237,
"learning_rate": 0.00003551208021512998,
"loss": 0.2984,
"step": 14400
},
{
"epoch": 0.3,
"grad_norm": 0.9587939977645874,
"learning_rate": 0.00003540785057638991,
"loss": 0.3215,
"step": 14500
},
{
"epoch": 0.3,
"grad_norm": 0.9009751081466675,
"learning_rate": 0.00003530362093764983,
"loss": 0.3257,
"step": 14600
},
{
"epoch": 0.3,
"grad_norm": 1.1289176940917969,
"learning_rate": 0.000035199391298909764,
"loss": 0.3151,
"step": 14700
},
{
"epoch": 0.31,
"grad_norm": 1.0544753074645996,
"learning_rate": 0.00003509516166016969,
"loss": 0.3123,
"step": 14800
},
{
"epoch": 0.31,
"grad_norm": 1.6340482234954834,
"learning_rate": 0.00003499093202142962,
"loss": 0.3238,
"step": 14900
},
{
"epoch": 0.31,
"grad_norm": 0.8671633005142212,
"learning_rate": 0.000034886702382689545,
"loss": 0.3174,
"step": 15000
},
{
"epoch": 0.31,
"grad_norm": 0.8871846199035645,
"learning_rate": 0.000034782472743949476,
"loss": 0.3045,
"step": 15100
},
{
"epoch": 0.31,
"grad_norm": 0.9014525413513184,
"learning_rate": 0.0000346782431052094,
"loss": 0.3335,
"step": 15200
},
{
"epoch": 0.32,
"grad_norm": 0.819088876247406,
"learning_rate": 0.00003457401346646933,
"loss": 0.3222,
"step": 15300
},
{
"epoch": 0.32,
"grad_norm": 1.1166553497314453,
"learning_rate": 0.000034469783827729256,
"loss": 0.3303,
"step": 15400
},
{
"epoch": 0.32,
"grad_norm": 1.0952930450439453,
"learning_rate": 0.00003436555418898919,
"loss": 0.3095,
"step": 15500
},
{
"epoch": 0.32,
"grad_norm": 1.186812400817871,
"learning_rate": 0.00003426132455024911,
"loss": 0.3146,
"step": 15600
},
{
"epoch": 0.32,
"grad_norm": 0.9790433049201965,
"learning_rate": 0.00003415709491150904,
"loss": 0.321,
"step": 15700
},
{
"epoch": 0.33,
"grad_norm": 0.9488071203231812,
"learning_rate": 0.00003405286527276897,
"loss": 0.321,
"step": 15800
},
{
"epoch": 0.33,
"grad_norm": 0.9488551616668701,
"learning_rate": 0.00003394863563402889,
"loss": 0.2923,
"step": 15900
},
{
"epoch": 0.33,
"grad_norm": 0.9801424145698547,
"learning_rate": 0.00003384440599528882,
"loss": 0.3131,
"step": 16000
},
{
"epoch": 0.33,
"grad_norm": 1.5585353374481201,
"learning_rate": 0.00003374017635654875,
"loss": 0.3274,
"step": 16100
},
{
"epoch": 0.33,
"grad_norm": 1.0194026231765747,
"learning_rate": 0.00003363594671780867,
"loss": 0.3294,
"step": 16200
},
{
"epoch": 0.34,
"grad_norm": 0.89570552110672,
"learning_rate": 0.000033531717079068604,
"loss": 0.3041,
"step": 16300
},
{
"epoch": 0.34,
"grad_norm": 1.361720085144043,
"learning_rate": 0.00003342748744032853,
"loss": 0.3001,
"step": 16400
},
{
"epoch": 0.34,
"grad_norm": 0.6049641370773315,
"learning_rate": 0.00003332325780158846,
"loss": 0.2993,
"step": 16500
},
{
"epoch": 0.34,
"grad_norm": 1.1714413166046143,
"learning_rate": 0.000033219028162848384,
"loss": 0.3252,
"step": 16600
},
{
"epoch": 0.34,
"grad_norm": 0.6133936643600464,
"learning_rate": 0.000033114798524108315,
"loss": 0.3162,
"step": 16700
},
{
"epoch": 0.35,
"grad_norm": 0.9461352229118347,
"learning_rate": 0.00003301056888536824,
"loss": 0.3,
"step": 16800
},
{
"epoch": 0.35,
"grad_norm": 1.1674548387527466,
"learning_rate": 0.00003290633924662817,
"loss": 0.3053,
"step": 16900
},
{
"epoch": 0.35,
"grad_norm": 0.9445935487747192,
"learning_rate": 0.000032802109607888096,
"loss": 0.319,
"step": 17000
},
{
"epoch": 0.35,
"grad_norm": 0.7129838466644287,
"learning_rate": 0.00003269787996914803,
"loss": 0.3095,
"step": 17100
},
{
"epoch": 0.35,
"grad_norm": 0.9326237440109253,
"learning_rate": 0.00003259365033040795,
"loss": 0.3174,
"step": 17200
},
{
"epoch": 0.36,
"grad_norm": 0.8677362203598022,
"learning_rate": 0.00003248942069166788,
"loss": 0.3056,
"step": 17300
},
{
"epoch": 0.36,
"grad_norm": 0.7561855316162109,
"learning_rate": 0.00003238519105292781,
"loss": 0.2939,
"step": 17400
},
{
"epoch": 0.36,
"grad_norm": 1.0813933610916138,
"learning_rate": 0.00003228096141418774,
"loss": 0.296,
"step": 17500
},
{
"epoch": 0.36,
"grad_norm": 1.1002167463302612,
"learning_rate": 0.00003217673177544766,
"loss": 0.3142,
"step": 17600
},
{
"epoch": 0.37,
"grad_norm": 0.709709882736206,
"learning_rate": 0.000032072502136707594,
"loss": 0.2931,
"step": 17700
},
{
"epoch": 0.37,
"grad_norm": 0.699057936668396,
"learning_rate": 0.00003196827249796752,
"loss": 0.3009,
"step": 17800
},
{
"epoch": 0.37,
"grad_norm": 1.4942712783813477,
"learning_rate": 0.00003186404285922745,
"loss": 0.3203,
"step": 17900
},
{
"epoch": 0.37,
"grad_norm": 1.069948434829712,
"learning_rate": 0.000031759813220487375,
"loss": 0.3118,
"step": 18000
},
{
"epoch": 0.37,
"grad_norm": 0.9980233311653137,
"learning_rate": 0.000031655583581747306,
"loss": 0.2937,
"step": 18100
},
{
"epoch": 0.38,
"grad_norm": 0.743611752986908,
"learning_rate": 0.00003155135394300723,
"loss": 0.3064,
"step": 18200
},
{
"epoch": 0.38,
"grad_norm": 0.8888816237449646,
"learning_rate": 0.00003144712430426716,
"loss": 0.309,
"step": 18300
},
{
"epoch": 0.38,
"grad_norm": 0.9147290587425232,
"learning_rate": 0.000031342894665527086,
"loss": 0.3104,
"step": 18400
},
{
"epoch": 0.38,
"grad_norm": 1.191470742225647,
"learning_rate": 0.00003123866502678702,
"loss": 0.307,
"step": 18500
},
{
"epoch": 0.38,
"grad_norm": 0.8025168776512146,
"learning_rate": 0.00003113443538804694,
"loss": 0.2981,
"step": 18600
},
{
"epoch": 0.39,
"grad_norm": 1.0088508129119873,
"learning_rate": 0.000031030205749306873,
"loss": 0.2946,
"step": 18700
},
{
"epoch": 0.39,
"grad_norm": 0.9163653254508972,
"learning_rate": 0.0000309259761105668,
"loss": 0.314,
"step": 18800
},
{
"epoch": 0.39,
"grad_norm": 0.7615305185317993,
"learning_rate": 0.00003082174647182673,
"loss": 0.2987,
"step": 18900
},
{
"epoch": 0.39,
"grad_norm": 1.1720337867736816,
"learning_rate": 0.000030717516833086654,
"loss": 0.284,
"step": 19000
},
{
"epoch": 0.39,
"grad_norm": 0.9668058156967163,
"learning_rate": 0.000030613287194346585,
"loss": 0.2961,
"step": 19100
},
{
"epoch": 0.4,
"grad_norm": 1.0331960916519165,
"learning_rate": 0.000030509057555606513,
"loss": 0.3126,
"step": 19200
},
{
"epoch": 0.4,
"grad_norm": 1.011672854423523,
"learning_rate": 0.00003040482791686644,
"loss": 0.2909,
"step": 19300
},
{
"epoch": 0.4,
"grad_norm": 0.7572203874588013,
"learning_rate": 0.00003030059827812637,
"loss": 0.3003,
"step": 19400
},
{
"epoch": 0.4,
"grad_norm": 1.2314035892486572,
"learning_rate": 0.000030196368639386297,
"loss": 0.2967,
"step": 19500
},
{
"epoch": 0.4,
"grad_norm": 0.7441611886024475,
"learning_rate": 0.000030092139000646225,
"loss": 0.3081,
"step": 19600
},
{
"epoch": 0.41,
"grad_norm": 1.1014299392700195,
"learning_rate": 0.000029987909361906152,
"loss": 0.3076,
"step": 19700
},
{
"epoch": 0.41,
"grad_norm": 0.8321337699890137,
"learning_rate": 0.00002988367972316608,
"loss": 0.3,
"step": 19800
},
{
"epoch": 0.41,
"grad_norm": 1.0691934823989868,
"learning_rate": 0.000029779450084426008,
"loss": 0.3113,
"step": 19900
},
{
"epoch": 0.41,
"grad_norm": 0.8844496607780457,
"learning_rate": 0.000029675220445685936,
"loss": 0.3135,
"step": 20000
},
{
"epoch": 0.41,
"grad_norm": 0.9948704242706299,
"learning_rate": 0.000029570990806945864,
"loss": 0.2905,
"step": 20100
},
{
"epoch": 0.42,
"grad_norm": 0.7587433457374573,
"learning_rate": 0.000029466761168205792,
"loss": 0.3187,
"step": 20200
},
{
"epoch": 0.42,
"grad_norm": 0.8162456750869751,
"learning_rate": 0.00002936253152946572,
"loss": 0.3101,
"step": 20300
},
{
"epoch": 0.42,
"grad_norm": 0.5907912850379944,
"learning_rate": 0.000029258301890725648,
"loss": 0.2926,
"step": 20400
},
{
"epoch": 0.42,
"grad_norm": 0.9535139799118042,
"learning_rate": 0.000029154072251985576,
"loss": 0.3237,
"step": 20500
},
{
"epoch": 0.42,
"grad_norm": 0.9831104874610901,
"learning_rate": 0.000029049842613245504,
"loss": 0.2922,
"step": 20600
},
{
"epoch": 0.43,
"grad_norm": 0.8171008229255676,
"learning_rate": 0.00002894561297450543,
"loss": 0.3127,
"step": 20700
},
{
"epoch": 0.43,
"grad_norm": 0.9013519287109375,
"learning_rate": 0.00002884138333576536,
"loss": 0.2974,
"step": 20800
},
{
"epoch": 0.43,
"grad_norm": 1.0881197452545166,
"learning_rate": 0.000028737153697025287,
"loss": 0.3033,
"step": 20900
},
{
"epoch": 0.43,
"grad_norm": 1.0165661573410034,
"learning_rate": 0.000028632924058285215,
"loss": 0.2969,
"step": 21000
},
{
"epoch": 0.44,
"grad_norm": 0.9244837760925293,
"learning_rate": 0.000028528694419545143,
"loss": 0.3074,
"step": 21100
},
{
"epoch": 0.44,
"grad_norm": 0.8180138468742371,
"learning_rate": 0.00002842446478080507,
"loss": 0.3022,
"step": 21200
},
{
"epoch": 0.44,
"grad_norm": 0.8913204073905945,
"learning_rate": 0.000028320235142065,
"loss": 0.3041,
"step": 21300
},
{
"epoch": 0.44,
"grad_norm": 0.944990336894989,
"learning_rate": 0.000028216005503324927,
"loss": 0.3034,
"step": 21400
},
{
"epoch": 0.44,
"grad_norm": 0.6920465230941772,
"learning_rate": 0.000028111775864584855,
"loss": 0.287,
"step": 21500
},
{
"epoch": 0.45,
"grad_norm": 0.9530170559883118,
"learning_rate": 0.000028007546225844783,
"loss": 0.2924,
"step": 21600
},
{
"epoch": 0.45,
"grad_norm": 1.0395070314407349,
"learning_rate": 0.00002790331658710471,
"loss": 0.2976,
"step": 21700
},
{
"epoch": 0.45,
"grad_norm": 1.0013209581375122,
"learning_rate": 0.00002779908694836464,
"loss": 0.2765,
"step": 21800
},
{
"epoch": 0.45,
"grad_norm": 0.6239937543869019,
"learning_rate": 0.000027694857309624566,
"loss": 0.3001,
"step": 21900
},
{
"epoch": 0.45,
"grad_norm": 0.6583734154701233,
"learning_rate": 0.000027590627670884494,
"loss": 0.2984,
"step": 22000
},
{
"epoch": 0.46,
"grad_norm": 0.9175625443458557,
"learning_rate": 0.000027486398032144422,
"loss": 0.305,
"step": 22100
},
{
"epoch": 0.46,
"grad_norm": 1.192954182624817,
"learning_rate": 0.00002738216839340435,
"loss": 0.2913,
"step": 22200
},
{
"epoch": 0.46,
"grad_norm": 0.9170815944671631,
"learning_rate": 0.000027277938754664278,
"loss": 0.2886,
"step": 22300
},
{
"epoch": 0.46,
"grad_norm": 0.7114281058311462,
"learning_rate": 0.000027173709115924206,
"loss": 0.2872,
"step": 22400
},
{
"epoch": 0.46,
"grad_norm": 1.1340280771255493,
"learning_rate": 0.000027069479477184134,
"loss": 0.2927,
"step": 22500
},
{
"epoch": 0.47,
"grad_norm": 1.0684434175491333,
"learning_rate": 0.00002696524983844406,
"loss": 0.2762,
"step": 22600
},
{
"epoch": 0.47,
"grad_norm": 0.8208048343658447,
"learning_rate": 0.00002686102019970399,
"loss": 0.2933,
"step": 22700
},
{
"epoch": 0.47,
"grad_norm": 0.9516797065734863,
"learning_rate": 0.000026756790560963917,
"loss": 0.2809,
"step": 22800
},
{
"epoch": 0.47,
"grad_norm": 0.7305499315261841,
"learning_rate": 0.000026652560922223845,
"loss": 0.2968,
"step": 22900
},
{
"epoch": 0.47,
"grad_norm": 0.7523550391197205,
"learning_rate": 0.000026548331283483773,
"loss": 0.289,
"step": 23000
},
{
"epoch": 0.48,
"grad_norm": 1.0975223779678345,
"learning_rate": 0.0000264441016447437,
"loss": 0.2961,
"step": 23100
},
{
"epoch": 0.48,
"grad_norm": 0.906672477722168,
"learning_rate": 0.00002633987200600363,
"loss": 0.2904,
"step": 23200
},
{
"epoch": 0.48,
"grad_norm": 0.8232882022857666,
"learning_rate": 0.000026235642367263557,
"loss": 0.2876,
"step": 23300
},
{
"epoch": 0.48,
"grad_norm": 1.0251737833023071,
"learning_rate": 0.000026131412728523485,
"loss": 0.2842,
"step": 23400
},
{
"epoch": 0.48,
"grad_norm": 1.1119544506072998,
"learning_rate": 0.000026027183089783413,
"loss": 0.2778,
"step": 23500
},
{
"epoch": 0.49,
"grad_norm": 1.1404234170913696,
"learning_rate": 0.00002592295345104334,
"loss": 0.2865,
"step": 23600
},
{
"epoch": 0.49,
"grad_norm": 1.0083096027374268,
"learning_rate": 0.00002581872381230327,
"loss": 0.2923,
"step": 23700
},
{
"epoch": 0.49,
"grad_norm": 1.1562743186950684,
"learning_rate": 0.000025714494173563196,
"loss": 0.3009,
"step": 23800
},
{
"epoch": 0.49,
"grad_norm": 0.7771180272102356,
"learning_rate": 0.000025610264534823124,
"loss": 0.2938,
"step": 23900
},
{
"epoch": 0.5,
"grad_norm": 0.8902438282966614,
"learning_rate": 0.000025506034896083052,
"loss": 0.3102,
"step": 24000
},
{
"epoch": 0.5,
"grad_norm": 0.9011629819869995,
"learning_rate": 0.00002540180525734298,
"loss": 0.2885,
"step": 24100
},
{
"epoch": 0.5,
"grad_norm": 0.8738746643066406,
"learning_rate": 0.000025297575618602908,
"loss": 0.2739,
"step": 24200
},
{
"epoch": 0.5,
"grad_norm": 0.5818448662757874,
"learning_rate": 0.000025193345979862836,
"loss": 0.281,
"step": 24300
},
{
"epoch": 0.5,
"grad_norm": 0.927828311920166,
"learning_rate": 0.000025089116341122764,
"loss": 0.3039,
"step": 24400
},
{
"epoch": 0.51,
"grad_norm": 0.8303672671318054,
"learning_rate": 0.00002498488670238269,
"loss": 0.2921,
"step": 24500
},
{
"epoch": 0.51,
"grad_norm": 1.7229905128479004,
"learning_rate": 0.000024880657063642616,
"loss": 0.3003,
"step": 24600
},
{
"epoch": 0.51,
"grad_norm": 0.898224949836731,
"learning_rate": 0.000024776427424902544,
"loss": 0.3034,
"step": 24700
},
{
"epoch": 0.51,
"grad_norm": 0.7359880805015564,
"learning_rate": 0.000024672197786162472,
"loss": 0.2918,
"step": 24800
},
{
"epoch": 0.51,
"grad_norm": 0.9466255903244019,
"learning_rate": 0.0000245679681474224,
"loss": 0.2982,
"step": 24900
},
{
"epoch": 0.52,
"grad_norm": 0.9300728440284729,
"learning_rate": 0.00002446373850868233,
"loss": 0.3049,
"step": 25000
},
{
"epoch": 0.52,
"grad_norm": 1.2316802740097046,
"learning_rate": 0.00002435950886994226,
"loss": 0.3088,
"step": 25100
},
{
"epoch": 0.52,
"grad_norm": 1.1127874851226807,
"learning_rate": 0.000024255279231202187,
"loss": 0.2844,
"step": 25200
},
{
"epoch": 0.52,
"grad_norm": 1.0291988849639893,
"learning_rate": 0.000024151049592462115,
"loss": 0.2844,
"step": 25300
},
{
"epoch": 0.52,
"grad_norm": 1.0049411058425903,
"learning_rate": 0.000024046819953722043,
"loss": 0.2902,
"step": 25400
},
{
"epoch": 0.53,
"grad_norm": 7.540700912475586,
"learning_rate": 0.00002394259031498197,
"loss": 0.2966,
"step": 25500
},
{
"epoch": 0.53,
"grad_norm": 0.7970338463783264,
"learning_rate": 0.0000238383606762419,
"loss": 0.2714,
"step": 25600
},
{
"epoch": 0.53,
"grad_norm": 0.7852484583854675,
"learning_rate": 0.000023734131037501827,
"loss": 0.3103,
"step": 25700
},
{
"epoch": 0.53,
"grad_norm": 1.1321452856063843,
"learning_rate": 0.000023629901398761754,
"loss": 0.2939,
"step": 25800
},
{
"epoch": 0.53,
"grad_norm": 0.8240513801574707,
"learning_rate": 0.000023525671760021682,
"loss": 0.2933,
"step": 25900
},
{
"epoch": 0.54,
"grad_norm": 0.8263924717903137,
"learning_rate": 0.00002342144212128161,
"loss": 0.2764,
"step": 26000
},
{
"epoch": 0.54,
"grad_norm": 0.48156994581222534,
"learning_rate": 0.000023317212482541538,
"loss": 0.2785,
"step": 26100
},
{
"epoch": 0.54,
"grad_norm": 0.7034128904342651,
"learning_rate": 0.000023212982843801466,
"loss": 0.2884,
"step": 26200
},
{
"epoch": 0.54,
"grad_norm": 0.7832284569740295,
"learning_rate": 0.000023108753205061394,
"loss": 0.2933,
"step": 26300
},
{
"epoch": 0.54,
"grad_norm": 1.0077266693115234,
"learning_rate": 0.000023004523566321322,
"loss": 0.2929,
"step": 26400
},
{
"epoch": 0.55,
"grad_norm": 0.7659132480621338,
"learning_rate": 0.00002290029392758125,
"loss": 0.277,
"step": 26500
},
{
"epoch": 0.55,
"grad_norm": 0.7871528267860413,
"learning_rate": 0.000022796064288841178,
"loss": 0.2995,
"step": 26600
},
{
"epoch": 0.55,
"grad_norm": 0.9168059825897217,
"learning_rate": 0.000022691834650101106,
"loss": 0.3038,
"step": 26700
},
{
"epoch": 0.55,
"grad_norm": 0.6936691403388977,
"learning_rate": 0.000022587605011361033,
"loss": 0.2838,
"step": 26800
},
{
"epoch": 0.55,
"grad_norm": 0.9125056266784668,
"learning_rate": 0.00002248337537262096,
"loss": 0.2832,
"step": 26900
},
{
"epoch": 0.56,
"grad_norm": 0.8433107733726501,
"learning_rate": 0.00002237914573388089,
"loss": 0.2963,
"step": 27000
},
{
"epoch": 0.56,
"grad_norm": 0.6140872836112976,
"learning_rate": 0.000022274916095140817,
"loss": 0.256,
"step": 27100
},
{
"epoch": 0.56,
"grad_norm": 0.9464013576507568,
"learning_rate": 0.000022170686456400745,
"loss": 0.279,
"step": 27200
},
{
"epoch": 0.56,
"grad_norm": 0.6698806881904602,
"learning_rate": 0.000022066456817660673,
"loss": 0.2664,
"step": 27300
},
{
"epoch": 0.57,
"grad_norm": 1.0536932945251465,
"learning_rate": 0.0000219622271789206,
"loss": 0.2768,
"step": 27400
},
{
"epoch": 0.57,
"grad_norm": 0.9017496705055237,
"learning_rate": 0.00002185799754018053,
"loss": 0.2723,
"step": 27500
},
{
"epoch": 0.57,
"grad_norm": 0.9188879728317261,
"learning_rate": 0.000021753767901440457,
"loss": 0.2925,
"step": 27600
},
{
"epoch": 0.57,
"grad_norm": 0.716852068901062,
"learning_rate": 0.000021649538262700385,
"loss": 0.2789,
"step": 27700
},
{
"epoch": 0.57,
"grad_norm": 0.8829745650291443,
"learning_rate": 0.000021545308623960312,
"loss": 0.2613,
"step": 27800
},
{
"epoch": 0.58,
"grad_norm": 0.8111447095870972,
"learning_rate": 0.00002144107898522024,
"loss": 0.2847,
"step": 27900
},
{
"epoch": 0.58,
"grad_norm": 1.069851279258728,
"learning_rate": 0.000021336849346480168,
"loss": 0.2865,
"step": 28000
},
{
"epoch": 0.58,
"grad_norm": 1.1249984502792358,
"learning_rate": 0.000021232619707740096,
"loss": 0.285,
"step": 28100
},
{
"epoch": 0.58,
"grad_norm": 0.8425958156585693,
"learning_rate": 0.000021128390069000024,
"loss": 0.2822,
"step": 28200
},
{
"epoch": 0.58,
"grad_norm": 0.8334409594535828,
"learning_rate": 0.000021024160430259952,
"loss": 0.2915,
"step": 28300
},
{
"epoch": 0.59,
"grad_norm": 0.8733804225921631,
"learning_rate": 0.00002091993079151988,
"loss": 0.2784,
"step": 28400
},
{
"epoch": 0.59,
"grad_norm": 1.1405348777770996,
"learning_rate": 0.000020815701152779808,
"loss": 0.2632,
"step": 28500
},
{
"epoch": 0.59,
"grad_norm": 0.930985689163208,
"learning_rate": 0.000020711471514039732,
"loss": 0.2804,
"step": 28600
},
{
"epoch": 0.59,
"grad_norm": 0.7500423789024353,
"learning_rate": 0.00002060724187529966,
"loss": 0.271,
"step": 28700
},
{
"epoch": 0.59,
"grad_norm": 0.830214262008667,
"learning_rate": 0.000020503012236559588,
"loss": 0.2802,
"step": 28800
},
{
"epoch": 0.6,
"grad_norm": 1.1190361976623535,
"learning_rate": 0.000020398782597819516,
"loss": 0.2978,
"step": 28900
},
{
"epoch": 0.6,
"grad_norm": 0.938105583190918,
"learning_rate": 0.000020294552959079444,
"loss": 0.273,
"step": 29000
},
{
"epoch": 0.6,
"grad_norm": 1.2504827976226807,
"learning_rate": 0.000020190323320339372,
"loss": 0.2802,
"step": 29100
},
{
"epoch": 0.6,
"grad_norm": 0.6229212284088135,
"learning_rate": 0.0000200860936815993,
"loss": 0.2751,
"step": 29200
},
{
"epoch": 0.6,
"grad_norm": 0.917674720287323,
"learning_rate": 0.000019981864042859228,
"loss": 0.2668,
"step": 29300
},
{
"epoch": 0.61,
"grad_norm": 0.7844977378845215,
"learning_rate": 0.000019877634404119155,
"loss": 0.2772,
"step": 29400
},
{
"epoch": 0.61,
"grad_norm": 0.7926586866378784,
"learning_rate": 0.000019773404765379083,
"loss": 0.2851,
"step": 29500
},
{
"epoch": 0.61,
"grad_norm": 0.9423878192901611,
"learning_rate": 0.00001966917512663901,
"loss": 0.2973,
"step": 29600
},
{
"epoch": 0.61,
"grad_norm": 1.1130839586257935,
"learning_rate": 0.00001956494548789894,
"loss": 0.2858,
"step": 29700
},
{
"epoch": 0.61,
"grad_norm": 0.8832184076309204,
"learning_rate": 0.000019460715849158867,
"loss": 0.2714,
"step": 29800
},
{
"epoch": 0.62,
"grad_norm": 0.7974615693092346,
"learning_rate": 0.000019356486210418795,
"loss": 0.2672,
"step": 29900
},
{
"epoch": 0.62,
"grad_norm": 1.0496609210968018,
"learning_rate": 0.000019252256571678723,
"loss": 0.2967,
"step": 30000
},
{
"epoch": 0.62,
"grad_norm": 0.7052696943283081,
"learning_rate": 0.00001914802693293865,
"loss": 0.2682,
"step": 30100
},
{
"epoch": 0.62,
"grad_norm": 0.8177437782287598,
"learning_rate": 0.00001904379729419858,
"loss": 0.2772,
"step": 30200
},
{
"epoch": 0.63,
"grad_norm": 1.1367732286453247,
"learning_rate": 0.000018939567655458507,
"loss": 0.2751,
"step": 30300
},
{
"epoch": 0.63,
"grad_norm": 1.4945389032363892,
"learning_rate": 0.000018835338016718434,
"loss": 0.2845,
"step": 30400
},
{
"epoch": 0.63,
"grad_norm": 1.035019040107727,
"learning_rate": 0.000018731108377978362,
"loss": 0.2881,
"step": 30500
},
{
"epoch": 0.63,
"grad_norm": 0.4365576207637787,
"learning_rate": 0.00001862687873923829,
"loss": 0.2805,
"step": 30600
},
{
"epoch": 0.63,
"grad_norm": 0.6886979341506958,
"learning_rate": 0.000018522649100498218,
"loss": 0.2755,
"step": 30700
},
{
"epoch": 0.64,
"grad_norm": 0.7902783751487732,
"learning_rate": 0.000018418419461758146,
"loss": 0.2746,
"step": 30800
},
{
"epoch": 0.64,
"grad_norm": 1.1486902236938477,
"learning_rate": 0.000018314189823018074,
"loss": 0.2734,
"step": 30900
},
{
"epoch": 0.64,
"grad_norm": 0.8375297784805298,
"learning_rate": 0.000018209960184278002,
"loss": 0.2672,
"step": 31000
},
{
"epoch": 0.64,
"grad_norm": 0.8426914811134338,
"learning_rate": 0.00001810573054553793,
"loss": 0.266,
"step": 31100
},
{
"epoch": 0.64,
"grad_norm": 1.0207959413528442,
"learning_rate": 0.000018001500906797858,
"loss": 0.2879,
"step": 31200
},
{
"epoch": 0.65,
"grad_norm": 0.8999843597412109,
"learning_rate": 0.000017897271268057786,
"loss": 0.2617,
"step": 31300
},
{
"epoch": 0.65,
"grad_norm": 0.7014563083648682,
"learning_rate": 0.000017793041629317713,
"loss": 0.2757,
"step": 31400
},
{
"epoch": 0.65,
"grad_norm": 0.8926547765731812,
"learning_rate": 0.00001768881199057764,
"loss": 0.2805,
"step": 31500
},
{
"epoch": 0.65,
"grad_norm": 0.9726804494857788,
"learning_rate": 0.00001758458235183757,
"loss": 0.2824,
"step": 31600
},
{
"epoch": 0.65,
"grad_norm": 0.949054479598999,
"learning_rate": 0.000017480352713097497,
"loss": 0.2578,
"step": 31700
},
{
"epoch": 0.66,
"grad_norm": 1.0712580680847168,
"learning_rate": 0.000017376123074357425,
"loss": 0.259,
"step": 31800
},
{
"epoch": 0.66,
"grad_norm": 0.8288059234619141,
"learning_rate": 0.000017271893435617353,
"loss": 0.2788,
"step": 31900
},
{
"epoch": 0.66,
"grad_norm": 0.9404420852661133,
"learning_rate": 0.00001716766379687728,
"loss": 0.2766,
"step": 32000
},
{
"epoch": 0.66,
"grad_norm": 0.8590795993804932,
"learning_rate": 0.00001706343415813721,
"loss": 0.2768,
"step": 32100
},
{
"epoch": 0.66,
"grad_norm": 1.1562901735305786,
"learning_rate": 0.000016959204519397137,
"loss": 0.2876,
"step": 32200
},
{
"epoch": 0.67,
"grad_norm": 0.8512832522392273,
"learning_rate": 0.000016854974880657065,
"loss": 0.2599,
"step": 32300
},
{
"epoch": 0.67,
"grad_norm": 0.8616644740104675,
"learning_rate": 0.000016750745241916992,
"loss": 0.2725,
"step": 32400
},
{
"epoch": 0.67,
"grad_norm": 1.4701212644577026,
"learning_rate": 0.00001664651560317692,
"loss": 0.2592,
"step": 32500
},
{
"epoch": 0.67,
"grad_norm": 1.0344711542129517,
"learning_rate": 0.000016542285964436848,
"loss": 0.2782,
"step": 32600
},
{
"epoch": 0.67,
"grad_norm": 0.9090347290039062,
"learning_rate": 0.000016438056325696776,
"loss": 0.2683,
"step": 32700
},
{
"epoch": 0.68,
"grad_norm": 0.9814984798431396,
"learning_rate": 0.000016333826686956704,
"loss": 0.2692,
"step": 32800
},
{
"epoch": 0.68,
"grad_norm": 0.8351295590400696,
"learning_rate": 0.000016229597048216632,
"loss": 0.2707,
"step": 32900
},
{
"epoch": 0.68,
"grad_norm": 0.8099172115325928,
"learning_rate": 0.00001612536740947656,
"loss": 0.2737,
"step": 33000
},
{
"epoch": 0.68,
"grad_norm": 0.8135939240455627,
"learning_rate": 0.000016021137770736488,
"loss": 0.2641,
"step": 33100
},
{
"epoch": 0.68,
"grad_norm": 1.0211602449417114,
"learning_rate": 0.000015916908131996416,
"loss": 0.2719,
"step": 33200
},
{
"epoch": 0.69,
"grad_norm": 1.499255657196045,
"learning_rate": 0.000015812678493256344,
"loss": 0.2774,
"step": 33300
},
{
"epoch": 0.69,
"grad_norm": 1.062286376953125,
"learning_rate": 0.00001570844885451627,
"loss": 0.2631,
"step": 33400
},
{
"epoch": 0.69,
"grad_norm": 0.7480090260505676,
"learning_rate": 0.0000156042192157762,
"loss": 0.2697,
"step": 33500
},
{
"epoch": 0.69,
"grad_norm": 0.8319742679595947,
"learning_rate": 0.000015499989577036127,
"loss": 0.286,
"step": 33600
},
{
"epoch": 0.7,
"grad_norm": 1.1275886297225952,
"learning_rate": 0.000015395759938296052,
"loss": 0.2855,
"step": 33700
},
{
"epoch": 0.7,
"grad_norm": 0.9062207937240601,
"learning_rate": 0.00001529153029955598,
"loss": 0.268,
"step": 33800
},
{
"epoch": 0.7,
"grad_norm": 0.8947410583496094,
"learning_rate": 0.00001518730066081591,
"loss": 0.2679,
"step": 33900
},
{
"epoch": 0.7,
"grad_norm": 1.0549769401550293,
"learning_rate": 0.000015083071022075837,
"loss": 0.2691,
"step": 34000
},
{
"epoch": 0.7,
"grad_norm": 0.8998539447784424,
"learning_rate": 0.000014978841383335765,
"loss": 0.2688,
"step": 34100
},
{
"epoch": 0.71,
"grad_norm": 0.6037629246711731,
"learning_rate": 0.000014874611744595693,
"loss": 0.267,
"step": 34200
},
{
"epoch": 0.71,
"grad_norm": 0.92879718542099,
"learning_rate": 0.000014770382105855621,
"loss": 0.2667,
"step": 34300
},
{
"epoch": 0.71,
"grad_norm": 1.562403678894043,
"learning_rate": 0.000014666152467115549,
"loss": 0.2812,
"step": 34400
},
{
"epoch": 0.71,
"grad_norm": 1.0794057846069336,
"learning_rate": 0.000014561922828375477,
"loss": 0.2691,
"step": 34500
},
{
"epoch": 0.71,
"grad_norm": 1.1239275932312012,
"learning_rate": 0.000014457693189635405,
"loss": 0.2626,
"step": 34600
},
{
"epoch": 0.72,
"grad_norm": 1.2695655822753906,
"learning_rate": 0.000014353463550895332,
"loss": 0.2679,
"step": 34700
},
{
"epoch": 0.72,
"grad_norm": 0.9684453010559082,
"learning_rate": 0.00001424923391215526,
"loss": 0.2689,
"step": 34800
},
{
"epoch": 0.72,
"grad_norm": 0.8536735773086548,
"learning_rate": 0.000014145004273415188,
"loss": 0.2505,
"step": 34900
},
{
"epoch": 0.72,
"grad_norm": 0.8420373797416687,
"learning_rate": 0.000014040774634675116,
"loss": 0.2808,
"step": 35000
},
{
"epoch": 0.72,
"grad_norm": 1.7913522720336914,
"learning_rate": 0.000013936544995935044,
"loss": 0.2713,
"step": 35100
},
{
"epoch": 0.73,
"grad_norm": 0.9944103360176086,
"learning_rate": 0.000013832315357194972,
"loss": 0.2767,
"step": 35200
},
{
"epoch": 0.73,
"grad_norm": 0.647555947303772,
"learning_rate": 0.0000137280857184549,
"loss": 0.2637,
"step": 35300
},
{
"epoch": 0.73,
"grad_norm": 0.6362162828445435,
"learning_rate": 0.000013623856079714828,
"loss": 0.2626,
"step": 35400
},
{
"epoch": 0.73,
"grad_norm": 0.8359599113464355,
"learning_rate": 0.000013519626440974756,
"loss": 0.2819,
"step": 35500
},
{
"epoch": 0.73,
"grad_norm": 0.8736098408699036,
"learning_rate": 0.000013415396802234684,
"loss": 0.2699,
"step": 35600
},
{
"epoch": 0.74,
"grad_norm": 0.8288611173629761,
"learning_rate": 0.000013311167163494611,
"loss": 0.2621,
"step": 35700
},
{
"epoch": 0.74,
"grad_norm": 0.7499838471412659,
"learning_rate": 0.00001320693752475454,
"loss": 0.2692,
"step": 35800
},
{
"epoch": 0.74,
"grad_norm": 1.0514994859695435,
"learning_rate": 0.000013102707886014467,
"loss": 0.2712,
"step": 35900
},
{
"epoch": 0.74,
"grad_norm": 0.8135703206062317,
"learning_rate": 0.000012998478247274395,
"loss": 0.267,
"step": 36000
},
{
"epoch": 0.74,
"grad_norm": 0.780437707901001,
"learning_rate": 0.000012894248608534323,
"loss": 0.2578,
"step": 36100
},
{
"epoch": 0.75,
"grad_norm": 1.0182126760482788,
"learning_rate": 0.000012790018969794251,
"loss": 0.2848,
"step": 36200
},
{
"epoch": 0.75,
"grad_norm": 0.9728681445121765,
"learning_rate": 0.000012685789331054179,
"loss": 0.2772,
"step": 36300
},
{
"epoch": 0.75,
"grad_norm": 0.7952257394790649,
"learning_rate": 0.000012581559692314107,
"loss": 0.261,
"step": 36400
},
{
"epoch": 0.75,
"grad_norm": 0.7164922952651978,
"learning_rate": 0.000012477330053574035,
"loss": 0.2403,
"step": 36500
},
{
"epoch": 0.76,
"grad_norm": 0.6388373970985413,
"learning_rate": 0.000012373100414833963,
"loss": 0.2705,
"step": 36600
},
{
"epoch": 0.76,
"grad_norm": 1.4427376985549927,
"learning_rate": 0.00001226887077609389,
"loss": 0.277,
"step": 36700
},
{
"epoch": 0.76,
"grad_norm": 0.9559366106987,
"learning_rate": 0.000012164641137353818,
"loss": 0.2784,
"step": 36800
},
{
"epoch": 0.76,
"grad_norm": 0.8561313152313232,
"learning_rate": 0.000012060411498613746,
"loss": 0.272,
"step": 36900
},
{
"epoch": 0.76,
"grad_norm": 0.9756980538368225,
"learning_rate": 0.000011956181859873674,
"loss": 0.2644,
"step": 37000
},
{
"epoch": 0.77,
"grad_norm": 1.350045919418335,
"learning_rate": 0.000011851952221133602,
"loss": 0.2726,
"step": 37100
},
{
"epoch": 0.77,
"grad_norm": 1.2612251043319702,
"learning_rate": 0.00001174772258239353,
"loss": 0.2696,
"step": 37200
},
{
"epoch": 0.77,
"grad_norm": 0.9203113913536072,
"learning_rate": 0.000011643492943653458,
"loss": 0.2721,
"step": 37300
},
{
"epoch": 0.77,
"grad_norm": 1.0679327249526978,
"learning_rate": 0.000011539263304913386,
"loss": 0.2738,
"step": 37400
},
{
"epoch": 0.77,
"grad_norm": 0.7020294070243835,
"learning_rate": 0.000011435033666173314,
"loss": 0.2806,
"step": 37500
},
{
"epoch": 0.78,
"grad_norm": 0.9969758987426758,
"learning_rate": 0.000011330804027433242,
"loss": 0.2556,
"step": 37600
},
{
"epoch": 0.78,
"grad_norm": 0.7139135003089905,
"learning_rate": 0.00001122657438869317,
"loss": 0.2739,
"step": 37700
},
{
"epoch": 0.78,
"grad_norm": 1.3120732307434082,
"learning_rate": 0.000011122344749953097,
"loss": 0.2774,
"step": 37800
},
{
"epoch": 0.78,
"grad_norm": 0.9226138591766357,
"learning_rate": 0.000011018115111213025,
"loss": 0.2414,
"step": 37900
},
{
"epoch": 0.78,
"grad_norm": 1.024018406867981,
"learning_rate": 0.000010913885472472953,
"loss": 0.2453,
"step": 38000
},
{
"epoch": 0.79,
"grad_norm": 0.7024143934249878,
"learning_rate": 0.000010809655833732881,
"loss": 0.2681,
"step": 38100
},
{
"epoch": 0.79,
"grad_norm": 1.008748173713684,
"learning_rate": 0.000010705426194992809,
"loss": 0.2656,
"step": 38200
},
{
"epoch": 0.79,
"grad_norm": 0.8661462068557739,
"learning_rate": 0.000010601196556252737,
"loss": 0.2679,
"step": 38300
},
{
"epoch": 0.79,
"grad_norm": 0.9956786632537842,
"learning_rate": 0.000010496966917512665,
"loss": 0.2622,
"step": 38400
},
{
"epoch": 0.79,
"grad_norm": 1.3166755437850952,
"learning_rate": 0.000010392737278772593,
"loss": 0.2595,
"step": 38500
},
{
"epoch": 0.8,
"grad_norm": 1.0491749048233032,
"learning_rate": 0.00001028850764003252,
"loss": 0.2682,
"step": 38600
},
{
"epoch": 0.8,
"grad_norm": 1.0107011795043945,
"learning_rate": 0.000010184278001292448,
"loss": 0.2544,
"step": 38700
},
{
"epoch": 0.8,
"grad_norm": 1.0187588930130005,
"learning_rate": 0.000010080048362552376,
"loss": 0.2611,
"step": 38800
},
{
"epoch": 0.8,
"grad_norm": 0.8718214631080627,
"learning_rate": 0.000009975818723812304,
"loss": 0.2567,
"step": 38900
},
{
"epoch": 0.8,
"grad_norm": 0.7450688481330872,
"learning_rate": 0.000009871589085072232,
"loss": 0.2642,
"step": 39000
},
{
"epoch": 0.81,
"grad_norm": 1.0107316970825195,
"learning_rate": 0.00000976735944633216,
"loss": 0.2613,
"step": 39100
},
{
"epoch": 0.81,
"grad_norm": 0.8210121393203735,
"learning_rate": 0.000009663129807592088,
"loss": 0.2768,
"step": 39200
},
{
"epoch": 0.81,
"grad_norm": 0.8526451587677002,
"learning_rate": 0.000009558900168852016,
"loss": 0.2473,
"step": 39300
},
{
"epoch": 0.81,
"grad_norm": 0.7720713019371033,
"learning_rate": 0.000009454670530111944,
"loss": 0.2715,
"step": 39400
},
{
"epoch": 0.81,
"grad_norm": 0.8657224774360657,
"learning_rate": 0.000009350440891371872,
"loss": 0.2612,
"step": 39500
},
{
"epoch": 0.82,
"grad_norm": 0.9144985675811768,
"learning_rate": 0.0000092462112526318,
"loss": 0.2648,
"step": 39600
},
{
"epoch": 0.82,
"grad_norm": 1.0658880472183228,
"learning_rate": 0.000009141981613891727,
"loss": 0.2661,
"step": 39700
},
{
"epoch": 0.82,
"grad_norm": 0.9904933571815491,
"learning_rate": 0.000009037751975151655,
"loss": 0.2513,
"step": 39800
},
{
"epoch": 0.82,
"grad_norm": 0.9688336253166199,
"learning_rate": 0.000008933522336411583,
"loss": 0.2622,
"step": 39900
},
{
"epoch": 0.83,
"grad_norm": 0.6780712604522705,
"learning_rate": 0.000008829292697671511,
"loss": 0.2665,
"step": 40000
},
{
"epoch": 0.83,
"grad_norm": 0.9881765246391296,
"learning_rate": 0.000008725063058931437,
"loss": 0.2486,
"step": 40100
},
{
"epoch": 0.83,
"grad_norm": 0.9511051774024963,
"learning_rate": 0.000008620833420191365,
"loss": 0.2655,
"step": 40200
},
{
"epoch": 0.83,
"grad_norm": 0.8047460317611694,
"learning_rate": 0.000008516603781451293,
"loss": 0.262,
"step": 40300
},
{
"epoch": 0.83,
"grad_norm": 1.0902456045150757,
"learning_rate": 0.000008412374142711221,
"loss": 0.2681,
"step": 40400
},
{
"epoch": 0.84,
"grad_norm": 0.9015824794769287,
"learning_rate": 0.000008308144503971149,
"loss": 0.2689,
"step": 40500
},
{
"epoch": 0.84,
"grad_norm": 0.9453705549240112,
"learning_rate": 0.000008203914865231077,
"loss": 0.2693,
"step": 40600
},
{
"epoch": 0.84,
"grad_norm": 0.8557701706886292,
"learning_rate": 0.000008099685226491005,
"loss": 0.2753,
"step": 40700
},
{
"epoch": 0.84,
"grad_norm": 1.022003173828125,
"learning_rate": 0.000007995455587750933,
"loss": 0.2646,
"step": 40800
},
{
"epoch": 0.84,
"grad_norm": 0.9399078488349915,
"learning_rate": 0.00000789122594901086,
"loss": 0.2671,
"step": 40900
},
{
"epoch": 0.85,
"grad_norm": 1.1249728202819824,
"learning_rate": 0.000007786996310270788,
"loss": 0.2655,
"step": 41000
},
{
"epoch": 0.85,
"grad_norm": 1.2603721618652344,
"learning_rate": 0.000007682766671530716,
"loss": 0.262,
"step": 41100
},
{
"epoch": 0.85,
"grad_norm": 0.6507100462913513,
"learning_rate": 0.000007578537032790644,
"loss": 0.2648,
"step": 41200
},
{
"epoch": 0.85,
"grad_norm": 1.0216801166534424,
"learning_rate": 0.000007474307394050572,
"loss": 0.2727,
"step": 41300
},
{
"epoch": 0.85,
"grad_norm": 0.8479890823364258,
"learning_rate": 0.0000073700777553105,
"loss": 0.2644,
"step": 41400
},
{
"epoch": 0.86,
"grad_norm": 0.953072726726532,
"learning_rate": 0.000007265848116570428,
"loss": 0.2601,
"step": 41500
},
{
"epoch": 0.86,
"grad_norm": 0.7130771279335022,
"learning_rate": 0.000007161618477830357,
"loss": 0.2634,
"step": 41600
},
{
"epoch": 0.86,
"grad_norm": 1.1879602670669556,
"learning_rate": 0.000007057388839090285,
"loss": 0.2588,
"step": 41700
},
{
"epoch": 0.86,
"grad_norm": 0.9556657075881958,
"learning_rate": 0.0000069531592003502125,
"loss": 0.2673,
"step": 41800
},
{
"epoch": 0.86,
"grad_norm": 1.1229112148284912,
"learning_rate": 0.0000068489295616101404,
"loss": 0.264,
"step": 41900
},
{
"epoch": 0.87,
"grad_norm": 1.2128034830093384,
"learning_rate": 0.000006744699922870068,
"loss": 0.2626,
"step": 42000
},
{
"epoch": 0.87,
"grad_norm": 0.9225343465805054,
"learning_rate": 0.000006640470284129996,
"loss": 0.2713,
"step": 42100
},
{
"epoch": 0.87,
"grad_norm": 1.0495810508728027,
"learning_rate": 0.000006536240645389924,
"loss": 0.2739,
"step": 42200
},
{
"epoch": 0.87,
"grad_norm": 0.5960795879364014,
"learning_rate": 0.000006432011006649852,
"loss": 0.2629,
"step": 42300
},
{
"epoch": 0.87,
"grad_norm": 0.653361439704895,
"learning_rate": 0.00000632778136790978,
"loss": 0.2585,
"step": 42400
},
{
"epoch": 0.88,
"grad_norm": 0.8684765100479126,
"learning_rate": 0.000006223551729169707,
"loss": 0.2426,
"step": 42500
},
{
"epoch": 0.88,
"grad_norm": 0.5730326771736145,
"learning_rate": 0.000006119322090429635,
"loss": 0.2537,
"step": 42600
},
{
"epoch": 0.88,
"grad_norm": 0.8172651529312134,
"learning_rate": 0.000006015092451689563,
"loss": 0.2553,
"step": 42700
},
{
"epoch": 0.88,
"grad_norm": 0.9099143147468567,
"learning_rate": 0.000005910862812949491,
"loss": 0.2569,
"step": 42800
},
{
"epoch": 0.89,
"grad_norm": 0.9645257592201233,
"learning_rate": 0.000005806633174209419,
"loss": 0.2689,
"step": 42900
},
{
"epoch": 0.89,
"grad_norm": 0.6569193005561829,
"learning_rate": 0.0000057024035354693465,
"loss": 0.2662,
"step": 43000
},
{
"epoch": 0.89,
"grad_norm": 1.2637701034545898,
"learning_rate": 0.000005598173896729274,
"loss": 0.2874,
"step": 43100
},
{
"epoch": 0.89,
"grad_norm": 1.0651448965072632,
"learning_rate": 0.000005493944257989202,
"loss": 0.255,
"step": 43200
},
{
"epoch": 0.89,
"grad_norm": 1.1091278791427612,
"learning_rate": 0.000005389714619249129,
"loss": 0.283,
"step": 43300
},
{
"epoch": 0.9,
"grad_norm": 0.9137535095214844,
"learning_rate": 0.000005285484980509057,
"loss": 0.2674,
"step": 43400
},
{
"epoch": 0.9,
"grad_norm": 0.8751276731491089,
"learning_rate": 0.000005181255341768985,
"loss": 0.2638,
"step": 43500
},
{
"epoch": 0.9,
"grad_norm": 0.9242710471153259,
"learning_rate": 0.000005077025703028913,
"loss": 0.2582,
"step": 43600
},
{
"epoch": 0.9,
"grad_norm": 0.7860730886459351,
"learning_rate": 0.000004972796064288841,
"loss": 0.2557,
"step": 43700
},
{
"epoch": 0.9,
"grad_norm": 0.9240506291389465,
"learning_rate": 0.000004868566425548769,
"loss": 0.2591,
"step": 43800
},
{
"epoch": 0.91,
"grad_norm": 1.0865675210952759,
"learning_rate": 0.000004764336786808697,
"loss": 0.2511,
"step": 43900
},
{
"epoch": 0.91,
"grad_norm": 1.0534336566925049,
"learning_rate": 0.0000046601071480686255,
"loss": 0.2651,
"step": 44000
},
{
"epoch": 0.91,
"grad_norm": 0.9301556348800659,
"learning_rate": 0.000004555877509328553,
"loss": 0.2414,
"step": 44100
},
{
"epoch": 0.91,
"grad_norm": 0.9242545366287231,
"learning_rate": 0.000004451647870588481,
"loss": 0.2636,
"step": 44200
},
{
"epoch": 0.91,
"grad_norm": 0.7292113304138184,
"learning_rate": 0.000004347418231848409,
"loss": 0.2672,
"step": 44300
},
{
"epoch": 0.92,
"grad_norm": 1.2003198862075806,
"learning_rate": 0.000004243188593108337,
"loss": 0.2661,
"step": 44400
},
{
"epoch": 0.92,
"grad_norm": 0.9352708458900452,
"learning_rate": 0.000004138958954368265,
"loss": 0.275,
"step": 44500
},
{
"epoch": 0.92,
"grad_norm": 0.7767049074172974,
"learning_rate": 0.000004034729315628192,
"loss": 0.2709,
"step": 44600
},
{
"epoch": 0.92,
"grad_norm": 0.8384830355644226,
"learning_rate": 0.00000393049967688812,
"loss": 0.2655,
"step": 44700
},
{
"epoch": 0.92,
"grad_norm": 0.9187309145927429,
"learning_rate": 0.000003826270038148048,
"loss": 0.2609,
"step": 44800
},
{
"epoch": 0.93,
"grad_norm": 0.8865881562232971,
"learning_rate": 0.0000037220403994079758,
"loss": 0.2677,
"step": 44900
},
{
"epoch": 0.93,
"grad_norm": 1.0104628801345825,
"learning_rate": 0.0000036178107606679037,
"loss": 0.2649,
"step": 45000
},
{
"epoch": 0.93,
"grad_norm": 0.8638792634010315,
"learning_rate": 0.0000035135811219278316,
"loss": 0.2694,
"step": 45100
},
{
"epoch": 0.93,
"grad_norm": 1.0379241704940796,
"learning_rate": 0.0000034093514831877595,
"loss": 0.2489,
"step": 45200
},
{
"epoch": 0.93,
"grad_norm": 1.123955488204956,
"learning_rate": 0.0000033051218444476874,
"loss": 0.2576,
"step": 45300
},
{
"epoch": 0.94,
"grad_norm": 1.1420966386795044,
"learning_rate": 0.0000032008922057076153,
"loss": 0.2706,
"step": 45400
},
{
"epoch": 0.94,
"grad_norm": 1.0519020557403564,
"learning_rate": 0.0000030966625669675427,
"loss": 0.2616,
"step": 45500
},
{
"epoch": 0.94,
"grad_norm": 1.040588140487671,
"learning_rate": 0.0000029924329282274706,
"loss": 0.2546,
"step": 45600
},
{
"epoch": 0.94,
"grad_norm": 0.8306300640106201,
"learning_rate": 0.0000028882032894873985,
"loss": 0.2683,
"step": 45700
},
{
"epoch": 0.94,
"grad_norm": 0.6718655228614807,
"learning_rate": 0.0000027839736507473264,
"loss": 0.2483,
"step": 45800
},
{
"epoch": 0.95,
"grad_norm": 0.8920142650604248,
"learning_rate": 0.0000026797440120072548,
"loss": 0.2612,
"step": 45900
},
{
"epoch": 0.95,
"grad_norm": 1.09241783618927,
"learning_rate": 0.0000025755143732671827,
"loss": 0.2483,
"step": 46000
},
{
"epoch": 0.95,
"grad_norm": 0.9406448602676392,
"learning_rate": 0.0000024712847345271106,
"loss": 0.2639,
"step": 46100
},
{
"epoch": 0.95,
"grad_norm": 0.9145969748497009,
"learning_rate": 0.000002367055095787038,
"loss": 0.2493,
"step": 46200
},
{
"epoch": 0.96,
"grad_norm": 0.722114086151123,
"learning_rate": 0.000002262825457046966,
"loss": 0.266,
"step": 46300
},
{
"epoch": 0.96,
"grad_norm": 0.7077659964561462,
"learning_rate": 0.000002158595818306894,
"loss": 0.2712,
"step": 46400
},
{
"epoch": 0.96,
"grad_norm": 0.7417329549789429,
"learning_rate": 0.0000020543661795668217,
"loss": 0.2435,
"step": 46500
},
{
"epoch": 0.96,
"grad_norm": 0.8229231238365173,
"learning_rate": 0.0000019501365408267496,
"loss": 0.2647,
"step": 46600
},
{
"epoch": 0.96,
"grad_norm": 0.49395614862442017,
"learning_rate": 0.0000018459069020866775,
"loss": 0.2508,
"step": 46700
},
{
"epoch": 0.97,
"grad_norm": 1.0020008087158203,
"learning_rate": 0.0000017416772633466052,
"loss": 0.2505,
"step": 46800
},
{
"epoch": 0.97,
"grad_norm": 0.8525140881538391,
"learning_rate": 0.0000016374476246065331,
"loss": 0.2518,
"step": 46900
},
{
"epoch": 0.97,
"grad_norm": 0.8383646011352539,
"learning_rate": 0.000001533217985866461,
"loss": 0.2768,
"step": 47000
},
{
"epoch": 0.97,
"grad_norm": 0.586855947971344,
"learning_rate": 0.000001428988347126389,
"loss": 0.2379,
"step": 47100
},
{
"epoch": 0.97,
"grad_norm": 0.7666946053504944,
"learning_rate": 0.0000013247587083863168,
"loss": 0.2501,
"step": 47200
},
{
"epoch": 0.98,
"grad_norm": 1.0712182521820068,
"learning_rate": 0.0000012205290696462445,
"loss": 0.2539,
"step": 47300
},
{
"epoch": 0.98,
"grad_norm": 0.9935363531112671,
"learning_rate": 0.0000011162994309061726,
"loss": 0.2603,
"step": 47400
},
{
"epoch": 0.98,
"grad_norm": 0.8433282971382141,
"learning_rate": 0.0000010120697921661005,
"loss": 0.261,
"step": 47500
},
{
"epoch": 0.98,
"grad_norm": 0.942387044429779,
"learning_rate": 9.078401534260282e-7,
"loss": 0.2702,
"step": 47600
},
{
"epoch": 0.98,
"grad_norm": 1.199331521987915,
"learning_rate": 8.036105146859561e-7,
"loss": 0.2452,
"step": 47700
},
{
"epoch": 0.99,
"grad_norm": 0.9567368626594543,
"learning_rate": 6.993808759458839e-7,
"loss": 0.2691,
"step": 47800
},
{
"epoch": 0.99,
"grad_norm": 1.0403625965118408,
"learning_rate": 5.951512372058119e-7,
"loss": 0.2702,
"step": 47900
},
{
"epoch": 0.99,
"grad_norm": 0.9573351740837097,
"learning_rate": 4.909215984657397e-7,
"loss": 0.2539,
"step": 48000
}
],
"logging_steps": 100,
"max_steps": 48471,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 16000,
"total_flos": 104021020901376000,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}