{ "best_metric": 0.6864747405052185, "best_model_checkpoint": "saves/starcoder2-7b/lora/sft/checkpoint-11000", "epoch": 1.7557861133280128, "eval_steps": 100, "global_step": 11000, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0, "grad_norm": 0.628385603427887, "learning_rate": 4.999999126897802e-05, "loss": 1.2582, "step": 5 }, { "epoch": 0.0, "grad_norm": 1.0855119228363037, "learning_rate": 4.999996507591817e-05, "loss": 0.801, "step": 10 }, { "epoch": 0.0, "grad_norm": 1.5689586400985718, "learning_rate": 4.9999921420838745e-05, "loss": 1.067, "step": 15 }, { "epoch": 0.0, "grad_norm": 2.0851330757141113, "learning_rate": 4.999986030377024e-05, "loss": 1.2953, "step": 20 }, { "epoch": 0.0, "grad_norm": 1.397479772567749, "learning_rate": 4.999978172475535e-05, "loss": 0.9826, "step": 25 }, { "epoch": 0.0, "grad_norm": 1.344118595123291, "learning_rate": 4.9999685683848954e-05, "loss": 0.9485, "step": 30 }, { "epoch": 0.01, "grad_norm": 1.158163070678711, "learning_rate": 4.9999596278606616e-05, "loss": 0.8103, "step": 35 }, { "epoch": 0.01, "grad_norm": 1.602233648300171, "learning_rate": 4.999946880647276e-05, "loss": 0.8648, "step": 40 }, { "epoch": 0.01, "grad_norm": 1.557242751121521, "learning_rate": 4.999932387266596e-05, "loss": 1.0198, "step": 45 }, { "epoch": 0.01, "grad_norm": 1.36068856716156, "learning_rate": 4.999916147728746e-05, "loss": 0.9367, "step": 50 }, { "epoch": 0.01, "grad_norm": 1.3263639211654663, "learning_rate": 4.999898162045068e-05, "loss": 0.9695, "step": 55 }, { "epoch": 0.01, "grad_norm": 1.333601474761963, "learning_rate": 4.999878430228126e-05, "loss": 1.1509, "step": 60 }, { "epoch": 0.01, "grad_norm": 1.4753800630569458, "learning_rate": 4.999856952291702e-05, "loss": 1.1461, "step": 65 }, { "epoch": 0.01, "grad_norm": 1.5096240043640137, "learning_rate": 4.9998337282507965e-05, "loss": 1.1722, "step": 70 }, { "epoch": 0.01, "grad_norm": 1.189892053604126, "learning_rate": 4.999808758121633e-05, "loss": 1.1834, "step": 75 }, { "epoch": 0.01, "grad_norm": 0.9292634725570679, "learning_rate": 4.999782041921651e-05, "loss": 0.9498, "step": 80 }, { "epoch": 0.01, "grad_norm": 2.1775777339935303, "learning_rate": 4.9997535796695134e-05, "loss": 0.9346, "step": 85 }, { "epoch": 0.01, "grad_norm": 1.6854296922683716, "learning_rate": 4.999723371385099e-05, "loss": 1.119, "step": 90 }, { "epoch": 0.02, "grad_norm": 1.4571490287780762, "learning_rate": 4.999691417089507e-05, "loss": 0.8671, "step": 95 }, { "epoch": 0.02, "grad_norm": 1.277044653892517, "learning_rate": 4.999657716805059e-05, "loss": 1.2469, "step": 100 }, { "epoch": 0.02, "eval_loss": 0.8478816747665405, "eval_runtime": 96.2736, "eval_samples_per_second": 7.24, "eval_steps_per_second": 7.24, "step": 100 }, { "epoch": 0.02, "grad_norm": 0.6687743067741394, "learning_rate": 4.9996222705552933e-05, "loss": 0.735, "step": 105 }, { "epoch": 0.02, "grad_norm": 1.3488354682922363, "learning_rate": 4.9995850783649665e-05, "loss": 0.8344, "step": 110 }, { "epoch": 0.02, "grad_norm": 1.1043323278427124, "learning_rate": 4.9995461402600593e-05, "loss": 0.8254, "step": 115 }, { "epoch": 0.02, "grad_norm": 0.9382895827293396, "learning_rate": 4.9995054562677684e-05, "loss": 0.9179, "step": 120 }, { "epoch": 0.02, "grad_norm": 1.2824612855911255, "learning_rate": 4.9994630264165107e-05, "loss": 0.8663, "step": 125 }, { "epoch": 0.02, "grad_norm": 1.0491925477981567, "learning_rate": 4.999418850735923e-05, "loss": 0.9247, "step": 
130 }, { "epoch": 0.02, "grad_norm": 1.3642233610153198, "learning_rate": 4.99937292925686e-05, "loss": 0.8253, "step": 135 }, { "epoch": 0.02, "grad_norm": 3.747757911682129, "learning_rate": 4.9993252620113976e-05, "loss": 1.0245, "step": 140 }, { "epoch": 0.02, "grad_norm": 1.299494981765747, "learning_rate": 4.999275849032832e-05, "loss": 0.8723, "step": 145 }, { "epoch": 0.02, "grad_norm": 1.7195830345153809, "learning_rate": 4.999224690355675e-05, "loss": 1.0524, "step": 150 }, { "epoch": 0.02, "grad_norm": 0.9922987222671509, "learning_rate": 4.9991717860156616e-05, "loss": 0.9502, "step": 155 }, { "epoch": 0.03, "grad_norm": 1.0577458143234253, "learning_rate": 4.9991171360497437e-05, "loss": 1.0115, "step": 160 }, { "epoch": 0.03, "grad_norm": 1.0001195669174194, "learning_rate": 4.999060740496093e-05, "loss": 1.1999, "step": 165 }, { "epoch": 0.03, "grad_norm": 1.2456804513931274, "learning_rate": 4.999002599394102e-05, "loss": 0.8882, "step": 170 }, { "epoch": 0.03, "grad_norm": 1.0445325374603271, "learning_rate": 4.9989427127843814e-05, "loss": 1.0615, "step": 175 }, { "epoch": 0.03, "grad_norm": 1.2410887479782104, "learning_rate": 4.9988810807087584e-05, "loss": 1.1068, "step": 180 }, { "epoch": 0.03, "grad_norm": 0.8935971260070801, "learning_rate": 4.998817703210285e-05, "loss": 0.6683, "step": 185 }, { "epoch": 0.03, "grad_norm": 1.1614488363265991, "learning_rate": 4.9987525803332265e-05, "loss": 0.7446, "step": 190 }, { "epoch": 0.03, "grad_norm": 0.9392004013061523, "learning_rate": 4.998685712123072e-05, "loss": 0.7397, "step": 195 }, { "epoch": 0.03, "grad_norm": 1.0314444303512573, "learning_rate": 4.9986170986265266e-05, "loss": 1.3584, "step": 200 }, { "epoch": 0.03, "eval_loss": 0.8368077278137207, "eval_runtime": 96.5262, "eval_samples_per_second": 7.221, "eval_steps_per_second": 7.221, "step": 200 }, { "epoch": 0.03, "grad_norm": 0.8964811563491821, "learning_rate": 4.998546739891516e-05, "loss": 0.9546, "step": 205 }, { "epoch": 0.03, "grad_norm": 1.0679796934127808, "learning_rate": 4.998474635967185e-05, "loss": 0.864, "step": 210 }, { "epoch": 0.03, "grad_norm": 1.2340985536575317, "learning_rate": 4.998400786903896e-05, "loss": 0.885, "step": 215 }, { "epoch": 0.04, "grad_norm": 1.7219617366790771, "learning_rate": 4.9983251927532315e-05, "loss": 1.1069, "step": 220 }, { "epoch": 0.04, "grad_norm": 1.1480705738067627, "learning_rate": 4.9982478535679924e-05, "loss": 1.0416, "step": 225 }, { "epoch": 0.04, "grad_norm": 1.515589714050293, "learning_rate": 4.9981687694021996e-05, "loss": 1.1844, "step": 230 }, { "epoch": 0.04, "grad_norm": 1.6687963008880615, "learning_rate": 4.998087940311091e-05, "loss": 0.8664, "step": 235 }, { "epoch": 0.04, "grad_norm": 1.9256645441055298, "learning_rate": 4.998005366351125e-05, "loss": 1.0125, "step": 240 }, { "epoch": 0.04, "grad_norm": 1.2500052452087402, "learning_rate": 4.997921047579978e-05, "loss": 1.1374, "step": 245 }, { "epoch": 0.04, "grad_norm": 1.0543216466903687, "learning_rate": 4.9978349840565434e-05, "loss": 0.8502, "step": 250 }, { "epoch": 0.04, "grad_norm": 1.3009012937545776, "learning_rate": 4.997747175840937e-05, "loss": 1.0357, "step": 255 }, { "epoch": 0.04, "grad_norm": 0.8456661105155945, "learning_rate": 4.997657622994491e-05, "loss": 0.6883, "step": 260 }, { "epoch": 0.04, "grad_norm": 0.5856515765190125, "learning_rate": 4.9975663255797555e-05, "loss": 0.7656, "step": 265 }, { "epoch": 0.04, "grad_norm": 0.973818302154541, "learning_rate": 4.997473283660501e-05, "loss": 0.823, "step": 270 }, 
{ "epoch": 0.04, "grad_norm": 0.9960187673568726, "learning_rate": 4.997378497301715e-05, "loss": 0.8726, "step": 275 }, { "epoch": 0.04, "grad_norm": 1.2900679111480713, "learning_rate": 4.997281966569604e-05, "loss": 0.9781, "step": 280 }, { "epoch": 0.05, "grad_norm": 1.828894853591919, "learning_rate": 4.9971836915315926e-05, "loss": 0.8932, "step": 285 }, { "epoch": 0.05, "grad_norm": 1.239621877670288, "learning_rate": 4.9970836722563256e-05, "loss": 1.2022, "step": 290 }, { "epoch": 0.05, "grad_norm": 1.0117149353027344, "learning_rate": 4.996981908813664e-05, "loss": 0.8032, "step": 295 }, { "epoch": 0.05, "grad_norm": 0.8861119747161865, "learning_rate": 4.996878401274687e-05, "loss": 1.0651, "step": 300 }, { "epoch": 0.05, "eval_loss": 0.8281473517417908, "eval_runtime": 96.5283, "eval_samples_per_second": 7.221, "eval_steps_per_second": 7.221, "step": 300 }, { "epoch": 0.05, "grad_norm": 0.8583046197891235, "learning_rate": 4.996773149711693e-05, "loss": 0.8784, "step": 305 }, { "epoch": 0.05, "grad_norm": 2.5717499256134033, "learning_rate": 4.9966661541981984e-05, "loss": 0.8395, "step": 310 }, { "epoch": 0.05, "grad_norm": 0.982342004776001, "learning_rate": 4.9965574148089376e-05, "loss": 0.9869, "step": 315 }, { "epoch": 0.05, "grad_norm": 0.9000777006149292, "learning_rate": 4.9964469316198633e-05, "loss": 0.8435, "step": 320 }, { "epoch": 0.05, "grad_norm": 0.8733209371566772, "learning_rate": 4.9963347047081464e-05, "loss": 0.7281, "step": 325 }, { "epoch": 0.05, "grad_norm": 3.323739767074585, "learning_rate": 4.9962207341521746e-05, "loss": 1.1013, "step": 330 }, { "epoch": 0.05, "grad_norm": 1.7102876901626587, "learning_rate": 4.996105020031554e-05, "loss": 0.8276, "step": 335 }, { "epoch": 0.05, "grad_norm": 0.9196123480796814, "learning_rate": 4.995987562427109e-05, "loss": 0.8274, "step": 340 }, { "epoch": 0.06, "grad_norm": 1.210099458694458, "learning_rate": 4.995868361420883e-05, "loss": 1.3257, "step": 345 }, { "epoch": 0.06, "grad_norm": 0.8923581838607788, "learning_rate": 4.9957474170961335e-05, "loss": 0.6815, "step": 350 }, { "epoch": 0.06, "grad_norm": 0.9576735496520996, "learning_rate": 4.9956247295373396e-05, "loss": 1.23, "step": 355 }, { "epoch": 0.06, "grad_norm": 1.3774089813232422, "learning_rate": 4.995500298830196e-05, "loss": 1.0556, "step": 360 }, { "epoch": 0.06, "grad_norm": 1.1523677110671997, "learning_rate": 4.995374125061614e-05, "loss": 1.1787, "step": 365 }, { "epoch": 0.06, "grad_norm": 0.8310608863830566, "learning_rate": 4.9952462083197246e-05, "loss": 0.8525, "step": 370 }, { "epoch": 0.06, "grad_norm": 0.9814196825027466, "learning_rate": 4.9951165486938765e-05, "loss": 0.8522, "step": 375 }, { "epoch": 0.06, "grad_norm": 0.9878122210502625, "learning_rate": 4.994985146274633e-05, "loss": 0.6618, "step": 380 }, { "epoch": 0.06, "grad_norm": 1.2652586698532104, "learning_rate": 4.994852001153777e-05, "loss": 1.0489, "step": 385 }, { "epoch": 0.06, "grad_norm": 1.2940975427627563, "learning_rate": 4.994717113424307e-05, "loss": 1.104, "step": 390 }, { "epoch": 0.06, "grad_norm": 0.9636249542236328, "learning_rate": 4.99458048318044e-05, "loss": 0.9228, "step": 395 }, { "epoch": 0.06, "grad_norm": 0.8122813105583191, "learning_rate": 4.994442110517611e-05, "loss": 0.9209, "step": 400 }, { "epoch": 0.06, "eval_loss": 0.8184689879417419, "eval_runtime": 96.4572, "eval_samples_per_second": 7.226, "eval_steps_per_second": 7.226, "step": 400 }, { "epoch": 0.06, "grad_norm": 0.8742052912712097, "learning_rate": 4.99430199553247e-05, 
"loss": 0.9608, "step": 405 }, { "epoch": 0.07, "grad_norm": 0.5679522752761841, "learning_rate": 4.9941601383228835e-05, "loss": 0.5963, "step": 410 }, { "epoch": 0.07, "grad_norm": 1.0234627723693848, "learning_rate": 4.994016538987938e-05, "loss": 0.8642, "step": 415 }, { "epoch": 0.07, "grad_norm": 0.8581897616386414, "learning_rate": 4.993871197627934e-05, "loss": 0.8993, "step": 420 }, { "epoch": 0.07, "grad_norm": 1.4666485786437988, "learning_rate": 4.9937241143443904e-05, "loss": 0.8565, "step": 425 }, { "epoch": 0.07, "grad_norm": 1.1166578531265259, "learning_rate": 4.993575289240041e-05, "loss": 0.881, "step": 430 }, { "epoch": 0.07, "grad_norm": 1.303992748260498, "learning_rate": 4.9934247224188393e-05, "loss": 0.9962, "step": 435 }, { "epoch": 0.07, "grad_norm": 0.9011989235877991, "learning_rate": 4.993272413985952e-05, "loss": 0.9316, "step": 440 }, { "epoch": 0.07, "grad_norm": 0.8321458101272583, "learning_rate": 4.993118364047764e-05, "loss": 0.7889, "step": 445 }, { "epoch": 0.07, "grad_norm": 0.7780352234840393, "learning_rate": 4.992962572711877e-05, "loss": 0.8287, "step": 450 }, { "epoch": 0.07, "grad_norm": 0.9090210199356079, "learning_rate": 4.992805040087108e-05, "loss": 0.7018, "step": 455 }, { "epoch": 0.07, "grad_norm": 0.8694137334823608, "learning_rate": 4.9926457662834906e-05, "loss": 0.8484, "step": 460 }, { "epoch": 0.07, "grad_norm": 0.6327371001243591, "learning_rate": 4.992484751412274e-05, "loss": 0.716, "step": 465 }, { "epoch": 0.08, "grad_norm": 1.200668215751648, "learning_rate": 4.9923219955859254e-05, "loss": 0.9525, "step": 470 }, { "epoch": 0.08, "grad_norm": 0.8530198931694031, "learning_rate": 4.9921574989181266e-05, "loss": 0.744, "step": 475 }, { "epoch": 0.08, "grad_norm": 1.168479323387146, "learning_rate": 4.991991261523775e-05, "loss": 0.729, "step": 480 }, { "epoch": 0.08, "grad_norm": 0.9499714970588684, "learning_rate": 4.9918232835189834e-05, "loss": 0.7725, "step": 485 }, { "epoch": 0.08, "grad_norm": 0.8434467911720276, "learning_rate": 4.991653565021084e-05, "loss": 1.1558, "step": 490 }, { "epoch": 0.08, "grad_norm": 0.7665804624557495, "learning_rate": 4.99148210614862e-05, "loss": 1.0208, "step": 495 }, { "epoch": 0.08, "grad_norm": 0.5782546401023865, "learning_rate": 4.991308907021353e-05, "loss": 0.8306, "step": 500 }, { "epoch": 0.08, "eval_loss": 0.8132078051567078, "eval_runtime": 96.433, "eval_samples_per_second": 7.228, "eval_steps_per_second": 7.228, "step": 500 }, { "epoch": 0.08, "grad_norm": 1.0821778774261475, "learning_rate": 4.9911339677602584e-05, "loss": 0.9503, "step": 505 }, { "epoch": 0.08, "grad_norm": 0.5409029126167297, "learning_rate": 4.99095728848753e-05, "loss": 0.8586, "step": 510 }, { "epoch": 0.08, "grad_norm": 0.9011789560317993, "learning_rate": 4.990778869326575e-05, "loss": 0.7981, "step": 515 }, { "epoch": 0.08, "grad_norm": 1.0092263221740723, "learning_rate": 4.990598710402013e-05, "loss": 1.0174, "step": 520 }, { "epoch": 0.08, "grad_norm": 1.4362307786941528, "learning_rate": 4.9904168118396844e-05, "loss": 0.8373, "step": 525 }, { "epoch": 0.08, "grad_norm": 2.1772639751434326, "learning_rate": 4.9902331737666414e-05, "loss": 0.9599, "step": 530 }, { "epoch": 0.09, "grad_norm": 0.9610542058944702, "learning_rate": 4.990047796311151e-05, "loss": 0.6895, "step": 535 }, { "epoch": 0.09, "grad_norm": 0.9922348260879517, "learning_rate": 4.989860679602698e-05, "loss": 0.7315, "step": 540 }, { "epoch": 0.09, "grad_norm": 1.2409151792526245, "learning_rate": 4.9896718237719785e-05, "loss": 
0.8574, "step": 545 }, { "epoch": 0.09, "grad_norm": 1.016333818435669, "learning_rate": 4.9894812289509046e-05, "loss": 1.1248, "step": 550 }, { "epoch": 0.09, "grad_norm": 0.9131489396095276, "learning_rate": 4.989288895272604e-05, "loss": 0.9847, "step": 555 }, { "epoch": 0.09, "grad_norm": 1.215469479560852, "learning_rate": 4.989094822871419e-05, "loss": 0.912, "step": 560 }, { "epoch": 0.09, "grad_norm": 1.0536105632781982, "learning_rate": 4.988899011882903e-05, "loss": 0.8425, "step": 565 }, { "epoch": 0.09, "grad_norm": 1.9705311059951782, "learning_rate": 4.988701462443829e-05, "loss": 0.9385, "step": 570 }, { "epoch": 0.09, "grad_norm": 1.2488442659378052, "learning_rate": 4.98850217469218e-05, "loss": 0.7865, "step": 575 }, { "epoch": 0.09, "grad_norm": 1.7318600416183472, "learning_rate": 4.988301148767157e-05, "loss": 0.8231, "step": 580 }, { "epoch": 0.09, "grad_norm": 0.8247858881950378, "learning_rate": 4.9880983848091704e-05, "loss": 0.8553, "step": 585 }, { "epoch": 0.09, "grad_norm": 0.858172595500946, "learning_rate": 4.987893882959849e-05, "loss": 1.3952, "step": 590 }, { "epoch": 0.09, "grad_norm": 1.2286418676376343, "learning_rate": 4.987687643362033e-05, "loss": 0.837, "step": 595 }, { "epoch": 0.1, "grad_norm": 1.034350872039795, "learning_rate": 4.9874796661597765e-05, "loss": 0.9175, "step": 600 }, { "epoch": 0.1, "eval_loss": 0.8063747882843018, "eval_runtime": 96.4224, "eval_samples_per_second": 7.229, "eval_steps_per_second": 7.229, "step": 600 }, { "epoch": 0.1, "grad_norm": 0.7192366123199463, "learning_rate": 4.987269951498348e-05, "loss": 0.8563, "step": 605 }, { "epoch": 0.1, "grad_norm": 1.2645854949951172, "learning_rate": 4.98705849952423e-05, "loss": 0.6663, "step": 610 }, { "epoch": 0.1, "grad_norm": 1.0610381364822388, "learning_rate": 4.9868453103851176e-05, "loss": 0.8452, "step": 615 }, { "epoch": 0.1, "grad_norm": 0.8550002574920654, "learning_rate": 4.986630384229919e-05, "loss": 0.8894, "step": 620 }, { "epoch": 0.1, "grad_norm": 0.7490519285202026, "learning_rate": 4.986413721208757e-05, "loss": 0.9106, "step": 625 }, { "epoch": 0.1, "grad_norm": 0.557860255241394, "learning_rate": 4.986195321472965e-05, "loss": 0.685, "step": 630 }, { "epoch": 0.1, "grad_norm": 0.7450752258300781, "learning_rate": 4.9859751851750934e-05, "loss": 0.8472, "step": 635 }, { "epoch": 0.1, "grad_norm": 1.176376461982727, "learning_rate": 4.985753312468903e-05, "loss": 1.0197, "step": 640 }, { "epoch": 0.1, "grad_norm": 1.0625300407409668, "learning_rate": 4.985529703509367e-05, "loss": 0.9685, "step": 645 }, { "epoch": 0.1, "grad_norm": 0.8808372616767883, "learning_rate": 4.985304358452672e-05, "loss": 0.8612, "step": 650 }, { "epoch": 0.1, "grad_norm": 0.8110201954841614, "learning_rate": 4.985077277456218e-05, "loss": 0.8401, "step": 655 }, { "epoch": 0.11, "grad_norm": 0.9364888072013855, "learning_rate": 4.984848460678618e-05, "loss": 0.6197, "step": 660 }, { "epoch": 0.11, "grad_norm": 1.0113518238067627, "learning_rate": 4.984617908279694e-05, "loss": 0.9889, "step": 665 }, { "epoch": 0.11, "grad_norm": 1.1148868799209595, "learning_rate": 4.984385620420485e-05, "loss": 0.9558, "step": 670 }, { "epoch": 0.11, "grad_norm": 0.9506175518035889, "learning_rate": 4.984151597263238e-05, "loss": 0.7323, "step": 675 }, { "epoch": 0.11, "grad_norm": 1.0044193267822266, "learning_rate": 4.983915838971415e-05, "loss": 0.7504, "step": 680 }, { "epoch": 0.11, "grad_norm": 2.2674214839935303, "learning_rate": 4.9836783457096875e-05, "loss": 1.032, "step": 685 }, { 
"epoch": 0.11, "grad_norm": 1.4945333003997803, "learning_rate": 4.983439117643942e-05, "loss": 1.0359, "step": 690 }, { "epoch": 0.11, "grad_norm": 0.9860715866088867, "learning_rate": 4.9831981549412744e-05, "loss": 1.1152, "step": 695 }, { "epoch": 0.11, "grad_norm": 0.8287227153778076, "learning_rate": 4.982955457769992e-05, "loss": 0.8157, "step": 700 }, { "epoch": 0.11, "eval_loss": 0.8022791743278503, "eval_runtime": 96.5324, "eval_samples_per_second": 7.22, "eval_steps_per_second": 7.22, "step": 700 }, { "epoch": 0.11, "grad_norm": 0.9216273427009583, "learning_rate": 4.9827110262996144e-05, "loss": 0.8395, "step": 705 }, { "epoch": 0.11, "grad_norm": 0.7642357349395752, "learning_rate": 4.982464860700874e-05, "loss": 0.8817, "step": 710 }, { "epoch": 0.11, "grad_norm": 0.8851175308227539, "learning_rate": 4.982216961145711e-05, "loss": 0.8558, "step": 715 }, { "epoch": 0.11, "grad_norm": 0.44226109981536865, "learning_rate": 4.98196732780728e-05, "loss": 0.882, "step": 720 }, { "epoch": 0.12, "grad_norm": 0.8005027174949646, "learning_rate": 4.981715960859945e-05, "loss": 0.8835, "step": 725 }, { "epoch": 0.12, "grad_norm": 0.7451304793357849, "learning_rate": 4.981462860479281e-05, "loss": 0.8551, "step": 730 }, { "epoch": 0.12, "grad_norm": 1.1069347858428955, "learning_rate": 4.9812080268420745e-05, "loss": 0.999, "step": 735 }, { "epoch": 0.12, "grad_norm": 0.8892244100570679, "learning_rate": 4.980951460126322e-05, "loss": 1.012, "step": 740 }, { "epoch": 0.12, "grad_norm": 0.8935977816581726, "learning_rate": 4.9806931605112305e-05, "loss": 0.9911, "step": 745 }, { "epoch": 0.12, "grad_norm": 0.8456961512565613, "learning_rate": 4.9804331281772176e-05, "loss": 0.7595, "step": 750 }, { "epoch": 0.12, "grad_norm": 0.78443443775177, "learning_rate": 4.980171363305911e-05, "loss": 0.8308, "step": 755 }, { "epoch": 0.12, "grad_norm": 1.0028038024902344, "learning_rate": 4.979907866080149e-05, "loss": 0.9637, "step": 760 }, { "epoch": 0.12, "grad_norm": 1.1801577806472778, "learning_rate": 4.9796426366839786e-05, "loss": 0.6159, "step": 765 }, { "epoch": 0.12, "grad_norm": 0.8370681405067444, "learning_rate": 4.979375675302659e-05, "loss": 0.9276, "step": 770 }, { "epoch": 0.12, "grad_norm": 0.8605382442474365, "learning_rate": 4.979106982122658e-05, "loss": 1.1077, "step": 775 }, { "epoch": 0.12, "grad_norm": 0.7788259387016296, "learning_rate": 4.978836557331652e-05, "loss": 0.8172, "step": 780 }, { "epoch": 0.13, "grad_norm": 1.4312686920166016, "learning_rate": 4.978564401118528e-05, "loss": 0.8759, "step": 785 }, { "epoch": 0.13, "grad_norm": 0.9109662175178528, "learning_rate": 4.978290513673381e-05, "loss": 0.947, "step": 790 }, { "epoch": 0.13, "grad_norm": 1.1819065809249878, "learning_rate": 4.9780148951875195e-05, "loss": 0.7364, "step": 795 }, { "epoch": 0.13, "grad_norm": 0.9400575160980225, "learning_rate": 4.977737545853455e-05, "loss": 0.9469, "step": 800 }, { "epoch": 0.13, "eval_loss": 0.7995806932449341, "eval_runtime": 96.5877, "eval_samples_per_second": 7.216, "eval_steps_per_second": 7.216, "step": 800 }, { "epoch": 0.13, "grad_norm": 1.693812370300293, "learning_rate": 4.9774584658649126e-05, "loss": 0.9433, "step": 805 }, { "epoch": 0.13, "grad_norm": 1.0892895460128784, "learning_rate": 4.9771776554168234e-05, "loss": 0.7027, "step": 810 }, { "epoch": 0.13, "grad_norm": 0.9118362665176392, "learning_rate": 4.976895114705329e-05, "loss": 0.9468, "step": 815 }, { "epoch": 0.13, "grad_norm": 0.8032681345939636, "learning_rate": 4.976610843927779e-05, "loss": 
0.7927, "step": 820 }, { "epoch": 0.13, "grad_norm": 1.168225646018982, "learning_rate": 4.976324843282732e-05, "loss": 0.9673, "step": 825 }, { "epoch": 0.13, "grad_norm": 1.077602744102478, "learning_rate": 4.976037112969953e-05, "loss": 0.9156, "step": 830 }, { "epoch": 0.13, "grad_norm": 0.8643108606338501, "learning_rate": 4.9757476531904165e-05, "loss": 0.6999, "step": 835 }, { "epoch": 0.13, "grad_norm": 0.933397650718689, "learning_rate": 4.975456464146306e-05, "loss": 0.8828, "step": 840 }, { "epoch": 0.13, "grad_norm": 0.7036295533180237, "learning_rate": 4.975163546041011e-05, "loss": 0.8709, "step": 845 }, { "epoch": 0.14, "grad_norm": 0.5974694490432739, "learning_rate": 4.974868899079128e-05, "loss": 0.7594, "step": 850 }, { "epoch": 0.14, "grad_norm": 0.7244943380355835, "learning_rate": 4.974572523466465e-05, "loss": 0.8714, "step": 855 }, { "epoch": 0.14, "grad_norm": 0.5783522725105286, "learning_rate": 4.9742744194100345e-05, "loss": 0.8941, "step": 860 }, { "epoch": 0.14, "grad_norm": 0.7480617761611938, "learning_rate": 4.973974587118055e-05, "loss": 0.9798, "step": 865 }, { "epoch": 0.14, "grad_norm": 0.7548874020576477, "learning_rate": 4.973673026799956e-05, "loss": 0.7767, "step": 870 }, { "epoch": 0.14, "grad_norm": 0.7075071930885315, "learning_rate": 4.97336973866637e-05, "loss": 0.7779, "step": 875 }, { "epoch": 0.14, "grad_norm": 0.7042987942695618, "learning_rate": 4.97306472292914e-05, "loss": 0.8249, "step": 880 }, { "epoch": 0.14, "grad_norm": 1.0242459774017334, "learning_rate": 4.972757979801313e-05, "loss": 0.9223, "step": 885 }, { "epoch": 0.14, "grad_norm": 0.6138095259666443, "learning_rate": 4.9724495094971436e-05, "loss": 0.9842, "step": 890 }, { "epoch": 0.14, "grad_norm": 0.7905042767524719, "learning_rate": 4.9721393122320925e-05, "loss": 0.8738, "step": 895 }, { "epoch": 0.14, "grad_norm": 0.9658048748970032, "learning_rate": 4.9718273882228265e-05, "loss": 0.8872, "step": 900 }, { "epoch": 0.14, "eval_loss": 0.7954564690589905, "eval_runtime": 96.643, "eval_samples_per_second": 7.212, "eval_steps_per_second": 7.212, "step": 900 }, { "epoch": 0.14, "grad_norm": 0.8425014019012451, "learning_rate": 4.97151373768722e-05, "loss": 0.778, "step": 905 }, { "epoch": 0.15, "grad_norm": 0.5527231693267822, "learning_rate": 4.971198360844351e-05, "loss": 0.8332, "step": 910 }, { "epoch": 0.15, "grad_norm": 0.7870334386825562, "learning_rate": 4.9708812579145056e-05, "loss": 0.9265, "step": 915 }, { "epoch": 0.15, "grad_norm": 0.9935321807861328, "learning_rate": 4.970562429119173e-05, "loss": 0.7243, "step": 920 }, { "epoch": 0.15, "grad_norm": 0.9546892046928406, "learning_rate": 4.970241874681051e-05, "loss": 0.9908, "step": 925 }, { "epoch": 0.15, "grad_norm": 0.7340118885040283, "learning_rate": 4.969919594824039e-05, "loss": 0.7932, "step": 930 }, { "epoch": 0.15, "grad_norm": 5.1686015129089355, "learning_rate": 4.9695955897732453e-05, "loss": 0.9842, "step": 935 }, { "epoch": 0.15, "grad_norm": 0.9721456170082092, "learning_rate": 4.9692698597549815e-05, "loss": 0.9271, "step": 940 }, { "epoch": 0.15, "grad_norm": 0.6477334499359131, "learning_rate": 4.9689424049967623e-05, "loss": 0.934, "step": 945 }, { "epoch": 0.15, "grad_norm": 1.0759055614471436, "learning_rate": 4.968613225727311e-05, "loss": 1.0465, "step": 950 }, { "epoch": 0.15, "grad_norm": 0.7222158908843994, "learning_rate": 4.968282322176552e-05, "loss": 0.7732, "step": 955 }, { "epoch": 0.15, "grad_norm": 0.8591343760490417, "learning_rate": 4.9679496945756155e-05, "loss": 0.9062, 
"step": 960 }, { "epoch": 0.15, "grad_norm": 1.8495111465454102, "learning_rate": 4.967615343156837e-05, "loss": 0.8861, "step": 965 }, { "epoch": 0.15, "grad_norm": 0.6847331523895264, "learning_rate": 4.967279268153753e-05, "loss": 0.8001, "step": 970 }, { "epoch": 0.16, "grad_norm": 0.690113365650177, "learning_rate": 4.9669414698011074e-05, "loss": 0.7378, "step": 975 }, { "epoch": 0.16, "grad_norm": 0.8349626064300537, "learning_rate": 4.9666019483348456e-05, "loss": 0.7193, "step": 980 }, { "epoch": 0.16, "grad_norm": 0.6444108486175537, "learning_rate": 4.966260703992116e-05, "loss": 0.8729, "step": 985 }, { "epoch": 0.16, "grad_norm": 0.9515655040740967, "learning_rate": 4.965917737011274e-05, "loss": 0.7532, "step": 990 }, { "epoch": 0.16, "grad_norm": 0.8138986229896545, "learning_rate": 4.965573047631873e-05, "loss": 1.0124, "step": 995 }, { "epoch": 0.16, "grad_norm": 1.0182080268859863, "learning_rate": 4.9652266360946745e-05, "loss": 0.8842, "step": 1000 }, { "epoch": 0.16, "eval_loss": 0.7912728190422058, "eval_runtime": 96.5004, "eval_samples_per_second": 7.223, "eval_steps_per_second": 7.223, "step": 1000 }, { "epoch": 0.16, "grad_norm": 0.9665297269821167, "learning_rate": 4.96487850264164e-05, "loss": 1.0155, "step": 1005 }, { "epoch": 0.16, "grad_norm": 1.1356585025787354, "learning_rate": 4.964528647515933e-05, "loss": 0.8705, "step": 1010 }, { "epoch": 0.16, "grad_norm": 0.5548833608627319, "learning_rate": 4.9641770709619234e-05, "loss": 0.9634, "step": 1015 }, { "epoch": 0.16, "grad_norm": 0.8028444647789001, "learning_rate": 4.9638237732251794e-05, "loss": 0.8722, "step": 1020 }, { "epoch": 0.16, "grad_norm": 0.934234082698822, "learning_rate": 4.9634687545524724e-05, "loss": 0.9731, "step": 1025 }, { "epoch": 0.16, "grad_norm": 0.7293463349342346, "learning_rate": 4.963112015191778e-05, "loss": 1.0237, "step": 1030 }, { "epoch": 0.17, "grad_norm": 0.6442769169807434, "learning_rate": 4.962753555392271e-05, "loss": 1.1331, "step": 1035 }, { "epoch": 0.17, "grad_norm": 0.7877534031867981, "learning_rate": 4.962393375404331e-05, "loss": 1.0737, "step": 1040 }, { "epoch": 0.17, "grad_norm": 0.5739997625350952, "learning_rate": 4.9620314754795343e-05, "loss": 0.8836, "step": 1045 }, { "epoch": 0.17, "grad_norm": 0.7318402528762817, "learning_rate": 4.9616678558706634e-05, "loss": 0.9981, "step": 1050 }, { "epoch": 0.17, "grad_norm": 0.5463365316390991, "learning_rate": 4.961302516831699e-05, "loss": 0.7336, "step": 1055 }, { "epoch": 0.17, "grad_norm": 0.7839176654815674, "learning_rate": 4.960935458617824e-05, "loss": 1.025, "step": 1060 }, { "epoch": 0.17, "grad_norm": 0.7076404690742493, "learning_rate": 4.9605666814854225e-05, "loss": 0.833, "step": 1065 }, { "epoch": 0.17, "grad_norm": 0.732940673828125, "learning_rate": 4.960196185692077e-05, "loss": 0.5103, "step": 1070 }, { "epoch": 0.17, "grad_norm": 0.7256388068199158, "learning_rate": 4.959823971496574e-05, "loss": 0.8617, "step": 1075 }, { "epoch": 0.17, "grad_norm": 1.1714242696762085, "learning_rate": 4.959450039158898e-05, "loss": 1.0345, "step": 1080 }, { "epoch": 0.17, "grad_norm": 0.5849193930625916, "learning_rate": 4.9590743889402325e-05, "loss": 0.729, "step": 1085 }, { "epoch": 0.17, "grad_norm": 0.6283109784126282, "learning_rate": 4.958697021102963e-05, "loss": 0.8527, "step": 1090 }, { "epoch": 0.17, "grad_norm": 0.6387770175933838, "learning_rate": 4.9583179359106746e-05, "loss": 0.7411, "step": 1095 }, { "epoch": 0.18, "grad_norm": 0.5853758454322815, "learning_rate": 4.957937133628151e-05, 
"loss": 0.7909, "step": 1100 }, { "epoch": 0.18, "eval_loss": 0.7863278985023499, "eval_runtime": 96.3784, "eval_samples_per_second": 7.232, "eval_steps_per_second": 7.232, "step": 1100 }, { "epoch": 0.18, "grad_norm": 0.9301708936691284, "learning_rate": 4.9575546145213755e-05, "loss": 0.7149, "step": 1105 }, { "epoch": 0.18, "grad_norm": 1.125088095664978, "learning_rate": 4.9571703788575314e-05, "loss": 0.8034, "step": 1110 }, { "epoch": 0.18, "grad_norm": 1.0697988271713257, "learning_rate": 4.956784426905e-05, "loss": 0.8874, "step": 1115 }, { "epoch": 0.18, "grad_norm": 0.7094873189926147, "learning_rate": 4.956396758933361e-05, "loss": 0.6612, "step": 1120 }, { "epoch": 0.18, "grad_norm": 0.8048680424690247, "learning_rate": 4.956007375213393e-05, "loss": 0.9558, "step": 1125 }, { "epoch": 0.18, "grad_norm": 0.8820949196815491, "learning_rate": 4.9556162760170756e-05, "loss": 0.9442, "step": 1130 }, { "epoch": 0.18, "grad_norm": 0.7214958071708679, "learning_rate": 4.955223461617583e-05, "loss": 0.8392, "step": 1135 }, { "epoch": 0.18, "grad_norm": 0.8364250063896179, "learning_rate": 4.954828932289288e-05, "loss": 0.9834, "step": 1140 }, { "epoch": 0.18, "grad_norm": 0.8735854625701904, "learning_rate": 4.954432688307764e-05, "loss": 0.8817, "step": 1145 }, { "epoch": 0.18, "grad_norm": 0.810013473033905, "learning_rate": 4.9540347299497805e-05, "loss": 0.7723, "step": 1150 }, { "epoch": 0.18, "grad_norm": 0.8791002035140991, "learning_rate": 4.953635057493302e-05, "loss": 0.706, "step": 1155 }, { "epoch": 0.19, "grad_norm": 0.7556783556938171, "learning_rate": 4.953233671217493e-05, "loss": 0.8145, "step": 1160 }, { "epoch": 0.19, "grad_norm": 1.3251086473464966, "learning_rate": 4.952830571402716e-05, "loss": 0.8413, "step": 1165 }, { "epoch": 0.19, "grad_norm": 0.8531173467636108, "learning_rate": 4.952425758330527e-05, "loss": 0.8236, "step": 1170 }, { "epoch": 0.19, "grad_norm": 1.0738744735717773, "learning_rate": 4.952019232283681e-05, "loss": 0.8357, "step": 1175 }, { "epoch": 0.19, "grad_norm": 0.7908213138580322, "learning_rate": 4.9516109935461306e-05, "loss": 0.6165, "step": 1180 }, { "epoch": 0.19, "grad_norm": 0.9802565574645996, "learning_rate": 4.951201042403021e-05, "loss": 0.7203, "step": 1185 }, { "epoch": 0.19, "grad_norm": 0.7866708636283875, "learning_rate": 4.9507893791406974e-05, "loss": 0.8479, "step": 1190 }, { "epoch": 0.19, "grad_norm": 0.6721138954162598, "learning_rate": 4.950376004046698e-05, "loss": 0.8871, "step": 1195 }, { "epoch": 0.19, "grad_norm": 1.1981366872787476, "learning_rate": 4.9499609174097574e-05, "loss": 0.8196, "step": 1200 }, { "epoch": 0.19, "eval_loss": 0.7843652367591858, "eval_runtime": 96.5411, "eval_samples_per_second": 7.22, "eval_steps_per_second": 7.22, "step": 1200 }, { "epoch": 0.19, "grad_norm": 0.7013841867446899, "learning_rate": 4.9495441195198064e-05, "loss": 1.0009, "step": 1205 }, { "epoch": 0.19, "grad_norm": 0.8476290702819824, "learning_rate": 4.949125610667972e-05, "loss": 0.5127, "step": 1210 }, { "epoch": 0.19, "grad_norm": 0.7680797576904297, "learning_rate": 4.9487053911465735e-05, "loss": 0.7003, "step": 1215 }, { "epoch": 0.19, "grad_norm": 0.9771925806999207, "learning_rate": 4.948283461249127e-05, "loss": 1.1135, "step": 1220 }, { "epoch": 0.2, "grad_norm": 1.4247405529022217, "learning_rate": 4.947859821270342e-05, "loss": 0.8253, "step": 1225 }, { "epoch": 0.2, "grad_norm": 1.184887409210205, "learning_rate": 4.947434471506125e-05, "loss": 1.1208, "step": 1230 }, { "epoch": 0.2, "grad_norm": 
0.7579745054244995, "learning_rate": 4.9470074122535745e-05, "loss": 1.1363, "step": 1235 }, { "epoch": 0.2, "grad_norm": 0.8529625535011292, "learning_rate": 4.9465786438109826e-05, "loss": 0.8699, "step": 1240 }, { "epoch": 0.2, "grad_norm": 1.810576319694519, "learning_rate": 4.9461481664778374e-05, "loss": 1.0166, "step": 1245 }, { "epoch": 0.2, "grad_norm": 0.8605110049247742, "learning_rate": 4.9457159805548187e-05, "loss": 0.9427, "step": 1250 }, { "epoch": 0.2, "grad_norm": 0.59971684217453, "learning_rate": 4.945282086343801e-05, "loss": 0.6536, "step": 1255 }, { "epoch": 0.2, "grad_norm": 1.0233818292617798, "learning_rate": 4.9448464841478506e-05, "loss": 0.9505, "step": 1260 }, { "epoch": 0.2, "grad_norm": 0.8945149779319763, "learning_rate": 4.9444091742712293e-05, "loss": 0.8416, "step": 1265 }, { "epoch": 0.2, "grad_norm": 0.702805757522583, "learning_rate": 4.9439701570193886e-05, "loss": 0.9419, "step": 1270 }, { "epoch": 0.2, "grad_norm": 0.7464181184768677, "learning_rate": 4.9435294326989745e-05, "loss": 0.7972, "step": 1275 }, { "epoch": 0.2, "grad_norm": 1.1765002012252808, "learning_rate": 4.943175624360097e-05, "loss": 0.9914, "step": 1280 }, { "epoch": 0.21, "grad_norm": 0.6549853682518005, "learning_rate": 4.9427318280928034e-05, "loss": 0.8924, "step": 1285 }, { "epoch": 0.21, "grad_norm": 0.5978650450706482, "learning_rate": 4.942286325621888e-05, "loss": 0.6224, "step": 1290 }, { "epoch": 0.21, "grad_norm": 0.7752617597579956, "learning_rate": 4.941839117258523e-05, "loss": 0.8666, "step": 1295 }, { "epoch": 0.21, "grad_norm": 0.6919072866439819, "learning_rate": 4.941390203315078e-05, "loss": 0.9341, "step": 1300 }, { "epoch": 0.21, "eval_loss": 0.7824844717979431, "eval_runtime": 96.8874, "eval_samples_per_second": 7.194, "eval_steps_per_second": 7.194, "step": 1300 }, { "epoch": 0.21, "grad_norm": 0.7222729325294495, "learning_rate": 4.94093958410511e-05, "loss": 0.9925, "step": 1305 }, { "epoch": 0.21, "grad_norm": 0.9575716853141785, "learning_rate": 4.9404872599433686e-05, "loss": 0.8623, "step": 1310 }, { "epoch": 0.21, "grad_norm": 0.7721400260925293, "learning_rate": 4.940033231145793e-05, "loss": 1.0061, "step": 1315 }, { "epoch": 0.21, "grad_norm": 0.7019990682601929, "learning_rate": 4.9395774980295165e-05, "loss": 0.8697, "step": 1320 }, { "epoch": 0.21, "grad_norm": 0.7828916907310486, "learning_rate": 4.939120060912858e-05, "loss": 1.0066, "step": 1325 }, { "epoch": 0.21, "grad_norm": 1.0238871574401855, "learning_rate": 4.93866092011533e-05, "loss": 1.0285, "step": 1330 }, { "epoch": 0.21, "grad_norm": 0.48669734597206116, "learning_rate": 4.938200075957634e-05, "loss": 0.7454, "step": 1335 }, { "epoch": 0.21, "grad_norm": 0.8834619522094727, "learning_rate": 4.93773752876166e-05, "loss": 0.9998, "step": 1340 }, { "epoch": 0.21, "grad_norm": 0.6462609767913818, "learning_rate": 4.9372732788504905e-05, "loss": 0.7278, "step": 1345 }, { "epoch": 0.22, "grad_norm": 0.7309257388114929, "learning_rate": 4.936807326548395e-05, "loss": 0.7301, "step": 1350 }, { "epoch": 0.22, "grad_norm": 0.8515027165412903, "learning_rate": 4.936339672180833e-05, "loss": 0.8307, "step": 1355 }, { "epoch": 0.22, "grad_norm": 0.913206934928894, "learning_rate": 4.935870316074451e-05, "loss": 0.9467, "step": 1360 }, { "epoch": 0.22, "grad_norm": 0.6705841422080994, "learning_rate": 4.935399258557088e-05, "loss": 0.7124, "step": 1365 }, { "epoch": 0.22, "grad_norm": 0.676695704460144, "learning_rate": 4.934926499957767e-05, "loss": 0.9318, "step": 1370 }, { "epoch": 0.22, 
"grad_norm": 1.0529104471206665, "learning_rate": 4.934452040606703e-05, "loss": 1.0307, "step": 1375 }, { "epoch": 0.22, "grad_norm": 0.7150225639343262, "learning_rate": 4.933975880835296e-05, "loss": 0.8718, "step": 1380 }, { "epoch": 0.22, "grad_norm": 0.7180047035217285, "learning_rate": 4.933498020976135e-05, "loss": 0.7515, "step": 1385 }, { "epoch": 0.22, "grad_norm": 1.0961759090423584, "learning_rate": 4.933018461362997e-05, "loss": 0.8797, "step": 1390 }, { "epoch": 0.22, "grad_norm": 0.830609142780304, "learning_rate": 4.9325372023308446e-05, "loss": 0.6927, "step": 1395 }, { "epoch": 0.22, "grad_norm": 0.5277318358421326, "learning_rate": 4.9320542442158305e-05, "loss": 0.8801, "step": 1400 }, { "epoch": 0.22, "eval_loss": 0.7787255644798279, "eval_runtime": 96.8812, "eval_samples_per_second": 7.194, "eval_steps_per_second": 7.194, "step": 1400 }, { "epoch": 0.22, "grad_norm": 1.3845161199569702, "learning_rate": 4.931569587355289e-05, "loss": 0.8782, "step": 1405 }, { "epoch": 0.23, "grad_norm": 0.8579941987991333, "learning_rate": 4.9310832320877476e-05, "loss": 0.713, "step": 1410 }, { "epoch": 0.23, "grad_norm": 0.2643532454967499, "learning_rate": 4.930595178752914e-05, "loss": 0.9781, "step": 1415 }, { "epoch": 0.23, "grad_norm": 0.4968445897102356, "learning_rate": 4.930105427691685e-05, "loss": 0.93, "step": 1420 }, { "epoch": 0.23, "grad_norm": 0.9254417419433594, "learning_rate": 4.929613979246144e-05, "loss": 0.6353, "step": 1425 }, { "epoch": 0.23, "grad_norm": 0.9814417958259583, "learning_rate": 4.9291208337595574e-05, "loss": 0.9672, "step": 1430 }, { "epoch": 0.23, "grad_norm": 0.7159338593482971, "learning_rate": 4.928625991576379e-05, "loss": 0.9482, "step": 1435 }, { "epoch": 0.23, "grad_norm": 0.623866617679596, "learning_rate": 4.9281294530422476e-05, "loss": 0.623, "step": 1440 }, { "epoch": 0.23, "grad_norm": 0.8750379681587219, "learning_rate": 4.927631218503985e-05, "loss": 0.772, "step": 1445 }, { "epoch": 0.23, "grad_norm": 0.5593128800392151, "learning_rate": 4.9271312883096e-05, "loss": 0.6579, "step": 1450 }, { "epoch": 0.23, "grad_norm": 0.6411569714546204, "learning_rate": 4.9266296628082834e-05, "loss": 0.9239, "step": 1455 }, { "epoch": 0.23, "grad_norm": 0.9317705631256104, "learning_rate": 4.9261263423504135e-05, "loss": 0.9315, "step": 1460 }, { "epoch": 0.23, "grad_norm": 0.8312699198722839, "learning_rate": 4.9256213272875486e-05, "loss": 0.7334, "step": 1465 }, { "epoch": 0.23, "grad_norm": 0.6170663833618164, "learning_rate": 4.925114617972433e-05, "loss": 0.8603, "step": 1470 }, { "epoch": 0.24, "grad_norm": 0.7176920771598816, "learning_rate": 4.924606214758995e-05, "loss": 0.8738, "step": 1475 }, { "epoch": 0.24, "grad_norm": 0.8957033157348633, "learning_rate": 4.924096118002343e-05, "loss": 0.8861, "step": 1480 }, { "epoch": 0.24, "grad_norm": 0.5490685701370239, "learning_rate": 4.923584328058772e-05, "loss": 0.712, "step": 1485 }, { "epoch": 0.24, "grad_norm": 0.7401763796806335, "learning_rate": 4.923070845285757e-05, "loss": 0.8118, "step": 1490 }, { "epoch": 0.24, "grad_norm": 0.7380841374397278, "learning_rate": 4.922555670041957e-05, "loss": 0.8476, "step": 1495 }, { "epoch": 0.24, "grad_norm": 1.0009427070617676, "learning_rate": 4.922038802687212e-05, "loss": 0.9109, "step": 1500 }, { "epoch": 0.24, "eval_loss": 0.777683675289154, "eval_runtime": 96.9147, "eval_samples_per_second": 7.192, "eval_steps_per_second": 7.192, "step": 1500 }, { "epoch": 0.24, "grad_norm": 0.7970065474510193, "learning_rate": 
4.921520243582545e-05, "loss": 0.616, "step": 1505 }, { "epoch": 0.24, "grad_norm": 0.6530303955078125, "learning_rate": 4.92099999309016e-05, "loss": 0.9223, "step": 1510 }, { "epoch": 0.24, "grad_norm": 0.48044708371162415, "learning_rate": 4.9204780515734406e-05, "loss": 0.6762, "step": 1515 }, { "epoch": 0.24, "grad_norm": 0.7560244798660278, "learning_rate": 4.919954419396956e-05, "loss": 0.8726, "step": 1520 }, { "epoch": 0.24, "grad_norm": 0.8580659031867981, "learning_rate": 4.919429096926453e-05, "loss": 0.7654, "step": 1525 }, { "epoch": 0.24, "grad_norm": 1.1246473789215088, "learning_rate": 4.918902084528859e-05, "loss": 0.9123, "step": 1530 }, { "epoch": 0.25, "grad_norm": 1.0745307207107544, "learning_rate": 4.918373382572283e-05, "loss": 0.79, "step": 1535 }, { "epoch": 0.25, "grad_norm": 0.9591856598854065, "learning_rate": 4.917842991426014e-05, "loss": 1.1778, "step": 1540 }, { "epoch": 0.25, "grad_norm": 1.0233389139175415, "learning_rate": 4.91731091146052e-05, "loss": 0.8827, "step": 1545 }, { "epoch": 0.25, "grad_norm": 0.648965060710907, "learning_rate": 4.91677714304745e-05, "loss": 0.8634, "step": 1550 }, { "epoch": 0.25, "grad_norm": 0.6523327231407166, "learning_rate": 4.91624168655963e-05, "loss": 0.9916, "step": 1555 }, { "epoch": 0.25, "grad_norm": 0.8029198050498962, "learning_rate": 4.915704542371068e-05, "loss": 0.7867, "step": 1560 }, { "epoch": 0.25, "grad_norm": 0.6397082805633545, "learning_rate": 4.915165710856948e-05, "loss": 0.7738, "step": 1565 }, { "epoch": 0.25, "grad_norm": 0.5862845778465271, "learning_rate": 4.914625192393636e-05, "loss": 0.7026, "step": 1570 }, { "epoch": 0.25, "grad_norm": 0.5333505868911743, "learning_rate": 4.914082987358673e-05, "loss": 0.8623, "step": 1575 }, { "epoch": 0.25, "grad_norm": 0.5689602494239807, "learning_rate": 4.913539096130779e-05, "loss": 0.7619, "step": 1580 }, { "epoch": 0.25, "grad_norm": 0.7333836555480957, "learning_rate": 4.912993519089853e-05, "loss": 0.8116, "step": 1585 }, { "epoch": 0.25, "grad_norm": 0.7610496282577515, "learning_rate": 4.91244625661697e-05, "loss": 0.74, "step": 1590 }, { "epoch": 0.25, "grad_norm": 0.6331669092178345, "learning_rate": 4.9118973090943835e-05, "loss": 1.0445, "step": 1595 }, { "epoch": 0.26, "grad_norm": 0.7263479828834534, "learning_rate": 4.911346676905521e-05, "loss": 0.8964, "step": 1600 }, { "epoch": 0.26, "eval_loss": 0.7759388089179993, "eval_runtime": 96.8818, "eval_samples_per_second": 7.194, "eval_steps_per_second": 7.194, "step": 1600 }, { "epoch": 0.26, "grad_norm": 0.6523721814155579, "learning_rate": 4.910794360434993e-05, "loss": 1.0127, "step": 1605 }, { "epoch": 0.26, "grad_norm": 1.055384874343872, "learning_rate": 4.9102403600685796e-05, "loss": 0.9855, "step": 1610 }, { "epoch": 0.26, "grad_norm": 0.7640814185142517, "learning_rate": 4.9096846761932414e-05, "loss": 0.7963, "step": 1615 }, { "epoch": 0.26, "grad_norm": 0.5843799710273743, "learning_rate": 4.9091273091971124e-05, "loss": 0.8854, "step": 1620 }, { "epoch": 0.26, "grad_norm": 0.9825207591056824, "learning_rate": 4.9085682594695036e-05, "loss": 0.8086, "step": 1625 }, { "epoch": 0.26, "grad_norm": 0.9490563869476318, "learning_rate": 4.908007527400901e-05, "loss": 0.6838, "step": 1630 }, { "epoch": 0.26, "grad_norm": 0.9472922682762146, "learning_rate": 4.907445113382966e-05, "loss": 0.8732, "step": 1635 }, { "epoch": 0.26, "grad_norm": 0.6690593957901001, "learning_rate": 4.9068810178085344e-05, "loss": 0.8551, "step": 1640 }, { "epoch": 0.26, "grad_norm": 0.7245538830757141, 
"learning_rate": 4.906315241071616e-05, "loss": 0.7639, "step": 1645 }, { "epoch": 0.26, "grad_norm": 0.8342815041542053, "learning_rate": 4.905747783567397e-05, "loss": 0.9417, "step": 1650 }, { "epoch": 0.26, "grad_norm": 0.6241989135742188, "learning_rate": 4.9051786456922354e-05, "loss": 0.9394, "step": 1655 }, { "epoch": 0.26, "grad_norm": 0.5671687126159668, "learning_rate": 4.904607827843663e-05, "loss": 0.6381, "step": 1660 }, { "epoch": 0.27, "grad_norm": 0.795868456363678, "learning_rate": 4.9040353304203864e-05, "loss": 0.7676, "step": 1665 }, { "epoch": 0.27, "grad_norm": 0.9995182156562805, "learning_rate": 4.9034611538222844e-05, "loss": 1.0327, "step": 1670 }, { "epoch": 0.27, "grad_norm": 0.7473803758621216, "learning_rate": 4.902885298450409e-05, "loss": 0.8835, "step": 1675 }, { "epoch": 0.27, "grad_norm": 0.5757468938827515, "learning_rate": 4.902307764706984e-05, "loss": 0.7548, "step": 1680 }, { "epoch": 0.27, "grad_norm": 0.8357987403869629, "learning_rate": 4.901728552995407e-05, "loss": 0.9184, "step": 1685 }, { "epoch": 0.27, "grad_norm": 0.6664137244224548, "learning_rate": 4.901147663720247e-05, "loss": 0.9872, "step": 1690 }, { "epoch": 0.27, "grad_norm": 0.861997663974762, "learning_rate": 4.900565097287243e-05, "loss": 0.8541, "step": 1695 }, { "epoch": 0.27, "grad_norm": 0.7566475868225098, "learning_rate": 4.8999808541033086e-05, "loss": 0.9265, "step": 1700 }, { "epoch": 0.27, "eval_loss": 0.7741928696632385, "eval_runtime": 96.9038, "eval_samples_per_second": 7.193, "eval_steps_per_second": 7.193, "step": 1700 }, { "epoch": 0.27, "grad_norm": 0.45475611090660095, "learning_rate": 4.8993949345765266e-05, "loss": 0.7186, "step": 1705 }, { "epoch": 0.27, "grad_norm": 0.8672823905944824, "learning_rate": 4.8988073391161515e-05, "loss": 0.919, "step": 1710 }, { "epoch": 0.27, "grad_norm": 0.7782495617866516, "learning_rate": 4.8982180681326074e-05, "loss": 0.6618, "step": 1715 }, { "epoch": 0.27, "grad_norm": 0.6640329957008362, "learning_rate": 4.897627122037489e-05, "loss": 0.6662, "step": 1720 }, { "epoch": 0.28, "grad_norm": 0.8019454479217529, "learning_rate": 4.897034501243561e-05, "loss": 0.9459, "step": 1725 }, { "epoch": 0.28, "grad_norm": 0.8336368799209595, "learning_rate": 4.896440206164761e-05, "loss": 0.8058, "step": 1730 }, { "epoch": 0.28, "grad_norm": 0.6316781044006348, "learning_rate": 4.8958442372161906e-05, "loss": 0.9132, "step": 1735 }, { "epoch": 0.28, "grad_norm": 0.7768308520317078, "learning_rate": 4.895246594814124e-05, "loss": 0.7512, "step": 1740 }, { "epoch": 0.28, "grad_norm": 0.9891632795333862, "learning_rate": 4.894647279376002e-05, "loss": 0.843, "step": 1745 }, { "epoch": 0.28, "grad_norm": 0.6162430047988892, "learning_rate": 4.894046291320439e-05, "loss": 0.8233, "step": 1750 }, { "epoch": 0.28, "grad_norm": 0.6184887290000916, "learning_rate": 4.893443631067211e-05, "loss": 0.7428, "step": 1755 }, { "epoch": 0.28, "grad_norm": 0.7117312550544739, "learning_rate": 4.892839299037267e-05, "loss": 0.8707, "step": 1760 }, { "epoch": 0.28, "grad_norm": 0.7165163159370422, "learning_rate": 4.892233295652721e-05, "loss": 1.0485, "step": 1765 }, { "epoch": 0.28, "grad_norm": 0.8377657532691956, "learning_rate": 4.891625621336855e-05, "loss": 0.7368, "step": 1770 }, { "epoch": 0.28, "grad_norm": 0.6349939703941345, "learning_rate": 4.89101627651412e-05, "loss": 0.7357, "step": 1775 }, { "epoch": 0.28, "grad_norm": 4.969137191772461, "learning_rate": 4.890405261610131e-05, "loss": 0.7605, "step": 1780 }, { "epoch": 0.28, 
"grad_norm": 1.5980018377304077, "learning_rate": 4.889792577051671e-05, "loss": 0.9253, "step": 1785 }, { "epoch": 0.29, "grad_norm": 0.681398332118988, "learning_rate": 4.889178223266688e-05, "loss": 0.7235, "step": 1790 }, { "epoch": 0.29, "grad_norm": 0.6999421715736389, "learning_rate": 4.888562200684299e-05, "loss": 0.8521, "step": 1795 }, { "epoch": 0.29, "grad_norm": 0.7693730592727661, "learning_rate": 4.887944509734783e-05, "loss": 0.8632, "step": 1800 }, { "epoch": 0.29, "eval_loss": 0.76987224817276, "eval_runtime": 96.9052, "eval_samples_per_second": 7.193, "eval_steps_per_second": 7.193, "step": 1800 }, { "epoch": 0.29, "grad_norm": 0.7641138434410095, "learning_rate": 4.8873251508495865e-05, "loss": 0.7074, "step": 1805 }, { "epoch": 0.29, "grad_norm": 0.732545018196106, "learning_rate": 4.886704124461321e-05, "loss": 0.6901, "step": 1810 }, { "epoch": 0.29, "grad_norm": 1.0327179431915283, "learning_rate": 4.88608143100376e-05, "loss": 0.8256, "step": 1815 }, { "epoch": 0.29, "grad_norm": 0.7066757082939148, "learning_rate": 4.885457070911845e-05, "loss": 0.6635, "step": 1820 }, { "epoch": 0.29, "grad_norm": 0.809877336025238, "learning_rate": 4.8848310446216806e-05, "loss": 0.795, "step": 1825 }, { "epoch": 0.29, "grad_norm": 0.738153338432312, "learning_rate": 4.8842033525705335e-05, "loss": 0.9089, "step": 1830 }, { "epoch": 0.29, "grad_norm": 0.754896879196167, "learning_rate": 4.883573995196836e-05, "loss": 0.7103, "step": 1835 }, { "epoch": 0.29, "grad_norm": 1.0111182928085327, "learning_rate": 4.8829429729401826e-05, "loss": 1.046, "step": 1840 }, { "epoch": 0.29, "grad_norm": 0.6233395934104919, "learning_rate": 4.8823102862413306e-05, "loss": 0.761, "step": 1845 }, { "epoch": 0.3, "grad_norm": 1.3443419933319092, "learning_rate": 4.8816759355422e-05, "loss": 0.8436, "step": 1850 }, { "epoch": 0.3, "grad_norm": 0.6685923337936401, "learning_rate": 4.8810399212858736e-05, "loss": 0.8956, "step": 1855 }, { "epoch": 0.3, "grad_norm": 1.0405924320220947, "learning_rate": 4.880402243916596e-05, "loss": 1.1458, "step": 1860 }, { "epoch": 0.3, "grad_norm": 0.8413107991218567, "learning_rate": 4.879762903879772e-05, "loss": 0.8133, "step": 1865 }, { "epoch": 0.3, "grad_norm": 0.7151504158973694, "learning_rate": 4.8791219016219705e-05, "loss": 0.9207, "step": 1870 }, { "epoch": 0.3, "grad_norm": 0.6887856125831604, "learning_rate": 4.878479237590918e-05, "loss": 0.8185, "step": 1875 }, { "epoch": 0.3, "grad_norm": 0.5687748193740845, "learning_rate": 4.877834912235506e-05, "loss": 0.9035, "step": 1880 }, { "epoch": 0.3, "grad_norm": 0.9966350793838501, "learning_rate": 4.877188926005782e-05, "loss": 0.7764, "step": 1885 }, { "epoch": 0.3, "grad_norm": 1.0459462404251099, "learning_rate": 4.8765412793529574e-05, "loss": 0.6658, "step": 1890 }, { "epoch": 0.3, "grad_norm": 0.8338847160339355, "learning_rate": 4.8758919727293995e-05, "loss": 0.7363, "step": 1895 }, { "epoch": 0.3, "grad_norm": 0.7602768540382385, "learning_rate": 4.875241006588638e-05, "loss": 1.0081, "step": 1900 }, { "epoch": 0.3, "eval_loss": 0.7692809700965881, "eval_runtime": 96.4899, "eval_samples_per_second": 7.224, "eval_steps_per_second": 7.224, "step": 1900 }, { "epoch": 0.3, "grad_norm": 0.5455746054649353, "learning_rate": 4.874588381385362e-05, "loss": 0.7855, "step": 1905 }, { "epoch": 0.3, "grad_norm": 0.8574795126914978, "learning_rate": 4.8739340975754165e-05, "loss": 1.068, "step": 1910 }, { "epoch": 0.31, "grad_norm": 1.0321904420852661, "learning_rate": 4.873278155615808e-05, "loss": 
0.8239, "step": 1915 }, { "epoch": 0.31, "grad_norm": 1.2484744787216187, "learning_rate": 4.8726205559646996e-05, "loss": 0.9307, "step": 1920 }, { "epoch": 0.31, "grad_norm": 0.7140147686004639, "learning_rate": 4.871961299081412e-05, "loss": 0.9876, "step": 1925 }, { "epoch": 0.31, "grad_norm": 0.8003590106964111, "learning_rate": 4.871300385426426e-05, "loss": 0.8615, "step": 1930 }, { "epoch": 0.31, "grad_norm": 0.7282931208610535, "learning_rate": 4.870637815461376e-05, "loss": 0.8734, "step": 1935 }, { "epoch": 0.31, "grad_norm": 0.6800629496574402, "learning_rate": 4.869973589649055e-05, "loss": 0.7718, "step": 1940 }, { "epoch": 0.31, "grad_norm": 0.8813210129737854, "learning_rate": 4.869307708453413e-05, "loss": 0.7943, "step": 1945 }, { "epoch": 0.31, "grad_norm": 0.6612805724143982, "learning_rate": 4.868640172339557e-05, "loss": 0.6807, "step": 1950 }, { "epoch": 0.31, "grad_norm": 0.653191328048706, "learning_rate": 4.867970981773748e-05, "loss": 0.8948, "step": 1955 }, { "epoch": 0.31, "grad_norm": 0.7479822635650635, "learning_rate": 4.8673001372234025e-05, "loss": 0.8583, "step": 1960 }, { "epoch": 0.31, "grad_norm": NaN, "learning_rate": 4.8667622710291026e-05, "loss": 0.7443, "step": 1965 }, { "epoch": 0.31, "grad_norm": 0.5788535475730896, "learning_rate": 4.866088450488172e-05, "loss": 0.7249, "step": 1970 }, { "epoch": 0.32, "grad_norm": 0.7408040165901184, "learning_rate": 4.86541297727762e-05, "loss": 0.7115, "step": 1975 }, { "epoch": 0.32, "grad_norm": 0.6549968719482422, "learning_rate": 4.864735851869251e-05, "loss": 0.9095, "step": 1980 }, { "epoch": 0.32, "grad_norm": 0.4595119059085846, "learning_rate": 4.864057074736026e-05, "loss": 1.2808, "step": 1985 }, { "epoch": 0.32, "grad_norm": 0.5746715068817139, "learning_rate": 4.863376646352058e-05, "loss": 0.8139, "step": 1990 }, { "epoch": 0.32, "grad_norm": 0.6972643136978149, "learning_rate": 4.862694567192614e-05, "loss": 0.9797, "step": 1995 }, { "epoch": 0.32, "grad_norm": 0.6935243010520935, "learning_rate": 4.8620108377341124e-05, "loss": 0.7651, "step": 2000 }, { "epoch": 0.32, "eval_loss": 0.766412615776062, "eval_runtime": 96.4555, "eval_samples_per_second": 7.226, "eval_steps_per_second": 7.226, "step": 2000 }, { "epoch": 0.32, "grad_norm": 0.9983006715774536, "learning_rate": 4.861325458454128e-05, "loss": 0.8256, "step": 2005 }, { "epoch": 0.32, "grad_norm": 0.6732650995254517, "learning_rate": 4.860638429831384e-05, "loss": 0.8136, "step": 2010 }, { "epoch": 0.32, "grad_norm": 0.6780042052268982, "learning_rate": 4.859949752345758e-05, "loss": 0.8911, "step": 2015 }, { "epoch": 0.32, "grad_norm": 0.9892123937606812, "learning_rate": 4.8592594264782794e-05, "loss": 0.7907, "step": 2020 }, { "epoch": 0.32, "grad_norm": 0.9327254295349121, "learning_rate": 4.8585674527111266e-05, "loss": 0.8712, "step": 2025 }, { "epoch": 0.32, "grad_norm": 1.0295612812042236, "learning_rate": 4.857873831527632e-05, "loss": 0.9188, "step": 2030 }, { "epoch": 0.32, "grad_norm": 3.3071186542510986, "learning_rate": 4.8571785634122766e-05, "loss": 0.8801, "step": 2035 }, { "epoch": 0.33, "grad_norm": 0.9625150561332703, "learning_rate": 4.856481648850694e-05, "loss": 0.8333, "step": 2040 }, { "epoch": 0.33, "grad_norm": 0.6674854159355164, "learning_rate": 4.855783088329664e-05, "loss": 1.0388, "step": 2045 }, { "epoch": 0.33, "grad_norm": 0.5447000861167908, "learning_rate": 4.8550828823371196e-05, "loss": 0.7893, "step": 2050 }, { "epoch": 0.33, "grad_norm": 0.9970148801803589, "learning_rate": 4.854381031362142e-05, 
"loss": 0.8198, "step": 2055 }, { "epoch": 0.33, "grad_norm": 0.7657136917114258, "learning_rate": 4.853677535894961e-05, "loss": 0.5977, "step": 2060 }, { "epoch": 0.33, "grad_norm": 0.4694065451622009, "learning_rate": 4.852972396426956e-05, "loss": 0.5965, "step": 2065 }, { "epoch": 0.33, "grad_norm": 0.8955700993537903, "learning_rate": 4.852265613450653e-05, "loss": 0.6938, "step": 2070 }, { "epoch": 0.33, "grad_norm": 0.9884099960327148, "learning_rate": 4.851557187459727e-05, "loss": 0.8946, "step": 2075 }, { "epoch": 0.33, "grad_norm": 0.6793637871742249, "learning_rate": 4.850847118949002e-05, "loss": 0.841, "step": 2080 }, { "epoch": 0.33, "grad_norm": 0.7438017725944519, "learning_rate": 4.850135408414447e-05, "loss": 0.8843, "step": 2085 }, { "epoch": 0.33, "grad_norm": 0.7632609009742737, "learning_rate": 4.849422056353178e-05, "loss": 0.8263, "step": 2090 }, { "epoch": 0.33, "grad_norm": 0.7281492352485657, "learning_rate": 4.84870706326346e-05, "loss": 0.8989, "step": 2095 }, { "epoch": 0.34, "grad_norm": 0.6480591893196106, "learning_rate": 4.847990429644702e-05, "loss": 1.0037, "step": 2100 }, { "epoch": 0.34, "eval_loss": 0.7653521299362183, "eval_runtime": 96.4452, "eval_samples_per_second": 7.227, "eval_steps_per_second": 7.227, "step": 2100 }, { "epoch": 0.34, "grad_norm": 0.5578673481941223, "learning_rate": 4.8472721559974584e-05, "loss": 0.911, "step": 2105 }, { "epoch": 0.34, "grad_norm": 0.5615595579147339, "learning_rate": 4.846552242823433e-05, "loss": 0.6938, "step": 2110 }, { "epoch": 0.34, "grad_norm": 0.588246762752533, "learning_rate": 4.845830690625469e-05, "loss": 0.7898, "step": 2115 }, { "epoch": 0.34, "grad_norm": 0.8140611052513123, "learning_rate": 4.8451074999075595e-05, "loss": 0.7702, "step": 2120 }, { "epoch": 0.34, "grad_norm": 0.9400056600570679, "learning_rate": 4.8443826711748385e-05, "loss": 0.7959, "step": 2125 }, { "epoch": 0.34, "grad_norm": 0.7187873721122742, "learning_rate": 4.8436562049335874e-05, "loss": 0.7223, "step": 2130 }, { "epoch": 0.34, "grad_norm": 0.7627830505371094, "learning_rate": 4.8429281016912275e-05, "loss": 0.793, "step": 2135 }, { "epoch": 0.34, "grad_norm": 0.6755004525184631, "learning_rate": 4.842198361956328e-05, "loss": 0.7665, "step": 2140 }, { "epoch": 0.34, "grad_norm": 0.6032254695892334, "learning_rate": 4.8414669862385966e-05, "loss": 0.7952, "step": 2145 }, { "epoch": 0.34, "grad_norm": 0.8377916216850281, "learning_rate": 4.840733975048887e-05, "loss": 1.0016, "step": 2150 }, { "epoch": 0.34, "grad_norm": 0.7361429929733276, "learning_rate": 4.839999328899194e-05, "loss": 0.8773, "step": 2155 }, { "epoch": 0.34, "grad_norm": 0.8006517887115479, "learning_rate": 4.8392630483026546e-05, "loss": 0.9334, "step": 2160 }, { "epoch": 0.35, "grad_norm": 0.9716467261314392, "learning_rate": 4.8385251337735473e-05, "loss": 1.0359, "step": 2165 }, { "epoch": 0.35, "grad_norm": 0.6826418042182922, "learning_rate": 4.8377855858272925e-05, "loss": 0.6841, "step": 2170 }, { "epoch": 0.35, "grad_norm": 0.4519975781440735, "learning_rate": 4.8370444049804494e-05, "loss": 0.8326, "step": 2175 }, { "epoch": 0.35, "grad_norm": 0.677891731262207, "learning_rate": 4.836301591750721e-05, "loss": 1.0841, "step": 2180 }, { "epoch": 0.35, "grad_norm": 1.5161852836608887, "learning_rate": 4.835557146656948e-05, "loss": 0.8701, "step": 2185 }, { "epoch": 0.35, "grad_norm": 0.6586780548095703, "learning_rate": 4.834811070219112e-05, "loss": 0.8261, "step": 2190 }, { "epoch": 0.35, "grad_norm": 0.48046165704727173, "learning_rate": 
4.834063362958333e-05, "loss": 0.6375, "step": 2195 }, { "epoch": 0.35, "grad_norm": 1.0315968990325928, "learning_rate": 4.833314025396872e-05, "loss": 0.8768, "step": 2200 }, { "epoch": 0.35, "eval_loss": 0.7641988396644592, "eval_runtime": 96.3923, "eval_samples_per_second": 7.231, "eval_steps_per_second": 7.231, "step": 2200 }, { "epoch": 0.35, "grad_norm": 0.7704123258590698, "learning_rate": 4.8325630580581263e-05, "loss": 0.8849, "step": 2205 }, { "epoch": 0.35, "grad_norm": 1.087425708770752, "learning_rate": 4.831810461466634e-05, "loss": 0.9828, "step": 2210 }, { "epoch": 0.35, "grad_norm": 0.4766077995300293, "learning_rate": 4.83105623614807e-05, "loss": 0.7103, "step": 2215 }, { "epoch": 0.35, "grad_norm": 0.6079148054122925, "learning_rate": 4.830300382629247e-05, "loss": 0.7253, "step": 2220 }, { "epoch": 0.36, "grad_norm": 0.6767585873603821, "learning_rate": 4.829542901438115e-05, "loss": 0.7852, "step": 2225 }, { "epoch": 0.36, "grad_norm": 0.7065784335136414, "learning_rate": 4.8287837931037585e-05, "loss": 0.8047, "step": 2230 }, { "epoch": 0.36, "grad_norm": 0.8305274248123169, "learning_rate": 4.828023058156404e-05, "loss": 0.7912, "step": 2235 }, { "epoch": 0.36, "grad_norm": 0.8435990810394287, "learning_rate": 4.827260697127409e-05, "loss": 0.826, "step": 2240 }, { "epoch": 0.36, "grad_norm": 0.8484389185905457, "learning_rate": 4.8264967105492705e-05, "loss": 0.706, "step": 2245 }, { "epoch": 0.36, "grad_norm": 0.7461299300193787, "learning_rate": 4.825731098955617e-05, "loss": 0.763, "step": 2250 }, { "epoch": 0.36, "grad_norm": 0.7928741574287415, "learning_rate": 4.824963862881216e-05, "loss": 0.8125, "step": 2255 }, { "epoch": 0.36, "grad_norm": 0.7152695059776306, "learning_rate": 4.824195002861968e-05, "loss": 1.129, "step": 2260 }, { "epoch": 0.36, "grad_norm": 0.8594226241111755, "learning_rate": 4.8234245194349056e-05, "loss": 0.8873, "step": 2265 }, { "epoch": 0.36, "grad_norm": 0.9760085940361023, "learning_rate": 4.822652413138199e-05, "loss": 0.9713, "step": 2270 }, { "epoch": 0.36, "grad_norm": 0.7297483682632446, "learning_rate": 4.8218786845111505e-05, "loss": 0.6953, "step": 2275 }, { "epoch": 0.36, "grad_norm": 0.8251492381095886, "learning_rate": 4.8211033340941956e-05, "loss": 0.7649, "step": 2280 }, { "epoch": 0.36, "grad_norm": 0.742917537689209, "learning_rate": 4.820326362428901e-05, "loss": 0.9756, "step": 2285 }, { "epoch": 0.37, "grad_norm": 0.7784115076065063, "learning_rate": 4.819547770057969e-05, "loss": 0.6937, "step": 2290 }, { "epoch": 0.37, "grad_norm": 1.782772183418274, "learning_rate": 4.8187675575252314e-05, "loss": 0.9062, "step": 2295 }, { "epoch": 0.37, "grad_norm": 0.7802585363388062, "learning_rate": 4.8179857253756514e-05, "loss": 0.8052, "step": 2300 }, { "epoch": 0.37, "eval_loss": 0.7618402242660522, "eval_runtime": 96.4079, "eval_samples_per_second": 7.23, "eval_steps_per_second": 7.23, "step": 2300 }, { "epoch": 0.37, "grad_norm": 0.799985945224762, "learning_rate": 4.8172022741553255e-05, "loss": 0.9046, "step": 2305 }, { "epoch": 0.37, "grad_norm": 1.026978850364685, "learning_rate": 4.816417204411481e-05, "loss": 0.7195, "step": 2310 }, { "epoch": 0.37, "grad_norm": 0.8067365884780884, "learning_rate": 4.8156305166924734e-05, "loss": 0.8193, "step": 2315 }, { "epoch": 0.37, "grad_norm": 1.247164249420166, "learning_rate": 4.81484221154779e-05, "loss": 0.6138, "step": 2320 }, { "epoch": 0.37, "grad_norm": 0.8662647604942322, "learning_rate": 4.814052289528047e-05, "loss": 0.7763, "step": 2325 }, { "epoch": 0.37, 
"grad_norm": 0.9020537734031677, "learning_rate": 4.813260751184992e-05, "loss": 0.9236, "step": 2330 }, { "epoch": 0.37, "grad_norm": 0.6113781929016113, "learning_rate": 4.812467597071499e-05, "loss": 0.8753, "step": 2335 }, { "epoch": 0.37, "grad_norm": 0.6988622546195984, "learning_rate": 4.811672827741572e-05, "loss": 0.6747, "step": 2340 }, { "epoch": 0.37, "grad_norm": 0.9095928072929382, "learning_rate": 4.810876443750344e-05, "loss": 1.0578, "step": 2345 }, { "epoch": 0.38, "grad_norm": 0.643699049949646, "learning_rate": 4.8100784456540724e-05, "loss": 0.8177, "step": 2350 }, { "epoch": 0.38, "grad_norm": 0.7084022760391235, "learning_rate": 4.809278834010146e-05, "loss": 0.9345, "step": 2355 }, { "epoch": 0.38, "grad_norm": 0.5328305959701538, "learning_rate": 4.808477609377078e-05, "loss": 0.6781, "step": 2360 }, { "epoch": 0.38, "grad_norm": 0.8238436579704285, "learning_rate": 4.80767477231451e-05, "loss": 0.7306, "step": 2365 }, { "epoch": 0.38, "grad_norm": 1.0184216499328613, "learning_rate": 4.806870323383208e-05, "loss": 1.0288, "step": 2370 }, { "epoch": 0.38, "grad_norm": 0.8620426654815674, "learning_rate": 4.806064263145066e-05, "loss": 0.7925, "step": 2375 }, { "epoch": 0.38, "grad_norm": 0.6541377305984497, "learning_rate": 4.805256592163102e-05, "loss": 0.8629, "step": 2380 }, { "epoch": 0.38, "grad_norm": 0.8664489984512329, "learning_rate": 4.8044473110014594e-05, "loss": 0.8184, "step": 2385 }, { "epoch": 0.38, "grad_norm": 0.7283564209938049, "learning_rate": 4.803636420225406e-05, "loss": 0.9444, "step": 2390 }, { "epoch": 0.38, "grad_norm": 0.7168800234794617, "learning_rate": 4.802823920401335e-05, "loss": 0.8118, "step": 2395 }, { "epoch": 0.38, "grad_norm": 0.8198531866073608, "learning_rate": 4.802009812096762e-05, "loss": 0.7271, "step": 2400 }, { "epoch": 0.38, "eval_loss": 0.7595117688179016, "eval_runtime": 96.4847, "eval_samples_per_second": 7.224, "eval_steps_per_second": 7.224, "step": 2400 }, { "epoch": 0.38, "grad_norm": 0.5693966150283813, "learning_rate": 4.801194095880327e-05, "loss": 0.7801, "step": 2405 }, { "epoch": 0.38, "grad_norm": 0.7175332307815552, "learning_rate": 4.800376772321793e-05, "loss": 0.7873, "step": 2410 }, { "epoch": 0.39, "grad_norm": 0.7779633402824402, "learning_rate": 4.799557841992046e-05, "loss": 0.894, "step": 2415 }, { "epoch": 0.39, "grad_norm": 0.7832231521606445, "learning_rate": 4.798737305463092e-05, "loss": 0.8035, "step": 2420 }, { "epoch": 0.39, "grad_norm": 0.5115272998809814, "learning_rate": 4.797915163308064e-05, "loss": 0.8885, "step": 2425 }, { "epoch": 0.39, "grad_norm": 0.9534878730773926, "learning_rate": 4.79709141610121e-05, "loss": 0.8175, "step": 2430 }, { "epoch": 0.39, "grad_norm": 0.7053850889205933, "learning_rate": 4.796266064417905e-05, "loss": 0.6971, "step": 2435 }, { "epoch": 0.39, "grad_norm": 1.236257791519165, "learning_rate": 4.795439108834641e-05, "loss": 1.0832, "step": 2440 }, { "epoch": 0.39, "grad_norm": 0.6936543583869934, "learning_rate": 4.794610549929031e-05, "loss": 0.858, "step": 2445 }, { "epoch": 0.39, "grad_norm": 0.8064691424369812, "learning_rate": 4.793780388279809e-05, "loss": 0.6951, "step": 2450 }, { "epoch": 0.39, "grad_norm": 0.7180449962615967, "learning_rate": 4.792948624466827e-05, "loss": 0.6779, "step": 2455 }, { "epoch": 0.39, "grad_norm": 0.6903377175331116, "learning_rate": 4.792115259071058e-05, "loss": 0.8281, "step": 2460 }, { "epoch": 0.39, "grad_norm": 0.9112733006477356, "learning_rate": 4.791280292674591e-05, "loss": 0.938, "step": 2465 }, { 
"epoch": 0.39, "grad_norm": 0.8657469153404236, "learning_rate": 4.790443725860636e-05, "loss": 0.8063, "step": 2470 }, { "epoch": 0.4, "grad_norm": 0.9260883927345276, "learning_rate": 4.7896055592135194e-05, "loss": 1.0093, "step": 2475 }, { "epoch": 0.4, "grad_norm": 0.7651245594024658, "learning_rate": 4.788765793318685e-05, "loss": 0.6686, "step": 2480 }, { "epoch": 0.4, "grad_norm": 0.6063816547393799, "learning_rate": 4.7879244287626945e-05, "loss": 0.8516, "step": 2485 }, { "epoch": 0.4, "grad_norm": 0.9127621650695801, "learning_rate": 4.787081466133225e-05, "loss": 0.7992, "step": 2490 }, { "epoch": 0.4, "grad_norm": 1.061246633529663, "learning_rate": 4.7862369060190716e-05, "loss": 0.8232, "step": 2495 }, { "epoch": 0.4, "grad_norm": 0.7100695967674255, "learning_rate": 4.785390749010143e-05, "loss": 0.9615, "step": 2500 }, { "epoch": 0.4, "eval_loss": 0.7581596970558167, "eval_runtime": 96.5797, "eval_samples_per_second": 7.217, "eval_steps_per_second": 7.217, "step": 2500 }, { "epoch": 0.4, "grad_norm": 16.361513137817383, "learning_rate": 4.784542995697464e-05, "loss": 0.7725, "step": 2505 }, { "epoch": 0.4, "grad_norm": 0.7746205925941467, "learning_rate": 4.7836936466731764e-05, "loss": 0.8464, "step": 2510 }, { "epoch": 0.4, "grad_norm": 0.7703484892845154, "learning_rate": 4.7828427025305345e-05, "loss": 0.8596, "step": 2515 }, { "epoch": 0.4, "grad_norm": 0.7838412523269653, "learning_rate": 4.7819901638639066e-05, "loss": 0.666, "step": 2520 }, { "epoch": 0.4, "grad_norm": 0.5832842588424683, "learning_rate": 4.781136031268776e-05, "loss": 0.4995, "step": 2525 }, { "epoch": 0.4, "grad_norm": 0.798271894454956, "learning_rate": 4.780280305341739e-05, "loss": 1.0017, "step": 2530 }, { "epoch": 0.4, "grad_norm": 0.463828444480896, "learning_rate": 4.779422986680503e-05, "loss": 0.5894, "step": 2535 }, { "epoch": 0.41, "grad_norm": 0.761908233165741, "learning_rate": 4.7785640758838916e-05, "loss": 0.9198, "step": 2540 }, { "epoch": 0.41, "grad_norm": 0.8427887558937073, "learning_rate": 4.777703573551837e-05, "loss": 0.8572, "step": 2545 }, { "epoch": 0.41, "grad_norm": 0.6188894510269165, "learning_rate": 4.776841480285384e-05, "loss": 0.9102, "step": 2550 }, { "epoch": 0.41, "grad_norm": 0.7198623418807983, "learning_rate": 4.775977796686691e-05, "loss": 0.8472, "step": 2555 }, { "epoch": 0.41, "grad_norm": 1.0144587755203247, "learning_rate": 4.775112523359023e-05, "loss": 0.7059, "step": 2560 }, { "epoch": 0.41, "grad_norm": 0.9784219861030579, "learning_rate": 4.77424566090676e-05, "loss": 0.7417, "step": 2565 }, { "epoch": 0.41, "grad_norm": 0.5349156856536865, "learning_rate": 4.773377209935387e-05, "loss": 0.7287, "step": 2570 }, { "epoch": 0.41, "grad_norm": 0.7715370655059814, "learning_rate": 4.772507171051502e-05, "loss": 0.8393, "step": 2575 }, { "epoch": 0.41, "grad_norm": 0.8483054637908936, "learning_rate": 4.771635544862813e-05, "loss": 0.8938, "step": 2580 }, { "epoch": 0.41, "grad_norm": 0.8196272253990173, "learning_rate": 4.770762331978132e-05, "loss": 0.8321, "step": 2585 }, { "epoch": 0.41, "grad_norm": 0.6155353784561157, "learning_rate": 4.769887533007384e-05, "loss": 0.9291, "step": 2590 }, { "epoch": 0.41, "grad_norm": 0.8897277116775513, "learning_rate": 4.769011148561601e-05, "loss": 0.7098, "step": 2595 }, { "epoch": 0.42, "grad_norm": 1.2256160974502563, "learning_rate": 4.768133179252921e-05, "loss": 0.8284, "step": 2600 }, { "epoch": 0.42, "eval_loss": 0.7554901838302612, "eval_runtime": 96.5279, "eval_samples_per_second": 7.221, 
"eval_steps_per_second": 7.221, "step": 2600 }, { "epoch": 0.42, "grad_norm": 0.6943432688713074, "learning_rate": 4.767253625694588e-05, "loss": 0.8785, "step": 2605 }, { "epoch": 0.42, "grad_norm": 0.6707726120948792, "learning_rate": 4.7663724885009556e-05, "loss": 0.7949, "step": 2610 }, { "epoch": 0.42, "grad_norm": 0.5595915913581848, "learning_rate": 4.765489768287481e-05, "loss": 0.8796, "step": 2615 }, { "epoch": 0.42, "grad_norm": 0.9889727234840393, "learning_rate": 4.7646054656707306e-05, "loss": 1.0676, "step": 2620 }, { "epoch": 0.42, "grad_norm": 0.8624396324157715, "learning_rate": 4.763719581268371e-05, "loss": 0.709, "step": 2625 }, { "epoch": 0.42, "grad_norm": 0.7466241121292114, "learning_rate": 4.7628321156991767e-05, "loss": 0.8084, "step": 2630 }, { "epoch": 0.42, "grad_norm": 0.6439360976219177, "learning_rate": 4.761943069583027e-05, "loss": 0.8831, "step": 2635 }, { "epoch": 0.42, "grad_norm": 0.9999917149543762, "learning_rate": 4.761052443540904e-05, "loss": 0.6372, "step": 2640 }, { "epoch": 0.42, "grad_norm": 0.688369870185852, "learning_rate": 4.760160238194894e-05, "loss": 0.7938, "step": 2645 }, { "epoch": 0.42, "grad_norm": 0.6920734643936157, "learning_rate": 4.759266454168186e-05, "loss": 0.7378, "step": 2650 }, { "epoch": 0.42, "grad_norm": 0.7592100501060486, "learning_rate": 4.758371092085073e-05, "loss": 1.097, "step": 2655 }, { "epoch": 0.42, "grad_norm": 0.9243403077125549, "learning_rate": 4.757474152570946e-05, "loss": 1.0404, "step": 2660 }, { "epoch": 0.43, "grad_norm": 0.8212980031967163, "learning_rate": 4.756575636252304e-05, "loss": 0.6179, "step": 2665 }, { "epoch": 0.43, "grad_norm": 0.6905696392059326, "learning_rate": 4.755675543756744e-05, "loss": 0.8398, "step": 2670 }, { "epoch": 0.43, "grad_norm": 0.8420882821083069, "learning_rate": 4.754773875712961e-05, "loss": 0.7552, "step": 2675 }, { "epoch": 0.43, "grad_norm": 0.6216087341308594, "learning_rate": 4.7538706327507575e-05, "loss": 0.8345, "step": 2680 }, { "epoch": 0.43, "grad_norm": 0.7430551648139954, "learning_rate": 4.75296581550103e-05, "loss": 0.8277, "step": 2685 }, { "epoch": 0.43, "grad_norm": 0.7866222262382507, "learning_rate": 4.752059424595778e-05, "loss": 0.9178, "step": 2690 }, { "epoch": 0.43, "grad_norm": 0.6548468470573425, "learning_rate": 4.7511514606680985e-05, "loss": 0.745, "step": 2695 }, { "epoch": 0.43, "grad_norm": 0.6956586837768555, "learning_rate": 4.750241924352187e-05, "loss": 0.8631, "step": 2700 }, { "epoch": 0.43, "eval_loss": 0.7539612650871277, "eval_runtime": 96.4433, "eval_samples_per_second": 7.227, "eval_steps_per_second": 7.227, "step": 2700 }, { "epoch": 0.43, "grad_norm": 0.6508235335350037, "learning_rate": 4.7493308162833394e-05, "loss": 0.9936, "step": 2705 }, { "epoch": 0.43, "grad_norm": 0.8658422827720642, "learning_rate": 4.7484181370979475e-05, "loss": 0.8, "step": 2710 }, { "epoch": 0.43, "grad_norm": 0.9571516513824463, "learning_rate": 4.747503887433501e-05, "loss": 0.7028, "step": 2715 }, { "epoch": 0.43, "grad_norm": 0.7693742513656616, "learning_rate": 4.7465880679285866e-05, "loss": 0.7194, "step": 2720 }, { "epoch": 0.43, "grad_norm": 1.34340238571167, "learning_rate": 4.745670679222888e-05, "loss": 1.0445, "step": 2725 }, { "epoch": 0.44, "grad_norm": 2.71327805519104, "learning_rate": 4.7447517219571834e-05, "loss": 0.8088, "step": 2730 }, { "epoch": 0.44, "grad_norm": 0.9449920058250427, "learning_rate": 4.743831196773349e-05, "loss": 0.7939, "step": 2735 }, { "epoch": 0.44, "grad_norm": 0.8091790676116943, 
"learning_rate": 4.742909104314353e-05, "loss": 0.7816, "step": 2740 }, { "epoch": 0.44, "grad_norm": 0.5790795087814331, "learning_rate": 4.741985445224263e-05, "loss": 0.8778, "step": 2745 }, { "epoch": 0.44, "grad_norm": 1.1936956644058228, "learning_rate": 4.741060220148236e-05, "loss": 1.0242, "step": 2750 }, { "epoch": 0.44, "grad_norm": 0.5158389806747437, "learning_rate": 4.7401334297325244e-05, "loss": 0.7954, "step": 2755 }, { "epoch": 0.44, "grad_norm": 0.8950900435447693, "learning_rate": 4.7392050746244754e-05, "loss": 0.7603, "step": 2760 }, { "epoch": 0.44, "grad_norm": 0.7289401888847351, "learning_rate": 4.738275155472528e-05, "loss": 0.879, "step": 2765 }, { "epoch": 0.44, "grad_norm": 0.8410510420799255, "learning_rate": 4.7373436729262145e-05, "loss": 0.7399, "step": 2770 }, { "epoch": 0.44, "grad_norm": 0.7992503643035889, "learning_rate": 4.736410627636156e-05, "loss": 0.6779, "step": 2775 }, { "epoch": 0.44, "grad_norm": 0.6706194281578064, "learning_rate": 4.73547602025407e-05, "loss": 0.7878, "step": 2780 }, { "epoch": 0.44, "grad_norm": 0.7177903652191162, "learning_rate": 4.734539851432763e-05, "loss": 0.6958, "step": 2785 }, { "epoch": 0.45, "grad_norm": 0.6557692885398865, "learning_rate": 4.73360212182613e-05, "loss": 0.6695, "step": 2790 }, { "epoch": 0.45, "grad_norm": 0.6754157543182373, "learning_rate": 4.7326628320891586e-05, "loss": 0.9057, "step": 2795 }, { "epoch": 0.45, "grad_norm": 1.1403777599334717, "learning_rate": 4.731721982877926e-05, "loss": 1.0507, "step": 2800 }, { "epoch": 0.45, "eval_loss": 0.7518497705459595, "eval_runtime": 96.4525, "eval_samples_per_second": 7.226, "eval_steps_per_second": 7.226, "step": 2800 }, { "epoch": 0.45, "grad_norm": 0.8268899321556091, "learning_rate": 4.730779574849598e-05, "loss": 0.7375, "step": 2805 }, { "epoch": 0.45, "grad_norm": 0.5358712673187256, "learning_rate": 4.72983560866243e-05, "loss": 0.7839, "step": 2810 }, { "epoch": 0.45, "grad_norm": 1.0761948823928833, "learning_rate": 4.7288900849757636e-05, "loss": 0.7936, "step": 2815 }, { "epoch": 0.45, "grad_norm": 0.7037429213523865, "learning_rate": 4.7279430044500315e-05, "loss": 0.6875, "step": 2820 }, { "epoch": 0.45, "grad_norm": 0.6378889679908752, "learning_rate": 4.726994367746751e-05, "loss": 0.9209, "step": 2825 }, { "epoch": 0.45, "grad_norm": 0.5508277416229248, "learning_rate": 4.7260441755285284e-05, "loss": 0.9402, "step": 2830 }, { "epoch": 0.45, "grad_norm": 0.9046247005462646, "learning_rate": 4.725092428459055e-05, "loss": 0.6336, "step": 2835 }, { "epoch": 0.45, "grad_norm": 0.8689594864845276, "learning_rate": 4.7241391272031096e-05, "loss": 1.1281, "step": 2840 }, { "epoch": 0.45, "grad_norm": 0.8785949945449829, "learning_rate": 4.723184272426555e-05, "loss": 0.711, "step": 2845 }, { "epoch": 0.45, "grad_norm": 0.9959015250205994, "learning_rate": 4.722227864796339e-05, "loss": 0.7432, "step": 2850 }, { "epoch": 0.46, "grad_norm": 0.6438590884208679, "learning_rate": 4.721269904980497e-05, "loss": 0.883, "step": 2855 }, { "epoch": 0.46, "grad_norm": 0.6714455485343933, "learning_rate": 4.720310393648145e-05, "loss": 1.065, "step": 2860 }, { "epoch": 0.46, "grad_norm": 0.7378780245780945, "learning_rate": 4.7193493314694846e-05, "loss": 0.5352, "step": 2865 }, { "epoch": 0.46, "grad_norm": 0.7698020935058594, "learning_rate": 4.7183867191158006e-05, "loss": 0.7016, "step": 2870 }, { "epoch": 0.46, "grad_norm": 0.952795684337616, "learning_rate": 4.7174225572594586e-05, "loss": 1.0659, "step": 2875 }, { "epoch": 0.46, 
"grad_norm": 0.6401458978652954, "learning_rate": 4.71645684657391e-05, "loss": 0.7335, "step": 2880 }, { "epoch": 0.46, "grad_norm": 0.8375076055526733, "learning_rate": 4.715489587733685e-05, "loss": 0.9264, "step": 2885 }, { "epoch": 0.46, "grad_norm": 0.693505048751831, "learning_rate": 4.714520781414397e-05, "loss": 1.0286, "step": 2890 }, { "epoch": 0.46, "grad_norm": 1.0239859819412231, "learning_rate": 4.7135504282927375e-05, "loss": 0.6875, "step": 2895 }, { "epoch": 0.46, "grad_norm": 0.602035403251648, "learning_rate": 4.712578529046483e-05, "loss": 0.8247, "step": 2900 }, { "epoch": 0.46, "eval_loss": 0.7512397766113281, "eval_runtime": 96.4745, "eval_samples_per_second": 7.225, "eval_steps_per_second": 7.225, "step": 2900 }, { "epoch": 0.46, "grad_norm": 0.6859713196754456, "learning_rate": 4.711605084354487e-05, "loss": 0.7521, "step": 2905 }, { "epoch": 0.46, "grad_norm": 0.7126486301422119, "learning_rate": 4.7106300948966817e-05, "loss": 0.7656, "step": 2910 }, { "epoch": 0.47, "grad_norm": 0.4363511800765991, "learning_rate": 4.70965356135408e-05, "loss": 1.1595, "step": 2915 }, { "epoch": 0.47, "grad_norm": 0.6381859183311462, "learning_rate": 4.7086754844087724e-05, "loss": 0.6949, "step": 2920 }, { "epoch": 0.47, "grad_norm": 0.7931796312332153, "learning_rate": 4.7076958647439284e-05, "loss": 1.0821, "step": 2925 }, { "epoch": 0.47, "grad_norm": 0.9333865642547607, "learning_rate": 4.706714703043795e-05, "loss": 0.7753, "step": 2930 }, { "epoch": 0.47, "grad_norm": 0.8860915899276733, "learning_rate": 4.705731999993694e-05, "loss": 0.7257, "step": 2935 }, { "epoch": 0.47, "grad_norm": 0.6868377327919006, "learning_rate": 4.704747756280027e-05, "loss": 0.8148, "step": 2940 }, { "epoch": 0.47, "grad_norm": 0.5337914228439331, "learning_rate": 4.7037619725902706e-05, "loss": 0.7379, "step": 2945 }, { "epoch": 0.47, "grad_norm": 0.4664730429649353, "learning_rate": 4.7027746496129745e-05, "loss": 0.6226, "step": 2950 }, { "epoch": 0.47, "grad_norm": 0.7305762767791748, "learning_rate": 4.701785788037768e-05, "loss": 0.9018, "step": 2955 }, { "epoch": 0.47, "grad_norm": 0.6576158404350281, "learning_rate": 4.7007953885553525e-05, "loss": 0.7777, "step": 2960 }, { "epoch": 0.47, "grad_norm": 0.9728206396102905, "learning_rate": 4.699803451857503e-05, "loss": 0.8004, "step": 2965 }, { "epoch": 0.47, "grad_norm": 0.6211077570915222, "learning_rate": 4.69880997863707e-05, "loss": 0.7407, "step": 2970 }, { "epoch": 0.47, "grad_norm": 1.2564159631729126, "learning_rate": 4.697814969587976e-05, "loss": 0.7993, "step": 2975 }, { "epoch": 0.48, "grad_norm": 0.927930474281311, "learning_rate": 4.696818425405217e-05, "loss": 0.8803, "step": 2980 }, { "epoch": 0.48, "grad_norm": 0.9062425494194031, "learning_rate": 4.695820346784861e-05, "loss": 0.8835, "step": 2985 }, { "epoch": 0.48, "grad_norm": 0.6738875508308411, "learning_rate": 4.694820734424047e-05, "loss": 0.7817, "step": 2990 }, { "epoch": 0.48, "grad_norm": 1.326353669166565, "learning_rate": 4.6938195890209866e-05, "loss": 0.9213, "step": 2995 }, { "epoch": 0.48, "grad_norm": 0.4853856563568115, "learning_rate": 4.692816911274962e-05, "loss": 0.9835, "step": 3000 }, { "epoch": 0.48, "eval_loss": 0.7496011257171631, "eval_runtime": 96.515, "eval_samples_per_second": 7.222, "eval_steps_per_second": 7.222, "step": 3000 }, { "epoch": 0.48, "grad_norm": 0.5411309003829956, "learning_rate": 4.691812701886324e-05, "loss": 0.7556, "step": 3005 }, { "epoch": 0.48, "grad_norm": 0.7545793652534485, "learning_rate": 
4.6908069615564966e-05, "loss": 0.8295, "step": 3010 }, { "epoch": 0.48, "grad_norm": 0.850104808807373, "learning_rate": 4.6897996909879695e-05, "loss": 1.0194, "step": 3015 }, { "epoch": 0.48, "grad_norm": 0.69708651304245, "learning_rate": 4.6887908908843026e-05, "loss": 0.7918, "step": 3020 }, { "epoch": 0.48, "grad_norm": 2.1333253383636475, "learning_rate": 4.687780561950126e-05, "loss": 0.7287, "step": 3025 }, { "epoch": 0.48, "grad_norm": 0.9223487973213196, "learning_rate": 4.686768704891134e-05, "loss": 0.9592, "step": 3030 }, { "epoch": 0.48, "grad_norm": 0.7700949311256409, "learning_rate": 4.685755320414091e-05, "loss": 0.8572, "step": 3035 }, { "epoch": 0.49, "grad_norm": 0.5573208332061768, "learning_rate": 4.684740409226829e-05, "loss": 0.9441, "step": 3040 }, { "epoch": 0.49, "grad_norm": 0.6346720457077026, "learning_rate": 4.6837239720382426e-05, "loss": 0.8398, "step": 3045 }, { "epoch": 0.49, "grad_norm": 0.8065741062164307, "learning_rate": 4.682706009558297e-05, "loss": 0.9325, "step": 3050 }, { "epoch": 0.49, "grad_norm": 0.3001660406589508, "learning_rate": 4.681686522498018e-05, "loss": 0.8997, "step": 3055 }, { "epoch": 0.49, "grad_norm": 0.860211968421936, "learning_rate": 4.680665511569501e-05, "loss": 0.6883, "step": 3060 }, { "epoch": 0.49, "grad_norm": 0.722518265247345, "learning_rate": 4.6796429774859015e-05, "loss": 0.8607, "step": 3065 }, { "epoch": 0.49, "grad_norm": 0.6525880694389343, "learning_rate": 4.678618920961442e-05, "loss": 0.9256, "step": 3070 }, { "epoch": 0.49, "grad_norm": 0.7581719756126404, "learning_rate": 4.6775933427114084e-05, "loss": 0.662, "step": 3075 }, { "epoch": 0.49, "grad_norm": 0.6604760885238647, "learning_rate": 4.676566243452146e-05, "loss": 0.734, "step": 3080 }, { "epoch": 0.49, "grad_norm": 0.7573785781860352, "learning_rate": 4.6755376239010665e-05, "loss": 0.7113, "step": 3085 }, { "epoch": 0.49, "grad_norm": 0.8933848738670349, "learning_rate": 4.674507484776641e-05, "loss": 0.8523, "step": 3090 }, { "epoch": 0.49, "grad_norm": 0.5941946506500244, "learning_rate": 4.6734758267984044e-05, "loss": 0.7907, "step": 3095 }, { "epoch": 0.49, "grad_norm": 0.7756261825561523, "learning_rate": 4.672442650686949e-05, "loss": 0.8407, "step": 3100 }, { "epoch": 0.49, "eval_loss": 0.7495761513710022, "eval_runtime": 96.4482, "eval_samples_per_second": 7.227, "eval_steps_per_second": 7.227, "step": 3100 }, { "epoch": 0.5, "grad_norm": 0.6407367587089539, "learning_rate": 4.671407957163931e-05, "loss": 0.6413, "step": 3105 }, { "epoch": 0.5, "grad_norm": 1.069754719734192, "learning_rate": 4.670371746952063e-05, "loss": 0.8934, "step": 3110 }, { "epoch": 0.5, "grad_norm": 0.9698624610900879, "learning_rate": 4.669334020775122e-05, "loss": 0.7261, "step": 3115 }, { "epoch": 0.5, "grad_norm": 0.6487118005752563, "learning_rate": 4.668294779357938e-05, "loss": 0.8951, "step": 3120 }, { "epoch": 0.5, "grad_norm": 1.0640240907669067, "learning_rate": 4.667254023426404e-05, "loss": 0.8568, "step": 3125 }, { "epoch": 0.5, "grad_norm": 0.5772892236709595, "learning_rate": 4.666211753707468e-05, "loss": 0.9798, "step": 3130 }, { "epoch": 0.5, "grad_norm": 0.6915898323059082, "learning_rate": 4.665167970929137e-05, "loss": 0.8694, "step": 3135 }, { "epoch": 0.5, "grad_norm": 0.5959879159927368, "learning_rate": 4.664122675820474e-05, "loss": 0.6521, "step": 3140 }, { "epoch": 0.5, "grad_norm": 0.833991289138794, "learning_rate": 4.663075869111597e-05, "loss": 0.9194, "step": 3145 }, { "epoch": 0.5, "grad_norm": 0.9575549960136414, 
"learning_rate": 4.662027551533685e-05, "loss": 1.0088, "step": 3150 }, { "epoch": 0.5, "grad_norm": 0.5501818656921387, "learning_rate": 4.660977723818965e-05, "loss": 0.5997, "step": 3155 }, { "epoch": 0.5, "grad_norm": 0.6001989245414734, "learning_rate": 4.659926386700725e-05, "loss": 0.7643, "step": 3160 }, { "epoch": 0.51, "grad_norm": 0.6806654930114746, "learning_rate": 4.658873540913303e-05, "loss": 0.899, "step": 3165 }, { "epoch": 0.51, "grad_norm": 0.7098959684371948, "learning_rate": 4.657819187192094e-05, "loss": 1.0281, "step": 3170 }, { "epoch": 0.51, "grad_norm": 0.9234817028045654, "learning_rate": 4.6567633262735446e-05, "loss": 0.9495, "step": 3175 }, { "epoch": 0.51, "grad_norm": 0.596527636051178, "learning_rate": 4.655705958895153e-05, "loss": 0.6352, "step": 3180 }, { "epoch": 0.51, "grad_norm": 1.154539704322815, "learning_rate": 4.6546470857954736e-05, "loss": 0.8939, "step": 3185 }, { "epoch": 0.51, "grad_norm": 0.7502239942550659, "learning_rate": 4.653586707714108e-05, "loss": 0.692, "step": 3190 }, { "epoch": 0.51, "grad_norm": 0.7868794202804565, "learning_rate": 4.652524825391711e-05, "loss": 0.908, "step": 3195 }, { "epoch": 0.51, "grad_norm": 0.6436206102371216, "learning_rate": 4.6514614395699886e-05, "loss": 0.7417, "step": 3200 }, { "epoch": 0.51, "eval_loss": 0.7466740012168884, "eval_runtime": 96.4309, "eval_samples_per_second": 7.228, "eval_steps_per_second": 7.228, "step": 3200 }, { "epoch": 0.51, "grad_norm": 0.8566870093345642, "learning_rate": 4.6503965509916956e-05, "loss": 0.8041, "step": 3205 }, { "epoch": 0.51, "grad_norm": 0.5982272028923035, "learning_rate": 4.649330160400639e-05, "loss": 0.4528, "step": 3210 }, { "epoch": 0.51, "grad_norm": 0.6511960029602051, "learning_rate": 4.648262268541671e-05, "loss": 0.877, "step": 3215 }, { "epoch": 0.51, "grad_norm": 0.8476071357727051, "learning_rate": 4.6471928761606965e-05, "loss": 0.7145, "step": 3220 }, { "epoch": 0.51, "grad_norm": 1.0408881902694702, "learning_rate": 4.6461219840046654e-05, "loss": 0.5539, "step": 3225 }, { "epoch": 0.52, "grad_norm": 0.7445903420448303, "learning_rate": 4.645049592821577e-05, "loss": 0.8306, "step": 3230 }, { "epoch": 0.52, "grad_norm": 0.9672279357910156, "learning_rate": 4.6439757033604756e-05, "loss": 0.8645, "step": 3235 }, { "epoch": 0.52, "grad_norm": 0.7082134485244751, "learning_rate": 4.6429003163714556e-05, "loss": 0.8188, "step": 3240 }, { "epoch": 0.52, "grad_norm": 0.8803107142448425, "learning_rate": 4.641823432605654e-05, "loss": 0.7956, "step": 3245 }, { "epoch": 0.52, "grad_norm": 0.7926101088523865, "learning_rate": 4.640745052815254e-05, "loss": 0.715, "step": 3250 }, { "epoch": 0.52, "grad_norm": 0.890519380569458, "learning_rate": 4.639665177753485e-05, "loss": 0.8825, "step": 3255 }, { "epoch": 0.52, "grad_norm": 0.9909971952438354, "learning_rate": 4.638583808174619e-05, "loss": 0.7843, "step": 3260 }, { "epoch": 0.52, "grad_norm": 0.7450726628303528, "learning_rate": 4.6375009448339743e-05, "loss": 0.9714, "step": 3265 }, { "epoch": 0.52, "grad_norm": 0.8220781683921814, "learning_rate": 4.636416588487911e-05, "loss": 0.8467, "step": 3270 }, { "epoch": 0.52, "grad_norm": 1.025499701499939, "learning_rate": 4.63533073989383e-05, "loss": 0.9301, "step": 3275 }, { "epoch": 0.52, "grad_norm": 0.8067827820777893, "learning_rate": 4.634243399810181e-05, "loss": 0.7078, "step": 3280 }, { "epoch": 0.52, "grad_norm": 0.8833619952201843, "learning_rate": 4.6331545689964475e-05, "loss": 0.699, "step": 3285 }, { "epoch": 0.53, "grad_norm": 
1.0600448846817017, "learning_rate": 4.632064248213159e-05, "loss": 0.7849, "step": 3290 }, { "epoch": 0.53, "grad_norm": 1.0503095388412476, "learning_rate": 4.630972438221885e-05, "loss": 0.6215, "step": 3295 }, { "epoch": 0.53, "grad_norm": 0.5159885287284851, "learning_rate": 4.629879139785235e-05, "loss": 0.7449, "step": 3300 }, { "epoch": 0.53, "eval_loss": 0.7472941279411316, "eval_runtime": 96.4994, "eval_samples_per_second": 7.223, "eval_steps_per_second": 7.223, "step": 3300 }, { "epoch": 0.53, "grad_norm": 1.072464108467102, "learning_rate": 4.6287843536668575e-05, "loss": 0.8511, "step": 3305 }, { "epoch": 0.53, "grad_norm": 0.9016098976135254, "learning_rate": 4.62768808063144e-05, "loss": 0.7373, "step": 3310 }, { "epoch": 0.53, "grad_norm": 1.0161947011947632, "learning_rate": 4.626590321444712e-05, "loss": 0.9035, "step": 3315 }, { "epoch": 0.53, "grad_norm": 0.7459146976470947, "learning_rate": 4.625491076873435e-05, "loss": 0.6468, "step": 3320 }, { "epoch": 0.53, "grad_norm": 0.950080394744873, "learning_rate": 4.624390347685413e-05, "loss": 0.7211, "step": 3325 }, { "epoch": 0.53, "grad_norm": 0.7308927774429321, "learning_rate": 4.623288134649485e-05, "loss": 0.9238, "step": 3330 }, { "epoch": 0.53, "grad_norm": 0.7227129340171814, "learning_rate": 4.622184438535527e-05, "loss": 0.9773, "step": 3335 }, { "epoch": 0.53, "grad_norm": 0.7054020166397095, "learning_rate": 4.62107926011445e-05, "loss": 0.7783, "step": 3340 }, { "epoch": 0.53, "grad_norm": 0.6535981297492981, "learning_rate": 4.619972600158201e-05, "loss": 0.6559, "step": 3345 }, { "epoch": 0.53, "grad_norm": 0.7245693206787109, "learning_rate": 4.618864459439762e-05, "loss": 0.8352, "step": 3350 }, { "epoch": 0.54, "grad_norm": 0.9683626890182495, "learning_rate": 4.6177548387331485e-05, "loss": 0.9397, "step": 3355 }, { "epoch": 0.54, "grad_norm": 1.1847660541534424, "learning_rate": 4.616643738813411e-05, "loss": 0.7383, "step": 3360 }, { "epoch": 0.54, "grad_norm": 0.8566804528236389, "learning_rate": 4.615531160456633e-05, "loss": 0.8066, "step": 3365 }, { "epoch": 0.54, "grad_norm": 0.7312522530555725, "learning_rate": 4.61441710443993e-05, "loss": 0.7974, "step": 3370 }, { "epoch": 0.54, "grad_norm": 0.6620572209358215, "learning_rate": 4.6133015715414484e-05, "loss": 0.9136, "step": 3375 }, { "epoch": 0.54, "grad_norm": 0.5405072569847107, "learning_rate": 4.612184562540369e-05, "loss": 0.6921, "step": 3380 }, { "epoch": 0.54, "grad_norm": 0.7474086284637451, "learning_rate": 4.611066078216901e-05, "loss": 0.8463, "step": 3385 }, { "epoch": 0.54, "grad_norm": 0.9125152230262756, "learning_rate": 4.609946119352287e-05, "loss": 0.8508, "step": 3390 }, { "epoch": 0.54, "grad_norm": 0.9998400211334229, "learning_rate": 4.608824686728797e-05, "loss": 0.8735, "step": 3395 }, { "epoch": 0.54, "grad_norm": 0.5990025401115417, "learning_rate": 4.6077017811297304e-05, "loss": 0.8562, "step": 3400 }, { "epoch": 0.54, "eval_loss": 0.743736743927002, "eval_runtime": 96.3748, "eval_samples_per_second": 7.232, "eval_steps_per_second": 7.232, "step": 3400 }, { "epoch": 0.54, "grad_norm": 0.35676899552345276, "learning_rate": 4.606577403339418e-05, "loss": 0.8914, "step": 3405 }, { "epoch": 0.54, "grad_norm": 0.772233784198761, "learning_rate": 4.605451554143216e-05, "loss": 0.779, "step": 3410 }, { "epoch": 0.55, "grad_norm": 0.7336989641189575, "learning_rate": 4.604324234327509e-05, "loss": 0.7678, "step": 3415 }, { "epoch": 0.55, "grad_norm": 0.7039794325828552, "learning_rate": 4.603195444679711e-05, "loss": 
0.8783, "step": 3420 }, { "epoch": 0.55, "grad_norm": 0.6955629587173462, "learning_rate": 4.602065185988259e-05, "loss": 0.818, "step": 3425 }, { "epoch": 0.55, "grad_norm": 0.7369412779808044, "learning_rate": 4.60093345904262e-05, "loss": 0.6942, "step": 3430 }, { "epoch": 0.55, "grad_norm": 0.6824669241905212, "learning_rate": 4.5998002646332835e-05, "loss": 0.9274, "step": 3435 }, { "epoch": 0.55, "grad_norm": 1.355720043182373, "learning_rate": 4.598665603551765e-05, "loss": 0.7219, "step": 3440 }, { "epoch": 0.55, "grad_norm": 0.8629677295684814, "learning_rate": 4.597529476590605e-05, "loss": 0.8023, "step": 3445 }, { "epoch": 0.55, "grad_norm": 2.2956135272979736, "learning_rate": 4.596391884543368e-05, "loss": 0.9574, "step": 3450 }, { "epoch": 0.55, "grad_norm": 1.2683722972869873, "learning_rate": 4.59525282820464e-05, "loss": 0.6996, "step": 3455 }, { "epoch": 0.55, "grad_norm": 0.7317371368408203, "learning_rate": 4.594112308370032e-05, "loss": 1.03, "step": 3460 }, { "epoch": 0.55, "grad_norm": 1.0310641527175903, "learning_rate": 4.5929703258361756e-05, "loss": 0.6917, "step": 3465 }, { "epoch": 0.55, "grad_norm": 0.9479489326477051, "learning_rate": 4.591826881400726e-05, "loss": 0.9939, "step": 3470 }, { "epoch": 0.55, "grad_norm": 0.9485552310943604, "learning_rate": 4.5906819758623576e-05, "loss": 1.0317, "step": 3475 }, { "epoch": 0.56, "grad_norm": 0.724987268447876, "learning_rate": 4.589535610020765e-05, "loss": 0.6915, "step": 3480 }, { "epoch": 0.56, "grad_norm": 0.7091718316078186, "learning_rate": 4.5883877846766654e-05, "loss": 0.8673, "step": 3485 }, { "epoch": 0.56, "grad_norm": 0.8297457098960876, "learning_rate": 4.587238500631793e-05, "loss": 0.8114, "step": 3490 }, { "epoch": 0.56, "grad_norm": 0.7213541269302368, "learning_rate": 4.586087758688903e-05, "loss": 0.863, "step": 3495 }, { "epoch": 0.56, "grad_norm": 1.1096009016036987, "learning_rate": 4.584935559651765e-05, "loss": 0.9222, "step": 3500 }, { "epoch": 0.56, "eval_loss": 0.7428527474403381, "eval_runtime": 96.3993, "eval_samples_per_second": 7.23, "eval_steps_per_second": 7.23, "step": 3500 }, { "epoch": 0.56, "grad_norm": 0.5834380984306335, "learning_rate": 4.583781904325172e-05, "loss": 0.6609, "step": 3505 }, { "epoch": 0.56, "grad_norm": 0.5797068476676941, "learning_rate": 4.5826267935149285e-05, "loss": 0.7933, "step": 3510 }, { "epoch": 0.56, "grad_norm": 0.6027450561523438, "learning_rate": 4.581470228027861e-05, "loss": 0.7841, "step": 3515 }, { "epoch": 0.56, "grad_norm": 0.5674509406089783, "learning_rate": 4.5803122086718077e-05, "loss": 0.7721, "step": 3520 }, { "epoch": 0.56, "grad_norm": 0.7398461103439331, "learning_rate": 4.5791527362556235e-05, "loss": 0.7651, "step": 3525 }, { "epoch": 0.56, "grad_norm": 0.6617181301116943, "learning_rate": 4.577991811589181e-05, "loss": 0.9359, "step": 3530 }, { "epoch": 0.56, "grad_norm": 0.49279505014419556, "learning_rate": 4.576829435483362e-05, "loss": 0.6278, "step": 3535 }, { "epoch": 0.57, "grad_norm": 0.5201964378356934, "learning_rate": 4.575665608750067e-05, "loss": 0.853, "step": 3540 }, { "epoch": 0.57, "grad_norm": 0.7188725471496582, "learning_rate": 4.5745003322022084e-05, "loss": 0.8338, "step": 3545 }, { "epoch": 0.57, "grad_norm": 1.0798031091690063, "learning_rate": 4.573333606653708e-05, "loss": 0.9776, "step": 3550 }, { "epoch": 0.57, "grad_norm": 0.6439509987831116, "learning_rate": 4.5721654329195046e-05, "loss": 0.9331, "step": 3555 }, { "epoch": 0.57, "grad_norm": 0.7663920521736145, "learning_rate": 
4.570995811815545e-05, "loss": 1.0533, "step": 3560 }, { "epoch": 0.57, "grad_norm": 0.7230969071388245, "learning_rate": 4.569824744158789e-05, "loss": 0.6966, "step": 3565 }, { "epoch": 0.57, "grad_norm": 1.016112208366394, "learning_rate": 4.568652230767205e-05, "loss": 0.8393, "step": 3570 }, { "epoch": 0.57, "grad_norm": 1.0165222883224487, "learning_rate": 4.567478272459773e-05, "loss": 1.0218, "step": 3575 }, { "epoch": 0.57, "grad_norm": 0.709685742855072, "learning_rate": 4.5663028700564826e-05, "loss": 0.7273, "step": 3580 }, { "epoch": 0.57, "grad_norm": 0.5664321780204773, "learning_rate": 4.565126024378328e-05, "loss": 0.9079, "step": 3585 }, { "epoch": 0.57, "grad_norm": 0.7938306927680969, "learning_rate": 4.5639477362473173e-05, "loss": 0.976, "step": 3590 }, { "epoch": 0.57, "grad_norm": 0.6710417866706848, "learning_rate": 4.5627680064864606e-05, "loss": 1.1969, "step": 3595 }, { "epoch": 0.57, "grad_norm": 0.9886580109596252, "learning_rate": 4.5615868359197796e-05, "loss": 0.9242, "step": 3600 }, { "epoch": 0.57, "eval_loss": 0.7412505149841309, "eval_runtime": 96.4, "eval_samples_per_second": 7.23, "eval_steps_per_second": 7.23, "step": 3600 }, { "epoch": 0.58, "grad_norm": 0.8157562613487244, "learning_rate": 4.5604042253723014e-05, "loss": 0.8398, "step": 3605 }, { "epoch": 0.58, "grad_norm": 1.2368131875991821, "learning_rate": 4.559220175670054e-05, "loss": 0.8742, "step": 3610 }, { "epoch": 0.58, "grad_norm": 0.6060155034065247, "learning_rate": 4.558034687640078e-05, "loss": 0.6993, "step": 3615 }, { "epoch": 0.58, "grad_norm": 1.1366558074951172, "learning_rate": 4.556847762110415e-05, "loss": 0.9328, "step": 3620 }, { "epoch": 0.58, "grad_norm": 0.7205525636672974, "learning_rate": 4.555659399910108e-05, "loss": 0.827, "step": 3625 }, { "epoch": 0.58, "grad_norm": 0.6944175958633423, "learning_rate": 4.554469601869209e-05, "loss": 0.7805, "step": 3630 }, { "epoch": 0.58, "grad_norm": 0.6939406394958496, "learning_rate": 4.55327836881877e-05, "loss": 0.7996, "step": 3635 }, { "epoch": 0.58, "grad_norm": 0.592650830745697, "learning_rate": 4.552085701590844e-05, "loss": 0.6599, "step": 3640 }, { "epoch": 0.58, "grad_norm": 0.5287877321243286, "learning_rate": 4.5508916010184884e-05, "loss": 0.6856, "step": 3645 }, { "epoch": 0.58, "grad_norm": 0.6414081454277039, "learning_rate": 4.549696067935762e-05, "loss": 0.7622, "step": 3650 }, { "epoch": 0.58, "grad_norm": 1.2272289991378784, "learning_rate": 4.548499103177719e-05, "loss": 1.0834, "step": 3655 }, { "epoch": 0.58, "grad_norm": 0.5912505388259888, "learning_rate": 4.547300707580422e-05, "loss": 0.8738, "step": 3660 }, { "epoch": 0.58, "grad_norm": 0.6686813235282898, "learning_rate": 4.5461008819809246e-05, "loss": 0.6221, "step": 3665 }, { "epoch": 0.59, "grad_norm": 0.891153872013092, "learning_rate": 4.544899627217286e-05, "loss": 0.9009, "step": 3670 }, { "epoch": 0.59, "grad_norm": 1.1651557683944702, "learning_rate": 4.543696944128559e-05, "loss": 0.8448, "step": 3675 }, { "epoch": 0.59, "grad_norm": 0.7525443434715271, "learning_rate": 4.5424928335547964e-05, "loss": 0.6654, "step": 3680 }, { "epoch": 0.59, "grad_norm": 0.6798614859580994, "learning_rate": 4.541287296337048e-05, "loss": 0.9244, "step": 3685 }, { "epoch": 0.59, "grad_norm": 0.498735249042511, "learning_rate": 4.540080333317358e-05, "loss": 0.6815, "step": 3690 }, { "epoch": 0.59, "grad_norm": 0.6097673773765564, "learning_rate": 4.5388719453387694e-05, "loss": 0.8536, "step": 3695 }, { "epoch": 0.59, "grad_norm": 0.6685522198677063, 
"learning_rate": 4.537662133245319e-05, "loss": 0.8092, "step": 3700 }, { "epoch": 0.59, "eval_loss": 0.7402560114860535, "eval_runtime": 96.4998, "eval_samples_per_second": 7.223, "eval_steps_per_second": 7.223, "step": 3700 }, { "epoch": 0.59, "grad_norm": 1.162788987159729, "learning_rate": 4.5364508978820375e-05, "loss": 0.6143, "step": 3705 }, { "epoch": 0.59, "grad_norm": 0.8281823992729187, "learning_rate": 4.5352382400949524e-05, "loss": 0.8143, "step": 3710 }, { "epoch": 0.59, "grad_norm": 0.6465135812759399, "learning_rate": 4.534024160731082e-05, "loss": 0.9152, "step": 3715 }, { "epoch": 0.59, "grad_norm": 0.5903899669647217, "learning_rate": 4.532808660638438e-05, "loss": 0.7229, "step": 3720 }, { "epoch": 0.59, "grad_norm": 0.6988681554794312, "learning_rate": 4.5315917406660265e-05, "loss": 0.6863, "step": 3725 }, { "epoch": 0.6, "grad_norm": 0.7910459637641907, "learning_rate": 4.530373401663843e-05, "loss": 0.8762, "step": 3730 }, { "epoch": 0.6, "grad_norm": 0.7580087184906006, "learning_rate": 4.529153644482875e-05, "loss": 0.9896, "step": 3735 }, { "epoch": 0.6, "grad_norm": 0.6871665716171265, "learning_rate": 4.5279324699751005e-05, "loss": 0.8831, "step": 3740 }, { "epoch": 0.6, "grad_norm": 1.0093677043914795, "learning_rate": 4.526709878993488e-05, "loss": 0.742, "step": 3745 }, { "epoch": 0.6, "grad_norm": 0.9898921847343445, "learning_rate": 4.525485872391996e-05, "loss": 0.766, "step": 3750 }, { "epoch": 0.6, "grad_norm": 0.8706837296485901, "learning_rate": 4.524260451025569e-05, "loss": 0.7545, "step": 3755 }, { "epoch": 0.6, "grad_norm": 1.1715607643127441, "learning_rate": 4.523033615750142e-05, "loss": 0.84, "step": 3760 }, { "epoch": 0.6, "grad_norm": 1.017062783241272, "learning_rate": 4.521805367422638e-05, "loss": 0.7477, "step": 3765 }, { "epoch": 0.6, "grad_norm": 0.6071624159812927, "learning_rate": 4.520575706900965e-05, "loss": 0.793, "step": 3770 }, { "epoch": 0.6, "grad_norm": 0.5821404457092285, "learning_rate": 4.519344635044018e-05, "loss": 0.7514, "step": 3775 }, { "epoch": 0.6, "grad_norm": 0.6849238872528076, "learning_rate": 4.51811215271168e-05, "loss": 0.862, "step": 3780 }, { "epoch": 0.6, "grad_norm": 0.8808868527412415, "learning_rate": 4.5168782607648166e-05, "loss": 0.7189, "step": 3785 }, { "epoch": 0.6, "grad_norm": 0.7080340385437012, "learning_rate": 4.5156429600652774e-05, "loss": 0.6987, "step": 3790 }, { "epoch": 0.61, "grad_norm": 0.705869734287262, "learning_rate": 4.5144062514759e-05, "loss": 0.6482, "step": 3795 }, { "epoch": 0.61, "grad_norm": 0.6345694065093994, "learning_rate": 4.5131681358605007e-05, "loss": 0.7279, "step": 3800 }, { "epoch": 0.61, "eval_loss": 0.7394095063209534, "eval_runtime": 96.4977, "eval_samples_per_second": 7.223, "eval_steps_per_second": 7.223, "step": 3800 }, { "epoch": 0.61, "grad_norm": 0.748913586139679, "learning_rate": 4.511928614083881e-05, "loss": 0.7474, "step": 3805 }, { "epoch": 0.61, "grad_norm": 0.6260043382644653, "learning_rate": 4.5106876870118255e-05, "loss": 0.7469, "step": 3810 }, { "epoch": 0.61, "grad_norm": 0.596367597579956, "learning_rate": 4.509445355511098e-05, "loss": 0.8437, "step": 3815 }, { "epoch": 0.61, "grad_norm": 1.3925014734268188, "learning_rate": 4.5082016204494445e-05, "loss": 1.0928, "step": 3820 }, { "epoch": 0.61, "grad_norm": 1.1370338201522827, "learning_rate": 4.506956482695592e-05, "loss": 0.8908, "step": 3825 }, { "epoch": 0.61, "grad_norm": 0.6746950149536133, "learning_rate": 4.505709943119246e-05, "loss": 0.7121, "step": 3830 }, { "epoch": 
0.61, "grad_norm": 0.6608826518058777, "learning_rate": 4.504462002591091e-05, "loss": 0.9397, "step": 3835 }, { "epoch": 0.61, "grad_norm": 0.6542508006095886, "learning_rate": 4.5032126619827916e-05, "loss": 0.6942, "step": 3840 }, { "epoch": 0.61, "grad_norm": 0.5825070738792419, "learning_rate": 4.5019619221669895e-05, "loss": 0.7083, "step": 3845 }, { "epoch": 0.61, "grad_norm": 0.8596588373184204, "learning_rate": 4.500709784017303e-05, "loss": 0.839, "step": 3850 }, { "epoch": 0.62, "grad_norm": 0.641009509563446, "learning_rate": 4.499456248408328e-05, "loss": 0.72, "step": 3855 }, { "epoch": 0.62, "grad_norm": 1.213782548904419, "learning_rate": 4.498201316215635e-05, "loss": 0.7116, "step": 3860 }, { "epoch": 0.62, "grad_norm": 1.1411411762237549, "learning_rate": 4.496944988315775e-05, "loss": 1.0208, "step": 3865 }, { "epoch": 0.62, "grad_norm": 0.8265553712844849, "learning_rate": 4.495687265586266e-05, "loss": 0.7664, "step": 3870 }, { "epoch": 0.62, "grad_norm": 0.9309681057929993, "learning_rate": 4.4944281489056065e-05, "loss": 0.9126, "step": 3875 }, { "epoch": 0.62, "grad_norm": 0.49171608686447144, "learning_rate": 4.493167639153266e-05, "loss": 0.6271, "step": 3880 }, { "epoch": 0.62, "grad_norm": 0.743669867515564, "learning_rate": 4.491905737209688e-05, "loss": 0.7965, "step": 3885 }, { "epoch": 0.62, "grad_norm": 0.6191633939743042, "learning_rate": 4.490642443956287e-05, "loss": 0.5884, "step": 3890 }, { "epoch": 0.62, "grad_norm": 0.5481441020965576, "learning_rate": 4.489377760275452e-05, "loss": 0.6281, "step": 3895 }, { "epoch": 0.62, "grad_norm": 0.7155417203903198, "learning_rate": 4.488111687050539e-05, "loss": 0.7774, "step": 3900 }, { "epoch": 0.62, "eval_loss": 0.738506019115448, "eval_runtime": 96.7667, "eval_samples_per_second": 7.203, "eval_steps_per_second": 7.203, "step": 3900 }, { "epoch": 0.62, "grad_norm": 1.032523274421692, "learning_rate": 4.4868442251658795e-05, "loss": 0.7621, "step": 3905 }, { "epoch": 0.62, "grad_norm": 0.584082841873169, "learning_rate": 4.4855753755067703e-05, "loss": 0.6617, "step": 3910 }, { "epoch": 0.62, "grad_norm": 0.7214722037315369, "learning_rate": 4.4843051389594814e-05, "loss": 0.8669, "step": 3915 }, { "epoch": 0.63, "grad_norm": 0.6019904613494873, "learning_rate": 4.4830335164112504e-05, "loss": 0.736, "step": 3920 }, { "epoch": 0.63, "grad_norm": 0.8038384318351746, "learning_rate": 4.48176050875028e-05, "loss": 0.637, "step": 3925 }, { "epoch": 0.63, "grad_norm": 0.9631878733634949, "learning_rate": 4.4804861168657455e-05, "loss": 0.9722, "step": 3930 }, { "epoch": 0.63, "grad_norm": 0.5342935919761658, "learning_rate": 4.4792103416477836e-05, "loss": 0.8081, "step": 3935 }, { "epoch": 0.63, "grad_norm": 0.5893488526344299, "learning_rate": 4.477933183987503e-05, "loss": 0.61, "step": 3940 }, { "epoch": 0.63, "grad_norm": 1.388850212097168, "learning_rate": 4.476654644776973e-05, "loss": 0.8454, "step": 3945 }, { "epoch": 0.63, "grad_norm": 0.6928623914718628, "learning_rate": 4.4753747249092305e-05, "loss": 0.7209, "step": 3950 }, { "epoch": 0.63, "grad_norm": 1.2383430004119873, "learning_rate": 4.4740934252782757e-05, "loss": 0.8205, "step": 3955 }, { "epoch": 0.63, "grad_norm": 0.6005001664161682, "learning_rate": 4.472810746779074e-05, "loss": 0.6083, "step": 3960 }, { "epoch": 0.63, "grad_norm": 0.7928474545478821, "learning_rate": 4.471526690307552e-05, "loss": 0.9735, "step": 3965 }, { "epoch": 0.63, "grad_norm": 0.8710891008377075, "learning_rate": 4.4702412567606014e-05, "loss": 0.7573, "step": 
3970 }, { "epoch": 0.63, "grad_norm": 0.6327987313270569, "learning_rate": 4.468954447036071e-05, "loss": 0.8563, "step": 3975 }, { "epoch": 0.64, "grad_norm": 0.7048762440681458, "learning_rate": 4.467666262032777e-05, "loss": 0.9176, "step": 3980 }, { "epoch": 0.64, "grad_norm": 0.6058861017227173, "learning_rate": 4.466376702650492e-05, "loss": 0.5525, "step": 3985 }, { "epoch": 0.64, "grad_norm": 0.637993574142456, "learning_rate": 4.465085769789949e-05, "loss": 0.7256, "step": 3990 }, { "epoch": 0.64, "grad_norm": 0.6992897987365723, "learning_rate": 4.463793464352842e-05, "loss": 0.8824, "step": 3995 }, { "epoch": 0.64, "grad_norm": 0.7812734246253967, "learning_rate": 4.462499787241822e-05, "loss": 0.8942, "step": 4000 }, { "epoch": 0.64, "eval_loss": 0.7363680601119995, "eval_runtime": 96.9231, "eval_samples_per_second": 7.191, "eval_steps_per_second": 7.191, "step": 4000 }, { "epoch": 0.64, "grad_norm": 0.907598078250885, "learning_rate": 4.4612047393605e-05, "loss": 0.867, "step": 4005 }, { "epoch": 0.64, "grad_norm": 0.9081722497940063, "learning_rate": 4.459908321613442e-05, "loss": 0.8757, "step": 4010 }, { "epoch": 0.64, "grad_norm": 0.5538048148155212, "learning_rate": 4.4586105349061726e-05, "loss": 0.6709, "step": 4015 }, { "epoch": 0.64, "grad_norm": 0.6632833480834961, "learning_rate": 4.457311380145173e-05, "loss": 0.8362, "step": 4020 }, { "epoch": 0.64, "grad_norm": 0.8646539449691772, "learning_rate": 4.4560108582378766e-05, "loss": 0.8527, "step": 4025 }, { "epoch": 0.64, "grad_norm": 0.6309005618095398, "learning_rate": 4.454708970092678e-05, "loss": 0.595, "step": 4030 }, { "epoch": 0.64, "grad_norm": 0.5711541175842285, "learning_rate": 4.45340571661892e-05, "loss": 0.8069, "step": 4035 }, { "epoch": 0.64, "grad_norm": 1.1379880905151367, "learning_rate": 4.4521010987269006e-05, "loss": 0.8464, "step": 4040 }, { "epoch": 0.65, "grad_norm": 0.6005469560623169, "learning_rate": 4.450795117327874e-05, "loss": 0.5801, "step": 4045 }, { "epoch": 0.65, "grad_norm": 0.7842866778373718, "learning_rate": 4.449487773334042e-05, "loss": 0.6238, "step": 4050 }, { "epoch": 0.65, "grad_norm": 0.7519890069961548, "learning_rate": 4.448179067658563e-05, "loss": 1.1255, "step": 4055 }, { "epoch": 0.65, "grad_norm": 0.5955212712287903, "learning_rate": 4.446869001215542e-05, "loss": 0.7738, "step": 4060 }, { "epoch": 0.65, "grad_norm": 0.5085921287536621, "learning_rate": 4.4455575749200364e-05, "loss": 0.6239, "step": 4065 }, { "epoch": 0.65, "grad_norm": 0.779778003692627, "learning_rate": 4.444244789688056e-05, "loss": 0.9719, "step": 4070 }, { "epoch": 0.65, "grad_norm": 0.7279208898544312, "learning_rate": 4.442930646436554e-05, "loss": 0.9854, "step": 4075 }, { "epoch": 0.65, "grad_norm": 0.9218065738677979, "learning_rate": 4.4416151460834376e-05, "loss": 0.8096, "step": 4080 }, { "epoch": 0.65, "grad_norm": 0.7595914006233215, "learning_rate": 4.44029828954756e-05, "loss": 0.7955, "step": 4085 }, { "epoch": 0.65, "grad_norm": 0.785493016242981, "learning_rate": 4.43898007774872e-05, "loss": 0.8598, "step": 4090 }, { "epoch": 0.65, "grad_norm": 0.5540453195571899, "learning_rate": 4.437660511607666e-05, "loss": 0.8485, "step": 4095 }, { "epoch": 0.65, "grad_norm": 0.7215760350227356, "learning_rate": 4.43633959204609e-05, "loss": 0.9286, "step": 4100 }, { "epoch": 0.65, "eval_loss": 0.7347923517227173, "eval_runtime": 96.8658, "eval_samples_per_second": 7.196, "eval_steps_per_second": 7.196, "step": 4100 }, { "epoch": 0.66, "grad_norm": 0.7934743762016296, "learning_rate": 
4.435017319986631e-05, "loss": 0.7829, "step": 4105 }, { "epoch": 0.66, "grad_norm": 1.503614068031311, "learning_rate": 4.43369369635287e-05, "loss": 0.7203, "step": 4110 }, { "epoch": 0.66, "grad_norm": 0.6292420625686646, "learning_rate": 4.4323687220693365e-05, "loss": 0.7556, "step": 4115 }, { "epoch": 0.66, "grad_norm": 0.6981114149093628, "learning_rate": 4.431042398061499e-05, "loss": 0.6953, "step": 4120 }, { "epoch": 0.66, "grad_norm": 0.8554514050483704, "learning_rate": 4.4297147252557715e-05, "loss": 0.7731, "step": 4125 }, { "epoch": 0.66, "grad_norm": 1.1464003324508667, "learning_rate": 4.428385704579509e-05, "loss": 0.7761, "step": 4130 }, { "epoch": 0.66, "grad_norm": 0.6772524118423462, "learning_rate": 4.427055336961008e-05, "loss": 0.7529, "step": 4135 }, { "epoch": 0.66, "grad_norm": 0.5949820280075073, "learning_rate": 4.425723623329507e-05, "loss": 0.9164, "step": 4140 }, { "epoch": 0.66, "grad_norm": 0.848900318145752, "learning_rate": 4.4243905646151825e-05, "loss": 0.8385, "step": 4145 }, { "epoch": 0.66, "grad_norm": 0.7119936943054199, "learning_rate": 4.4230561617491514e-05, "loss": 0.6342, "step": 4150 }, { "epoch": 0.66, "grad_norm": 0.4240078628063202, "learning_rate": 4.421720415663472e-05, "loss": 0.9921, "step": 4155 }, { "epoch": 0.66, "grad_norm": 1.166399359703064, "learning_rate": 4.4203833272911355e-05, "loss": 0.6751, "step": 4160 }, { "epoch": 0.66, "grad_norm": 0.7882303595542908, "learning_rate": 4.4190448975660756e-05, "loss": 0.8711, "step": 4165 }, { "epoch": 0.67, "grad_norm": 0.7739405632019043, "learning_rate": 4.417705127423162e-05, "loss": 0.7635, "step": 4170 }, { "epoch": 0.67, "grad_norm": 0.6729245781898499, "learning_rate": 4.416364017798197e-05, "loss": 1.0083, "step": 4175 }, { "epoch": 0.67, "grad_norm": 0.7291648983955383, "learning_rate": 4.4150215696279233e-05, "loss": 0.9355, "step": 4180 }, { "epoch": 0.67, "grad_norm": 0.569436252117157, "learning_rate": 4.413677783850015e-05, "loss": 0.5718, "step": 4185 }, { "epoch": 0.67, "grad_norm": 0.7857233285903931, "learning_rate": 4.412332661403085e-05, "loss": 0.6356, "step": 4190 }, { "epoch": 0.67, "grad_norm": 1.124894618988037, "learning_rate": 4.410986203226672e-05, "loss": 0.9911, "step": 4195 }, { "epoch": 0.67, "grad_norm": 0.7088748216629028, "learning_rate": 4.409638410261256e-05, "loss": 0.7703, "step": 4200 }, { "epoch": 0.67, "eval_loss": 0.7353793978691101, "eval_runtime": 96.9146, "eval_samples_per_second": 7.192, "eval_steps_per_second": 7.192, "step": 4200 }, { "epoch": 0.67, "grad_norm": 0.8883334398269653, "learning_rate": 4.4082892834482456e-05, "loss": 0.7829, "step": 4205 }, { "epoch": 0.67, "grad_norm": 0.5809643864631653, "learning_rate": 4.406938823729979e-05, "loss": 0.79, "step": 4210 }, { "epoch": 0.67, "grad_norm": 2.2371888160705566, "learning_rate": 4.405587032049731e-05, "loss": 0.9394, "step": 4215 }, { "epoch": 0.67, "grad_norm": 0.6468964219093323, "learning_rate": 4.4042339093517e-05, "loss": 0.7621, "step": 4220 }, { "epoch": 0.67, "grad_norm": 0.8613569736480713, "learning_rate": 4.4028794565810194e-05, "loss": 0.9303, "step": 4225 }, { "epoch": 0.68, "grad_norm": 0.8210548162460327, "learning_rate": 4.4015236746837505e-05, "loss": 1.04, "step": 4230 }, { "epoch": 0.68, "grad_norm": 0.8066801428794861, "learning_rate": 4.4001665646068804e-05, "loss": 0.9942, "step": 4235 }, { "epoch": 0.68, "grad_norm": 0.6841477751731873, "learning_rate": 4.3988081272983263e-05, "loss": 0.6893, "step": 4240 }, { "epoch": 0.68, "grad_norm": 
0.7812705636024475, "learning_rate": 4.3974483637069333e-05, "loss": 0.9125, "step": 4245 }, { "epoch": 0.68, "grad_norm": 0.7913382649421692, "learning_rate": 4.3960872747824686e-05, "loss": 0.9298, "step": 4250 }, { "epoch": 0.68, "grad_norm": 0.6053805947303772, "learning_rate": 4.394724861475631e-05, "loss": 0.7055, "step": 4255 }, { "epoch": 0.68, "grad_norm": 0.6879487633705139, "learning_rate": 4.393361124738039e-05, "loss": 0.605, "step": 4260 }, { "epoch": 0.68, "grad_norm": 0.7929925918579102, "learning_rate": 4.3919960655222394e-05, "loss": 0.8569, "step": 4265 }, { "epoch": 0.68, "grad_norm": 0.5888631939888, "learning_rate": 4.390629684781701e-05, "loss": 0.6246, "step": 4270 }, { "epoch": 0.68, "grad_norm": 0.9546008706092834, "learning_rate": 4.389261983470815e-05, "loss": 0.7964, "step": 4275 }, { "epoch": 0.68, "grad_norm": 0.620267391204834, "learning_rate": 4.387892962544896e-05, "loss": 0.7127, "step": 4280 }, { "epoch": 0.68, "grad_norm": 0.7655039429664612, "learning_rate": 4.3865226229601805e-05, "loss": 0.6936, "step": 4285 }, { "epoch": 0.68, "grad_norm": 0.5404471158981323, "learning_rate": 4.3851509656738264e-05, "loss": 0.6141, "step": 4290 }, { "epoch": 0.69, "grad_norm": 0.9140282273292542, "learning_rate": 4.38377799164391e-05, "loss": 1.152, "step": 4295 }, { "epoch": 0.69, "grad_norm": 0.4845621585845947, "learning_rate": 4.382403701829429e-05, "loss": 0.8322, "step": 4300 }, { "epoch": 0.69, "eval_loss": 0.733027458190918, "eval_runtime": 96.886, "eval_samples_per_second": 7.194, "eval_steps_per_second": 7.194, "step": 4300 }, { "epoch": 0.69, "grad_norm": 0.598147988319397, "learning_rate": 4.381028097190299e-05, "loss": 0.772, "step": 4305 }, { "epoch": 0.69, "grad_norm": 0.5572992563247681, "learning_rate": 4.3796511786873574e-05, "loss": 0.7232, "step": 4310 }, { "epoch": 0.69, "grad_norm": 0.7913936376571655, "learning_rate": 4.378272947282354e-05, "loss": 0.6972, "step": 4315 }, { "epoch": 0.69, "grad_norm": 0.4532865583896637, "learning_rate": 4.376893403937959e-05, "loss": 0.7454, "step": 4320 }, { "epoch": 0.69, "grad_norm": 0.8871356844902039, "learning_rate": 4.375512549617759e-05, "loss": 0.6946, "step": 4325 }, { "epoch": 0.69, "grad_norm": 0.7564520835876465, "learning_rate": 4.374130385286255e-05, "loss": 0.9257, "step": 4330 }, { "epoch": 0.69, "grad_norm": 0.7280387282371521, "learning_rate": 4.3727469119088624e-05, "loss": 0.756, "step": 4335 }, { "epoch": 0.69, "grad_norm": 0.6494055986404419, "learning_rate": 4.3713621304519144e-05, "loss": 0.6358, "step": 4340 }, { "epoch": 0.69, "grad_norm": 0.6048948764801025, "learning_rate": 4.369976041882654e-05, "loss": 0.6705, "step": 4345 }, { "epoch": 0.69, "grad_norm": 0.6458585858345032, "learning_rate": 4.36858864716924e-05, "loss": 0.7999, "step": 4350 }, { "epoch": 0.7, "grad_norm": 0.837872326374054, "learning_rate": 4.36719994728074e-05, "loss": 0.7671, "step": 4355 }, { "epoch": 0.7, "grad_norm": 0.6451572775840759, "learning_rate": 4.365809943187138e-05, "loss": 0.8672, "step": 4360 }, { "epoch": 0.7, "grad_norm": 0.6438645124435425, "learning_rate": 4.364418635859326e-05, "loss": 0.78, "step": 4365 }, { "epoch": 0.7, "grad_norm": 0.7427099347114563, "learning_rate": 4.363026026269106e-05, "loss": 0.8977, "step": 4370 }, { "epoch": 0.7, "grad_norm": 0.7844499945640564, "learning_rate": 4.36163211538919e-05, "loss": 0.7586, "step": 4375 }, { "epoch": 0.7, "grad_norm": 0.8544999361038208, "learning_rate": 4.360236904193201e-05, "loss": 0.7085, "step": 4380 }, { "epoch": 0.7, 
"grad_norm": 2.431629180908203, "learning_rate": 4.358840393655668e-05, "loss": 0.8572, "step": 4385 }, { "epoch": 0.7, "grad_norm": 0.6864097118377686, "learning_rate": 4.357442584752027e-05, "loss": 0.6848, "step": 4390 }, { "epoch": 0.7, "grad_norm": 0.7158388495445251, "learning_rate": 4.356043478458623e-05, "loss": 1.0071, "step": 4395 }, { "epoch": 0.7, "grad_norm": 0.7883514165878296, "learning_rate": 4.3546430757527066e-05, "loss": 0.9851, "step": 4400 }, { "epoch": 0.7, "eval_loss": 0.732368528842926, "eval_runtime": 96.9109, "eval_samples_per_second": 7.192, "eval_steps_per_second": 7.192, "step": 4400 }, { "epoch": 0.7, "grad_norm": 0.673925518989563, "learning_rate": 4.353241377612433e-05, "loss": 0.7076, "step": 4405 }, { "epoch": 0.7, "grad_norm": 0.9540270566940308, "learning_rate": 4.351838385016862e-05, "loss": 0.8989, "step": 4410 }, { "epoch": 0.7, "grad_norm": 0.8137551546096802, "learning_rate": 4.35043409894596e-05, "loss": 0.7633, "step": 4415 }, { "epoch": 0.71, "grad_norm": 0.779330313205719, "learning_rate": 4.349028520380594e-05, "loss": 0.7013, "step": 4420 }, { "epoch": 0.71, "grad_norm": 0.7883580327033997, "learning_rate": 4.347621650302535e-05, "loss": 0.9788, "step": 4425 }, { "epoch": 0.71, "grad_norm": 0.7106336951255798, "learning_rate": 4.3462134896944565e-05, "loss": 0.8399, "step": 4430 }, { "epoch": 0.71, "grad_norm": 0.6303668022155762, "learning_rate": 4.344804039539933e-05, "loss": 0.5943, "step": 4435 }, { "epoch": 0.71, "grad_norm": 1.2975471019744873, "learning_rate": 4.3433933008234395e-05, "loss": 0.8917, "step": 4440 }, { "epoch": 0.71, "grad_norm": 0.68232661485672, "learning_rate": 4.341981274530351e-05, "loss": 0.7756, "step": 4445 }, { "epoch": 0.71, "grad_norm": 0.6689594984054565, "learning_rate": 4.340567961646943e-05, "loss": 0.772, "step": 4450 }, { "epoch": 0.71, "grad_norm": 1.102365493774414, "learning_rate": 4.339153363160388e-05, "loss": 0.738, "step": 4455 }, { "epoch": 0.71, "grad_norm": 0.6535090804100037, "learning_rate": 4.337737480058758e-05, "loss": 0.9096, "step": 4460 }, { "epoch": 0.71, "grad_norm": 0.676058292388916, "learning_rate": 4.3363203133310206e-05, "loss": 0.9634, "step": 4465 }, { "epoch": 0.71, "grad_norm": 0.9258711934089661, "learning_rate": 4.3349018639670415e-05, "loss": 0.8025, "step": 4470 }, { "epoch": 0.71, "grad_norm": 0.5786353349685669, "learning_rate": 4.333482132957581e-05, "loss": 0.7638, "step": 4475 }, { "epoch": 0.72, "grad_norm": 0.7258582711219788, "learning_rate": 4.332061121294296e-05, "loss": 1.3538, "step": 4480 }, { "epoch": 0.72, "grad_norm": 0.9384926557540894, "learning_rate": 4.330638829969738e-05, "loss": 0.8485, "step": 4485 }, { "epoch": 0.72, "grad_norm": 0.5252525806427002, "learning_rate": 4.3292152599773494e-05, "loss": 0.8547, "step": 4490 }, { "epoch": 0.72, "grad_norm": 0.7551200985908508, "learning_rate": 4.32779041231147e-05, "loss": 0.7435, "step": 4495 }, { "epoch": 0.72, "grad_norm": 0.7492663264274597, "learning_rate": 4.3263642879673286e-05, "loss": 0.8712, "step": 4500 }, { "epoch": 0.72, "eval_loss": 0.7316818237304688, "eval_runtime": 96.9418, "eval_samples_per_second": 7.19, "eval_steps_per_second": 7.19, "step": 4500 }, { "epoch": 0.72, "grad_norm": 0.7490917444229126, "learning_rate": 4.3249368879410475e-05, "loss": 0.7598, "step": 4505 }, { "epoch": 0.72, "grad_norm": 0.7305790781974792, "learning_rate": 4.323508213229639e-05, "loss": 0.8315, "step": 4510 }, { "epoch": 0.72, "grad_norm": 0.7009093165397644, "learning_rate": 4.3220782648310075e-05, "loss": 
0.7482, "step": 4515 }, { "epoch": 0.72, "grad_norm": 0.7155885100364685, "learning_rate": 4.320647043743945e-05, "loss": 0.8385, "step": 4520 }, { "epoch": 0.72, "grad_norm": 0.6159176826477051, "learning_rate": 4.319214550968133e-05, "loss": 0.6507, "step": 4525 }, { "epoch": 0.72, "grad_norm": 0.7776069045066833, "learning_rate": 4.3177807875041424e-05, "loss": 0.855, "step": 4530 }, { "epoch": 0.72, "grad_norm": 0.6204195618629456, "learning_rate": 4.316345754353432e-05, "loss": 0.7169, "step": 4535 }, { "epoch": 0.72, "grad_norm": 0.7233458757400513, "learning_rate": 4.3149094525183426e-05, "loss": 0.5399, "step": 4540 }, { "epoch": 0.73, "grad_norm": 0.7921779155731201, "learning_rate": 4.313471883002108e-05, "loss": 0.9124, "step": 4545 }, { "epoch": 0.73, "grad_norm": 0.9145547747612, "learning_rate": 4.3120330468088435e-05, "loss": 1.2346, "step": 4550 }, { "epoch": 0.73, "grad_norm": 0.8733106255531311, "learning_rate": 4.310592944943549e-05, "loss": 0.6737, "step": 4555 }, { "epoch": 0.73, "grad_norm": 0.6620619297027588, "learning_rate": 4.3091515784121107e-05, "loss": 0.8041, "step": 4560 }, { "epoch": 0.73, "grad_norm": 0.7026892900466919, "learning_rate": 4.307708948221296e-05, "loss": 0.9422, "step": 4565 }, { "epoch": 0.73, "grad_norm": 0.7953292727470398, "learning_rate": 4.3062650553787566e-05, "loss": 0.7398, "step": 4570 }, { "epoch": 0.73, "grad_norm": 1.6465870141983032, "learning_rate": 4.304819900893024e-05, "loss": 0.8175, "step": 4575 }, { "epoch": 0.73, "grad_norm": 1.3427163362503052, "learning_rate": 4.303373485773513e-05, "loss": 0.7331, "step": 4580 }, { "epoch": 0.73, "grad_norm": 0.6665405035018921, "learning_rate": 4.3019258110305186e-05, "loss": 0.7529, "step": 4585 }, { "epoch": 0.73, "grad_norm": 0.796320915222168, "learning_rate": 4.300476877675215e-05, "loss": 0.915, "step": 4590 }, { "epoch": 0.73, "grad_norm": 0.551832377910614, "learning_rate": 4.299026686719655e-05, "loss": 0.7693, "step": 4595 }, { "epoch": 0.73, "grad_norm": 0.75690096616745, "learning_rate": 4.297575239176771e-05, "loss": 0.7871, "step": 4600 }, { "epoch": 0.73, "eval_loss": 0.730965256690979, "eval_runtime": 96.8803, "eval_samples_per_second": 7.194, "eval_steps_per_second": 7.194, "step": 4600 }, { "epoch": 0.74, "grad_norm": 0.7093445062637329, "learning_rate": 4.296122536060373e-05, "loss": 0.6279, "step": 4605 }, { "epoch": 0.74, "grad_norm": 0.6522731781005859, "learning_rate": 4.294668578385147e-05, "loss": 0.5442, "step": 4610 }, { "epoch": 0.74, "grad_norm": 0.7964634299278259, "learning_rate": 4.2932133671666565e-05, "loss": 1.0221, "step": 4615 }, { "epoch": 0.74, "grad_norm": 0.7733820676803589, "learning_rate": 4.2917569034213395e-05, "loss": 0.7152, "step": 4620 }, { "epoch": 0.74, "grad_norm": 0.8039364218711853, "learning_rate": 4.2902991881665097e-05, "loss": 1.0939, "step": 4625 }, { "epoch": 0.74, "grad_norm": 0.756020724773407, "learning_rate": 4.2888402224203536e-05, "loss": 0.7539, "step": 4630 }, { "epoch": 0.74, "grad_norm": 0.5059025287628174, "learning_rate": 4.2873800072019345e-05, "loss": 0.8716, "step": 4635 }, { "epoch": 0.74, "grad_norm": 0.8273636102676392, "learning_rate": 4.285918543531183e-05, "loss": 0.687, "step": 4640 }, { "epoch": 0.74, "grad_norm": 0.6505921483039856, "learning_rate": 4.2844558324289076e-05, "loss": 1.0697, "step": 4645 }, { "epoch": 0.74, "grad_norm": 0.6481053829193115, "learning_rate": 4.282991874916784e-05, "loss": 0.884, "step": 4650 }, { "epoch": 0.74, "grad_norm": 0.8193663954734802, "learning_rate": 
4.28152667201736e-05, "loss": 0.962, "step": 4655 }, { "epoch": 0.74, "grad_norm": 0.7153398990631104, "learning_rate": 4.280060224754053e-05, "loss": 0.7705, "step": 4660 }, { "epoch": 0.74, "grad_norm": 0.7197556495666504, "learning_rate": 4.278592534151149e-05, "loss": 0.8521, "step": 4665 }, { "epoch": 0.75, "grad_norm": 1.009098768234253, "learning_rate": 4.2771236012338044e-05, "loss": 0.8425, "step": 4670 }, { "epoch": 0.75, "grad_norm": 0.598564088344574, "learning_rate": 4.275653427028041e-05, "loss": 0.7072, "step": 4675 }, { "epoch": 0.75, "grad_norm": 0.8035867810249329, "learning_rate": 4.2741820125607504e-05, "loss": 0.6689, "step": 4680 }, { "epoch": 0.75, "grad_norm": 0.6109891533851624, "learning_rate": 4.2727093588596866e-05, "loss": 0.7776, "step": 4685 }, { "epoch": 0.75, "grad_norm": 0.7504151463508606, "learning_rate": 4.271235466953473e-05, "loss": 0.7481, "step": 4690 }, { "epoch": 0.75, "grad_norm": 0.9492344260215759, "learning_rate": 4.269760337871594e-05, "loss": 0.7261, "step": 4695 }, { "epoch": 0.75, "grad_norm": 0.5792133212089539, "learning_rate": 4.2682839726444035e-05, "loss": 0.7156, "step": 4700 }, { "epoch": 0.75, "eval_loss": 0.7283556461334229, "eval_runtime": 96.8998, "eval_samples_per_second": 7.193, "eval_steps_per_second": 7.193, "step": 4700 }, { "epoch": 0.75, "grad_norm": 0.8895491361618042, "learning_rate": 4.266806372303113e-05, "loss": 0.8466, "step": 4705 }, { "epoch": 0.75, "grad_norm": 0.7995960712432861, "learning_rate": 4.2653275378798005e-05, "loss": 0.7823, "step": 4710 }, { "epoch": 0.75, "grad_norm": 0.6673771739006042, "learning_rate": 4.263847470407405e-05, "loss": 0.5461, "step": 4715 }, { "epoch": 0.75, "grad_norm": 0.6228974461555481, "learning_rate": 4.262366170919726e-05, "loss": 0.7611, "step": 4720 }, { "epoch": 0.75, "grad_norm": 0.8050612807273865, "learning_rate": 4.2608836404514255e-05, "loss": 0.6524, "step": 4725 }, { "epoch": 0.75, "grad_norm": 0.8815121650695801, "learning_rate": 4.2593998800380216e-05, "loss": 0.9997, "step": 4730 }, { "epoch": 0.76, "grad_norm": 1.0408731698989868, "learning_rate": 4.257914890715897e-05, "loss": 0.7031, "step": 4735 }, { "epoch": 0.76, "grad_norm": 0.6744192838668823, "learning_rate": 4.256428673522287e-05, "loss": 0.6587, "step": 4740 }, { "epoch": 0.76, "grad_norm": 1.014369249343872, "learning_rate": 4.254941229495289e-05, "loss": 0.7726, "step": 4745 }, { "epoch": 0.76, "grad_norm": 0.7497864365577698, "learning_rate": 4.2534525596738526e-05, "loss": 0.7327, "step": 4750 }, { "epoch": 0.76, "grad_norm": 0.6479122042655945, "learning_rate": 4.2519626650977905e-05, "loss": 0.7071, "step": 4755 }, { "epoch": 0.76, "grad_norm": 0.6300268173217773, "learning_rate": 4.250471546807765e-05, "loss": 0.9479, "step": 4760 }, { "epoch": 0.76, "grad_norm": 0.8272077441215515, "learning_rate": 4.248979205845294e-05, "loss": 0.9013, "step": 4765 }, { "epoch": 0.76, "grad_norm": 0.7070410847663879, "learning_rate": 4.2474856432527524e-05, "loss": 0.713, "step": 4770 }, { "epoch": 0.76, "grad_norm": 0.7199767231941223, "learning_rate": 4.2459908600733654e-05, "loss": 0.9308, "step": 4775 }, { "epoch": 0.76, "grad_norm": 0.6886048316955566, "learning_rate": 4.244494857351212e-05, "loss": 0.8008, "step": 4780 }, { "epoch": 0.76, "grad_norm": 0.6097077131271362, "learning_rate": 4.242997636131222e-05, "loss": 0.9639, "step": 4785 }, { "epoch": 0.76, "grad_norm": 1.0947343111038208, "learning_rate": 4.241499197459178e-05, "loss": 0.9012, "step": 4790 }, { "epoch": 0.77, "grad_norm": 
0.6965738534927368, "learning_rate": 4.239999542381712e-05, "loss": 0.6745, "step": 4795 }, { "epoch": 0.77, "grad_norm": 0.8290371894836426, "learning_rate": 4.238498671946306e-05, "loss": 0.7856, "step": 4800 }, { "epoch": 0.77, "eval_loss": 0.7277354598045349, "eval_runtime": 96.9165, "eval_samples_per_second": 7.192, "eval_steps_per_second": 7.192, "step": 4800 }, { "epoch": 0.77, "grad_norm": 0.8061904907226562, "learning_rate": 4.2369965872012904e-05, "loss": 0.7034, "step": 4805 }, { "epoch": 0.77, "grad_norm": 0.6652625799179077, "learning_rate": 4.2354932891958434e-05, "loss": 0.5825, "step": 4810 }, { "epoch": 0.77, "grad_norm": 0.6529026627540588, "learning_rate": 4.2339887789799916e-05, "loss": 0.7407, "step": 4815 }, { "epoch": 0.77, "grad_norm": 3.7802493572235107, "learning_rate": 4.232483057604607e-05, "loss": 0.8906, "step": 4820 }, { "epoch": 0.77, "grad_norm": 0.7709060907363892, "learning_rate": 4.230976126121411e-05, "loss": 0.863, "step": 4825 }, { "epoch": 0.77, "grad_norm": 1.2582249641418457, "learning_rate": 4.229467985582966e-05, "loss": 1.065, "step": 4830 }, { "epoch": 0.77, "grad_norm": 0.5523508191108704, "learning_rate": 4.22795863704268e-05, "loss": 0.5925, "step": 4835 }, { "epoch": 0.77, "grad_norm": 1.3535953760147095, "learning_rate": 4.2264480815548076e-05, "loss": 0.7993, "step": 4840 }, { "epoch": 0.77, "grad_norm": 1.067133903503418, "learning_rate": 4.2249363201744425e-05, "loss": 0.7921, "step": 4845 }, { "epoch": 0.77, "grad_norm": 0.6478603482246399, "learning_rate": 4.223423353957523e-05, "loss": 0.6769, "step": 4850 }, { "epoch": 0.77, "grad_norm": 0.6439855694770813, "learning_rate": 4.2219091839608276e-05, "loss": 0.9018, "step": 4855 }, { "epoch": 0.78, "grad_norm": 0.5302556753158569, "learning_rate": 4.2203938112419786e-05, "loss": 0.837, "step": 4860 }, { "epoch": 0.78, "grad_norm": 0.8129810690879822, "learning_rate": 4.218877236859433e-05, "loss": 0.9195, "step": 4865 }, { "epoch": 0.78, "grad_norm": 0.6531801819801331, "learning_rate": 4.217359461872493e-05, "loss": 0.6829, "step": 4870 }, { "epoch": 0.78, "grad_norm": 0.7695423364639282, "learning_rate": 4.215840487341296e-05, "loss": 0.7739, "step": 4875 }, { "epoch": 0.78, "grad_norm": 1.3163946866989136, "learning_rate": 4.2143203143268184e-05, "loss": 0.9678, "step": 4880 }, { "epoch": 0.78, "grad_norm": 1.1124577522277832, "learning_rate": 4.212798943890871e-05, "loss": 0.9327, "step": 4885 }, { "epoch": 0.78, "grad_norm": 0.8979106545448303, "learning_rate": 4.2112763770961074e-05, "loss": 0.7043, "step": 4890 }, { "epoch": 0.78, "grad_norm": 0.9879763126373291, "learning_rate": 4.2097526150060085e-05, "loss": 0.8129, "step": 4895 }, { "epoch": 0.78, "grad_norm": 0.7016007304191589, "learning_rate": 4.208227658684898e-05, "loss": 0.7906, "step": 4900 }, { "epoch": 0.78, "eval_loss": 0.7254941463470459, "eval_runtime": 96.9328, "eval_samples_per_second": 7.191, "eval_steps_per_second": 7.191, "step": 4900 }, { "epoch": 0.78, "grad_norm": 0.5404706597328186, "learning_rate": 4.206701509197927e-05, "loss": 0.7769, "step": 4905 }, { "epoch": 0.78, "grad_norm": 0.7096789479255676, "learning_rate": 4.205174167611085e-05, "loss": 0.5985, "step": 4910 }, { "epoch": 0.78, "grad_norm": 0.8139373660087585, "learning_rate": 4.20364563499119e-05, "loss": 0.75, "step": 4915 }, { "epoch": 0.79, "grad_norm": 1.2196255922317505, "learning_rate": 4.202115912405897e-05, "loss": 0.8441, "step": 4920 }, { "epoch": 0.79, "grad_norm": 0.6912347674369812, "learning_rate": 4.200585000923689e-05, 
"loss": 0.8885, "step": 4925 }, { "epoch": 0.79, "grad_norm": 0.7879334688186646, "learning_rate": 4.199052901613878e-05, "loss": 0.6353, "step": 4930 }, { "epoch": 0.79, "grad_norm": 1.0302627086639404, "learning_rate": 4.197519615546608e-05, "loss": 0.7704, "step": 4935 }, { "epoch": 0.79, "grad_norm": 0.5351320505142212, "learning_rate": 4.195985143792851e-05, "loss": 0.8094, "step": 4940 }, { "epoch": 0.79, "grad_norm": 0.7632457613945007, "learning_rate": 4.194449487424409e-05, "loss": 0.9625, "step": 4945 }, { "epoch": 0.79, "grad_norm": 0.7706131935119629, "learning_rate": 4.1929126475139096e-05, "loss": 0.7007, "step": 4950 }, { "epoch": 0.79, "grad_norm": 0.6262048482894897, "learning_rate": 4.191374625134806e-05, "loss": 0.7768, "step": 4955 }, { "epoch": 0.79, "grad_norm": 0.8302519917488098, "learning_rate": 4.189835421361381e-05, "loss": 0.8281, "step": 4960 }, { "epoch": 0.79, "grad_norm": 0.5914260149002075, "learning_rate": 4.188295037268738e-05, "loss": 0.8554, "step": 4965 }, { "epoch": 0.79, "grad_norm": 0.7599936127662659, "learning_rate": 4.1867534739328085e-05, "loss": 0.9547, "step": 4970 }, { "epoch": 0.79, "grad_norm": 0.4832470417022705, "learning_rate": 4.1852107324303455e-05, "loss": 0.5212, "step": 4975 }, { "epoch": 0.79, "grad_norm": 0.8040557503700256, "learning_rate": 4.183666813838927e-05, "loss": 0.8939, "step": 4980 }, { "epoch": 0.8, "grad_norm": 0.7503822445869446, "learning_rate": 4.182121719236952e-05, "loss": 0.9279, "step": 4985 }, { "epoch": 0.8, "grad_norm": 0.7608035206794739, "learning_rate": 4.180575449703639e-05, "loss": 0.7965, "step": 4990 }, { "epoch": 0.8, "grad_norm": 0.5479308366775513, "learning_rate": 4.1790280063190315e-05, "loss": 0.7478, "step": 4995 }, { "epoch": 0.8, "grad_norm": 0.7714606523513794, "learning_rate": 4.177479390163989e-05, "loss": 0.7917, "step": 5000 }, { "epoch": 0.8, "eval_loss": 0.7249829173088074, "eval_runtime": 96.9791, "eval_samples_per_second": 7.187, "eval_steps_per_second": 7.187, "step": 5000 }, { "epoch": 0.8, "grad_norm": 0.7731960415840149, "learning_rate": 4.175929602320192e-05, "loss": 0.5968, "step": 5005 }, { "epoch": 0.8, "grad_norm": 0.525674045085907, "learning_rate": 4.174378643870138e-05, "loss": 0.6748, "step": 5010 }, { "epoch": 0.8, "grad_norm": 0.8199644088745117, "learning_rate": 4.172826515897146e-05, "loss": 0.7478, "step": 5015 }, { "epoch": 0.8, "grad_norm": 0.5651956796646118, "learning_rate": 4.1712732194853464e-05, "loss": 0.5784, "step": 5020 }, { "epoch": 0.8, "grad_norm": 0.8027454614639282, "learning_rate": 4.1697187557196896e-05, "loss": 0.9719, "step": 5025 }, { "epoch": 0.8, "grad_norm": 0.8870482444763184, "learning_rate": 4.168163125685939e-05, "loss": 0.7407, "step": 5030 }, { "epoch": 0.8, "grad_norm": 0.6260176301002502, "learning_rate": 4.1666063304706756e-05, "loss": 0.8039, "step": 5035 }, { "epoch": 0.8, "grad_norm": 0.5424874424934387, "learning_rate": 4.165048371161291e-05, "loss": 0.8169, "step": 5040 }, { "epoch": 0.81, "grad_norm": 0.8967744708061218, "learning_rate": 4.163489248845992e-05, "loss": 0.7546, "step": 5045 }, { "epoch": 0.81, "grad_norm": 0.871800422668457, "learning_rate": 4.161928964613797e-05, "loss": 0.7789, "step": 5050 }, { "epoch": 0.81, "grad_norm": 0.9717918634414673, "learning_rate": 4.1603675195545356e-05, "loss": 0.7955, "step": 5055 }, { "epoch": 0.81, "grad_norm": 0.6605653762817383, "learning_rate": 4.15880491475885e-05, "loss": 0.8416, "step": 5060 }, { "epoch": 0.81, "grad_norm": 0.9123837351799011, "learning_rate": 
4.1572411513181896e-05, "loss": 0.6594, "step": 5065 }, { "epoch": 0.81, "grad_norm": 0.8939911723136902, "learning_rate": 4.155676230324816e-05, "loss": 0.858, "step": 5070 }, { "epoch": 0.81, "grad_norm": 0.6447166204452515, "learning_rate": 4.154110152871797e-05, "loss": 0.7425, "step": 5075 }, { "epoch": 0.81, "grad_norm": 0.9701817631721497, "learning_rate": 4.152542920053009e-05, "loss": 0.9332, "step": 5080 }, { "epoch": 0.81, "grad_norm": 0.7782546281814575, "learning_rate": 4.1509745329631365e-05, "loss": 0.8372, "step": 5085 }, { "epoch": 0.81, "grad_norm": 0.6977235078811646, "learning_rate": 4.149404992697669e-05, "loss": 0.6689, "step": 5090 }, { "epoch": 0.81, "grad_norm": 0.7119914293289185, "learning_rate": 4.147834300352901e-05, "loss": 0.6255, "step": 5095 }, { "epoch": 0.81, "grad_norm": 0.7148675322532654, "learning_rate": 4.146262457025933e-05, "loss": 0.6395, "step": 5100 }, { "epoch": 0.81, "eval_loss": 0.7237269282341003, "eval_runtime": 96.898, "eval_samples_per_second": 7.193, "eval_steps_per_second": 7.193, "step": 5100 }, { "epoch": 0.81, "grad_norm": 0.7843590378761292, "learning_rate": 4.144689463814669e-05, "loss": 0.6286, "step": 5105 }, { "epoch": 0.82, "grad_norm": 0.8296339511871338, "learning_rate": 4.143115321817815e-05, "loss": 0.8533, "step": 5110 }, { "epoch": 0.82, "grad_norm": 0.6789836287498474, "learning_rate": 4.1415400321348827e-05, "loss": 0.7821, "step": 5115 }, { "epoch": 0.82, "grad_norm": 0.9112969040870667, "learning_rate": 4.139963595866182e-05, "loss": 0.9232, "step": 5120 }, { "epoch": 0.82, "grad_norm": 0.5057934522628784, "learning_rate": 4.138386014112824e-05, "loss": 0.5756, "step": 5125 }, { "epoch": 0.82, "grad_norm": 0.7892030477523804, "learning_rate": 4.136807287976721e-05, "loss": 0.6673, "step": 5130 }, { "epoch": 0.82, "grad_norm": 0.6285967826843262, "learning_rate": 4.135227418560585e-05, "loss": 0.843, "step": 5135 }, { "epoch": 0.82, "grad_norm": 0.5548838973045349, "learning_rate": 4.133646406967927e-05, "loss": 0.8089, "step": 5140 }, { "epoch": 0.82, "grad_norm": 0.6649813055992126, "learning_rate": 4.132064254303053e-05, "loss": 0.8778, "step": 5145 }, { "epoch": 0.82, "grad_norm": 0.6559309959411621, "learning_rate": 4.1304809616710685e-05, "loss": 0.6655, "step": 5150 }, { "epoch": 0.82, "grad_norm": 0.839788019657135, "learning_rate": 4.1288965301778745e-05, "loss": 0.8372, "step": 5155 }, { "epoch": 0.82, "grad_norm": 0.5987736582756042, "learning_rate": 4.1273109609301686e-05, "loss": 0.6379, "step": 5160 }, { "epoch": 0.82, "grad_norm": 0.7344145774841309, "learning_rate": 4.125724255035439e-05, "loss": 0.7199, "step": 5165 }, { "epoch": 0.83, "grad_norm": 1.2459057569503784, "learning_rate": 4.124136413601973e-05, "loss": 0.7656, "step": 5170 }, { "epoch": 0.83, "grad_norm": 1.2029509544372559, "learning_rate": 4.12254743773885e-05, "loss": 0.6369, "step": 5175 }, { "epoch": 0.83, "grad_norm": 0.8527836799621582, "learning_rate": 4.1209573285559376e-05, "loss": 0.7597, "step": 5180 }, { "epoch": 0.83, "grad_norm": 0.6795602440834045, "learning_rate": 4.1193660871639006e-05, "loss": 0.8141, "step": 5185 }, { "epoch": 0.83, "grad_norm": 0.8437166810035706, "learning_rate": 4.1177737146741904e-05, "loss": 0.7443, "step": 5190 }, { "epoch": 0.83, "grad_norm": 0.7290692329406738, "learning_rate": 4.1161802121990513e-05, "loss": 0.5944, "step": 5195 }, { "epoch": 0.83, "grad_norm": 0.624458372592926, "learning_rate": 4.114585580851515e-05, "loss": 0.7567, "step": 5200 }, { "epoch": 0.83, "eval_loss": 
0.7232300043106079, "eval_runtime": 96.871, "eval_samples_per_second": 7.195, "eval_steps_per_second": 7.195, "step": 5200 }, { "epoch": 0.83, "grad_norm": 0.9863276481628418, "learning_rate": 4.1129898217454034e-05, "loss": 0.7102, "step": 5205 }, { "epoch": 0.83, "grad_norm": 0.8225752115249634, "learning_rate": 4.111392935995324e-05, "loss": 0.8148, "step": 5210 }, { "epoch": 0.83, "grad_norm": 0.687274694442749, "learning_rate": 4.109794924716673e-05, "loss": 0.7285, "step": 5215 }, { "epoch": 0.83, "grad_norm": 1.306779146194458, "learning_rate": 4.108195789025632e-05, "loss": 1.0412, "step": 5220 }, { "epoch": 0.83, "grad_norm": 0.872298002243042, "learning_rate": 4.1065955300391676e-05, "loss": 0.8104, "step": 5225 }, { "epoch": 0.83, "grad_norm": 0.41228190064430237, "learning_rate": 4.104994148875032e-05, "loss": 0.8437, "step": 5230 }, { "epoch": 0.84, "grad_norm": 1.0037802457809448, "learning_rate": 4.103391646651761e-05, "loss": 0.579, "step": 5235 }, { "epoch": 0.84, "grad_norm": 0.6584619879722595, "learning_rate": 4.101788024488673e-05, "loss": 0.8698, "step": 5240 }, { "epoch": 0.84, "grad_norm": 0.8180784583091736, "learning_rate": 4.1001832835058685e-05, "loss": 0.9177, "step": 5245 }, { "epoch": 0.84, "grad_norm": 0.7600345015525818, "learning_rate": 4.0985774248242296e-05, "loss": 0.6935, "step": 5250 }, { "epoch": 0.84, "grad_norm": 0.9433395266532898, "learning_rate": 4.09697044956542e-05, "loss": 0.642, "step": 5255 }, { "epoch": 0.84, "grad_norm": 0.625467836856842, "learning_rate": 4.095362358851883e-05, "loss": 0.8038, "step": 5260 }, { "epoch": 0.84, "grad_norm": 0.5945066213607788, "learning_rate": 4.0937531538068387e-05, "loss": 0.7096, "step": 5265 }, { "epoch": 0.84, "grad_norm": 0.9545826315879822, "learning_rate": 4.092142835554289e-05, "loss": 0.6535, "step": 5270 }, { "epoch": 0.84, "grad_norm": 0.8320907354354858, "learning_rate": 4.090531405219012e-05, "loss": 0.7521, "step": 5275 }, { "epoch": 0.84, "grad_norm": 0.7830407619476318, "learning_rate": 4.088918863926562e-05, "loss": 0.5991, "step": 5280 }, { "epoch": 0.84, "grad_norm": 0.6662408709526062, "learning_rate": 4.0873052128032684e-05, "loss": 0.9341, "step": 5285 }, { "epoch": 0.84, "grad_norm": 1.2364461421966553, "learning_rate": 4.0856904529762394e-05, "loss": 0.6156, "step": 5290 }, { "epoch": 0.85, "grad_norm": 0.5708659291267395, "learning_rate": 4.084074585573354e-05, "loss": 0.6609, "step": 5295 }, { "epoch": 0.85, "grad_norm": 0.6155332326889038, "learning_rate": 4.082457611723266e-05, "loss": 0.8551, "step": 5300 }, { "epoch": 0.85, "eval_loss": 0.7219573855400085, "eval_runtime": 96.9322, "eval_samples_per_second": 7.191, "eval_steps_per_second": 7.191, "step": 5300 }, { "epoch": 0.85, "grad_norm": 0.5997724533081055, "learning_rate": 4.0808395325554023e-05, "loss": 0.7243, "step": 5305 }, { "epoch": 0.85, "grad_norm": 0.5939134359359741, "learning_rate": 4.079220349199962e-05, "loss": 0.7455, "step": 5310 }, { "epoch": 0.85, "grad_norm": 0.5447105765342712, "learning_rate": 4.077600062787915e-05, "loss": 0.8424, "step": 5315 }, { "epoch": 0.85, "grad_norm": 0.741060197353363, "learning_rate": 4.075978674451001e-05, "loss": 0.6837, "step": 5320 }, { "epoch": 0.85, "grad_norm": 0.8776602745056152, "learning_rate": 4.074356185321732e-05, "loss": 0.6541, "step": 5325 }, { "epoch": 0.85, "grad_norm": 0.5684154629707336, "learning_rate": 4.072732596533385e-05, "loss": 0.6843, "step": 5330 }, { "epoch": 0.85, "grad_norm": 0.9258201718330383, "learning_rate": 4.071107909220009e-05, "loss": 
0.7829, "step": 5335 }, { "epoch": 0.85, "grad_norm": 0.6690560579299927, "learning_rate": 4.0694821245164165e-05, "loss": 0.7726, "step": 5340 }, { "epoch": 0.85, "grad_norm": 0.9005510807037354, "learning_rate": 4.0678552435581905e-05, "loss": 0.845, "step": 5345 }, { "epoch": 0.85, "grad_norm": 0.6624660491943359, "learning_rate": 4.066227267481676e-05, "loss": 0.5798, "step": 5350 }, { "epoch": 0.85, "grad_norm": 0.5465379953384399, "learning_rate": 4.064598197423985e-05, "loss": 0.6488, "step": 5355 }, { "epoch": 0.86, "grad_norm": 0.7643209099769592, "learning_rate": 4.062968034522995e-05, "loss": 0.6886, "step": 5360 }, { "epoch": 0.86, "grad_norm": 0.7964931130409241, "learning_rate": 4.061336779917342e-05, "loss": 1.0865, "step": 5365 }, { "epoch": 0.86, "grad_norm": 0.6491959095001221, "learning_rate": 4.05970443474643e-05, "loss": 0.5869, "step": 5370 }, { "epoch": 0.86, "grad_norm": 1.0309019088745117, "learning_rate": 4.058071000150421e-05, "loss": 0.8846, "step": 5375 }, { "epoch": 0.86, "grad_norm": 0.6856257915496826, "learning_rate": 4.05643647727024e-05, "loss": 0.774, "step": 5380 }, { "epoch": 0.86, "grad_norm": 0.6670073866844177, "learning_rate": 4.0548008672475714e-05, "loss": 0.6854, "step": 5385 }, { "epoch": 0.86, "grad_norm": 0.9411470890045166, "learning_rate": 4.053164171224858e-05, "loss": 0.9237, "step": 5390 }, { "epoch": 0.86, "grad_norm": 0.7178419232368469, "learning_rate": 4.051526390345303e-05, "loss": 0.6262, "step": 5395 }, { "epoch": 0.86, "grad_norm": 0.69013512134552, "learning_rate": 4.049887525752867e-05, "loss": 0.7392, "step": 5400 }, { "epoch": 0.86, "eval_loss": 0.7226108908653259, "eval_runtime": 96.9511, "eval_samples_per_second": 7.189, "eval_steps_per_second": 7.189, "step": 5400 }, { "epoch": 0.86, "grad_norm": 1.5154650211334229, "learning_rate": 4.048247578592266e-05, "loss": 1.0026, "step": 5405 }, { "epoch": 0.86, "grad_norm": 0.6985073685646057, "learning_rate": 4.046606550008973e-05, "loss": 0.4776, "step": 5410 }, { "epoch": 0.86, "grad_norm": 0.687282383441925, "learning_rate": 4.044964441149217e-05, "loss": 0.7665, "step": 5415 }, { "epoch": 0.87, "grad_norm": 0.7575299143791199, "learning_rate": 4.043321253159981e-05, "loss": 0.8202, "step": 5420 }, { "epoch": 0.87, "grad_norm": 1.0001776218414307, "learning_rate": 4.041676987189003e-05, "loss": 0.7952, "step": 5425 }, { "epoch": 0.87, "grad_norm": 1.240440845489502, "learning_rate": 4.040031644384771e-05, "loss": 0.7086, "step": 5430 }, { "epoch": 0.87, "grad_norm": 0.7027778029441833, "learning_rate": 4.0383852258965274e-05, "loss": 0.8268, "step": 5435 }, { "epoch": 0.87, "grad_norm": 0.7208477854728699, "learning_rate": 4.036737732874266e-05, "loss": 0.7095, "step": 5440 }, { "epoch": 0.87, "grad_norm": 0.6870366334915161, "learning_rate": 4.035089166468731e-05, "loss": 0.6324, "step": 5445 }, { "epoch": 0.87, "grad_norm": 0.6353491544723511, "learning_rate": 4.033439527831415e-05, "loss": 0.6463, "step": 5450 }, { "epoch": 0.87, "grad_norm": 0.8659310340881348, "learning_rate": 4.03178881811456e-05, "loss": 0.5445, "step": 5455 }, { "epoch": 0.87, "grad_norm": 1.102812647819519, "learning_rate": 4.030137038471158e-05, "loss": 1.0779, "step": 5460 }, { "epoch": 0.87, "grad_norm": 0.6689164638519287, "learning_rate": 4.028484190054947e-05, "loss": 0.7816, "step": 5465 }, { "epoch": 0.87, "grad_norm": 0.7742553949356079, "learning_rate": 4.02683027402041e-05, "loss": 0.7534, "step": 5470 }, { "epoch": 0.87, "grad_norm": 0.6275122761726379, "learning_rate": 
4.025175291522779e-05, "loss": 0.8127, "step": 5475 }, { "epoch": 0.87, "grad_norm": 0.7048559188842773, "learning_rate": 4.0235192437180276e-05, "loss": 0.7884, "step": 5480 }, { "epoch": 0.88, "grad_norm": 0.6857985854148865, "learning_rate": 4.0218621317628755e-05, "loss": 0.6286, "step": 5485 }, { "epoch": 0.88, "grad_norm": 0.750784158706665, "learning_rate": 4.0202039568147857e-05, "loss": 0.76, "step": 5490 }, { "epoch": 0.88, "grad_norm": 0.906353235244751, "learning_rate": 4.018544720031962e-05, "loss": 0.7227, "step": 5495 }, { "epoch": 0.88, "grad_norm": 1.001240611076355, "learning_rate": 4.0168844225733516e-05, "loss": 0.9312, "step": 5500 }, { "epoch": 0.88, "eval_loss": 0.7204533815383911, "eval_runtime": 96.9186, "eval_samples_per_second": 7.192, "eval_steps_per_second": 7.192, "step": 5500 }, { "epoch": 0.88, "grad_norm": 0.6060788035392761, "learning_rate": 4.015223065598642e-05, "loss": 0.7476, "step": 5505 }, { "epoch": 0.88, "grad_norm": 0.9126180410385132, "learning_rate": 4.013560650268261e-05, "loss": 0.9137, "step": 5510 }, { "epoch": 0.88, "grad_norm": 0.7585060000419617, "learning_rate": 4.0118971777433755e-05, "loss": 0.7926, "step": 5515 }, { "epoch": 0.88, "grad_norm": 0.7748726606369019, "learning_rate": 4.01023264918589e-05, "loss": 0.8049, "step": 5520 }, { "epoch": 0.88, "grad_norm": 0.6712713241577148, "learning_rate": 4.008567065758449e-05, "loss": 0.6242, "step": 5525 }, { "epoch": 0.88, "grad_norm": 0.9764819741249084, "learning_rate": 4.006900428624429e-05, "loss": 0.7445, "step": 5530 }, { "epoch": 0.88, "grad_norm": 0.6748983263969421, "learning_rate": 4.005232738947948e-05, "loss": 0.9529, "step": 5535 }, { "epoch": 0.88, "grad_norm": 0.9980877637863159, "learning_rate": 4.003563997893857e-05, "loss": 1.0637, "step": 5540 }, { "epoch": 0.89, "grad_norm": 0.8130014538764954, "learning_rate": 4.0018942066277406e-05, "loss": 0.7479, "step": 5545 }, { "epoch": 0.89, "grad_norm": 0.5256005525588989, "learning_rate": 4.000223366315917e-05, "loss": 0.5274, "step": 5550 }, { "epoch": 0.89, "grad_norm": 0.6466362476348877, "learning_rate": 3.9985514781254385e-05, "loss": 0.7175, "step": 5555 }, { "epoch": 0.89, "grad_norm": 0.8849642276763916, "learning_rate": 3.9968785432240874e-05, "loss": 0.7762, "step": 5560 }, { "epoch": 0.89, "grad_norm": 0.7992335557937622, "learning_rate": 3.9952045627803795e-05, "loss": 0.6103, "step": 5565 }, { "epoch": 0.89, "grad_norm": 0.6376257538795471, "learning_rate": 3.993529537963559e-05, "loss": 0.7441, "step": 5570 }, { "epoch": 0.89, "grad_norm": 0.6867454648017883, "learning_rate": 3.9918534699436e-05, "loss": 0.8815, "step": 5575 }, { "epoch": 0.89, "grad_norm": 0.8116294741630554, "learning_rate": 3.990176359891206e-05, "loss": 0.9572, "step": 5580 }, { "epoch": 0.89, "grad_norm": 0.8836712837219238, "learning_rate": 3.988498208977808e-05, "loss": 0.8463, "step": 5585 }, { "epoch": 0.89, "grad_norm": 0.835573673248291, "learning_rate": 3.9868190183755624e-05, "loss": 0.7096, "step": 5590 }, { "epoch": 0.89, "grad_norm": 0.9869277477264404, "learning_rate": 3.9851387892573545e-05, "loss": 0.9144, "step": 5595 }, { "epoch": 0.89, "grad_norm": 0.6233755350112915, "learning_rate": 3.983457522796793e-05, "loss": 0.8323, "step": 5600 }, { "epoch": 0.89, "eval_loss": 0.719563901424408, "eval_runtime": 96.851, "eval_samples_per_second": 7.197, "eval_steps_per_second": 7.197, "step": 5600 }, { "epoch": 0.89, "grad_norm": 0.6275632977485657, "learning_rate": 3.981775220168212e-05, "loss": 0.751, "step": 5605 }, { "epoch": 
0.9, "grad_norm": 0.7892312407493591, "learning_rate": 3.980091882546669e-05, "loss": 0.932, "step": 5610 }, { "epoch": 0.9, "grad_norm": 0.6969819068908691, "learning_rate": 3.9784075111079456e-05, "loss": 0.7515, "step": 5615 }, { "epoch": 0.9, "grad_norm": 0.7002953290939331, "learning_rate": 3.976722107028544e-05, "loss": 0.8556, "step": 5620 }, { "epoch": 0.9, "grad_norm": 1.1970090866088867, "learning_rate": 3.9750356714856876e-05, "loss": 0.7594, "step": 5625 }, { "epoch": 0.9, "grad_norm": 0.7774887084960938, "learning_rate": 3.9733482056573214e-05, "loss": 0.788, "step": 5630 }, { "epoch": 0.9, "grad_norm": 0.5987717509269714, "learning_rate": 3.971659710722109e-05, "loss": 0.7458, "step": 5635 }, { "epoch": 0.9, "grad_norm": 0.7544295191764832, "learning_rate": 3.969970187859435e-05, "loss": 0.9236, "step": 5640 }, { "epoch": 0.9, "grad_norm": 5.217820644378662, "learning_rate": 3.968279638249398e-05, "loss": 0.7796, "step": 5645 }, { "epoch": 0.9, "grad_norm": 3.211364507675171, "learning_rate": 3.966588063072817e-05, "loss": 0.7593, "step": 5650 }, { "epoch": 0.9, "grad_norm": 1.2978931665420532, "learning_rate": 3.964895463511227e-05, "loss": 0.8755, "step": 5655 }, { "epoch": 0.9, "grad_norm": 0.6411646008491516, "learning_rate": 3.963201840746877e-05, "loss": 0.8999, "step": 5660 }, { "epoch": 0.9, "grad_norm": 0.8105179667472839, "learning_rate": 3.961507195962732e-05, "loss": 0.758, "step": 5665 }, { "epoch": 0.91, "grad_norm": 0.8283591270446777, "learning_rate": 3.959811530342471e-05, "loss": 0.8578, "step": 5670 }, { "epoch": 0.91, "grad_norm": 0.8040729761123657, "learning_rate": 3.958114845070485e-05, "loss": 0.7062, "step": 5675 }, { "epoch": 0.91, "grad_norm": 0.696398138999939, "learning_rate": 3.956417141331878e-05, "loss": 0.8301, "step": 5680 }, { "epoch": 0.91, "grad_norm": 0.7806740403175354, "learning_rate": 3.9547184203124636e-05, "loss": 0.7002, "step": 5685 }, { "epoch": 0.91, "grad_norm": 0.6160992980003357, "learning_rate": 3.9530186831987695e-05, "loss": 0.7433, "step": 5690 }, { "epoch": 0.91, "grad_norm": 0.9394809603691101, "learning_rate": 3.95131793117803e-05, "loss": 0.8878, "step": 5695 }, { "epoch": 0.91, "grad_norm": 0.7936028838157654, "learning_rate": 3.94961616543819e-05, "loss": 0.7312, "step": 5700 }, { "epoch": 0.91, "eval_loss": 0.7196908593177795, "eval_runtime": 96.9199, "eval_samples_per_second": 7.192, "eval_steps_per_second": 7.192, "step": 5700 }, { "epoch": 0.91, "grad_norm": 0.7855328321456909, "learning_rate": 3.947913387167901e-05, "loss": 0.8013, "step": 5705 }, { "epoch": 0.91, "grad_norm": 0.7000160217285156, "learning_rate": 3.9462095975565226e-05, "loss": 0.7631, "step": 5710 }, { "epoch": 0.91, "grad_norm": 0.5747112035751343, "learning_rate": 3.944504797794122e-05, "loss": 0.6641, "step": 5715 }, { "epoch": 0.91, "grad_norm": 0.7016122341156006, "learning_rate": 3.942798989071469e-05, "loss": 0.8245, "step": 5720 }, { "epoch": 0.91, "grad_norm": 0.6330326795578003, "learning_rate": 3.941092172580042e-05, "loss": 0.6604, "step": 5725 }, { "epoch": 0.91, "grad_norm": 0.6938309669494629, "learning_rate": 3.939384349512019e-05, "loss": 0.891, "step": 5730 }, { "epoch": 0.92, "grad_norm": 0.6257404685020447, "learning_rate": 3.937675521060284e-05, "loss": 0.6852, "step": 5735 }, { "epoch": 0.92, "grad_norm": 1.243001937866211, "learning_rate": 3.9359656884184234e-05, "loss": 0.7764, "step": 5740 }, { "epoch": 0.92, "grad_norm": 0.5932753086090088, "learning_rate": 3.9342548527807225e-05, "loss": 0.6175, "step": 5745 }, { 
"epoch": 0.92, "grad_norm": 1.0795527696609497, "learning_rate": 3.9325430153421706e-05, "loss": 0.8249, "step": 5750 }, { "epoch": 0.92, "grad_norm": 0.9690996408462524, "learning_rate": 3.9308301772984535e-05, "loss": 0.7415, "step": 5755 }, { "epoch": 0.92, "grad_norm": 2.7389626502990723, "learning_rate": 3.9291163398459585e-05, "loss": 0.8866, "step": 5760 }, { "epoch": 0.92, "grad_norm": 0.9238413572311401, "learning_rate": 3.927401504181768e-05, "loss": 0.9837, "step": 5765 }, { "epoch": 0.92, "grad_norm": 1.18929922580719, "learning_rate": 3.925685671503665e-05, "loss": 0.7166, "step": 5770 }, { "epoch": 0.92, "grad_norm": 0.717707633972168, "learning_rate": 3.923968843010127e-05, "loss": 0.8331, "step": 5775 }, { "epoch": 0.92, "grad_norm": 0.5547253489494324, "learning_rate": 3.9222510199003276e-05, "loss": 0.5933, "step": 5780 }, { "epoch": 0.92, "grad_norm": 0.73494952917099, "learning_rate": 3.920532203374134e-05, "loss": 0.7895, "step": 5785 }, { "epoch": 0.92, "grad_norm": 0.8860884308815002, "learning_rate": 3.91881239463211e-05, "loss": 0.757, "step": 5790 }, { "epoch": 0.92, "grad_norm": 0.8135414719581604, "learning_rate": 3.917091594875508e-05, "loss": 0.8091, "step": 5795 }, { "epoch": 0.93, "grad_norm": 0.7246249318122864, "learning_rate": 3.915369805306278e-05, "loss": 1.0, "step": 5800 }, { "epoch": 0.93, "eval_loss": 0.7181897163391113, "eval_runtime": 96.9106, "eval_samples_per_second": 7.192, "eval_steps_per_second": 7.192, "step": 5800 }, { "epoch": 0.93, "grad_norm": 0.6413865089416504, "learning_rate": 3.913647027127057e-05, "loss": 0.8138, "step": 5805 }, { "epoch": 0.93, "grad_norm": 0.7799732089042664, "learning_rate": 3.9119232615411724e-05, "loss": 0.74, "step": 5810 }, { "epoch": 0.93, "grad_norm": 0.5068621635437012, "learning_rate": 3.910198509752647e-05, "loss": 0.6272, "step": 5815 }, { "epoch": 0.93, "grad_norm": 0.6522389054298401, "learning_rate": 3.9084727729661864e-05, "loss": 0.747, "step": 5820 }, { "epoch": 0.93, "grad_norm": 0.615208625793457, "learning_rate": 3.9067460523871855e-05, "loss": 1.0008, "step": 5825 }, { "epoch": 0.93, "grad_norm": 0.7739670872688293, "learning_rate": 3.9050183492217284e-05, "loss": 0.959, "step": 5830 }, { "epoch": 0.93, "grad_norm": 0.7364481687545776, "learning_rate": 3.9032896646765837e-05, "loss": 0.7106, "step": 5835 }, { "epoch": 0.93, "grad_norm": 0.9992987513542175, "learning_rate": 3.901559999959207e-05, "loss": 0.6638, "step": 5840 }, { "epoch": 0.93, "grad_norm": 0.9391655921936035, "learning_rate": 3.8998293562777353e-05, "loss": 0.8529, "step": 5845 }, { "epoch": 0.93, "grad_norm": 0.5818854570388794, "learning_rate": 3.8980977348409936e-05, "loss": 0.8186, "step": 5850 }, { "epoch": 0.93, "grad_norm": 0.7900370359420776, "learning_rate": 3.8963651368584876e-05, "loss": 0.6693, "step": 5855 }, { "epoch": 0.94, "grad_norm": 0.6716821193695068, "learning_rate": 3.8946315635404054e-05, "loss": 0.7926, "step": 5860 }, { "epoch": 0.94, "grad_norm": 0.6300959587097168, "learning_rate": 3.892897016097615e-05, "loss": 0.8177, "step": 5865 }, { "epoch": 0.94, "grad_norm": 0.5477313995361328, "learning_rate": 3.8911614957416675e-05, "loss": 0.6463, "step": 5870 }, { "epoch": 0.94, "grad_norm": 0.8673714995384216, "learning_rate": 3.889425003684793e-05, "loss": 0.7662, "step": 5875 }, { "epoch": 0.94, "grad_norm": 0.9277421236038208, "learning_rate": 3.8876875411398975e-05, "loss": 0.7654, "step": 5880 }, { "epoch": 0.94, "grad_norm": 1.111858606338501, "learning_rate": 3.885949109320567e-05, "loss": 0.7782, 
"step": 5885 }, { "epoch": 0.94, "grad_norm": 0.8440257906913757, "learning_rate": 3.884209709441066e-05, "loss": 0.7054, "step": 5890 }, { "epoch": 0.94, "grad_norm": 0.8476880192756653, "learning_rate": 3.882469342716333e-05, "loss": 0.7382, "step": 5895 }, { "epoch": 0.94, "grad_norm": 0.5822775959968567, "learning_rate": 3.8807280103619826e-05, "loss": 0.6164, "step": 5900 }, { "epoch": 0.94, "eval_loss": 0.7177068591117859, "eval_runtime": 96.8009, "eval_samples_per_second": 7.2, "eval_steps_per_second": 7.2, "step": 5900 }, { "epoch": 0.94, "grad_norm": 0.6871796250343323, "learning_rate": 3.878985713594302e-05, "loss": 0.6928, "step": 5905 }, { "epoch": 0.94, "grad_norm": 0.7812449932098389, "learning_rate": 3.8772424536302564e-05, "loss": 0.8246, "step": 5910 }, { "epoch": 0.94, "grad_norm": 0.8643015623092651, "learning_rate": 3.87549823168748e-05, "loss": 0.6893, "step": 5915 }, { "epoch": 0.94, "grad_norm": 0.6011418104171753, "learning_rate": 3.873753048984278e-05, "loss": 0.5402, "step": 5920 }, { "epoch": 0.95, "grad_norm": 0.6913355588912964, "learning_rate": 3.8720069067396315e-05, "loss": 0.6997, "step": 5925 }, { "epoch": 0.95, "grad_norm": 0.840412437915802, "learning_rate": 3.870259806173188e-05, "loss": 0.7524, "step": 5930 }, { "epoch": 0.95, "grad_norm": 0.7205111980438232, "learning_rate": 3.8685117485052645e-05, "loss": 0.7316, "step": 5935 }, { "epoch": 0.95, "grad_norm": 0.5926095843315125, "learning_rate": 3.8667627349568494e-05, "loss": 0.6616, "step": 5940 }, { "epoch": 0.95, "grad_norm": 0.8431223630905151, "learning_rate": 3.865012766749595e-05, "loss": 0.9906, "step": 5945 }, { "epoch": 0.95, "grad_norm": 0.8787121176719666, "learning_rate": 3.863261845105822e-05, "loss": 0.7688, "step": 5950 }, { "epoch": 0.95, "grad_norm": 0.5568470358848572, "learning_rate": 3.861509971248519e-05, "loss": 0.7319, "step": 5955 }, { "epoch": 0.95, "grad_norm": 0.6905276775360107, "learning_rate": 3.859757146401336e-05, "loss": 0.8506, "step": 5960 }, { "epoch": 0.95, "grad_norm": 0.733594536781311, "learning_rate": 3.858003371788591e-05, "loss": 0.65, "step": 5965 }, { "epoch": 0.95, "grad_norm": 0.9627974033355713, "learning_rate": 3.8562486486352614e-05, "loss": 1.0121, "step": 5970 }, { "epoch": 0.95, "grad_norm": 0.7356416583061218, "learning_rate": 3.854492978166991e-05, "loss": 0.698, "step": 5975 }, { "epoch": 0.95, "grad_norm": 0.6784470081329346, "learning_rate": 3.8527363616100844e-05, "loss": 0.6361, "step": 5980 }, { "epoch": 0.96, "grad_norm": 0.8226351141929626, "learning_rate": 3.8509788001915035e-05, "loss": 0.7528, "step": 5985 }, { "epoch": 0.96, "grad_norm": 1.026175618171692, "learning_rate": 3.8492202951388744e-05, "loss": 1.0142, "step": 5990 }, { "epoch": 0.96, "grad_norm": 0.860149085521698, "learning_rate": 3.847460847680482e-05, "loss": 0.6081, "step": 5995 }, { "epoch": 0.96, "grad_norm": 0.7153595089912415, "learning_rate": 3.845700459045266e-05, "loss": 0.7484, "step": 6000 }, { "epoch": 0.96, "eval_loss": 0.7146974802017212, "eval_runtime": 96.5929, "eval_samples_per_second": 7.216, "eval_steps_per_second": 7.216, "step": 6000 }, { "epoch": 0.96, "grad_norm": 0.8637629151344299, "learning_rate": 3.843939130462827e-05, "loss": 0.9717, "step": 6005 }, { "epoch": 0.96, "grad_norm": 0.39873212575912476, "learning_rate": 3.84217686316342e-05, "loss": 0.64, "step": 6010 }, { "epoch": 0.96, "grad_norm": 0.5839079022407532, "learning_rate": 3.8404136583779585e-05, "loss": 0.8288, "step": 6015 }, { "epoch": 0.96, "grad_norm": 1.2063088417053223, 
"learning_rate": 3.8386495173380064e-05, "loss": 0.7722, "step": 6020 }, { "epoch": 0.96, "grad_norm": 0.6402775049209595, "learning_rate": 3.836884441275786e-05, "loss": 0.7645, "step": 6025 }, { "epoch": 0.96, "grad_norm": 0.8476734161376953, "learning_rate": 3.83511843142417e-05, "loss": 0.6249, "step": 6030 }, { "epoch": 0.96, "grad_norm": 0.7386351823806763, "learning_rate": 3.833351489016684e-05, "loss": 0.6188, "step": 6035 }, { "epoch": 0.96, "grad_norm": 0.661152184009552, "learning_rate": 3.831583615287504e-05, "loss": 0.9277, "step": 6040 }, { "epoch": 0.96, "grad_norm": 0.6577073335647583, "learning_rate": 3.82981481147146e-05, "loss": 0.7678, "step": 6045 }, { "epoch": 0.97, "grad_norm": 0.9322777986526489, "learning_rate": 3.828045078804027e-05, "loss": 0.6959, "step": 6050 }, { "epoch": 0.97, "grad_norm": 1.2478550672531128, "learning_rate": 3.826274418521332e-05, "loss": 0.9087, "step": 6055 }, { "epoch": 0.97, "grad_norm": 0.5843325257301331, "learning_rate": 3.824502831860148e-05, "loss": 0.5186, "step": 6060 }, { "epoch": 0.97, "grad_norm": 0.4977940618991852, "learning_rate": 3.822730320057898e-05, "loss": 0.7431, "step": 6065 }, { "epoch": 0.97, "grad_norm": 0.7447279691696167, "learning_rate": 3.8209568843526475e-05, "loss": 0.7302, "step": 6070 }, { "epoch": 0.97, "grad_norm": 0.5054610967636108, "learning_rate": 3.8191825259831095e-05, "loss": 0.7878, "step": 6075 }, { "epoch": 0.97, "grad_norm": 1.2159337997436523, "learning_rate": 3.8174072461886396e-05, "loss": 0.663, "step": 6080 }, { "epoch": 0.97, "grad_norm": 0.8180432915687561, "learning_rate": 3.815631046209241e-05, "loss": 0.7184, "step": 6085 }, { "epoch": 0.97, "grad_norm": 0.8640487790107727, "learning_rate": 3.8138539272855544e-05, "loss": 0.7011, "step": 6090 }, { "epoch": 0.97, "grad_norm": 0.569340705871582, "learning_rate": 3.812075890658867e-05, "loss": 0.734, "step": 6095 }, { "epoch": 0.97, "grad_norm": 0.774334728717804, "learning_rate": 3.810296937571103e-05, "loss": 0.7924, "step": 6100 }, { "epoch": 0.97, "eval_loss": 0.7144004106521606, "eval_runtime": 96.5311, "eval_samples_per_second": 7.22, "eval_steps_per_second": 7.22, "step": 6100 }, { "epoch": 0.97, "grad_norm": 0.6256681680679321, "learning_rate": 3.80851706926483e-05, "loss": 0.6543, "step": 6105 }, { "epoch": 0.98, "grad_norm": 0.6679534912109375, "learning_rate": 3.806736286983254e-05, "loss": 0.6674, "step": 6110 }, { "epoch": 0.98, "grad_norm": 0.696412980556488, "learning_rate": 3.804954591970218e-05, "loss": 0.8335, "step": 6115 }, { "epoch": 0.98, "grad_norm": 0.8444018363952637, "learning_rate": 3.803171985470203e-05, "loss": 0.7172, "step": 6120 }, { "epoch": 0.98, "grad_norm": 0.8965733051300049, "learning_rate": 3.8013884687283274e-05, "loss": 1.0342, "step": 6125 }, { "epoch": 0.98, "grad_norm": 0.7367607355117798, "learning_rate": 3.799604042990347e-05, "loss": 0.6862, "step": 6130 }, { "epoch": 0.98, "grad_norm": 0.8707432746887207, "learning_rate": 3.797818709502647e-05, "loss": 0.8098, "step": 6135 }, { "epoch": 0.98, "grad_norm": 0.5112609267234802, "learning_rate": 3.7960324695122544e-05, "loss": 0.5907, "step": 6140 }, { "epoch": 0.98, "grad_norm": 0.39674660563468933, "learning_rate": 3.794245324266823e-05, "loss": 0.6887, "step": 6145 }, { "epoch": 0.98, "grad_norm": 0.7135210037231445, "learning_rate": 3.79245727501464e-05, "loss": 0.7683, "step": 6150 }, { "epoch": 0.98, "grad_norm": 0.6788827776908875, "learning_rate": 3.790668323004628e-05, "loss": 0.9064, "step": 6155 }, { "epoch": 0.98, "grad_norm": 
0.5234334468841553, "learning_rate": 3.7888784694863355e-05, "loss": 0.7373, "step": 6160 }, { "epoch": 0.98, "grad_norm": 0.6748809218406677, "learning_rate": 3.7870877157099425e-05, "loss": 0.6998, "step": 6165 }, { "epoch": 0.98, "grad_norm": 0.7746409773826599, "learning_rate": 3.785296062926259e-05, "loss": 0.7789, "step": 6170 }, { "epoch": 0.99, "grad_norm": 0.7480994462966919, "learning_rate": 3.783503512386721e-05, "loss": 0.7114, "step": 6175 }, { "epoch": 0.99, "grad_norm": 1.2271349430084229, "learning_rate": 3.781710065343392e-05, "loss": 0.7294, "step": 6180 }, { "epoch": 0.99, "grad_norm": 0.7802749872207642, "learning_rate": 3.779915723048963e-05, "loss": 0.8348, "step": 6185 }, { "epoch": 0.99, "grad_norm": 0.6718894243240356, "learning_rate": 3.778120486756749e-05, "loss": 0.7428, "step": 6190 }, { "epoch": 0.99, "grad_norm": 0.7281386852264404, "learning_rate": 3.776324357720688e-05, "loss": 0.8076, "step": 6195 }, { "epoch": 0.99, "grad_norm": 0.7086895704269409, "learning_rate": 3.7745273371953464e-05, "loss": 0.9389, "step": 6200 }, { "epoch": 0.99, "eval_loss": 0.7145183682441711, "eval_runtime": 96.6703, "eval_samples_per_second": 7.21, "eval_steps_per_second": 7.21, "step": 6200 }, { "epoch": 0.99, "grad_norm": 0.8768818974494934, "learning_rate": 3.7727294264359095e-05, "loss": 0.8837, "step": 6205 }, { "epoch": 0.99, "grad_norm": 0.5558096170425415, "learning_rate": 3.7709306266981835e-05, "loss": 0.6894, "step": 6210 }, { "epoch": 0.99, "grad_norm": 0.7635754942893982, "learning_rate": 3.769130939238598e-05, "loss": 0.7869, "step": 6215 }, { "epoch": 0.99, "grad_norm": 0.8129850625991821, "learning_rate": 3.7673303653142036e-05, "loss": 0.8049, "step": 6220 }, { "epoch": 0.99, "grad_norm": 0.7782718539237976, "learning_rate": 3.765528906182666e-05, "loss": 1.1669, "step": 6225 }, { "epoch": 0.99, "grad_norm": 0.8600115180015564, "learning_rate": 3.763726563102272e-05, "loss": 0.7453, "step": 6230 }, { "epoch": 1.0, "grad_norm": 0.6311535239219666, "learning_rate": 3.761923337331926e-05, "loss": 0.7537, "step": 6235 }, { "epoch": 1.0, "grad_norm": 0.7079928517341614, "learning_rate": 3.7601192301311486e-05, "loss": 0.8818, "step": 6240 }, { "epoch": 1.0, "grad_norm": 0.6088127493858337, "learning_rate": 3.7583142427600735e-05, "loss": 0.8239, "step": 6245 }, { "epoch": 1.0, "grad_norm": 0.9693393707275391, "learning_rate": 3.756508376479455e-05, "loss": 0.4772, "step": 6250 }, { "epoch": 1.0, "grad_norm": 0.9866142868995667, "learning_rate": 3.754701632550656e-05, "loss": 0.9703, "step": 6255 }, { "epoch": 1.0, "grad_norm": 0.7974732518196106, "learning_rate": 3.7528940122356534e-05, "loss": 0.7359, "step": 6260 }, { "epoch": 1.0, "grad_norm": 0.7444310188293457, "learning_rate": 3.751085516797038e-05, "loss": 0.6785, "step": 6265 }, { "epoch": 1.0, "grad_norm": 0.586471438407898, "learning_rate": 3.7492761474980096e-05, "loss": 0.6079, "step": 6270 }, { "epoch": 1.0, "grad_norm": 0.7271062731742859, "learning_rate": 3.747465905602382e-05, "loss": 0.7284, "step": 6275 }, { "epoch": 1.0, "grad_norm": 0.7004055380821228, "learning_rate": 3.7456547923745745e-05, "loss": 0.7364, "step": 6280 }, { "epoch": 1.0, "grad_norm": 0.7666735053062439, "learning_rate": 3.7438428090796166e-05, "loss": 1.0261, "step": 6285 }, { "epoch": 1.0, "grad_norm": 0.7594092488288879, "learning_rate": 3.742029956983146e-05, "loss": 0.7293, "step": 6290 }, { "epoch": 1.0, "grad_norm": 0.5505672097206116, "learning_rate": 3.7402162373514074e-05, "loss": 1.1323, "step": 6295 }, { "epoch": 
1.01, "grad_norm": 0.6808300018310547, "learning_rate": 3.73840165145125e-05, "loss": 0.7108, "step": 6300 }, { "epoch": 1.01, "eval_loss": 0.7136371731758118, "eval_runtime": 96.6091, "eval_samples_per_second": 7.215, "eval_steps_per_second": 7.215, "step": 6300 }, { "epoch": 1.01, "grad_norm": 1.147871494293213, "learning_rate": 3.736586200550129e-05, "loss": 1.0234, "step": 6305 }, { "epoch": 1.01, "grad_norm": 0.7230010628700256, "learning_rate": 3.734769885916104e-05, "loss": 0.6727, "step": 6310 }, { "epoch": 1.01, "grad_norm": 0.5923539400100708, "learning_rate": 3.7329527088178376e-05, "loss": 0.5398, "step": 6315 }, { "epoch": 1.01, "grad_norm": 1.5129786729812622, "learning_rate": 3.731134670524594e-05, "loss": 0.8927, "step": 6320 }, { "epoch": 1.01, "grad_norm": 0.6973879933357239, "learning_rate": 3.7293157723062425e-05, "loss": 0.5394, "step": 6325 }, { "epoch": 1.01, "grad_norm": 0.6135405898094177, "learning_rate": 3.727496015433246e-05, "loss": 0.6949, "step": 6330 }, { "epoch": 1.01, "grad_norm": 0.7291958928108215, "learning_rate": 3.725675401176675e-05, "loss": 0.7333, "step": 6335 }, { "epoch": 1.01, "grad_norm": 0.8721144795417786, "learning_rate": 3.7238539308081956e-05, "loss": 0.7178, "step": 6340 }, { "epoch": 1.01, "grad_norm": 0.6904857754707336, "learning_rate": 3.722031605600069e-05, "loss": 0.5423, "step": 6345 }, { "epoch": 1.01, "grad_norm": 0.6890765428543091, "learning_rate": 3.720208426825157e-05, "loss": 0.6649, "step": 6350 }, { "epoch": 1.01, "grad_norm": 0.7029244899749756, "learning_rate": 3.7183843957569174e-05, "loss": 0.6566, "step": 6355 }, { "epoch": 1.02, "grad_norm": 0.7533852458000183, "learning_rate": 3.716559513669403e-05, "loss": 0.6503, "step": 6360 }, { "epoch": 1.02, "grad_norm": 0.8862756490707397, "learning_rate": 3.7147337818372595e-05, "loss": 0.6742, "step": 6365 }, { "epoch": 1.02, "grad_norm": 0.6946579217910767, "learning_rate": 3.712907201535728e-05, "loss": 0.6658, "step": 6370 }, { "epoch": 1.02, "grad_norm": 0.7204101085662842, "learning_rate": 3.711079774040641e-05, "loss": 0.7723, "step": 6375 }, { "epoch": 1.02, "grad_norm": 0.5266463756561279, "learning_rate": 3.7092515006284245e-05, "loss": 0.5676, "step": 6380 }, { "epoch": 1.02, "grad_norm": 0.7050403952598572, "learning_rate": 3.707422382576092e-05, "loss": 0.6753, "step": 6385 }, { "epoch": 1.02, "grad_norm": 0.6561117768287659, "learning_rate": 3.705592421161251e-05, "loss": 0.6988, "step": 6390 }, { "epoch": 1.02, "grad_norm": 0.5257726311683655, "learning_rate": 3.7037616176620955e-05, "loss": 0.6265, "step": 6395 }, { "epoch": 1.02, "grad_norm": 0.6152855753898621, "learning_rate": 3.7019299733574074e-05, "loss": 0.8076, "step": 6400 }, { "epoch": 1.02, "eval_loss": 0.7153558731079102, "eval_runtime": 96.493, "eval_samples_per_second": 7.223, "eval_steps_per_second": 7.223, "step": 6400 }, { "epoch": 1.02, "grad_norm": 0.9646801948547363, "learning_rate": 3.700097489526559e-05, "loss": 0.8186, "step": 6405 }, { "epoch": 1.02, "grad_norm": 0.8193671703338623, "learning_rate": 3.6982641674495046e-05, "loss": 0.5955, "step": 6410 }, { "epoch": 1.02, "grad_norm": 0.5945185422897339, "learning_rate": 3.696430008406788e-05, "loss": 0.7449, "step": 6415 }, { "epoch": 1.02, "grad_norm": 0.8218982219696045, "learning_rate": 3.6945950136795345e-05, "loss": 0.6192, "step": 6420 }, { "epoch": 1.03, "grad_norm": 0.723145604133606, "learning_rate": 3.692759184549455e-05, "loss": 0.7703, "step": 6425 }, { "epoch": 1.03, "grad_norm": 0.8936192989349365, "learning_rate": 
3.690922522298843e-05, "loss": 0.6683, "step": 6430 }, { "epoch": 1.03, "grad_norm": 0.6989235281944275, "learning_rate": 3.6890850282105725e-05, "loss": 0.5886, "step": 6435 }, { "epoch": 1.03, "grad_norm": 0.9722846150398254, "learning_rate": 3.687246703568101e-05, "loss": 0.6229, "step": 6440 }, { "epoch": 1.03, "grad_norm": 0.8043028116226196, "learning_rate": 3.6854075496554636e-05, "loss": 0.8865, "step": 6445 }, { "epoch": 1.03, "grad_norm": 0.7527907490730286, "learning_rate": 3.683567567757276e-05, "loss": 0.6773, "step": 6450 }, { "epoch": 1.03, "grad_norm": 0.6083170175552368, "learning_rate": 3.681726759158733e-05, "loss": 0.6311, "step": 6455 }, { "epoch": 1.03, "grad_norm": 0.7950381636619568, "learning_rate": 3.679885125145605e-05, "loss": 0.8398, "step": 6460 }, { "epoch": 1.03, "grad_norm": 0.8302904367446899, "learning_rate": 3.678042667004239e-05, "loss": 0.4981, "step": 6465 }, { "epoch": 1.03, "grad_norm": 0.7814335227012634, "learning_rate": 3.67619938602156e-05, "loss": 0.8345, "step": 6470 }, { "epoch": 1.03, "grad_norm": 1.1472254991531372, "learning_rate": 3.6743552834850656e-05, "loss": 0.7503, "step": 6475 }, { "epoch": 1.03, "grad_norm": 0.7045092582702637, "learning_rate": 3.672510360682827e-05, "loss": 0.9974, "step": 6480 }, { "epoch": 1.04, "grad_norm": 0.6515605449676514, "learning_rate": 3.670664618903489e-05, "loss": 0.7965, "step": 6485 }, { "epoch": 1.04, "grad_norm": 0.6281563639640808, "learning_rate": 3.66881805943627e-05, "loss": 0.6854, "step": 6490 }, { "epoch": 1.04, "grad_norm": 0.6973702907562256, "learning_rate": 3.666970683570957e-05, "loss": 0.6939, "step": 6495 }, { "epoch": 1.04, "grad_norm": 0.7053531408309937, "learning_rate": 3.6651224925979086e-05, "loss": 0.7232, "step": 6500 }, { "epoch": 1.04, "eval_loss": 0.7147144079208374, "eval_runtime": 96.4093, "eval_samples_per_second": 7.23, "eval_steps_per_second": 7.23, "step": 6500 }, { "epoch": 1.04, "grad_norm": 1.0588470697402954, "learning_rate": 3.663273487808052e-05, "loss": 0.7061, "step": 6505 }, { "epoch": 1.04, "grad_norm": 0.7132044434547424, "learning_rate": 3.661423670492885e-05, "loss": 1.0585, "step": 6510 }, { "epoch": 1.04, "grad_norm": 0.7809985280036926, "learning_rate": 3.65957304194447e-05, "loss": 0.7325, "step": 6515 }, { "epoch": 1.04, "grad_norm": 0.9116477966308594, "learning_rate": 3.6577216034554373e-05, "loss": 0.6707, "step": 6520 }, { "epoch": 1.04, "grad_norm": 0.6966428160667419, "learning_rate": 3.655869356318982e-05, "loss": 0.7778, "step": 6525 }, { "epoch": 1.04, "grad_norm": 0.5731261968612671, "learning_rate": 3.654016301828867e-05, "loss": 0.7336, "step": 6530 }, { "epoch": 1.04, "grad_norm": 0.6520384550094604, "learning_rate": 3.6521624412794167e-05, "loss": 0.6992, "step": 6535 }, { "epoch": 1.04, "grad_norm": 0.7069156169891357, "learning_rate": 3.6503077759655166e-05, "loss": 0.608, "step": 6540 }, { "epoch": 1.04, "grad_norm": 0.7179171442985535, "learning_rate": 3.648452307182618e-05, "loss": 0.6944, "step": 6545 }, { "epoch": 1.05, "grad_norm": 0.7859477996826172, "learning_rate": 3.6465960362267335e-05, "loss": 0.6541, "step": 6550 }, { "epoch": 1.05, "grad_norm": 0.8490133881568909, "learning_rate": 3.6447389643944326e-05, "loss": 0.6887, "step": 6555 }, { "epoch": 1.05, "grad_norm": 0.6254005432128906, "learning_rate": 3.6428810929828466e-05, "loss": 0.6112, "step": 6560 }, { "epoch": 1.05, "grad_norm": 0.42452389001846313, "learning_rate": 3.6410224232896654e-05, "loss": 0.9964, "step": 6565 }, { "epoch": 1.05, "grad_norm": 
1.1462258100509644, "learning_rate": 3.639162956613135e-05, "loss": 0.896, "step": 6570 }, { "epoch": 1.05, "grad_norm": 0.9457197785377502, "learning_rate": 3.637302694252058e-05, "loss": 0.8939, "step": 6575 }, { "epoch": 1.05, "grad_norm": 0.888251006603241, "learning_rate": 3.6354416375057965e-05, "loss": 0.5634, "step": 6580 }, { "epoch": 1.05, "grad_norm": 0.6899970173835754, "learning_rate": 3.6335797876742625e-05, "loss": 0.814, "step": 6585 }, { "epoch": 1.05, "grad_norm": 0.56333327293396, "learning_rate": 3.631717146057925e-05, "loss": 0.5733, "step": 6590 }, { "epoch": 1.05, "grad_norm": 0.9747548699378967, "learning_rate": 3.629853713957806e-05, "loss": 0.7239, "step": 6595 }, { "epoch": 1.05, "grad_norm": 0.6559699177742004, "learning_rate": 3.627989492675477e-05, "loss": 0.6456, "step": 6600 }, { "epoch": 1.05, "eval_loss": 0.7122019529342651, "eval_runtime": 96.4526, "eval_samples_per_second": 7.226, "eval_steps_per_second": 7.226, "step": 6600 }, { "epoch": 1.05, "grad_norm": 0.7743479609489441, "learning_rate": 3.626124483513063e-05, "loss": 0.8055, "step": 6605 }, { "epoch": 1.06, "grad_norm": 0.6561269164085388, "learning_rate": 3.6242586877732395e-05, "loss": 0.7325, "step": 6610 }, { "epoch": 1.06, "grad_norm": 0.6919766664505005, "learning_rate": 3.6223921067592316e-05, "loss": 0.6057, "step": 6615 }, { "epoch": 1.06, "grad_norm": 0.7156655192375183, "learning_rate": 3.6205247417748103e-05, "loss": 0.9774, "step": 6620 }, { "epoch": 1.06, "grad_norm": 0.8031120896339417, "learning_rate": 3.618656594124297e-05, "loss": 0.8455, "step": 6625 }, { "epoch": 1.06, "grad_norm": 0.9751262664794922, "learning_rate": 3.616787665112558e-05, "loss": 0.7065, "step": 6630 }, { "epoch": 1.06, "grad_norm": 1.6306493282318115, "learning_rate": 3.6149179560450075e-05, "loss": 0.6623, "step": 6635 }, { "epoch": 1.06, "grad_norm": 0.7967116832733154, "learning_rate": 3.613047468227601e-05, "loss": 0.755, "step": 6640 }, { "epoch": 1.06, "grad_norm": 1.1591620445251465, "learning_rate": 3.6111762029668425e-05, "loss": 0.7066, "step": 6645 }, { "epoch": 1.06, "grad_norm": 0.8396896123886108, "learning_rate": 3.609304161569776e-05, "loss": 0.6199, "step": 6650 }, { "epoch": 1.06, "grad_norm": 0.650834858417511, "learning_rate": 3.607431345343987e-05, "loss": 0.7941, "step": 6655 }, { "epoch": 1.06, "grad_norm": 1.0054168701171875, "learning_rate": 3.6055577555976046e-05, "loss": 0.6738, "step": 6660 }, { "epoch": 1.06, "grad_norm": 0.663850724697113, "learning_rate": 3.603683393639297e-05, "loss": 0.7506, "step": 6665 }, { "epoch": 1.06, "grad_norm": 1.1505427360534668, "learning_rate": 3.6018082607782715e-05, "loss": 0.9257, "step": 6670 }, { "epoch": 1.07, "grad_norm": 0.731639564037323, "learning_rate": 3.599932358324274e-05, "loss": 0.7686, "step": 6675 }, { "epoch": 1.07, "grad_norm": 0.7264305949211121, "learning_rate": 3.598055687587588e-05, "loss": 0.777, "step": 6680 }, { "epoch": 1.07, "grad_norm": 0.6651543378829956, "learning_rate": 3.5961782498790356e-05, "loss": 0.6417, "step": 6685 }, { "epoch": 1.07, "grad_norm": 0.6642833948135376, "learning_rate": 3.594300046509971e-05, "loss": 0.7173, "step": 6690 }, { "epoch": 1.07, "grad_norm": 0.7369661927223206, "learning_rate": 3.592421078792285e-05, "loss": 0.6174, "step": 6695 }, { "epoch": 1.07, "grad_norm": 0.6418754458427429, "learning_rate": 3.590541348038403e-05, "loss": 0.5862, "step": 6700 }, { "epoch": 1.07, "eval_loss": 0.713315486907959, "eval_runtime": 96.4112, "eval_samples_per_second": 7.229, "eval_steps_per_second": 
7.229, "step": 6700 }, { "epoch": 1.07, "grad_norm": 0.8054782152175903, "learning_rate": 3.588660855561282e-05, "loss": 0.9726, "step": 6705 }, { "epoch": 1.07, "grad_norm": 0.6561216115951538, "learning_rate": 3.586779602674413e-05, "loss": 0.7284, "step": 6710 }, { "epoch": 1.07, "grad_norm": 0.7178167700767517, "learning_rate": 3.584897590691815e-05, "loss": 0.7098, "step": 6715 }, { "epoch": 1.07, "grad_norm": 0.6914810538291931, "learning_rate": 3.58301482092804e-05, "loss": 0.6683, "step": 6720 }, { "epoch": 1.07, "grad_norm": 0.605938196182251, "learning_rate": 3.5811312946981676e-05, "loss": 0.7859, "step": 6725 }, { "epoch": 1.07, "grad_norm": 0.7012625336647034, "learning_rate": 3.579247013317809e-05, "loss": 0.5396, "step": 6730 }, { "epoch": 1.08, "grad_norm": 0.8261893391609192, "learning_rate": 3.5773619781030965e-05, "loss": 0.7595, "step": 6735 }, { "epoch": 1.08, "grad_norm": 0.7922738194465637, "learning_rate": 3.5754761903706965e-05, "loss": 0.6672, "step": 6740 }, { "epoch": 1.08, "grad_norm": 0.5855022668838501, "learning_rate": 3.573589651437795e-05, "loss": 0.8951, "step": 6745 }, { "epoch": 1.08, "grad_norm": 1.1443228721618652, "learning_rate": 3.571702362622106e-05, "loss": 0.834, "step": 6750 }, { "epoch": 1.08, "grad_norm": 1.4697206020355225, "learning_rate": 3.5698143252418645e-05, "loss": 0.7024, "step": 6755 }, { "epoch": 1.08, "grad_norm": 0.629658043384552, "learning_rate": 3.567925540615833e-05, "loss": 0.7174, "step": 6760 }, { "epoch": 1.08, "grad_norm": 1.3336237668991089, "learning_rate": 3.5660360100632914e-05, "loss": 0.7339, "step": 6765 }, { "epoch": 1.08, "grad_norm": 0.8946998715400696, "learning_rate": 3.564145734904044e-05, "loss": 0.7148, "step": 6770 }, { "epoch": 1.08, "grad_norm": 1.2121728658676147, "learning_rate": 3.562254716458411e-05, "loss": 0.9227, "step": 6775 }, { "epoch": 1.08, "grad_norm": 0.6504545211791992, "learning_rate": 3.560362956047235e-05, "loss": 0.7391, "step": 6780 }, { "epoch": 1.08, "grad_norm": 0.6672701239585876, "learning_rate": 3.5584704549918776e-05, "loss": 0.6783, "step": 6785 }, { "epoch": 1.08, "grad_norm": 0.8841736316680908, "learning_rate": 3.556577214614216e-05, "loss": 0.6379, "step": 6790 }, { "epoch": 1.08, "grad_norm": 0.7543352842330933, "learning_rate": 3.554683236236642e-05, "loss": 0.921, "step": 6795 }, { "epoch": 1.09, "grad_norm": 0.6553550958633423, "learning_rate": 3.552788521182067e-05, "loss": 0.6935, "step": 6800 }, { "epoch": 1.09, "eval_loss": 0.7111696600914001, "eval_runtime": 96.419, "eval_samples_per_second": 7.229, "eval_steps_per_second": 7.229, "step": 6800 }, { "epoch": 1.09, "grad_norm": 0.7093417644500732, "learning_rate": 3.550893070773914e-05, "loss": 0.5619, "step": 6805 }, { "epoch": 1.09, "grad_norm": 0.6494777798652649, "learning_rate": 3.5489968863361215e-05, "loss": 0.7891, "step": 6810 }, { "epoch": 1.09, "grad_norm": 0.6380666494369507, "learning_rate": 3.547099969193137e-05, "loss": 0.6761, "step": 6815 }, { "epoch": 1.09, "grad_norm": 0.7495108842849731, "learning_rate": 3.545202320669927e-05, "loss": 0.69, "step": 6820 }, { "epoch": 1.09, "grad_norm": 0.9093177914619446, "learning_rate": 3.543303942091961e-05, "loss": 0.7455, "step": 6825 }, { "epoch": 1.09, "grad_norm": 0.7216375470161438, "learning_rate": 3.541404834785222e-05, "loss": 0.8079, "step": 6830 }, { "epoch": 1.09, "grad_norm": 1.1701329946517944, "learning_rate": 3.539505000076203e-05, "loss": 1.0064, "step": 6835 }, { "epoch": 1.09, "grad_norm": 0.6244146227836609, "learning_rate": 
3.5376044392919037e-05, "loss": 0.8856, "step": 6840 }, { "epoch": 1.09, "grad_norm": 0.7619176506996155, "learning_rate": 3.535703153759832e-05, "loss": 0.615, "step": 6845 }, { "epoch": 1.09, "grad_norm": 0.5909354090690613, "learning_rate": 3.533801144807998e-05, "loss": 0.6205, "step": 6850 }, { "epoch": 1.09, "grad_norm": 0.5859089493751526, "learning_rate": 3.531898413764923e-05, "loss": 0.6251, "step": 6855 }, { "epoch": 1.09, "grad_norm": 0.7175759673118591, "learning_rate": 3.52999496195963e-05, "loss": 0.8056, "step": 6860 }, { "epoch": 1.1, "grad_norm": 0.8045479655265808, "learning_rate": 3.528090790721643e-05, "loss": 0.6239, "step": 6865 }, { "epoch": 1.1, "grad_norm": 0.5341171622276306, "learning_rate": 3.5261859013809934e-05, "loss": 0.5879, "step": 6870 }, { "epoch": 1.1, "grad_norm": 0.7800705432891846, "learning_rate": 3.5242802952682106e-05, "loss": 0.7499, "step": 6875 }, { "epoch": 1.1, "grad_norm": 0.6275398135185242, "learning_rate": 3.5223739737143256e-05, "loss": 0.578, "step": 6880 }, { "epoch": 1.1, "grad_norm": 1.110107660293579, "learning_rate": 3.52046693805087e-05, "loss": 0.7843, "step": 6885 }, { "epoch": 1.1, "grad_norm": 0.6027718782424927, "learning_rate": 3.5185591896098726e-05, "loss": 0.5948, "step": 6890 }, { "epoch": 1.1, "grad_norm": 0.8754519820213318, "learning_rate": 3.5166507297238615e-05, "loss": 0.6245, "step": 6895 }, { "epoch": 1.1, "grad_norm": 1.006706953048706, "learning_rate": 3.51474155972586e-05, "loss": 0.7522, "step": 6900 }, { "epoch": 1.1, "eval_loss": 0.7102616429328918, "eval_runtime": 96.4458, "eval_samples_per_second": 7.227, "eval_steps_per_second": 7.227, "step": 6900 }, { "epoch": 1.1, "grad_norm": 0.7887657880783081, "learning_rate": 3.51283168094939e-05, "loss": 0.8036, "step": 6905 }, { "epoch": 1.1, "grad_norm": 0.7712429761886597, "learning_rate": 3.5109210947284656e-05, "loss": 0.7136, "step": 6910 }, { "epoch": 1.1, "grad_norm": 0.6748402118682861, "learning_rate": 3.509009802397598e-05, "loss": 0.881, "step": 6915 }, { "epoch": 1.1, "grad_norm": 0.6241844296455383, "learning_rate": 3.5070978052917885e-05, "loss": 0.8814, "step": 6920 }, { "epoch": 1.11, "grad_norm": 0.8334391117095947, "learning_rate": 3.5051851047465336e-05, "loss": 0.7423, "step": 6925 }, { "epoch": 1.11, "grad_norm": 0.6817840337753296, "learning_rate": 3.503271702097818e-05, "loss": 0.5209, "step": 6930 }, { "epoch": 1.11, "grad_norm": 0.9585073590278625, "learning_rate": 3.50135759868212e-05, "loss": 0.9416, "step": 6935 }, { "epoch": 1.11, "grad_norm": 0.7557898163795471, "learning_rate": 3.499442795836405e-05, "loss": 0.6085, "step": 6940 }, { "epoch": 1.11, "grad_norm": 0.8940297365188599, "learning_rate": 3.497527294898129e-05, "loss": 0.6232, "step": 6945 }, { "epoch": 1.11, "grad_norm": 0.6422560214996338, "learning_rate": 3.495611097205232e-05, "loss": 0.6165, "step": 6950 }, { "epoch": 1.11, "grad_norm": 0.92333984375, "learning_rate": 3.493694204096147e-05, "loss": 0.7409, "step": 6955 }, { "epoch": 1.11, "grad_norm": 0.8350429534912109, "learning_rate": 3.491776616909786e-05, "loss": 0.6304, "step": 6960 }, { "epoch": 1.11, "grad_norm": 0.8454934358596802, "learning_rate": 3.48985833698555e-05, "loss": 0.8221, "step": 6965 }, { "epoch": 1.11, "grad_norm": 0.6815975904464722, "learning_rate": 3.487939365663321e-05, "loss": 0.7426, "step": 6970 }, { "epoch": 1.11, "grad_norm": 0.5304132699966431, "learning_rate": 3.4860197042834674e-05, "loss": 0.6932, "step": 6975 }, { "epoch": 1.11, "grad_norm": 0.9002396464347839, "learning_rate": 
3.4840993541868364e-05, "loss": 0.752, "step": 6980 }, { "epoch": 1.11, "grad_norm": 0.8047484755516052, "learning_rate": 3.482178316714758e-05, "loss": 0.8861, "step": 6985 }, { "epoch": 1.12, "grad_norm": 0.8372194170951843, "learning_rate": 3.4802565932090405e-05, "loss": 0.6855, "step": 6990 }, { "epoch": 1.12, "grad_norm": 0.7594023942947388, "learning_rate": 3.4783341850119754e-05, "loss": 0.7424, "step": 6995 }, { "epoch": 1.12, "grad_norm": 0.8143925070762634, "learning_rate": 3.476411093466327e-05, "loss": 1.0525, "step": 7000 }, { "epoch": 1.12, "eval_loss": 0.7106046080589294, "eval_runtime": 96.364, "eval_samples_per_second": 7.233, "eval_steps_per_second": 7.233, "step": 7000 }, { "epoch": 1.12, "grad_norm": 1.2525312900543213, "learning_rate": 3.474487319915342e-05, "loss": 0.7996, "step": 7005 }, { "epoch": 1.12, "grad_norm": 0.9011229276657104, "learning_rate": 3.4725628657027386e-05, "loss": 0.7672, "step": 7010 }, { "epoch": 1.12, "grad_norm": 0.46732136607170105, "learning_rate": 3.4706377321727156e-05, "loss": 0.9557, "step": 7015 }, { "epoch": 1.12, "grad_norm": 0.7189437747001648, "learning_rate": 3.468711920669942e-05, "loss": 0.6853, "step": 7020 }, { "epoch": 1.12, "grad_norm": 0.7930631041526794, "learning_rate": 3.466785432539562e-05, "loss": 0.9078, "step": 7025 }, { "epoch": 1.12, "grad_norm": 1.3144121170043945, "learning_rate": 3.464858269127193e-05, "loss": 0.8064, "step": 7030 }, { "epoch": 1.12, "grad_norm": 0.7118679881095886, "learning_rate": 3.462930431778924e-05, "loss": 0.7595, "step": 7035 }, { "epoch": 1.12, "grad_norm": 1.0305006504058838, "learning_rate": 3.461001921841314e-05, "loss": 0.8286, "step": 7040 }, { "epoch": 1.12, "grad_norm": 0.4701656997203827, "learning_rate": 3.45907274066139e-05, "loss": 0.6739, "step": 7045 }, { "epoch": 1.13, "grad_norm": 0.7981716990470886, "learning_rate": 3.457142889586653e-05, "loss": 0.8902, "step": 7050 }, { "epoch": 1.13, "grad_norm": 0.9665462970733643, "learning_rate": 3.455212369965066e-05, "loss": 0.8668, "step": 7055 }, { "epoch": 1.13, "grad_norm": 0.5912620425224304, "learning_rate": 3.453281183145063e-05, "loss": 0.515, "step": 7060 }, { "epoch": 1.13, "grad_norm": 0.9274890422821045, "learning_rate": 3.451349330475543e-05, "loss": 0.7517, "step": 7065 }, { "epoch": 1.13, "grad_norm": 0.968731164932251, "learning_rate": 3.449416813305869e-05, "loss": 0.5942, "step": 7070 }, { "epoch": 1.13, "grad_norm": 0.8442440032958984, "learning_rate": 3.447483632985871e-05, "loss": 0.9199, "step": 7075 }, { "epoch": 1.13, "grad_norm": 0.7139914035797119, "learning_rate": 3.445549790865838e-05, "loss": 0.7511, "step": 7080 }, { "epoch": 1.13, "grad_norm": 0.8142327666282654, "learning_rate": 3.443615288296524e-05, "loss": 1.0244, "step": 7085 }, { "epoch": 1.13, "grad_norm": 0.7862592935562134, "learning_rate": 3.441680126629144e-05, "loss": 0.4681, "step": 7090 }, { "epoch": 1.13, "grad_norm": 0.774732232093811, "learning_rate": 3.439744307215374e-05, "loss": 0.7968, "step": 7095 }, { "epoch": 1.13, "grad_norm": 0.7824143767356873, "learning_rate": 3.437807831407346e-05, "loss": 0.8285, "step": 7100 }, { "epoch": 1.13, "eval_loss": 0.7099367380142212, "eval_runtime": 96.408, "eval_samples_per_second": 7.23, "eval_steps_per_second": 7.23, "step": 7100 }, { "epoch": 1.13, "grad_norm": 0.6416094303131104, "learning_rate": 3.435870700557655e-05, "loss": 0.8833, "step": 7105 }, { "epoch": 1.13, "grad_norm": 1.0388530492782593, "learning_rate": 3.433932916019352e-05, "loss": 0.8625, "step": 7110 }, { "epoch": 1.14, 
"grad_norm": 0.665253221988678, "learning_rate": 3.431994479145944e-05, "loss": 0.7174, "step": 7115 }, { "epoch": 1.14, "grad_norm": 0.6002438068389893, "learning_rate": 3.4300553912913916e-05, "loss": 0.5811, "step": 7120 }, { "epoch": 1.14, "grad_norm": 0.6728983521461487, "learning_rate": 3.428115653810114e-05, "loss": 0.7784, "step": 7125 }, { "epoch": 1.14, "grad_norm": 1.487757921218872, "learning_rate": 3.4261752680569824e-05, "loss": 0.8581, "step": 7130 }, { "epoch": 1.14, "grad_norm": 0.7253740429878235, "learning_rate": 3.424234235387322e-05, "loss": 0.6423, "step": 7135 }, { "epoch": 1.14, "grad_norm": 1.0124573707580566, "learning_rate": 3.422292557156907e-05, "loss": 0.951, "step": 7140 }, { "epoch": 1.14, "grad_norm": 0.8678650856018066, "learning_rate": 3.4203502347219626e-05, "loss": 0.7357, "step": 7145 }, { "epoch": 1.14, "grad_norm": 0.793936550617218, "learning_rate": 3.41840726943917e-05, "loss": 0.631, "step": 7150 }, { "epoch": 1.14, "grad_norm": 0.7116626501083374, "learning_rate": 3.416463662665652e-05, "loss": 0.8185, "step": 7155 }, { "epoch": 1.14, "grad_norm": 0.9050395488739014, "learning_rate": 3.414519415758983e-05, "loss": 0.6293, "step": 7160 }, { "epoch": 1.14, "grad_norm": 0.674711287021637, "learning_rate": 3.412574530077185e-05, "loss": 0.6727, "step": 7165 }, { "epoch": 1.14, "grad_norm": 0.682181179523468, "learning_rate": 3.4106290069787236e-05, "loss": 0.7405, "step": 7170 }, { "epoch": 1.15, "grad_norm": 0.8667547702789307, "learning_rate": 3.408682847822512e-05, "loss": 0.6902, "step": 7175 }, { "epoch": 1.15, "grad_norm": 0.8550475835800171, "learning_rate": 3.406736053967907e-05, "loss": 0.7606, "step": 7180 }, { "epoch": 1.15, "grad_norm": 0.8812040686607361, "learning_rate": 3.404788626774708e-05, "loss": 0.9635, "step": 7185 }, { "epoch": 1.15, "grad_norm": 0.7503578066825867, "learning_rate": 3.402840567603158e-05, "loss": 0.8821, "step": 7190 }, { "epoch": 1.15, "grad_norm": 0.757117748260498, "learning_rate": 3.400891877813941e-05, "loss": 0.8054, "step": 7195 }, { "epoch": 1.15, "grad_norm": 0.6797623038291931, "learning_rate": 3.39894255876818e-05, "loss": 0.6116, "step": 7200 }, { "epoch": 1.15, "eval_loss": 0.7078786492347717, "eval_runtime": 96.401, "eval_samples_per_second": 7.23, "eval_steps_per_second": 7.23, "step": 7200 }, { "epoch": 1.15, "grad_norm": 0.7826797366142273, "learning_rate": 3.39699261182744e-05, "loss": 0.6275, "step": 7205 }, { "epoch": 1.15, "grad_norm": 1.9613910913467407, "learning_rate": 3.395042038353723e-05, "loss": 0.696, "step": 7210 }, { "epoch": 1.15, "grad_norm": 0.7311391234397888, "learning_rate": 3.3930908397094696e-05, "loss": 0.7455, "step": 7215 }, { "epoch": 1.15, "grad_norm": 0.8681967854499817, "learning_rate": 3.391139017257555e-05, "loss": 0.617, "step": 7220 }, { "epoch": 1.15, "grad_norm": 1.2949939966201782, "learning_rate": 3.3891865723612926e-05, "loss": 0.8595, "step": 7225 }, { "epoch": 1.15, "grad_norm": 1.1373263597488403, "learning_rate": 3.3872335063844295e-05, "loss": 0.5906, "step": 7230 }, { "epoch": 1.15, "grad_norm": 0.6243600845336914, "learning_rate": 3.385279820691147e-05, "loss": 0.8558, "step": 7235 }, { "epoch": 1.16, "grad_norm": 0.7297492623329163, "learning_rate": 3.383325516646058e-05, "loss": 0.6055, "step": 7240 }, { "epoch": 1.16, "grad_norm": 0.8958369493484497, "learning_rate": 3.3813705956142085e-05, "loss": 0.714, "step": 7245 }, { "epoch": 1.16, "grad_norm": 0.8226301670074463, "learning_rate": 3.3794150589610763e-05, "loss": 0.7724, "step": 7250 }, { 
"epoch": 1.16, "grad_norm": 0.7973611354827881, "learning_rate": 3.377458908052566e-05, "loss": 0.696, "step": 7255 }, { "epoch": 1.16, "grad_norm": 1.0526561737060547, "learning_rate": 3.3755021442550146e-05, "loss": 0.7198, "step": 7260 }, { "epoch": 1.16, "grad_norm": 0.7639563083648682, "learning_rate": 3.373544768935186e-05, "loss": 0.6681, "step": 7265 }, { "epoch": 1.16, "grad_norm": 0.8041620850563049, "learning_rate": 3.3715867834602715e-05, "loss": 0.7158, "step": 7270 }, { "epoch": 1.16, "grad_norm": 1.0435208082199097, "learning_rate": 3.369628189197887e-05, "loss": 0.719, "step": 7275 }, { "epoch": 1.16, "grad_norm": 0.6697341203689575, "learning_rate": 3.3676689875160755e-05, "loss": 0.5694, "step": 7280 }, { "epoch": 1.16, "grad_norm": 0.7368444800376892, "learning_rate": 3.365709179783304e-05, "loss": 0.646, "step": 7285 }, { "epoch": 1.16, "grad_norm": 1.7546617984771729, "learning_rate": 3.363748767368463e-05, "loss": 0.7492, "step": 7290 }, { "epoch": 1.16, "grad_norm": 0.8340469002723694, "learning_rate": 3.3617877516408625e-05, "loss": 0.753, "step": 7295 }, { "epoch": 1.17, "grad_norm": 0.6987440586090088, "learning_rate": 3.3598261339702374e-05, "loss": 0.5617, "step": 7300 }, { "epoch": 1.17, "eval_loss": 0.708717405796051, "eval_runtime": 96.3901, "eval_samples_per_second": 7.231, "eval_steps_per_second": 7.231, "step": 7300 }, { "epoch": 1.17, "grad_norm": 0.7812116742134094, "learning_rate": 3.357863915726743e-05, "loss": 0.5933, "step": 7305 }, { "epoch": 1.17, "grad_norm": 0.8569033741950989, "learning_rate": 3.3559010982809526e-05, "loss": 0.7012, "step": 7310 }, { "epoch": 1.17, "grad_norm": 0.5553389191627502, "learning_rate": 3.3539376830038567e-05, "loss": 0.5819, "step": 7315 }, { "epoch": 1.17, "grad_norm": 0.8315708637237549, "learning_rate": 3.351973671266867e-05, "loss": 0.6821, "step": 7320 }, { "epoch": 1.17, "grad_norm": 0.828441321849823, "learning_rate": 3.350009064441809e-05, "loss": 0.6926, "step": 7325 }, { "epoch": 1.17, "grad_norm": 0.5075239539146423, "learning_rate": 3.348043863900924e-05, "loss": 0.64, "step": 7330 }, { "epoch": 1.17, "grad_norm": 0.7042556405067444, "learning_rate": 3.34607807101687e-05, "loss": 0.7332, "step": 7335 }, { "epoch": 1.17, "grad_norm": 0.8633832931518555, "learning_rate": 3.344111687162718e-05, "loss": 1.0268, "step": 7340 }, { "epoch": 1.17, "grad_norm": 1.3040523529052734, "learning_rate": 3.342144713711949e-05, "loss": 0.9377, "step": 7345 }, { "epoch": 1.17, "grad_norm": 0.7739453315734863, "learning_rate": 3.34017715203846e-05, "loss": 0.863, "step": 7350 }, { "epoch": 1.17, "grad_norm": 0.5962020754814148, "learning_rate": 3.338209003516556e-05, "loss": 0.5611, "step": 7355 }, { "epoch": 1.17, "grad_norm": 0.6497868299484253, "learning_rate": 3.336240269520953e-05, "loss": 0.8599, "step": 7360 }, { "epoch": 1.18, "grad_norm": 1.0249778032302856, "learning_rate": 3.334270951426776e-05, "loss": 0.7472, "step": 7365 }, { "epoch": 1.18, "grad_norm": 0.7308823466300964, "learning_rate": 3.332301050609558e-05, "loss": 0.692, "step": 7370 }, { "epoch": 1.18, "grad_norm": 0.7353125214576721, "learning_rate": 3.3303305684452377e-05, "loss": 0.6405, "step": 7375 }, { "epoch": 1.18, "grad_norm": 1.055648684501648, "learning_rate": 3.328359506310162e-05, "loss": 0.5795, "step": 7380 }, { "epoch": 1.18, "grad_norm": 1.2591193914413452, "learning_rate": 3.326387865581082e-05, "loss": 0.7434, "step": 7385 }, { "epoch": 1.18, "grad_norm": 0.751158595085144, "learning_rate": 3.3244156476351525e-05, "loss": 0.7315, 
"step": 7390 }, { "epoch": 1.18, "grad_norm": 0.7036099433898926, "learning_rate": 3.322442853849931e-05, "loss": 0.7636, "step": 7395 }, { "epoch": 1.18, "grad_norm": 0.6783479452133179, "learning_rate": 3.320469485603378e-05, "loss": 0.6514, "step": 7400 }, { "epoch": 1.18, "eval_loss": 0.7071788311004639, "eval_runtime": 96.4058, "eval_samples_per_second": 7.23, "eval_steps_per_second": 7.23, "step": 7400 }, { "epoch": 1.18, "grad_norm": 0.9339218735694885, "learning_rate": 3.318495544273857e-05, "loss": 0.7816, "step": 7405 }, { "epoch": 1.18, "grad_norm": 0.6175532937049866, "learning_rate": 3.316521031240128e-05, "loss": 0.5474, "step": 7410 }, { "epoch": 1.18, "grad_norm": 0.8843727707862854, "learning_rate": 3.314545947881353e-05, "loss": 0.7105, "step": 7415 }, { "epoch": 1.18, "grad_norm": 1.0864967107772827, "learning_rate": 3.312570295577093e-05, "loss": 0.8522, "step": 7420 }, { "epoch": 1.19, "grad_norm": 0.6682702302932739, "learning_rate": 3.310594075707303e-05, "loss": 0.5584, "step": 7425 }, { "epoch": 1.19, "grad_norm": 0.801595151424408, "learning_rate": 3.308617289652337e-05, "loss": 0.8889, "step": 7430 }, { "epoch": 1.19, "grad_norm": 0.8389090299606323, "learning_rate": 3.306639938792945e-05, "loss": 0.7226, "step": 7435 }, { "epoch": 1.19, "grad_norm": 1.5336723327636719, "learning_rate": 3.30466202451027e-05, "loss": 0.8794, "step": 7440 }, { "epoch": 1.19, "grad_norm": 0.8237048387527466, "learning_rate": 3.30268354818585e-05, "loss": 0.7764, "step": 7445 }, { "epoch": 1.19, "grad_norm": 0.9008681178092957, "learning_rate": 3.3007045112016124e-05, "loss": 0.7336, "step": 7450 }, { "epoch": 1.19, "grad_norm": 0.6193007826805115, "learning_rate": 3.29872491493988e-05, "loss": 0.8906, "step": 7455 }, { "epoch": 1.19, "grad_norm": 0.7680274248123169, "learning_rate": 3.296744760783365e-05, "loss": 0.7991, "step": 7460 }, { "epoch": 1.19, "grad_norm": NaN, "learning_rate": 3.2951602367033386e-05, "loss": 0.8565, "step": 7465 }, { "epoch": 1.19, "grad_norm": 0.6190903782844543, "learning_rate": 3.293179081821891e-05, "loss": 0.8935, "step": 7470 }, { "epoch": 1.19, "grad_norm": 0.7620663046836853, "learning_rate": 3.291197372919322e-05, "loss": 0.7041, "step": 7475 }, { "epoch": 1.19, "grad_norm": 0.7184949517250061, "learning_rate": 3.289215111379821e-05, "loss": 0.6436, "step": 7480 }, { "epoch": 1.19, "grad_norm": 0.8207274079322815, "learning_rate": 3.287232298587961e-05, "loss": 0.8094, "step": 7485 }, { "epoch": 1.2, "grad_norm": 0.8164545297622681, "learning_rate": 3.2852489359287016e-05, "loss": 0.8277, "step": 7490 }, { "epoch": 1.2, "grad_norm": 0.776091992855072, "learning_rate": 3.283265024787383e-05, "loss": 0.9329, "step": 7495 }, { "epoch": 1.2, "grad_norm": 0.8750385046005249, "learning_rate": 3.281280566549733e-05, "loss": 0.6729, "step": 7500 }, { "epoch": 1.2, "eval_loss": 0.7052386403083801, "eval_runtime": 96.3606, "eval_samples_per_second": 7.233, "eval_steps_per_second": 7.233, "step": 7500 }, { "epoch": 1.2, "grad_norm": 0.6827027797698975, "learning_rate": 3.27929556260186e-05, "loss": 0.5928, "step": 7505 }, { "epoch": 1.2, "grad_norm": 0.7305205464363098, "learning_rate": 3.2773100143302504e-05, "loss": 0.8407, "step": 7510 }, { "epoch": 1.2, "grad_norm": 1.0960073471069336, "learning_rate": 3.2753239231217745e-05, "loss": 0.6898, "step": 7515 }, { "epoch": 1.2, "grad_norm": 0.7878029942512512, "learning_rate": 3.273337290363683e-05, "loss": 0.654, "step": 7520 }, { "epoch": 1.2, "grad_norm": 0.5136789679527283, "learning_rate": 
3.2713501174436e-05, "loss": 0.5858, "step": 7525 }, { "epoch": 1.2, "grad_norm": 0.817991316318512, "learning_rate": 3.269362405749531e-05, "loss": 0.5891, "step": 7530 }, { "epoch": 1.2, "grad_norm": 0.8485821485519409, "learning_rate": 3.267374156669855e-05, "loss": 0.7298, "step": 7535 }, { "epoch": 1.2, "grad_norm": 0.862983226776123, "learning_rate": 3.265385371593329e-05, "loss": 0.9098, "step": 7540 }, { "epoch": 1.2, "grad_norm": 0.7313618063926697, "learning_rate": 3.263396051909083e-05, "loss": 0.9118, "step": 7545 }, { "epoch": 1.21, "grad_norm": 0.6416789293289185, "learning_rate": 3.2614061990066193e-05, "loss": 0.6785, "step": 7550 }, { "epoch": 1.21, "grad_norm": 1.7634716033935547, "learning_rate": 3.2594158142758146e-05, "loss": 0.7872, "step": 7555 }, { "epoch": 1.21, "grad_norm": 0.9261329174041748, "learning_rate": 3.257424899106917e-05, "loss": 0.7336, "step": 7560 }, { "epoch": 1.21, "grad_norm": 0.7905421257019043, "learning_rate": 3.255433454890544e-05, "loss": 0.4871, "step": 7565 }, { "epoch": 1.21, "grad_norm": 0.7929773926734924, "learning_rate": 3.2534414830176825e-05, "loss": 0.7026, "step": 7570 }, { "epoch": 1.21, "grad_norm": 0.7353596091270447, "learning_rate": 3.251448984879689e-05, "loss": 0.657, "step": 7575 }, { "epoch": 1.21, "grad_norm": 0.5401962399482727, "learning_rate": 3.2494559618682867e-05, "loss": 0.9139, "step": 7580 }, { "epoch": 1.21, "grad_norm": 0.8317365646362305, "learning_rate": 3.2474624153755665e-05, "loss": 0.5765, "step": 7585 }, { "epoch": 1.21, "grad_norm": 0.9745128750801086, "learning_rate": 3.2454683467939834e-05, "loss": 0.7377, "step": 7590 }, { "epoch": 1.21, "grad_norm": 0.9197364449501038, "learning_rate": 3.2434737575163585e-05, "loss": 0.8045, "step": 7595 }, { "epoch": 1.21, "grad_norm": 0.5326982140541077, "learning_rate": 3.241478648935876e-05, "loss": 0.6401, "step": 7600 }, { "epoch": 1.21, "eval_loss": 0.7055444121360779, "eval_runtime": 96.2986, "eval_samples_per_second": 7.238, "eval_steps_per_second": 7.238, "step": 7600 }, { "epoch": 1.21, "grad_norm": 0.8797016143798828, "learning_rate": 3.2394830224460837e-05, "loss": 0.6938, "step": 7605 }, { "epoch": 1.21, "grad_norm": 0.5764625668525696, "learning_rate": 3.2374868794408884e-05, "loss": 0.5537, "step": 7610 }, { "epoch": 1.22, "grad_norm": 0.7063139081001282, "learning_rate": 3.235490221314562e-05, "loss": 0.4823, "step": 7615 }, { "epoch": 1.22, "grad_norm": 0.6020994186401367, "learning_rate": 3.23349304946173e-05, "loss": 0.4869, "step": 7620 }, { "epoch": 1.22, "grad_norm": 0.8765579462051392, "learning_rate": 3.231495365277385e-05, "loss": 0.9822, "step": 7625 }, { "epoch": 1.22, "grad_norm": 0.7133936882019043, "learning_rate": 3.22949717015687e-05, "loss": 0.7893, "step": 7630 }, { "epoch": 1.22, "grad_norm": 1.1999497413635254, "learning_rate": 3.227498465495888e-05, "loss": 0.7949, "step": 7635 }, { "epoch": 1.22, "grad_norm": 0.8327472805976868, "learning_rate": 3.225499252690499e-05, "loss": 0.8728, "step": 7640 }, { "epoch": 1.22, "grad_norm": 0.9887890219688416, "learning_rate": 3.2234995331371166e-05, "loss": 0.764, "step": 7645 }, { "epoch": 1.22, "grad_norm": 0.9890260696411133, "learning_rate": 3.2214993082325073e-05, "loss": 0.8756, "step": 7650 }, { "epoch": 1.22, "grad_norm": 0.651091456413269, "learning_rate": 3.2194985793737924e-05, "loss": 0.6986, "step": 7655 }, { "epoch": 1.22, "grad_norm": 1.0091456174850464, "learning_rate": 3.2174973479584446e-05, "loss": 0.6858, "step": 7660 }, { "epoch": 1.22, "grad_norm": 
1.0353959798812866, "learning_rate": 3.215495615384287e-05, "loss": 0.7436, "step": 7665 }, { "epoch": 1.22, "grad_norm": 0.9490379691123962, "learning_rate": 3.213493383049494e-05, "loss": 0.6652, "step": 7670 }, { "epoch": 1.23, "grad_norm": 0.9487096667289734, "learning_rate": 3.211490652352589e-05, "loss": 0.7113, "step": 7675 }, { "epoch": 1.23, "grad_norm": 0.8836408853530884, "learning_rate": 3.20948742469244e-05, "loss": 0.7104, "step": 7680 }, { "epoch": 1.23, "grad_norm": 0.9050471782684326, "learning_rate": 3.207483701468268e-05, "loss": 0.7832, "step": 7685 }, { "epoch": 1.23, "grad_norm": 0.7933790683746338, "learning_rate": 3.205479484079635e-05, "loss": 0.7119, "step": 7690 }, { "epoch": 1.23, "grad_norm": 0.8188216090202332, "learning_rate": 3.203474773926453e-05, "loss": 0.7536, "step": 7695 }, { "epoch": 1.23, "grad_norm": 0.7438432574272156, "learning_rate": 3.201469572408973e-05, "loss": 0.8089, "step": 7700 }, { "epoch": 1.23, "eval_loss": 0.7051511406898499, "eval_runtime": 96.3295, "eval_samples_per_second": 7.236, "eval_steps_per_second": 7.236, "step": 7700 }, { "epoch": 1.23, "grad_norm": 0.9201672673225403, "learning_rate": 3.1994638809277915e-05, "loss": 1.0177, "step": 7705 }, { "epoch": 1.23, "grad_norm": 0.7813202738761902, "learning_rate": 3.197457700883849e-05, "loss": 0.476, "step": 7710 }, { "epoch": 1.23, "grad_norm": 0.8475176692008972, "learning_rate": 3.195451033678425e-05, "loss": 0.7233, "step": 7715 }, { "epoch": 1.23, "grad_norm": 0.8281943202018738, "learning_rate": 3.19344388071314e-05, "loss": 0.7427, "step": 7720 }, { "epoch": 1.23, "grad_norm": 0.8039165735244751, "learning_rate": 3.191436243389954e-05, "loss": 0.6422, "step": 7725 }, { "epoch": 1.23, "grad_norm": 0.5459123849868774, "learning_rate": 3.1894281231111644e-05, "loss": 0.6259, "step": 7730 }, { "epoch": 1.23, "grad_norm": 0.7360697984695435, "learning_rate": 3.187419521279407e-05, "loss": 0.6194, "step": 7735 }, { "epoch": 1.24, "grad_norm": 0.6768085956573486, "learning_rate": 3.185410439297653e-05, "loss": 0.6967, "step": 7740 }, { "epoch": 1.24, "grad_norm": 0.7461891174316406, "learning_rate": 3.1834008785692104e-05, "loss": 0.7128, "step": 7745 }, { "epoch": 1.24, "grad_norm": 0.7571334838867188, "learning_rate": 3.18139084049772e-05, "loss": 0.6859, "step": 7750 }, { "epoch": 1.24, "grad_norm": 0.8840231895446777, "learning_rate": 3.1793803264871575e-05, "loss": 0.7524, "step": 7755 }, { "epoch": 1.24, "grad_norm": 0.5408929586410522, "learning_rate": 3.1773693379418296e-05, "loss": 0.55, "step": 7760 }, { "epoch": 1.24, "grad_norm": 0.5577660202980042, "learning_rate": 3.175357876266375e-05, "loss": 0.6207, "step": 7765 }, { "epoch": 1.24, "grad_norm": 0.8528269529342651, "learning_rate": 3.1733459428657637e-05, "loss": 0.513, "step": 7770 }, { "epoch": 1.24, "grad_norm": 0.616848886013031, "learning_rate": 3.171333539145294e-05, "loss": 0.8487, "step": 7775 }, { "epoch": 1.24, "grad_norm": 0.7524570226669312, "learning_rate": 3.169320666510593e-05, "loss": 0.7275, "step": 7780 }, { "epoch": 1.24, "grad_norm": 0.6796889305114746, "learning_rate": 3.167307326367616e-05, "loss": 0.4541, "step": 7785 }, { "epoch": 1.24, "grad_norm": 0.7805665135383606, "learning_rate": 3.165293520122643e-05, "loss": 0.9711, "step": 7790 }, { "epoch": 1.24, "grad_norm": 0.7790519595146179, "learning_rate": 3.1632792491822835e-05, "loss": 0.5531, "step": 7795 }, { "epoch": 1.25, "grad_norm": 0.93754643201828, "learning_rate": 3.161264514953467e-05, "loss": 0.8166, "step": 7800 }, { "epoch": 
1.25, "eval_loss": 0.7040937542915344, "eval_runtime": 96.3461, "eval_samples_per_second": 7.234, "eval_steps_per_second": 7.234, "step": 7800 }, { "epoch": 1.25, "grad_norm": 1.0540165901184082, "learning_rate": 3.1592493188434494e-05, "loss": 0.7175, "step": 7805 }, { "epoch": 1.25, "grad_norm": 0.6427468061447144, "learning_rate": 3.157233662259808e-05, "loss": 0.7531, "step": 7810 }, { "epoch": 1.25, "grad_norm": 0.8710203766822815, "learning_rate": 3.155217546610442e-05, "loss": 0.5609, "step": 7815 }, { "epoch": 1.25, "grad_norm": 0.5983574390411377, "learning_rate": 3.153200973303573e-05, "loss": 0.8761, "step": 7820 }, { "epoch": 1.25, "grad_norm": 0.892261803150177, "learning_rate": 3.151183943747738e-05, "loss": 0.8162, "step": 7825 }, { "epoch": 1.25, "grad_norm": 0.8922862410545349, "learning_rate": 3.149166459351797e-05, "loss": 0.6916, "step": 7830 }, { "epoch": 1.25, "grad_norm": 0.7080487608909607, "learning_rate": 3.1471485215249264e-05, "loss": 0.7539, "step": 7835 }, { "epoch": 1.25, "grad_norm": 20.36235809326172, "learning_rate": 3.145130131676618e-05, "loss": 0.7404, "step": 7840 }, { "epoch": 1.25, "grad_norm": 0.768466055393219, "learning_rate": 3.1431112912166804e-05, "loss": 0.6678, "step": 7845 }, { "epoch": 1.25, "grad_norm": 0.8763453960418701, "learning_rate": 3.141092001555238e-05, "loss": 0.5503, "step": 7850 }, { "epoch": 1.25, "grad_norm": 0.8134922385215759, "learning_rate": 3.139072264102727e-05, "loss": 0.698, "step": 7855 }, { "epoch": 1.25, "grad_norm": 1.1443438529968262, "learning_rate": 3.1370520802698953e-05, "loss": 1.0277, "step": 7860 }, { "epoch": 1.26, "grad_norm": 0.806175172328949, "learning_rate": 3.135031451467808e-05, "loss": 0.7382, "step": 7865 }, { "epoch": 1.26, "grad_norm": 0.5716249346733093, "learning_rate": 3.1330103791078345e-05, "loss": 0.6695, "step": 7870 }, { "epoch": 1.26, "grad_norm": 0.7769394516944885, "learning_rate": 3.130988864601659e-05, "loss": 0.6633, "step": 7875 }, { "epoch": 1.26, "grad_norm": 0.5917829871177673, "learning_rate": 3.1289669093612714e-05, "loss": 0.4479, "step": 7880 }, { "epoch": 1.26, "grad_norm": 0.8687866926193237, "learning_rate": 3.1269445147989706e-05, "loss": 0.7416, "step": 7885 }, { "epoch": 1.26, "grad_norm": 0.8341571092605591, "learning_rate": 3.124921682327363e-05, "loss": 0.6651, "step": 7890 }, { "epoch": 1.26, "grad_norm": 0.7416822910308838, "learning_rate": 3.1228984133593594e-05, "loss": 0.6556, "step": 7895 }, { "epoch": 1.26, "grad_norm": 0.8006006479263306, "learning_rate": 3.1208747093081765e-05, "loss": 0.8685, "step": 7900 }, { "epoch": 1.26, "eval_loss": 0.702598512172699, "eval_runtime": 96.335, "eval_samples_per_second": 7.235, "eval_steps_per_second": 7.235, "step": 7900 }, { "epoch": 1.26, "grad_norm": 0.8203070759773254, "learning_rate": 3.118850571587335e-05, "loss": 0.6639, "step": 7905 }, { "epoch": 1.26, "grad_norm": 0.7723727226257324, "learning_rate": 3.116826001610658e-05, "loss": 0.7805, "step": 7910 }, { "epoch": 1.26, "grad_norm": 0.870233952999115, "learning_rate": 3.114801000792271e-05, "loss": 0.7065, "step": 7915 }, { "epoch": 1.26, "grad_norm": 0.6729398965835571, "learning_rate": 3.112775570546599e-05, "loss": 0.9504, "step": 7920 }, { "epoch": 1.26, "grad_norm": 0.7119274735450745, "learning_rate": 3.110749712288369e-05, "loss": 0.6274, "step": 7925 }, { "epoch": 1.27, "grad_norm": 0.7193456888198853, "learning_rate": 3.1087234274326056e-05, "loss": 0.5857, "step": 7930 }, { "epoch": 1.27, "grad_norm": 0.8700212836265564, "learning_rate": 
3.106696717394633e-05, "loss": 0.6635, "step": 7935 }, { "epoch": 1.27, "grad_norm": 0.5653272271156311, "learning_rate": 3.104669583590069e-05, "loss": 0.6838, "step": 7940 }, { "epoch": 1.27, "grad_norm": 0.8067308068275452, "learning_rate": 3.102642027434832e-05, "loss": 0.7206, "step": 7945 }, { "epoch": 1.27, "grad_norm": 1.267585277557373, "learning_rate": 3.100614050345131e-05, "loss": 0.7372, "step": 7950 }, { "epoch": 1.27, "grad_norm": 0.7224301099777222, "learning_rate": 3.098585653737473e-05, "loss": 0.6712, "step": 7955 }, { "epoch": 1.27, "grad_norm": 1.1728746891021729, "learning_rate": 3.096556839028654e-05, "loss": 0.6384, "step": 7960 }, { "epoch": 1.27, "grad_norm": 0.8433157801628113, "learning_rate": 3.0945276076357646e-05, "loss": 0.7598, "step": 7965 }, { "epoch": 1.27, "grad_norm": 0.8825304508209229, "learning_rate": 3.092497960976187e-05, "loss": 0.6488, "step": 7970 }, { "epoch": 1.27, "grad_norm": 0.8050782084465027, "learning_rate": 3.0904679004675905e-05, "loss": 0.5749, "step": 7975 }, { "epoch": 1.27, "grad_norm": 0.9096771478652954, "learning_rate": 3.088437427527937e-05, "loss": 0.7139, "step": 7980 }, { "epoch": 1.27, "grad_norm": 0.7473983764648438, "learning_rate": 3.086406543575475e-05, "loss": 0.7281, "step": 7985 }, { "epoch": 1.28, "grad_norm": 1.1370782852172852, "learning_rate": 3.084375250028739e-05, "loss": 0.6314, "step": 7990 }, { "epoch": 1.28, "grad_norm": 0.675596296787262, "learning_rate": 3.08234354830655e-05, "loss": 0.7009, "step": 7995 }, { "epoch": 1.28, "grad_norm": 1.1450175046920776, "learning_rate": 3.080311439828016e-05, "loss": 0.6945, "step": 8000 }, { "epoch": 1.28, "eval_loss": 0.7042596936225891, "eval_runtime": 96.3147, "eval_samples_per_second": 7.237, "eval_steps_per_second": 7.237, "step": 8000 }, { "epoch": 1.28, "grad_norm": 0.6750172972679138, "learning_rate": 3.0782789260125264e-05, "loss": 0.6747, "step": 8005 }, { "epoch": 1.28, "grad_norm": 0.9685637354850769, "learning_rate": 3.0762460082797565e-05, "loss": 0.8169, "step": 8010 }, { "epoch": 1.28, "grad_norm": 0.6871526837348938, "learning_rate": 3.07421268804966e-05, "loss": 0.7907, "step": 8015 }, { "epoch": 1.28, "grad_norm": 1.2020535469055176, "learning_rate": 3.072178966742476e-05, "loss": 0.5841, "step": 8020 }, { "epoch": 1.28, "grad_norm": 0.7885614633560181, "learning_rate": 3.070144845778721e-05, "loss": 0.7879, "step": 8025 }, { "epoch": 1.28, "grad_norm": 0.8481292724609375, "learning_rate": 3.0681103265791913e-05, "loss": 0.7492, "step": 8030 }, { "epoch": 1.28, "grad_norm": 0.7926096320152283, "learning_rate": 3.066075410564962e-05, "loss": 0.7373, "step": 8035 }, { "epoch": 1.28, "grad_norm": 0.6218678951263428, "learning_rate": 3.064040099157384e-05, "loss": 0.5947, "step": 8040 }, { "epoch": 1.28, "grad_norm": 0.7932231426239014, "learning_rate": 3.062004393778086e-05, "loss": 0.6297, "step": 8045 }, { "epoch": 1.28, "grad_norm": 0.7588164806365967, "learning_rate": 3.05996829584897e-05, "loss": 0.5988, "step": 8050 }, { "epoch": 1.29, "grad_norm": 1.5323835611343384, "learning_rate": 3.057931806792214e-05, "loss": 0.8176, "step": 8055 }, { "epoch": 1.29, "grad_norm": 0.672051727771759, "learning_rate": 3.055894928030269e-05, "loss": 0.4785, "step": 8060 }, { "epoch": 1.29, "grad_norm": 0.862082839012146, "learning_rate": 3.0538576609858566e-05, "loss": 0.6622, "step": 8065 }, { "epoch": 1.29, "grad_norm": 0.8672384023666382, "learning_rate": 3.0518200070819715e-05, "loss": 0.5951, "step": 8070 }, { "epoch": 1.29, "grad_norm": 
0.7039456367492676, "learning_rate": 3.0497819677418767e-05, "loss": 0.5597, "step": 8075 }, { "epoch": 1.29, "grad_norm": 1.0149037837982178, "learning_rate": 3.0477435443891068e-05, "loss": 0.8546, "step": 8080 }, { "epoch": 1.29, "grad_norm": 0.7860717177391052, "learning_rate": 3.0457047384474623e-05, "loss": 0.5704, "step": 8085 }, { "epoch": 1.29, "grad_norm": 1.0836188793182373, "learning_rate": 3.0436655513410135e-05, "loss": 0.7824, "step": 8090 }, { "epoch": 1.29, "grad_norm": 0.5916305780410767, "learning_rate": 3.0416259844940932e-05, "loss": 0.5636, "step": 8095 }, { "epoch": 1.29, "grad_norm": 0.9937427639961243, "learning_rate": 3.0395860393313026e-05, "loss": 0.6955, "step": 8100 }, { "epoch": 1.29, "eval_loss": 0.7009605765342712, "eval_runtime": 96.2791, "eval_samples_per_second": 7.239, "eval_steps_per_second": 7.239, "step": 8100 }, { "epoch": 1.29, "grad_norm": 1.223351001739502, "learning_rate": 3.0375457172775067e-05, "loss": 0.7957, "step": 8105 }, { "epoch": 1.29, "grad_norm": 0.7758160829544067, "learning_rate": 3.0355050197578333e-05, "loss": 0.7482, "step": 8110 }, { "epoch": 1.3, "grad_norm": 1.0522717237472534, "learning_rate": 3.033463948197671e-05, "loss": 0.5511, "step": 8115 }, { "epoch": 1.3, "grad_norm": 0.8289081454277039, "learning_rate": 3.0314225040226723e-05, "loss": 0.8692, "step": 8120 }, { "epoch": 1.3, "grad_norm": 0.7654734253883362, "learning_rate": 3.029380688658749e-05, "loss": 0.7779, "step": 8125 }, { "epoch": 1.3, "grad_norm": 0.8430918455123901, "learning_rate": 3.0273385035320708e-05, "loss": 0.6381, "step": 8130 }, { "epoch": 1.3, "grad_norm": 0.5839015245437622, "learning_rate": 3.025295950069067e-05, "loss": 0.7259, "step": 8135 }, { "epoch": 1.3, "grad_norm": 0.9747032523155212, "learning_rate": 3.0232530296964247e-05, "loss": 0.7543, "step": 8140 }, { "epoch": 1.3, "grad_norm": 0.7823505401611328, "learning_rate": 3.0212097438410864e-05, "loss": 0.603, "step": 8145 }, { "epoch": 1.3, "grad_norm": 0.9050419926643372, "learning_rate": 3.0191660939302484e-05, "loss": 0.8558, "step": 8150 }, { "epoch": 1.3, "grad_norm": 0.8175522685050964, "learning_rate": 3.017122081391365e-05, "loss": 0.9133, "step": 8155 }, { "epoch": 1.3, "grad_norm": 0.7007612586021423, "learning_rate": 3.0150777076521403e-05, "loss": 0.5508, "step": 8160 }, { "epoch": 1.3, "grad_norm": 0.9761927127838135, "learning_rate": 3.0130329741405334e-05, "loss": 0.597, "step": 8165 }, { "epoch": 1.3, "grad_norm": 0.7669122815132141, "learning_rate": 3.0109878822847515e-05, "loss": 0.7017, "step": 8170 }, { "epoch": 1.3, "grad_norm": 1.3604693412780762, "learning_rate": 3.008942433513255e-05, "loss": 0.6975, "step": 8175 }, { "epoch": 1.31, "grad_norm": 0.6270090937614441, "learning_rate": 3.0068966292547528e-05, "loss": 0.6954, "step": 8180 }, { "epoch": 1.31, "grad_norm": 0.9544779062271118, "learning_rate": 3.004850470938201e-05, "loss": 0.5711, "step": 8185 }, { "epoch": 1.31, "grad_norm": 0.7997902035713196, "learning_rate": 3.0028039599928047e-05, "loss": 0.6225, "step": 8190 }, { "epoch": 1.31, "grad_norm": 1.1297340393066406, "learning_rate": 3.000757097848015e-05, "loss": 0.9648, "step": 8195 }, { "epoch": 1.31, "grad_norm": 0.915552020072937, "learning_rate": 2.998709885933526e-05, "loss": 0.734, "step": 8200 }, { "epoch": 1.31, "eval_loss": 0.7022165060043335, "eval_runtime": 96.3376, "eval_samples_per_second": 7.235, "eval_steps_per_second": 7.235, "step": 8200 }, { "epoch": 1.31, "grad_norm": 0.8937833309173584, "learning_rate": 2.996662325679279e-05, "loss": 
0.711, "step": 8205 }, { "epoch": 1.31, "grad_norm": 0.8294236063957214, "learning_rate": 2.9946144185154573e-05, "loss": 0.6208, "step": 8210 }, { "epoch": 1.31, "grad_norm": 0.7142773866653442, "learning_rate": 2.992566165872487e-05, "loss": 0.63, "step": 8215 }, { "epoch": 1.31, "grad_norm": 1.2502591609954834, "learning_rate": 2.9905175691810346e-05, "loss": 0.6856, "step": 8220 }, { "epoch": 1.31, "grad_norm": 0.7072376012802124, "learning_rate": 2.9884686298720084e-05, "loss": 0.7738, "step": 8225 }, { "epoch": 1.31, "grad_norm": 0.7068510055541992, "learning_rate": 2.9864193493765536e-05, "loss": 0.5731, "step": 8230 }, { "epoch": 1.31, "grad_norm": 1.0625722408294678, "learning_rate": 2.984369729126057e-05, "loss": 0.6232, "step": 8235 }, { "epoch": 1.32, "grad_norm": 0.9145768284797668, "learning_rate": 2.98231977055214e-05, "loss": 0.831, "step": 8240 }, { "epoch": 1.32, "grad_norm": 0.9636755585670471, "learning_rate": 2.9802694750866615e-05, "loss": 0.8864, "step": 8245 }, { "epoch": 1.32, "grad_norm": 1.1421067714691162, "learning_rate": 2.978218844161715e-05, "loss": 0.7562, "step": 8250 }, { "epoch": 1.32, "grad_norm": 0.645041286945343, "learning_rate": 2.976167879209629e-05, "loss": 0.5159, "step": 8255 }, { "epoch": 1.32, "grad_norm": 0.5683246850967407, "learning_rate": 2.9741165816629662e-05, "loss": 0.7033, "step": 8260 }, { "epoch": 1.32, "grad_norm": 1.5764894485473633, "learning_rate": 2.9720649529545197e-05, "loss": 0.6767, "step": 8265 }, { "epoch": 1.32, "grad_norm": 0.7696818709373474, "learning_rate": 2.970012994517314e-05, "loss": 0.6647, "step": 8270 }, { "epoch": 1.32, "grad_norm": 0.7804857492446899, "learning_rate": 2.967960707784605e-05, "loss": 0.6663, "step": 8275 }, { "epoch": 1.32, "grad_norm": 0.8747091889381409, "learning_rate": 2.9659080941898788e-05, "loss": 0.7183, "step": 8280 }, { "epoch": 1.32, "grad_norm": 0.6558755040168762, "learning_rate": 2.963855155166847e-05, "loss": 0.5533, "step": 8285 }, { "epoch": 1.32, "grad_norm": 1.0046048164367676, "learning_rate": 2.9618018921494505e-05, "loss": 0.6945, "step": 8290 }, { "epoch": 1.32, "grad_norm": 0.8384654521942139, "learning_rate": 2.959748306571857e-05, "loss": 0.6501, "step": 8295 }, { "epoch": 1.32, "grad_norm": 0.5712169408798218, "learning_rate": 2.9576943998684577e-05, "loss": 0.5586, "step": 8300 }, { "epoch": 1.32, "eval_loss": 0.701697826385498, "eval_runtime": 96.3495, "eval_samples_per_second": 7.234, "eval_steps_per_second": 7.234, "step": 8300 }, { "epoch": 1.33, "grad_norm": 0.8932040929794312, "learning_rate": 2.9556401734738682e-05, "loss": 0.776, "step": 8305 }, { "epoch": 1.33, "grad_norm": 0.7956164479255676, "learning_rate": 2.9535856288229295e-05, "loss": 0.7136, "step": 8310 }, { "epoch": 1.33, "grad_norm": 0.699739396572113, "learning_rate": 2.951530767350703e-05, "loss": 0.4986, "step": 8315 }, { "epoch": 1.33, "grad_norm": 0.7867723107337952, "learning_rate": 2.949475590492473e-05, "loss": 0.6532, "step": 8320 }, { "epoch": 1.33, "grad_norm": 0.6946756839752197, "learning_rate": 2.947420099683741e-05, "loss": 0.7284, "step": 8325 }, { "epoch": 1.33, "grad_norm": 1.097390055656433, "learning_rate": 2.9453642963602306e-05, "loss": 1.0162, "step": 8330 }, { "epoch": 1.33, "grad_norm": 0.6341527104377747, "learning_rate": 2.9433081819578843e-05, "loss": 0.5738, "step": 8335 }, { "epoch": 1.33, "grad_norm": 0.7065830826759338, "learning_rate": 2.941251757912859e-05, "loss": 0.5473, "step": 8340 }, { "epoch": 1.33, "grad_norm": 0.6362321376800537, "learning_rate": 
2.939195025661529e-05, "loss": 0.6538, "step": 8345 }, { "epoch": 1.33, "grad_norm": 1.0062533617019653, "learning_rate": 2.9371379866404862e-05, "loss": 0.6313, "step": 8350 }, { "epoch": 1.33, "grad_norm": 1.0622398853302002, "learning_rate": 2.935080642286534e-05, "loss": 0.7483, "step": 8355 }, { "epoch": 1.33, "grad_norm": 0.726993978023529, "learning_rate": 2.933022994036689e-05, "loss": 0.6649, "step": 8360 }, { "epoch": 1.34, "grad_norm": 0.5122197866439819, "learning_rate": 2.930965043328181e-05, "loss": 0.6865, "step": 8365 }, { "epoch": 1.34, "grad_norm": 0.8607394099235535, "learning_rate": 2.9289067915984526e-05, "loss": 0.5822, "step": 8370 }, { "epoch": 1.34, "grad_norm": 0.7147690653800964, "learning_rate": 2.926848240285154e-05, "loss": 0.559, "step": 8375 }, { "epoch": 1.34, "grad_norm": 0.7905888557434082, "learning_rate": 2.9247893908261458e-05, "loss": 0.8028, "step": 8380 }, { "epoch": 1.34, "grad_norm": 0.8332535624504089, "learning_rate": 2.9227302446594963e-05, "loss": 0.7625, "step": 8385 }, { "epoch": 1.34, "grad_norm": 0.8591724038124084, "learning_rate": 2.9206708032234826e-05, "loss": 0.7804, "step": 8390 }, { "epoch": 1.34, "grad_norm": 0.6741548776626587, "learning_rate": 2.918611067956586e-05, "loss": 0.8665, "step": 8395 }, { "epoch": 1.34, "grad_norm": 0.5031669735908508, "learning_rate": 2.916551040297495e-05, "loss": 0.7299, "step": 8400 }, { "epoch": 1.34, "eval_loss": 0.6999276876449585, "eval_runtime": 96.3216, "eval_samples_per_second": 7.236, "eval_steps_per_second": 7.236, "step": 8400 }, { "epoch": 1.34, "grad_norm": 0.6288011074066162, "learning_rate": 2.9144907216851004e-05, "loss": 0.6782, "step": 8405 }, { "epoch": 1.34, "grad_norm": 0.8632309436798096, "learning_rate": 2.9124301135584975e-05, "loss": 0.6778, "step": 8410 }, { "epoch": 1.34, "grad_norm": 0.6853322982788086, "learning_rate": 2.910369217356984e-05, "loss": 0.7244, "step": 8415 }, { "epoch": 1.34, "grad_norm": 0.6843327283859253, "learning_rate": 2.908308034520058e-05, "loss": 0.7216, "step": 8420 }, { "epoch": 1.34, "grad_norm": 1.206913948059082, "learning_rate": 2.906246566487417e-05, "loss": 0.6733, "step": 8425 }, { "epoch": 1.35, "grad_norm": 0.9815182685852051, "learning_rate": 2.904184814698961e-05, "loss": 0.6827, "step": 8430 }, { "epoch": 1.35, "grad_norm": 0.9608283042907715, "learning_rate": 2.9021227805947844e-05, "loss": 0.8357, "step": 8435 }, { "epoch": 1.35, "grad_norm": 1.2694772481918335, "learning_rate": 2.90006046561518e-05, "loss": 0.6741, "step": 8440 }, { "epoch": 1.35, "grad_norm": 0.78603595495224, "learning_rate": 2.897997871200639e-05, "loss": 0.7701, "step": 8445 }, { "epoch": 1.35, "grad_norm": 0.9763398170471191, "learning_rate": 2.895934998791845e-05, "loss": 0.9359, "step": 8450 }, { "epoch": 1.35, "grad_norm": 0.8996787667274475, "learning_rate": 2.8938718498296768e-05, "loss": 0.7951, "step": 8455 }, { "epoch": 1.35, "grad_norm": 0.5990316867828369, "learning_rate": 2.891808425755206e-05, "loss": 0.7176, "step": 8460 }, { "epoch": 1.35, "grad_norm": 3.936537265777588, "learning_rate": 2.889744728009696e-05, "loss": 0.8185, "step": 8465 }, { "epoch": 1.35, "grad_norm": 0.5997591614723206, "learning_rate": 2.8876807580346044e-05, "loss": 0.6776, "step": 8470 }, { "epoch": 1.35, "grad_norm": 0.9869585633277893, "learning_rate": 2.8856165172715745e-05, "loss": 0.9732, "step": 8475 }, { "epoch": 1.35, "grad_norm": 0.8746089935302734, "learning_rate": 2.883552007162441e-05, "loss": 0.7027, "step": 8480 }, { "epoch": 1.35, "grad_norm": 
0.7140242457389832, "learning_rate": 2.8814872291492278e-05, "loss": 0.8119, "step": 8485 }, { "epoch": 1.36, "grad_norm": 0.6302705407142639, "learning_rate": 2.8794221846741436e-05, "loss": 0.7862, "step": 8490 }, { "epoch": 1.36, "grad_norm": 0.8000974655151367, "learning_rate": 2.8773568751795843e-05, "loss": 0.7948, "step": 8495 }, { "epoch": 1.36, "grad_norm": 0.35248124599456787, "learning_rate": 2.875291302108131e-05, "loss": 1.089, "step": 8500 }, { "epoch": 1.36, "eval_loss": 0.6993909478187561, "eval_runtime": 96.3537, "eval_samples_per_second": 7.234, "eval_steps_per_second": 7.234, "step": 8500 }, { "epoch": 1.36, "grad_norm": 1.1372653245925903, "learning_rate": 2.8732254669025497e-05, "loss": 0.7275, "step": 8505 }, { "epoch": 1.36, "grad_norm": 0.6813796162605286, "learning_rate": 2.8711593710057883e-05, "loss": 0.6084, "step": 8510 }, { "epoch": 1.36, "grad_norm": 1.1797153949737549, "learning_rate": 2.8690930158609757e-05, "loss": 0.8862, "step": 8515 }, { "epoch": 1.36, "grad_norm": 0.6880937814712524, "learning_rate": 2.8670264029114247e-05, "loss": 0.7201, "step": 8520 }, { "epoch": 1.36, "grad_norm": 0.8386674523353577, "learning_rate": 2.864959533600626e-05, "loss": 0.7845, "step": 8525 }, { "epoch": 1.36, "grad_norm": 0.9347553849220276, "learning_rate": 2.862892409372251e-05, "loss": 0.7399, "step": 8530 }, { "epoch": 1.36, "grad_norm": 0.7480734586715698, "learning_rate": 2.8608250316701474e-05, "loss": 0.6557, "step": 8535 }, { "epoch": 1.36, "grad_norm": 0.7449476718902588, "learning_rate": 2.8587574019383413e-05, "loss": 0.6119, "step": 8540 }, { "epoch": 1.36, "grad_norm": 0.7354079484939575, "learning_rate": 2.856689521621034e-05, "loss": 0.7845, "step": 8545 }, { "epoch": 1.36, "grad_norm": 0.656905472278595, "learning_rate": 2.854621392162602e-05, "loss": 0.6436, "step": 8550 }, { "epoch": 1.37, "grad_norm": 1.025376558303833, "learning_rate": 2.8525530150075968e-05, "loss": 0.8306, "step": 8555 }, { "epoch": 1.37, "grad_norm": 0.6368470191955566, "learning_rate": 2.8504843916007412e-05, "loss": 0.7914, "step": 8560 }, { "epoch": 1.37, "grad_norm": 0.7506736516952515, "learning_rate": 2.8484155233869315e-05, "loss": 0.854, "step": 8565 }, { "epoch": 1.37, "grad_norm": 0.6676509380340576, "learning_rate": 2.846346411811235e-05, "loss": 0.7416, "step": 8570 }, { "epoch": 1.37, "grad_norm": 0.8177019953727722, "learning_rate": 2.8442770583188872e-05, "loss": 0.5851, "step": 8575 }, { "epoch": 1.37, "grad_norm": 0.9827241897583008, "learning_rate": 2.842207464355294e-05, "loss": 0.9374, "step": 8580 }, { "epoch": 1.37, "grad_norm": 1.079214334487915, "learning_rate": 2.8401376313660295e-05, "loss": 0.7287, "step": 8585 }, { "epoch": 1.37, "grad_norm": 0.5321961641311646, "learning_rate": 2.8380675607968343e-05, "loss": 0.6194, "step": 8590 }, { "epoch": 1.37, "grad_norm": 0.8180492520332336, "learning_rate": 2.835997254093614e-05, "loss": 0.4846, "step": 8595 }, { "epoch": 1.37, "grad_norm": 0.6867579221725464, "learning_rate": 2.8339267127024416e-05, "loss": 0.5733, "step": 8600 }, { "epoch": 1.37, "eval_loss": 0.6994162201881409, "eval_runtime": 96.4595, "eval_samples_per_second": 7.226, "eval_steps_per_second": 7.226, "step": 8600 }, { "epoch": 1.37, "grad_norm": 0.8024903535842896, "learning_rate": 2.8318559380695514e-05, "loss": 0.679, "step": 8605 }, { "epoch": 1.37, "grad_norm": 0.9138768911361694, "learning_rate": 2.8297849316413422e-05, "loss": 0.6586, "step": 8610 }, { "epoch": 1.38, "grad_norm": 0.6323714256286621, "learning_rate": 
2.8277136948643734e-05, "loss": 0.5754, "step": 8615 }, { "epoch": 1.38, "grad_norm": 0.9891794323921204, "learning_rate": 2.825642229185367e-05, "loss": 0.7783, "step": 8620 }, { "epoch": 1.38, "grad_norm": 0.8239473700523376, "learning_rate": 2.8235705360512034e-05, "loss": 0.7095, "step": 8625 }, { "epoch": 1.38, "grad_norm": 1.0162553787231445, "learning_rate": 2.8214986169089232e-05, "loss": 0.7844, "step": 8630 }, { "epoch": 1.38, "grad_norm": 0.9242910146713257, "learning_rate": 2.819426473205723e-05, "loss": 0.7479, "step": 8635 }, { "epoch": 1.38, "grad_norm": 0.6635788679122925, "learning_rate": 2.8173541063889587e-05, "loss": 0.7058, "step": 8640 }, { "epoch": 1.38, "grad_norm": 0.7192090153694153, "learning_rate": 2.8152815179061396e-05, "loss": 0.8564, "step": 8645 }, { "epoch": 1.38, "grad_norm": 2.3398587703704834, "learning_rate": 2.8132087092049316e-05, "loss": 0.8072, "step": 8650 }, { "epoch": 1.38, "grad_norm": 0.8398715257644653, "learning_rate": 2.811135681733153e-05, "loss": 0.7366, "step": 8655 }, { "epoch": 1.38, "grad_norm": 0.9099376201629639, "learning_rate": 2.8090624369387776e-05, "loss": 0.661, "step": 8660 }, { "epoch": 1.38, "grad_norm": 1.099500298500061, "learning_rate": 2.806988976269927e-05, "loss": 0.8471, "step": 8665 }, { "epoch": 1.38, "grad_norm": 0.8880857229232788, "learning_rate": 2.8049153011748757e-05, "loss": 0.8047, "step": 8670 }, { "epoch": 1.38, "grad_norm": 0.6709883809089661, "learning_rate": 2.8028414131020492e-05, "loss": 0.6804, "step": 8675 }, { "epoch": 1.39, "grad_norm": 0.7524588108062744, "learning_rate": 2.80076731350002e-05, "loss": 0.6399, "step": 8680 }, { "epoch": 1.39, "grad_norm": 0.844389021396637, "learning_rate": 2.7986930038175084e-05, "loss": 0.5184, "step": 8685 }, { "epoch": 1.39, "grad_norm": 1.387044072151184, "learning_rate": 2.7966184855033827e-05, "loss": 0.7384, "step": 8690 }, { "epoch": 1.39, "grad_norm": 0.4819062054157257, "learning_rate": 2.7945437600066556e-05, "loss": 0.8486, "step": 8695 }, { "epoch": 1.39, "grad_norm": 0.5254135727882385, "learning_rate": 2.792468828776484e-05, "loss": 0.5409, "step": 8700 }, { "epoch": 1.39, "eval_loss": 0.6987185478210449, "eval_runtime": 96.4354, "eval_samples_per_second": 7.228, "eval_steps_per_second": 7.228, "step": 8700 }, { "epoch": 1.39, "grad_norm": 0.8113055229187012, "learning_rate": 2.7903936932621717e-05, "loss": 0.4735, "step": 8705 }, { "epoch": 1.39, "grad_norm": 0.7469605207443237, "learning_rate": 2.7883183549131613e-05, "loss": 0.6068, "step": 8710 }, { "epoch": 1.39, "grad_norm": 0.925462007522583, "learning_rate": 2.7862428151790394e-05, "loss": 0.7301, "step": 8715 }, { "epoch": 1.39, "grad_norm": 0.9389353394508362, "learning_rate": 2.7841670755095323e-05, "loss": 0.7569, "step": 8720 }, { "epoch": 1.39, "grad_norm": 0.7033401131629944, "learning_rate": 2.7820911373545074e-05, "loss": 0.8086, "step": 8725 }, { "epoch": 1.39, "grad_norm": 0.9423335194587708, "learning_rate": 2.7800150021639674e-05, "loss": 0.666, "step": 8730 }, { "epoch": 1.39, "grad_norm": 0.5969627499580383, "learning_rate": 2.7779386713880566e-05, "loss": 0.8106, "step": 8735 }, { "epoch": 1.4, "grad_norm": 0.9117034673690796, "learning_rate": 2.7758621464770552e-05, "loss": 0.6877, "step": 8740 }, { "epoch": 1.4, "grad_norm": 1.0278637409210205, "learning_rate": 2.7737854288813757e-05, "loss": 0.6363, "step": 8745 }, { "epoch": 1.4, "grad_norm": 0.8942778706550598, "learning_rate": 2.771708520051568e-05, "loss": 0.8134, "step": 8750 }, { "epoch": 1.4, "grad_norm": 
0.6770984530448914, "learning_rate": 2.769631421438315e-05, "loss": 0.6516, "step": 8755 }, { "epoch": 1.4, "grad_norm": 0.7041003108024597, "learning_rate": 2.7675541344924337e-05, "loss": 0.5647, "step": 8760 }, { "epoch": 1.4, "grad_norm": 0.8790050745010376, "learning_rate": 2.7654766606648702e-05, "loss": 0.8915, "step": 8765 }, { "epoch": 1.4, "grad_norm": 0.7827969193458557, "learning_rate": 2.7633990014067008e-05, "loss": 0.6071, "step": 8770 }, { "epoch": 1.4, "grad_norm": 0.996577262878418, "learning_rate": 2.761321158169134e-05, "loss": 0.604, "step": 8775 }, { "epoch": 1.4, "grad_norm": 0.9607555270195007, "learning_rate": 2.759243132403505e-05, "loss": 0.8923, "step": 8780 }, { "epoch": 1.4, "grad_norm": 0.7468242049217224, "learning_rate": 2.7571649255612774e-05, "loss": 0.7277, "step": 8785 }, { "epoch": 1.4, "grad_norm": 0.6365399956703186, "learning_rate": 2.7550865390940395e-05, "loss": 0.7278, "step": 8790 }, { "epoch": 1.4, "grad_norm": 0.8831495046615601, "learning_rate": 2.753007974453508e-05, "loss": 0.5988, "step": 8795 }, { "epoch": 1.4, "grad_norm": 0.8448741436004639, "learning_rate": 2.7509292330915216e-05, "loss": 0.8848, "step": 8800 }, { "epoch": 1.4, "eval_loss": 0.6976245045661926, "eval_runtime": 96.6807, "eval_samples_per_second": 7.209, "eval_steps_per_second": 7.209, "step": 8800 }, { "epoch": 1.41, "grad_norm": 1.0347315073013306, "learning_rate": 2.7488503164600427e-05, "loss": 0.7309, "step": 8805 }, { "epoch": 1.41, "grad_norm": 1.0705949068069458, "learning_rate": 2.7467712260111567e-05, "loss": 0.635, "step": 8810 }, { "epoch": 1.41, "grad_norm": 1.0177220106124878, "learning_rate": 2.7446919631970718e-05, "loss": 0.689, "step": 8815 }, { "epoch": 1.41, "grad_norm": 0.7912683486938477, "learning_rate": 2.742612529470114e-05, "loss": 0.893, "step": 8820 }, { "epoch": 1.41, "grad_norm": 0.7678771615028381, "learning_rate": 2.7405329262827296e-05, "loss": 0.6563, "step": 8825 }, { "epoch": 1.41, "grad_norm": 0.6450463533401489, "learning_rate": 2.738453155087483e-05, "loss": 0.6637, "step": 8830 }, { "epoch": 1.41, "grad_norm": 0.9644352197647095, "learning_rate": 2.7363732173370586e-05, "loss": 0.7377, "step": 8835 }, { "epoch": 1.41, "grad_norm": 0.7947851419448853, "learning_rate": 2.7342931144842533e-05, "loss": 0.6275, "step": 8840 }, { "epoch": 1.41, "grad_norm": 0.9577487111091614, "learning_rate": 2.7322128479819815e-05, "loss": 0.7928, "step": 8845 }, { "epoch": 1.41, "grad_norm": 0.8619856834411621, "learning_rate": 2.7301324192832707e-05, "loss": 0.6892, "step": 8850 }, { "epoch": 1.41, "grad_norm": 0.6695562601089478, "learning_rate": 2.7280518298412628e-05, "loss": 0.5982, "step": 8855 }, { "epoch": 1.41, "grad_norm": 0.6930350065231323, "learning_rate": 2.7259710811092126e-05, "loss": 0.8486, "step": 8860 }, { "epoch": 1.42, "grad_norm": 0.7074466347694397, "learning_rate": 2.7238901745404844e-05, "loss": 0.6394, "step": 8865 }, { "epoch": 1.42, "grad_norm": 0.7025793194770813, "learning_rate": 2.7218091115885523e-05, "loss": 0.7898, "step": 8870 }, { "epoch": 1.42, "grad_norm": 0.8207305073738098, "learning_rate": 2.719727893707002e-05, "loss": 0.5337, "step": 8875 }, { "epoch": 1.42, "grad_norm": 0.752471923828125, "learning_rate": 2.7176465223495268e-05, "loss": 0.8052, "step": 8880 }, { "epoch": 1.42, "grad_norm": 0.8493356704711914, "learning_rate": 2.7155649989699254e-05, "loss": 0.7002, "step": 8885 }, { "epoch": 1.42, "grad_norm": 1.0340080261230469, "learning_rate": 2.713483325022104e-05, "loss": 0.9414, "step": 8890 }, { 
"epoch": 1.42, "grad_norm": 0.7286359071731567, "learning_rate": 2.711401501960075e-05, "loss": 0.6413, "step": 8895 }, { "epoch": 1.42, "grad_norm": 0.7379997372627258, "learning_rate": 2.7093195312379537e-05, "loss": 0.5739, "step": 8900 }, { "epoch": 1.42, "eval_loss": 0.6970831155776978, "eval_runtime": 96.7643, "eval_samples_per_second": 7.203, "eval_steps_per_second": 7.203, "step": 8900 }, { "epoch": 1.42, "grad_norm": 1.2503465414047241, "learning_rate": 2.7072374143099573e-05, "loss": 0.8559, "step": 8905 }, { "epoch": 1.42, "grad_norm": 0.6661009788513184, "learning_rate": 2.705155152630407e-05, "loss": 0.788, "step": 8910 }, { "epoch": 1.42, "grad_norm": 0.6985836625099182, "learning_rate": 2.7030727476537256e-05, "loss": 0.7824, "step": 8915 }, { "epoch": 1.42, "grad_norm": 0.7565920948982239, "learning_rate": 2.7009902008344344e-05, "loss": 0.6379, "step": 8920 }, { "epoch": 1.42, "grad_norm": 1.0957528352737427, "learning_rate": 2.6989075136271535e-05, "loss": 0.7973, "step": 8925 }, { "epoch": 1.43, "grad_norm": 1.3178807497024536, "learning_rate": 2.6968246874866026e-05, "loss": 0.8141, "step": 8930 }, { "epoch": 1.43, "grad_norm": 0.6984952092170715, "learning_rate": 2.6947417238675987e-05, "loss": 0.7697, "step": 8935 }, { "epoch": 1.43, "grad_norm": 0.9059376120567322, "learning_rate": 2.6926586242250527e-05, "loss": 0.6771, "step": 8940 }, { "epoch": 1.43, "grad_norm": 1.0285694599151611, "learning_rate": 2.690575390013971e-05, "loss": 0.7465, "step": 8945 }, { "epoch": 1.43, "grad_norm": 0.7756787538528442, "learning_rate": 2.6884920226894572e-05, "loss": 0.7743, "step": 8950 }, { "epoch": 1.43, "grad_norm": 0.7653606534004211, "learning_rate": 2.6864085237067037e-05, "loss": 0.8444, "step": 8955 }, { "epoch": 1.43, "grad_norm": 1.0834332704544067, "learning_rate": 2.684324894520996e-05, "loss": 0.6053, "step": 8960 }, { "epoch": 1.43, "grad_norm": 0.7883355021476746, "learning_rate": 2.682241136587712e-05, "loss": 0.6907, "step": 8965 }, { "epoch": 1.43, "grad_norm": 0.7430680394172668, "learning_rate": 2.6801572513623193e-05, "loss": 0.833, "step": 8970 }, { "epoch": 1.43, "grad_norm": 0.8970258235931396, "learning_rate": 2.678073240300372e-05, "loss": 0.777, "step": 8975 }, { "epoch": 1.43, "grad_norm": 0.7223610281944275, "learning_rate": 2.6759891048575147e-05, "loss": 0.579, "step": 8980 }, { "epoch": 1.43, "grad_norm": 0.9050683379173279, "learning_rate": 2.673904846489478e-05, "loss": 0.7313, "step": 8985 }, { "epoch": 1.43, "grad_norm": 0.8505553603172302, "learning_rate": 2.6718204666520786e-05, "loss": 0.6838, "step": 8990 }, { "epoch": 1.44, "grad_norm": 0.7363889813423157, "learning_rate": 2.669735966801217e-05, "loss": 0.7082, "step": 8995 }, { "epoch": 1.44, "grad_norm": 0.7967817783355713, "learning_rate": 2.6676513483928788e-05, "loss": 0.728, "step": 9000 }, { "epoch": 1.44, "eval_loss": 0.6963269114494324, "eval_runtime": 96.6643, "eval_samples_per_second": 7.211, "eval_steps_per_second": 7.211, "step": 9000 }, { "epoch": 1.44, "grad_norm": 1.0329532623291016, "learning_rate": 2.665566612883132e-05, "loss": 0.5401, "step": 9005 }, { "epoch": 1.44, "grad_norm": 0.6207738518714905, "learning_rate": 2.6634817617281262e-05, "loss": 0.735, "step": 9010 }, { "epoch": 1.44, "grad_norm": 0.8492811918258667, "learning_rate": 2.6613967963840925e-05, "loss": 0.6334, "step": 9015 }, { "epoch": 1.44, "grad_norm": 0.7099390029907227, "learning_rate": 2.6593117183073406e-05, "loss": 0.6005, "step": 9020 }, { "epoch": 1.44, "grad_norm": 0.7716400623321533, 
"learning_rate": 2.657226528954259e-05, "loss": 0.5958, "step": 9025 }, { "epoch": 1.44, "grad_norm": 0.6929382681846619, "learning_rate": 2.655141229781315e-05, "loss": 0.7592, "step": 9030 }, { "epoch": 1.44, "grad_norm": 0.6694722771644592, "learning_rate": 2.6530558222450525e-05, "loss": 0.6927, "step": 9035 }, { "epoch": 1.44, "grad_norm": 0.8594262599945068, "learning_rate": 2.6509703078020892e-05, "loss": 0.9045, "step": 9040 }, { "epoch": 1.44, "grad_norm": 0.7283955812454224, "learning_rate": 2.64888468790912e-05, "loss": 0.7548, "step": 9045 }, { "epoch": 1.44, "grad_norm": 0.9150108098983765, "learning_rate": 2.646798964022913e-05, "loss": 0.6196, "step": 9050 }, { "epoch": 1.45, "grad_norm": 0.8249070644378662, "learning_rate": 2.6447131376003076e-05, "loss": 0.7259, "step": 9055 }, { "epoch": 1.45, "grad_norm": 0.8211736679077148, "learning_rate": 2.642627210098214e-05, "loss": 0.7903, "step": 9060 }, { "epoch": 1.45, "grad_norm": 0.9737334251403809, "learning_rate": 2.6405411829736165e-05, "loss": 0.8098, "step": 9065 }, { "epoch": 1.45, "grad_norm": 0.6321923136711121, "learning_rate": 2.638455057683567e-05, "loss": 0.6646, "step": 9070 }, { "epoch": 1.45, "grad_norm": 1.0885649919509888, "learning_rate": 2.6363688356851847e-05, "loss": 0.671, "step": 9075 }, { "epoch": 1.45, "grad_norm": 1.0221432447433472, "learning_rate": 2.634282518435658e-05, "loss": 0.8984, "step": 9080 }, { "epoch": 1.45, "grad_norm": 0.6629742980003357, "learning_rate": 2.632196107392242e-05, "loss": 0.7947, "step": 9085 }, { "epoch": 1.45, "grad_norm": 0.847280740737915, "learning_rate": 2.6301096040122565e-05, "loss": 0.7693, "step": 9090 }, { "epoch": 1.45, "grad_norm": 1.0186293125152588, "learning_rate": 2.628023009753086e-05, "loss": 0.8952, "step": 9095 }, { "epoch": 1.45, "grad_norm": 0.9672142863273621, "learning_rate": 2.6259363260721776e-05, "loss": 0.7503, "step": 9100 }, { "epoch": 1.45, "eval_loss": 0.6952997446060181, "eval_runtime": 96.6663, "eval_samples_per_second": 7.21, "eval_steps_per_second": 7.21, "step": 9100 }, { "epoch": 1.45, "grad_norm": 0.8531110286712646, "learning_rate": 2.6238495544270435e-05, "loss": 0.5878, "step": 9105 }, { "epoch": 1.45, "grad_norm": 0.7663474082946777, "learning_rate": 2.6217626962752555e-05, "loss": 0.7022, "step": 9110 }, { "epoch": 1.45, "grad_norm": 0.7978513836860657, "learning_rate": 2.6196757530744443e-05, "loss": 0.6604, "step": 9115 }, { "epoch": 1.46, "grad_norm": 0.7127459645271301, "learning_rate": 2.6175887262823023e-05, "loss": 0.7602, "step": 9120 }, { "epoch": 1.46, "grad_norm": 0.6364534497261047, "learning_rate": 2.615501617356581e-05, "loss": 0.8812, "step": 9125 }, { "epoch": 1.46, "grad_norm": 0.9094502329826355, "learning_rate": 2.613414427755086e-05, "loss": 0.655, "step": 9130 }, { "epoch": 1.46, "grad_norm": 0.9474580883979797, "learning_rate": 2.611327158935683e-05, "loss": 0.9989, "step": 9135 }, { "epoch": 1.46, "grad_norm": 1.2897037267684937, "learning_rate": 2.6092398123562895e-05, "loss": 0.9347, "step": 9140 }, { "epoch": 1.46, "grad_norm": 0.690949022769928, "learning_rate": 2.60715238947488e-05, "loss": 0.7953, "step": 9145 }, { "epoch": 1.46, "grad_norm": 0.8102849721908569, "learning_rate": 2.6050648917494808e-05, "loss": 0.716, "step": 9150 }, { "epoch": 1.46, "grad_norm": 0.8799526691436768, "learning_rate": 2.6029773206381718e-05, "loss": 0.7998, "step": 9155 }, { "epoch": 1.46, "grad_norm": 0.7595265507698059, "learning_rate": 2.600889677599082e-05, "loss": 0.7642, "step": 9160 }, { "epoch": 1.46, 
"grad_norm": 0.819495439529419, "learning_rate": 2.5988019640903927e-05, "loss": 0.7664, "step": 9165 }, { "epoch": 1.46, "grad_norm": 0.9698195457458496, "learning_rate": 2.596714181570335e-05, "loss": 0.6731, "step": 9170 }, { "epoch": 1.46, "grad_norm": 1.1988072395324707, "learning_rate": 2.5946263314971842e-05, "loss": 0.8619, "step": 9175 }, { "epoch": 1.47, "grad_norm": 1.3569873571395874, "learning_rate": 2.5925384153292682e-05, "loss": 0.6456, "step": 9180 }, { "epoch": 1.47, "grad_norm": 1.0610495805740356, "learning_rate": 2.5904504345249565e-05, "loss": 0.8024, "step": 9185 }, { "epoch": 1.47, "grad_norm": 0.7540451288223267, "learning_rate": 2.5883623905426664e-05, "loss": 1.0539, "step": 9190 }, { "epoch": 1.47, "grad_norm": 0.7257586121559143, "learning_rate": 2.5862742848408583e-05, "loss": 0.7662, "step": 9195 }, { "epoch": 1.47, "grad_norm": 0.8342102766036987, "learning_rate": 2.5841861188780355e-05, "loss": 0.706, "step": 9200 }, { "epoch": 1.47, "eval_loss": 0.6950993537902832, "eval_runtime": 96.6378, "eval_samples_per_second": 7.213, "eval_steps_per_second": 7.213, "step": 9200 }, { "epoch": 1.47, "grad_norm": 0.7215315699577332, "learning_rate": 2.582097894112745e-05, "loss": 0.5098, "step": 9205 }, { "epoch": 1.47, "grad_norm": 0.9047237038612366, "learning_rate": 2.5800096120035732e-05, "loss": 0.7546, "step": 9210 }, { "epoch": 1.47, "grad_norm": 0.8365469574928284, "learning_rate": 2.577921274009146e-05, "loss": 0.9019, "step": 9215 }, { "epoch": 1.47, "grad_norm": 1.236525535583496, "learning_rate": 2.5758328815881312e-05, "loss": 0.5778, "step": 9220 }, { "epoch": 1.47, "grad_norm": 1.10982346534729, "learning_rate": 2.5737444361992312e-05, "loss": 0.642, "step": 9225 }, { "epoch": 1.47, "grad_norm": 0.8745054602622986, "learning_rate": 2.5716559393011885e-05, "loss": 0.8715, "step": 9230 }, { "epoch": 1.47, "grad_norm": 0.7256401181221008, "learning_rate": 2.569567392352778e-05, "loss": 0.6683, "step": 9235 }, { "epoch": 1.47, "grad_norm": 0.7779073715209961, "learning_rate": 2.5674787968128143e-05, "loss": 0.8675, "step": 9240 }, { "epoch": 1.48, "grad_norm": 0.9540502429008484, "learning_rate": 2.5653901541401415e-05, "loss": 0.797, "step": 9245 }, { "epoch": 1.48, "grad_norm": 0.5401924848556519, "learning_rate": 2.5633014657936394e-05, "loss": 0.5741, "step": 9250 }, { "epoch": 1.48, "grad_norm": 0.8894802331924438, "learning_rate": 2.5612127332322168e-05, "loss": 0.597, "step": 9255 }, { "epoch": 1.48, "grad_norm": 0.8277390599250793, "learning_rate": 2.5591239579148173e-05, "loss": 0.6365, "step": 9260 }, { "epoch": 1.48, "grad_norm": 0.8742364645004272, "learning_rate": 2.5570351413004125e-05, "loss": 0.7009, "step": 9265 }, { "epoch": 1.48, "grad_norm": 0.7853513360023499, "learning_rate": 2.5549462848480004e-05, "loss": 0.6336, "step": 9270 }, { "epoch": 1.48, "grad_norm": 0.785223662853241, "learning_rate": 2.5528573900166115e-05, "loss": 0.6963, "step": 9275 }, { "epoch": 1.48, "grad_norm": 0.6762696504592896, "learning_rate": 2.5507684582653e-05, "loss": 0.8248, "step": 9280 }, { "epoch": 1.48, "grad_norm": 1.2204557657241821, "learning_rate": 2.5486794910531458e-05, "loss": 0.6296, "step": 9285 }, { "epoch": 1.48, "grad_norm": 0.8152850270271301, "learning_rate": 2.5465904898392564e-05, "loss": 0.7962, "step": 9290 }, { "epoch": 1.48, "grad_norm": 0.834222137928009, "learning_rate": 2.5445014560827578e-05, "loss": 0.7165, "step": 9295 }, { "epoch": 1.48, "grad_norm": 0.7482254505157471, "learning_rate": 2.5424123912428054e-05, "loss": 0.7926, 
"step": 9300 }, { "epoch": 1.48, "eval_loss": 0.6945102214813232, "eval_runtime": 96.5254, "eval_samples_per_second": 7.221, "eval_steps_per_second": 7.221, "step": 9300 }, { "epoch": 1.49, "grad_norm": 0.741940975189209, "learning_rate": 2.5403232967785705e-05, "loss": 0.5465, "step": 9305 }, { "epoch": 1.49, "grad_norm": 0.8186244368553162, "learning_rate": 2.5382341741492494e-05, "loss": 0.6283, "step": 9310 }, { "epoch": 1.49, "grad_norm": 0.7384134531021118, "learning_rate": 2.536145024814054e-05, "loss": 0.6346, "step": 9315 }, { "epoch": 1.49, "grad_norm": 0.6944209337234497, "learning_rate": 2.5340558502322187e-05, "loss": 0.561, "step": 9320 }, { "epoch": 1.49, "grad_norm": 0.6684926152229309, "learning_rate": 2.531966651862993e-05, "loss": 0.6977, "step": 9325 }, { "epoch": 1.49, "grad_norm": 0.822957456111908, "learning_rate": 2.5298774311656443e-05, "loss": 0.618, "step": 9330 }, { "epoch": 1.49, "grad_norm": 1.0287977457046509, "learning_rate": 2.5277881895994547e-05, "loss": 0.6704, "step": 9335 }, { "epoch": 1.49, "grad_norm": 0.7576120495796204, "learning_rate": 2.5256989286237216e-05, "loss": 0.5916, "step": 9340 }, { "epoch": 1.49, "grad_norm": 0.6524910926818848, "learning_rate": 2.5236096496977564e-05, "loss": 0.6364, "step": 9345 }, { "epoch": 1.49, "grad_norm": 0.776617705821991, "learning_rate": 2.5215203542808803e-05, "loss": 0.5652, "step": 9350 }, { "epoch": 1.49, "grad_norm": 0.645124614238739, "learning_rate": 2.5194310438324298e-05, "loss": 0.7498, "step": 9355 }, { "epoch": 1.49, "grad_norm": 1.037840723991394, "learning_rate": 2.5173417198117498e-05, "loss": 0.6456, "step": 9360 }, { "epoch": 1.49, "grad_norm": 0.8559253215789795, "learning_rate": 2.515252383678195e-05, "loss": 0.8085, "step": 9365 }, { "epoch": 1.5, "grad_norm": 0.9479286670684814, "learning_rate": 2.513163036891128e-05, "loss": 0.8607, "step": 9370 }, { "epoch": 1.5, "grad_norm": 0.6962096691131592, "learning_rate": 2.51107368090992e-05, "loss": 0.7528, "step": 9375 }, { "epoch": 1.5, "grad_norm": 0.7785203456878662, "learning_rate": 2.508984317193948e-05, "loss": 0.508, "step": 9380 }, { "epoch": 1.5, "grad_norm": 0.7632963061332703, "learning_rate": 2.5068949472025943e-05, "loss": 0.7146, "step": 9385 }, { "epoch": 1.5, "grad_norm": 0.7676249742507935, "learning_rate": 2.504805572395245e-05, "loss": 0.7835, "step": 9390 }, { "epoch": 1.5, "grad_norm": 0.6752475500106812, "learning_rate": 2.5027161942312922e-05, "loss": 0.6139, "step": 9395 }, { "epoch": 1.5, "grad_norm": 0.7635406255722046, "learning_rate": 2.5006268141701274e-05, "loss": 0.6019, "step": 9400 }, { "epoch": 1.5, "eval_loss": 0.6944969296455383, "eval_runtime": 96.5995, "eval_samples_per_second": 7.215, "eval_steps_per_second": 7.215, "step": 9400 }, { "epoch": 1.5, "grad_norm": 0.8899182081222534, "learning_rate": 2.4985374336711438e-05, "loss": 0.738, "step": 9405 }, { "epoch": 1.5, "grad_norm": 1.17216157913208, "learning_rate": 2.496448054193735e-05, "loss": 0.6101, "step": 9410 }, { "epoch": 1.5, "grad_norm": 0.8314846754074097, "learning_rate": 2.4943586771972965e-05, "loss": 0.69, "step": 9415 }, { "epoch": 1.5, "grad_norm": 0.7142031192779541, "learning_rate": 2.4922693041412202e-05, "loss": 0.5057, "step": 9420 }, { "epoch": 1.5, "grad_norm": 1.2739388942718506, "learning_rate": 2.4901799364848925e-05, "loss": 0.4485, "step": 9425 }, { "epoch": 1.51, "grad_norm": 0.9823365211486816, "learning_rate": 2.4880905756877008e-05, "loss": 0.7312, "step": 9430 }, { "epoch": 1.51, "grad_norm": 1.3676027059555054, 
"learning_rate": 2.486001223209024e-05, "loss": 0.6295, "step": 9435 }, { "epoch": 1.51, "grad_norm": 0.8690130710601807, "learning_rate": 2.4839118805082377e-05, "loss": 0.6172, "step": 9440 }, { "epoch": 1.51, "grad_norm": 0.7541719079017639, "learning_rate": 2.4818225490447097e-05, "loss": 0.6258, "step": 9445 }, { "epoch": 1.51, "grad_norm": 0.6225134134292603, "learning_rate": 2.4797332302777993e-05, "loss": 0.5882, "step": 9450 }, { "epoch": 1.51, "grad_norm": 0.886218786239624, "learning_rate": 2.4776439256668576e-05, "loss": 0.5042, "step": 9455 }, { "epoch": 1.51, "grad_norm": 0.7720217108726501, "learning_rate": 2.4755546366712272e-05, "loss": 0.8231, "step": 9460 }, { "epoch": 1.51, "grad_norm": 0.7719532251358032, "learning_rate": 2.4734653647502363e-05, "loss": 0.5943, "step": 9465 }, { "epoch": 1.51, "grad_norm": 0.7584754228591919, "learning_rate": 2.4713761113632032e-05, "loss": 0.6572, "step": 9470 }, { "epoch": 1.51, "grad_norm": 0.8645241856575012, "learning_rate": 2.4692868779694358e-05, "loss": 0.6461, "step": 9475 }, { "epoch": 1.51, "grad_norm": 0.8362308740615845, "learning_rate": 2.467197666028223e-05, "loss": 0.8308, "step": 9480 }, { "epoch": 1.51, "grad_norm": 0.881805956363678, "learning_rate": 2.465108476998842e-05, "loss": 0.84, "step": 9485 }, { "epoch": 1.51, "grad_norm": 0.7092866897583008, "learning_rate": 2.4630193123405542e-05, "loss": 0.6114, "step": 9490 }, { "epoch": 1.52, "grad_norm": 0.6957134008407593, "learning_rate": 2.4609301735126015e-05, "loss": 0.7143, "step": 9495 }, { "epoch": 1.52, "grad_norm": 1.1079517602920532, "learning_rate": 2.45884106197421e-05, "loss": 0.6707, "step": 9500 }, { "epoch": 1.52, "eval_loss": 0.694333553314209, "eval_runtime": 96.6934, "eval_samples_per_second": 7.208, "eval_steps_per_second": 7.208, "step": 9500 }, { "epoch": 1.52, "grad_norm": 0.9612390398979187, "learning_rate": 2.4567519791845857e-05, "loss": 0.776, "step": 9505 }, { "epoch": 1.52, "grad_norm": 1.011856198310852, "learning_rate": 2.4546629266029155e-05, "loss": 0.8204, "step": 9510 }, { "epoch": 1.52, "grad_norm": 0.9229696393013, "learning_rate": 2.4525739056883644e-05, "loss": 0.7631, "step": 9515 }, { "epoch": 1.52, "grad_norm": 0.7902365922927856, "learning_rate": 2.4504849179000746e-05, "loss": 0.7414, "step": 9520 }, { "epoch": 1.52, "grad_norm": 0.5655250549316406, "learning_rate": 2.448395964697166e-05, "loss": 0.7102, "step": 9525 }, { "epoch": 1.52, "grad_norm": 0.6864548921585083, "learning_rate": 2.4463070475387353e-05, "loss": 0.6807, "step": 9530 }, { "epoch": 1.52, "grad_norm": 0.9175466299057007, "learning_rate": 2.444218167883852e-05, "loss": 0.6063, "step": 9535 }, { "epoch": 1.52, "grad_norm": 0.7826316356658936, "learning_rate": 2.4421293271915608e-05, "loss": 0.8181, "step": 9540 }, { "epoch": 1.52, "grad_norm": 0.6215065121650696, "learning_rate": 2.4400405269208797e-05, "loss": 0.5949, "step": 9545 }, { "epoch": 1.52, "grad_norm": 0.8049483895301819, "learning_rate": 2.437951768530796e-05, "loss": 0.6341, "step": 9550 }, { "epoch": 1.53, "grad_norm": 0.45780548453330994, "learning_rate": 2.43586305348027e-05, "loss": 1.0438, "step": 9555 }, { "epoch": 1.53, "grad_norm": 0.7412901520729065, "learning_rate": 2.4337743832282323e-05, "loss": 0.7247, "step": 9560 }, { "epoch": 1.53, "grad_norm": 1.0728024244308472, "learning_rate": 2.431685759233579e-05, "loss": 0.5836, "step": 9565 }, { "epoch": 1.53, "grad_norm": 1.903080701828003, "learning_rate": 2.4295971829551766e-05, "loss": 0.8739, "step": 9570 }, { "epoch": 1.53, 
"grad_norm": 0.8286559581756592, "learning_rate": 2.427508655851859e-05, "loss": 0.8085, "step": 9575 }, { "epoch": 1.53, "grad_norm": 0.8452445864677429, "learning_rate": 2.4254201793824216e-05, "loss": 0.749, "step": 9580 }, { "epoch": 1.53, "grad_norm": 0.992088258266449, "learning_rate": 2.423331755005629e-05, "loss": 0.635, "step": 9585 }, { "epoch": 1.53, "grad_norm": 0.5679229497909546, "learning_rate": 2.4212433841802067e-05, "loss": 0.8887, "step": 9590 }, { "epoch": 1.53, "grad_norm": 0.7643200159072876, "learning_rate": 2.419155068364844e-05, "loss": 0.7613, "step": 9595 }, { "epoch": 1.53, "grad_norm": 0.7898627519607544, "learning_rate": 2.4170668090181915e-05, "loss": 0.7269, "step": 9600 }, { "epoch": 1.53, "eval_loss": 0.6939559578895569, "eval_runtime": 96.732, "eval_samples_per_second": 7.205, "eval_steps_per_second": 7.205, "step": 9600 }, { "epoch": 1.53, "grad_norm": 0.8673202395439148, "learning_rate": 2.4149786075988602e-05, "loss": 0.8336, "step": 9605 }, { "epoch": 1.53, "grad_norm": 0.8820798993110657, "learning_rate": 2.41289046556542e-05, "loss": 0.631, "step": 9610 }, { "epoch": 1.53, "grad_norm": 0.9562051296234131, "learning_rate": 2.410802384376402e-05, "loss": 0.637, "step": 9615 }, { "epoch": 1.54, "grad_norm": 0.8073970675468445, "learning_rate": 2.4087143654902893e-05, "loss": 0.7949, "step": 9620 }, { "epoch": 1.54, "grad_norm": 0.629291296005249, "learning_rate": 2.4066264103655283e-05, "loss": 0.8292, "step": 9625 }, { "epoch": 1.54, "grad_norm": 0.6791005730628967, "learning_rate": 2.404538520460517e-05, "loss": 0.867, "step": 9630 }, { "epoch": 1.54, "grad_norm": 0.8496805429458618, "learning_rate": 2.402450697233607e-05, "loss": 0.7857, "step": 9635 }, { "epoch": 1.54, "grad_norm": 0.7444857954978943, "learning_rate": 2.4003629421431052e-05, "loss": 0.658, "step": 9640 }, { "epoch": 1.54, "grad_norm": 1.0270099639892578, "learning_rate": 2.398275256647272e-05, "loss": 0.6766, "step": 9645 }, { "epoch": 1.54, "grad_norm": 0.839475691318512, "learning_rate": 2.3961876422043154e-05, "loss": 0.5771, "step": 9650 }, { "epoch": 1.54, "grad_norm": 0.7087832689285278, "learning_rate": 2.394100100272397e-05, "loss": 0.7797, "step": 9655 }, { "epoch": 1.54, "grad_norm": 0.8930874466896057, "learning_rate": 2.3920126323096275e-05, "loss": 0.7951, "step": 9660 }, { "epoch": 1.54, "grad_norm": 1.2568659782409668, "learning_rate": 2.389925239774064e-05, "loss": 0.7785, "step": 9665 }, { "epoch": 1.54, "grad_norm": 1.0711442232131958, "learning_rate": 2.3878379241237136e-05, "loss": 0.974, "step": 9670 }, { "epoch": 1.54, "grad_norm": 0.7370679974555969, "learning_rate": 2.385750686816526e-05, "loss": 0.7111, "step": 9675 }, { "epoch": 1.55, "grad_norm": 0.8300942182540894, "learning_rate": 2.384080954357563e-05, "loss": 0.9931, "step": 9680 }, { "epoch": 1.55, "grad_norm": 0.7908097505569458, "learning_rate": 2.3819938617419347e-05, "loss": 0.729, "step": 9685 }, { "epoch": 1.55, "grad_norm": 0.8966140747070312, "learning_rate": 2.379906851551441e-05, "loss": 0.7695, "step": 9690 }, { "epoch": 1.55, "grad_norm": 0.5384413003921509, "learning_rate": 2.3778199252438203e-05, "loss": 0.6235, "step": 9695 }, { "epoch": 1.55, "grad_norm": 0.7455211877822876, "learning_rate": 2.3757330842767546e-05, "loss": 0.7216, "step": 9700 }, { "epoch": 1.55, "eval_loss": 0.6923454999923706, "eval_runtime": 96.5713, "eval_samples_per_second": 7.217, "eval_steps_per_second": 7.217, "step": 9700 }, { "epoch": 1.55, "grad_norm": 1.0986971855163574, "learning_rate": 
2.3736463301078614e-05, "loss": 0.6697, "step": 9705 }, { "epoch": 1.55, "grad_norm": 0.8946139216423035, "learning_rate": 2.3715596641947022e-05, "loss": 0.6864, "step": 9710 }, { "epoch": 1.55, "grad_norm": 0.8130088448524475, "learning_rate": 2.3694730879947754e-05, "loss": 0.6585, "step": 9715 }, { "epoch": 1.55, "grad_norm": 0.8894361853599548, "learning_rate": 2.367386602965515e-05, "loss": 0.6961, "step": 9720 }, { "epoch": 1.55, "grad_norm": 0.7765643000602722, "learning_rate": 2.3653002105642922e-05, "loss": 0.6075, "step": 9725 }, { "epoch": 1.55, "grad_norm": 0.9015631079673767, "learning_rate": 2.3632139122484173e-05, "loss": 0.6439, "step": 9730 }, { "epoch": 1.55, "grad_norm": 1.1747032403945923, "learning_rate": 2.3611277094751283e-05, "loss": 0.6192, "step": 9735 }, { "epoch": 1.55, "grad_norm": 0.7363355159759521, "learning_rate": 2.359041603701601e-05, "loss": 1.036, "step": 9740 }, { "epoch": 1.56, "grad_norm": 0.9469441771507263, "learning_rate": 2.356955596384943e-05, "loss": 0.6255, "step": 9745 }, { "epoch": 1.56, "grad_norm": 0.9195448756217957, "learning_rate": 2.3548696889821907e-05, "loss": 0.7055, "step": 9750 }, { "epoch": 1.56, "grad_norm": 1.0644798278808594, "learning_rate": 2.352783882950315e-05, "loss": 0.7532, "step": 9755 }, { "epoch": 1.56, "grad_norm": 0.6971336603164673, "learning_rate": 2.3506981797462094e-05, "loss": 0.5817, "step": 9760 }, { "epoch": 1.56, "grad_norm": 0.7396060824394226, "learning_rate": 2.3486125808267025e-05, "loss": 0.6482, "step": 9765 }, { "epoch": 1.56, "grad_norm": 0.7229952216148376, "learning_rate": 2.3465270876485474e-05, "loss": 0.7125, "step": 9770 }, { "epoch": 1.56, "grad_norm": 0.7994909286499023, "learning_rate": 2.344441701668421e-05, "loss": 0.5666, "step": 9775 }, { "epoch": 1.56, "grad_norm": 0.6988499760627747, "learning_rate": 2.3423564243429282e-05, "loss": 0.8044, "step": 9780 }, { "epoch": 1.56, "grad_norm": 0.8466933369636536, "learning_rate": 2.340271257128598e-05, "loss": 0.6553, "step": 9785 }, { "epoch": 1.56, "grad_norm": 0.5719871520996094, "learning_rate": 2.3381862014818807e-05, "loss": 0.7292, "step": 9790 }, { "epoch": 1.56, "grad_norm": 0.9753498435020447, "learning_rate": 2.33610125885915e-05, "loss": 0.8397, "step": 9795 }, { "epoch": 1.56, "grad_norm": 0.608966052532196, "learning_rate": 2.3340164307167007e-05, "loss": 0.6394, "step": 9800 }, { "epoch": 1.56, "eval_loss": 0.6920195817947388, "eval_runtime": 96.5737, "eval_samples_per_second": 7.217, "eval_steps_per_second": 7.217, "step": 9800 }, { "epoch": 1.57, "grad_norm": 1.151438593864441, "learning_rate": 2.331931718510746e-05, "loss": 0.6101, "step": 9805 }, { "epoch": 1.57, "grad_norm": 0.9338546395301819, "learning_rate": 2.3298471236974202e-05, "loss": 0.8272, "step": 9810 }, { "epoch": 1.57, "grad_norm": 1.1721985340118408, "learning_rate": 2.327762647732776e-05, "loss": 0.6039, "step": 9815 }, { "epoch": 1.57, "grad_norm": 0.8202813863754272, "learning_rate": 2.3256782920727787e-05, "loss": 0.6222, "step": 9820 }, { "epoch": 1.57, "grad_norm": 0.5816913843154907, "learning_rate": 2.323594058173315e-05, "loss": 0.6952, "step": 9825 }, { "epoch": 1.57, "grad_norm": 1.4169448614120483, "learning_rate": 2.321509947490185e-05, "loss": 0.7452, "step": 9830 }, { "epoch": 1.57, "grad_norm": 0.9609310626983643, "learning_rate": 2.3194259614790998e-05, "loss": 0.6724, "step": 9835 }, { "epoch": 1.57, "grad_norm": 0.7673792243003845, "learning_rate": 2.317342101595687e-05, "loss": 0.9891, "step": 9840 }, { "epoch": 1.57, "grad_norm": 
0.8118376135826111, "learning_rate": 2.3152583692954835e-05, "loss": 0.6174, "step": 9845 }, { "epoch": 1.57, "grad_norm": 0.775577187538147, "learning_rate": 2.3131747660339394e-05, "loss": 0.6912, "step": 9850 }, { "epoch": 1.57, "grad_norm": 0.860774040222168, "learning_rate": 2.3110912932664138e-05, "loss": 0.8078, "step": 9855 }, { "epoch": 1.57, "grad_norm": 0.7636804580688477, "learning_rate": 2.3090079524481734e-05, "loss": 0.6305, "step": 9860 }, { "epoch": 1.57, "grad_norm": 1.1903213262557983, "learning_rate": 2.3069247450343943e-05, "loss": 0.7058, "step": 9865 }, { "epoch": 1.58, "grad_norm": 0.9413545727729797, "learning_rate": 2.30484167248016e-05, "loss": 0.7658, "step": 9870 }, { "epoch": 1.58, "grad_norm": 0.8364406228065491, "learning_rate": 2.3027587362404558e-05, "loss": 0.6607, "step": 9875 }, { "epoch": 1.58, "grad_norm": 1.1050459146499634, "learning_rate": 2.3006759377701765e-05, "loss": 0.8299, "step": 9880 }, { "epoch": 1.58, "grad_norm": 0.7848144173622131, "learning_rate": 2.2985932785241203e-05, "loss": 0.7717, "step": 9885 }, { "epoch": 1.58, "grad_norm": 0.9436622858047485, "learning_rate": 2.2965107599569836e-05, "loss": 0.8987, "step": 9890 }, { "epoch": 1.58, "grad_norm": 0.8566385507583618, "learning_rate": 2.2944283835233697e-05, "loss": 0.6131, "step": 9895 }, { "epoch": 1.58, "grad_norm": 0.8907533288002014, "learning_rate": 2.29234615067778e-05, "loss": 0.7608, "step": 9900 }, { "epoch": 1.58, "eval_loss": 0.6909372210502625, "eval_runtime": 96.6101, "eval_samples_per_second": 7.215, "eval_steps_per_second": 7.215, "step": 9900 }, { "epoch": 1.58, "grad_norm": 0.7827823758125305, "learning_rate": 2.290264062874616e-05, "loss": 0.7079, "step": 9905 }, { "epoch": 1.58, "grad_norm": 1.2197152376174927, "learning_rate": 2.288182121568178e-05, "loss": 0.9356, "step": 9910 }, { "epoch": 1.58, "grad_norm": 0.8879411816596985, "learning_rate": 2.286100328212665e-05, "loss": 0.6318, "step": 9915 }, { "epoch": 1.58, "grad_norm": 0.6319537162780762, "learning_rate": 2.28401868426217e-05, "loss": 0.5469, "step": 9920 }, { "epoch": 1.58, "grad_norm": 1.0840219259262085, "learning_rate": 2.281937191170686e-05, "loss": 0.5847, "step": 9925 }, { "epoch": 1.58, "grad_norm": 0.4195910692214966, "learning_rate": 2.2798558503920948e-05, "loss": 0.7168, "step": 9930 }, { "epoch": 1.59, "grad_norm": 1.0517491102218628, "learning_rate": 2.2777746633801764e-05, "loss": 0.5673, "step": 9935 }, { "epoch": 1.59, "grad_norm": 0.8244277834892273, "learning_rate": 2.2756936315886024e-05, "loss": 0.7266, "step": 9940 }, { "epoch": 1.59, "grad_norm": 0.8401049375534058, "learning_rate": 2.2736127564709346e-05, "loss": 0.8369, "step": 9945 }, { "epoch": 1.59, "grad_norm": 0.9072527289390564, "learning_rate": 2.2715320394806265e-05, "loss": 0.664, "step": 9950 }, { "epoch": 1.59, "grad_norm": 0.6931943893432617, "learning_rate": 2.269451482071022e-05, "loss": 1.0078, "step": 9955 }, { "epoch": 1.59, "grad_norm": 0.7260577082633972, "learning_rate": 2.2673710856953512e-05, "loss": 0.5779, "step": 9960 }, { "epoch": 1.59, "grad_norm": 0.784848690032959, "learning_rate": 2.2652908518067336e-05, "loss": 0.7065, "step": 9965 }, { "epoch": 1.59, "grad_norm": 0.9373456239700317, "learning_rate": 2.2632107818581757e-05, "loss": 0.8002, "step": 9970 }, { "epoch": 1.59, "grad_norm": 0.6380440592765808, "learning_rate": 2.261130877302566e-05, "loss": 0.7027, "step": 9975 }, { "epoch": 1.59, "grad_norm": 0.6546103954315186, "learning_rate": 2.259051139592681e-05, "loss": 0.6839, "step": 9980 }, 
{ "epoch": 1.59, "grad_norm": 0.6351701021194458, "learning_rate": 2.2569715701811814e-05, "loss": 0.7078, "step": 9985 }, { "epoch": 1.59, "grad_norm": 0.8696810603141785, "learning_rate": 2.2548921705206057e-05, "loss": 0.7586, "step": 9990 }, { "epoch": 1.6, "grad_norm": 0.7104160785675049, "learning_rate": 2.2528129420633785e-05, "loss": 0.7514, "step": 9995 }, { "epoch": 1.6, "grad_norm": 0.8413506746292114, "learning_rate": 2.2507338862618012e-05, "loss": 1.034, "step": 10000 }, { "epoch": 1.6, "eval_loss": 0.6908321380615234, "eval_runtime": 96.5988, "eval_samples_per_second": 7.215, "eval_steps_per_second": 7.215, "step": 10000 }, { "epoch": 1.6, "grad_norm": 0.9034035205841064, "learning_rate": 2.2486550045680577e-05, "loss": 0.5622, "step": 10005 }, { "epoch": 1.6, "grad_norm": 0.9193819165229797, "learning_rate": 2.246576298434209e-05, "loss": 0.8383, "step": 10010 }, { "epoch": 1.6, "grad_norm": 0.9977108836174011, "learning_rate": 2.244497769312193e-05, "loss": 0.761, "step": 10015 }, { "epoch": 1.6, "grad_norm": 1.0438766479492188, "learning_rate": 2.242419418653824e-05, "loss": 0.7652, "step": 10020 }, { "epoch": 1.6, "grad_norm": 0.755634069442749, "learning_rate": 2.2403412479107936e-05, "loss": 0.653, "step": 10025 }, { "epoch": 1.6, "grad_norm": 0.7939738631248474, "learning_rate": 2.2382632585346637e-05, "loss": 0.7207, "step": 10030 }, { "epoch": 1.6, "grad_norm": 0.7428807020187378, "learning_rate": 2.236185451976873e-05, "loss": 0.8838, "step": 10035 }, { "epoch": 1.6, "grad_norm": 1.3207485675811768, "learning_rate": 2.2341078296887337e-05, "loss": 0.7495, "step": 10040 }, { "epoch": 1.6, "grad_norm": 0.7260899543762207, "learning_rate": 2.2320303931214238e-05, "loss": 0.7531, "step": 10045 }, { "epoch": 1.6, "grad_norm": 0.6951782703399658, "learning_rate": 2.2299531437259956e-05, "loss": 0.8503, "step": 10050 }, { "epoch": 1.6, "grad_norm": 1.0433266162872314, "learning_rate": 2.227876082953372e-05, "loss": 0.6963, "step": 10055 }, { "epoch": 1.61, "grad_norm": 0.798190712928772, "learning_rate": 2.225799212254339e-05, "loss": 0.5295, "step": 10060 }, { "epoch": 1.61, "grad_norm": 0.8649576902389526, "learning_rate": 2.2237225330795554e-05, "loss": 0.5867, "step": 10065 }, { "epoch": 1.61, "grad_norm": 0.7552849054336548, "learning_rate": 2.2216460468795427e-05, "loss": 0.9495, "step": 10070 }, { "epoch": 1.61, "grad_norm": 0.8135244846343994, "learning_rate": 2.2195697551046886e-05, "loss": 0.6632, "step": 10075 }, { "epoch": 1.61, "grad_norm": 0.710469126701355, "learning_rate": 2.2174936592052457e-05, "loss": 0.5685, "step": 10080 }, { "epoch": 1.61, "grad_norm": 1.0271815061569214, "learning_rate": 2.2154177606313278e-05, "loss": 0.6192, "step": 10085 }, { "epoch": 1.61, "grad_norm": 3.2714290618896484, "learning_rate": 2.2133420608329127e-05, "loss": 0.7053, "step": 10090 }, { "epoch": 1.61, "grad_norm": 0.8458003401756287, "learning_rate": 2.21126656125984e-05, "loss": 0.7253, "step": 10095 }, { "epoch": 1.61, "grad_norm": 1.1373984813690186, "learning_rate": 2.2091912633618064e-05, "loss": 0.7934, "step": 10100 }, { "epoch": 1.61, "eval_loss": 0.6892163157463074, "eval_runtime": 96.5872, "eval_samples_per_second": 7.216, "eval_steps_per_second": 7.216, "step": 10100 }, { "epoch": 1.61, "grad_norm": 0.673150897026062, "learning_rate": 2.2071161685883706e-05, "loss": 0.733, "step": 10105 }, { "epoch": 1.61, "grad_norm": 0.8580716848373413, "learning_rate": 2.205041278388949e-05, "loss": 0.611, "step": 10110 }, { "epoch": 1.61, "grad_norm": 0.7604095339775085, 
"learning_rate": 2.2029665942128138e-05, "loss": 0.7519, "step": 10115 }, { "epoch": 1.62, "grad_norm": 0.7221022844314575, "learning_rate": 2.2008921175090942e-05, "loss": 0.624, "step": 10120 }, { "epoch": 1.62, "grad_norm": 0.7888193130493164, "learning_rate": 2.198817849726775e-05, "loss": 0.7533, "step": 10125 }, { "epoch": 1.62, "grad_norm": 1.003176212310791, "learning_rate": 2.196743792314693e-05, "loss": 0.7613, "step": 10130 }, { "epoch": 1.62, "grad_norm": 0.641484260559082, "learning_rate": 2.19466994672154e-05, "loss": 0.5488, "step": 10135 }, { "epoch": 1.62, "grad_norm": 0.6598163843154907, "learning_rate": 2.192596314395861e-05, "loss": 0.7641, "step": 10140 }, { "epoch": 1.62, "grad_norm": 0.9290664792060852, "learning_rate": 2.190522896786048e-05, "loss": 0.6256, "step": 10145 }, { "epoch": 1.62, "grad_norm": 0.5635184049606323, "learning_rate": 2.1884496953403466e-05, "loss": 0.8748, "step": 10150 }, { "epoch": 1.62, "grad_norm": 0.88919997215271, "learning_rate": 2.1863767115068496e-05, "loss": 0.8393, "step": 10155 }, { "epoch": 1.62, "grad_norm": 0.7918489575386047, "learning_rate": 2.1843039467334986e-05, "loss": 0.8075, "step": 10160 }, { "epoch": 1.62, "grad_norm": 0.9956469535827637, "learning_rate": 2.1822314024680823e-05, "loss": 0.7176, "step": 10165 }, { "epoch": 1.62, "grad_norm": 0.8222232460975647, "learning_rate": 2.1801590801582346e-05, "loss": 0.7122, "step": 10170 }, { "epoch": 1.62, "grad_norm": 0.5581789016723633, "learning_rate": 2.178086981251435e-05, "loss": 0.6472, "step": 10175 }, { "epoch": 1.62, "grad_norm": 0.9564967155456543, "learning_rate": 2.176015107195008e-05, "loss": 0.5325, "step": 10180 }, { "epoch": 1.63, "grad_norm": 0.6632594466209412, "learning_rate": 2.1739434594361176e-05, "loss": 0.559, "step": 10185 }, { "epoch": 1.63, "grad_norm": 0.7242661714553833, "learning_rate": 2.1718720394217727e-05, "loss": 0.7142, "step": 10190 }, { "epoch": 1.63, "grad_norm": 1.2573809623718262, "learning_rate": 2.169800848598824e-05, "loss": 0.611, "step": 10195 }, { "epoch": 1.63, "grad_norm": 0.5467453598976135, "learning_rate": 2.1677298884139585e-05, "loss": 0.627, "step": 10200 }, { "epoch": 1.63, "eval_loss": 0.6901981234550476, "eval_runtime": 96.63, "eval_samples_per_second": 7.213, "eval_steps_per_second": 7.213, "step": 10200 }, { "epoch": 1.63, "grad_norm": 0.7206533551216125, "learning_rate": 2.165659160313705e-05, "loss": 0.6473, "step": 10205 }, { "epoch": 1.63, "grad_norm": 1.0876400470733643, "learning_rate": 2.1635886657444293e-05, "loss": 0.7045, "step": 10210 }, { "epoch": 1.63, "grad_norm": 0.7485154271125793, "learning_rate": 2.1615184061523336e-05, "loss": 0.7456, "step": 10215 }, { "epoch": 1.63, "grad_norm": 0.7333823442459106, "learning_rate": 2.1594483829834568e-05, "loss": 0.8361, "step": 10220 }, { "epoch": 1.63, "grad_norm": 0.7463033199310303, "learning_rate": 2.1573785976836734e-05, "loss": 0.8167, "step": 10225 }, { "epoch": 1.63, "grad_norm": 6.400440216064453, "learning_rate": 2.1553090516986886e-05, "loss": 0.6687, "step": 10230 }, { "epoch": 1.63, "grad_norm": 1.0179191827774048, "learning_rate": 2.1532397464740445e-05, "loss": 0.7998, "step": 10235 }, { "epoch": 1.63, "grad_norm": 0.9365726113319397, "learning_rate": 2.1511706834551116e-05, "loss": 0.5046, "step": 10240 }, { "epoch": 1.64, "grad_norm": 0.4680459797382355, "learning_rate": 2.149101864087093e-05, "loss": 0.5178, "step": 10245 }, { "epoch": 1.64, "grad_norm": 0.9137777090072632, "learning_rate": 2.1470332898150214e-05, "loss": 0.7502, "step": 10250 
}, { "epoch": 1.64, "grad_norm": 0.7119225263595581, "learning_rate": 2.1449649620837582e-05, "loss": 0.5574, "step": 10255 }, { "epoch": 1.64, "grad_norm": 0.6330089569091797, "learning_rate": 2.142896882337993e-05, "loss": 0.6987, "step": 10260 }, { "epoch": 1.64, "grad_norm": 0.7967093586921692, "learning_rate": 2.1408290520222414e-05, "loss": 0.6488, "step": 10265 }, { "epoch": 1.64, "grad_norm": 0.8605963587760925, "learning_rate": 2.138761472580845e-05, "loss": 0.6619, "step": 10270 }, { "epoch": 1.64, "grad_norm": 0.8024405241012573, "learning_rate": 2.1366941454579702e-05, "loss": 0.7963, "step": 10275 }, { "epoch": 1.64, "grad_norm": 4.0838541984558105, "learning_rate": 2.1346270720976092e-05, "loss": 0.5748, "step": 10280 }, { "epoch": 1.64, "grad_norm": 1.021926999092102, "learning_rate": 2.1325602539435714e-05, "loss": 0.5239, "step": 10285 }, { "epoch": 1.64, "grad_norm": 0.521344006061554, "learning_rate": 2.130493692439494e-05, "loss": 0.4146, "step": 10290 }, { "epoch": 1.64, "grad_norm": 0.7001100182533264, "learning_rate": 2.1284273890288336e-05, "loss": 0.6906, "step": 10295 }, { "epoch": 1.64, "grad_norm": 1.6099287271499634, "learning_rate": 2.1263613451548632e-05, "loss": 0.5849, "step": 10300 }, { "epoch": 1.64, "eval_loss": 0.6897215247154236, "eval_runtime": 96.4715, "eval_samples_per_second": 7.225, "eval_steps_per_second": 7.225, "step": 10300 }, { "epoch": 1.64, "grad_norm": 1.2209889888763428, "learning_rate": 2.1242955622606774e-05, "loss": 0.6792, "step": 10305 }, { "epoch": 1.65, "grad_norm": 0.906180202960968, "learning_rate": 2.122230041789188e-05, "loss": 0.8933, "step": 10310 }, { "epoch": 1.65, "grad_norm": 0.9409615993499756, "learning_rate": 2.120164785183123e-05, "loss": 0.603, "step": 10315 }, { "epoch": 1.65, "grad_norm": 0.6952770352363586, "learning_rate": 2.1180997938850273e-05, "loss": 0.7578, "step": 10320 }, { "epoch": 1.65, "grad_norm": 1.0658044815063477, "learning_rate": 2.1160350693372587e-05, "loss": 0.7891, "step": 10325 }, { "epoch": 1.65, "grad_norm": 0.7723982930183411, "learning_rate": 2.11397061298199e-05, "loss": 0.5675, "step": 10330 }, { "epoch": 1.65, "grad_norm": 1.1305568218231201, "learning_rate": 2.1119064262612083e-05, "loss": 0.7208, "step": 10335 }, { "epoch": 1.65, "grad_norm": 1.2653555870056152, "learning_rate": 2.1098425106167065e-05, "loss": 0.7588, "step": 10340 }, { "epoch": 1.65, "grad_norm": 0.835446298122406, "learning_rate": 2.107778867490094e-05, "loss": 0.8545, "step": 10345 }, { "epoch": 1.65, "grad_norm": 0.9362479448318481, "learning_rate": 2.1057154983227896e-05, "loss": 0.7154, "step": 10350 }, { "epoch": 1.65, "grad_norm": 1.0777581930160522, "learning_rate": 2.103652404556016e-05, "loss": 0.7584, "step": 10355 }, { "epoch": 1.65, "grad_norm": 1.0760878324508667, "learning_rate": 2.101589587630808e-05, "loss": 0.981, "step": 10360 }, { "epoch": 1.65, "grad_norm": 0.9330127239227295, "learning_rate": 2.0995270489880058e-05, "loss": 0.509, "step": 10365 }, { "epoch": 1.66, "grad_norm": 0.9733738303184509, "learning_rate": 2.0974647900682546e-05, "loss": 0.7506, "step": 10370 }, { "epoch": 1.66, "grad_norm": 1.0248785018920898, "learning_rate": 2.0954028123120047e-05, "loss": 0.7752, "step": 10375 }, { "epoch": 1.66, "grad_norm": 1.0892130136489868, "learning_rate": 2.0933411171595098e-05, "loss": 0.6103, "step": 10380 }, { "epoch": 1.66, "grad_norm": 0.8532816767692566, "learning_rate": 2.091279706050827e-05, "loss": 0.8248, "step": 10385 }, { "epoch": 1.66, "grad_norm": 0.9162831902503967, 
"learning_rate": 2.0892185804258145e-05, "loss": 0.7611, "step": 10390 }, { "epoch": 1.66, "grad_norm": 0.7723273038864136, "learning_rate": 2.0871577417241293e-05, "loss": 0.6734, "step": 10395 }, { "epoch": 1.66, "grad_norm": 0.947600781917572, "learning_rate": 2.0850971913852308e-05, "loss": 0.7257, "step": 10400 }, { "epoch": 1.66, "eval_loss": 0.6888843774795532, "eval_runtime": 96.5312, "eval_samples_per_second": 7.22, "eval_steps_per_second": 7.22, "step": 10400 }, { "epoch": 1.66, "grad_norm": 0.8397891521453857, "learning_rate": 2.083036930848376e-05, "loss": 0.8541, "step": 10405 }, { "epoch": 1.66, "grad_norm": 0.7784748077392578, "learning_rate": 2.0809769615526185e-05, "loss": 0.4962, "step": 10410 }, { "epoch": 1.66, "grad_norm": 0.8108918070793152, "learning_rate": 2.0789172849368103e-05, "loss": 0.708, "step": 10415 }, { "epoch": 1.66, "grad_norm": 0.8525465130805969, "learning_rate": 2.0768579024395978e-05, "loss": 0.6632, "step": 10420 }, { "epoch": 1.66, "grad_norm": 0.7665340900421143, "learning_rate": 2.0747988154994208e-05, "loss": 0.638, "step": 10425 }, { "epoch": 1.66, "grad_norm": 0.6820683479309082, "learning_rate": 2.0727400255545155e-05, "loss": 0.6338, "step": 10430 }, { "epoch": 1.67, "grad_norm": 0.9017329216003418, "learning_rate": 2.0706815340429088e-05, "loss": 0.67, "step": 10435 }, { "epoch": 1.67, "grad_norm": 0.9704810976982117, "learning_rate": 2.0686233424024184e-05, "loss": 0.6526, "step": 10440 }, { "epoch": 1.67, "grad_norm": 1.4057023525238037, "learning_rate": 2.0665654520706547e-05, "loss": 0.7137, "step": 10445 }, { "epoch": 1.67, "grad_norm": 0.7366592288017273, "learning_rate": 2.0645078644850174e-05, "loss": 0.6371, "step": 10450 }, { "epoch": 1.67, "grad_norm": 0.8077964782714844, "learning_rate": 2.062450581082692e-05, "loss": 0.766, "step": 10455 }, { "epoch": 1.67, "grad_norm": 1.060099482536316, "learning_rate": 2.0603936033006542e-05, "loss": 0.6473, "step": 10460 }, { "epoch": 1.67, "grad_norm": 1.1129028797149658, "learning_rate": 2.0583369325756656e-05, "loss": 0.6216, "step": 10465 }, { "epoch": 1.67, "grad_norm": 0.8062499761581421, "learning_rate": 2.0562805703442725e-05, "loss": 0.6669, "step": 10470 }, { "epoch": 1.67, "grad_norm": 0.8495035171508789, "learning_rate": 2.0542245180428078e-05, "loss": 0.6675, "step": 10475 }, { "epoch": 1.67, "grad_norm": 0.7025056481361389, "learning_rate": 2.052168777107385e-05, "loss": 0.7796, "step": 10480 }, { "epoch": 1.67, "grad_norm": 0.6635052561759949, "learning_rate": 2.050113348973902e-05, "loss": 0.6528, "step": 10485 }, { "epoch": 1.67, "grad_norm": 0.7034019827842712, "learning_rate": 2.04805823507804e-05, "loss": 0.6763, "step": 10490 }, { "epoch": 1.68, "grad_norm": 0.7599086165428162, "learning_rate": 2.0460034368552547e-05, "loss": 0.7538, "step": 10495 }, { "epoch": 1.68, "grad_norm": 0.7102855443954468, "learning_rate": 2.043948955740788e-05, "loss": 0.8931, "step": 10500 }, { "epoch": 1.68, "eval_loss": 0.6890013217926025, "eval_runtime": 96.4336, "eval_samples_per_second": 7.228, "eval_steps_per_second": 7.228, "step": 10500 }, { "epoch": 1.68, "grad_norm": 1.3262914419174194, "learning_rate": 2.0418947931696574e-05, "loss": 0.7715, "step": 10505 }, { "epoch": 1.68, "grad_norm": 0.8994144201278687, "learning_rate": 2.0398409505766567e-05, "loss": 0.5554, "step": 10510 }, { "epoch": 1.68, "grad_norm": 0.6956894993782043, "learning_rate": 2.037787429396358e-05, "loss": 0.8391, "step": 10515 }, { "epoch": 1.68, "grad_norm": 0.8718700408935547, "learning_rate": 
2.035734231063109e-05, "loss": 1.0567, "step": 10520 }, { "epoch": 1.68, "grad_norm": 0.7642519474029541, "learning_rate": 2.03368135701103e-05, "loss": 0.6575, "step": 10525 }, { "epoch": 1.68, "grad_norm": 0.732898473739624, "learning_rate": 2.031628808674017e-05, "loss": 0.7843, "step": 10530 }, { "epoch": 1.68, "grad_norm": 0.915675699710846, "learning_rate": 2.0295765874857377e-05, "loss": 0.7512, "step": 10535 }, { "epoch": 1.68, "grad_norm": 0.7394465804100037, "learning_rate": 2.0275246948796307e-05, "loss": 0.8156, "step": 10540 }, { "epoch": 1.68, "grad_norm": 1.178054928779602, "learning_rate": 2.0254731322889067e-05, "loss": 0.6579, "step": 10545 }, { "epoch": 1.68, "grad_norm": 0.7813208103179932, "learning_rate": 2.0234219011465427e-05, "loss": 0.7751, "step": 10550 }, { "epoch": 1.68, "grad_norm": 1.0084145069122314, "learning_rate": 2.0213710028852866e-05, "loss": 0.7505, "step": 10555 }, { "epoch": 1.69, "grad_norm": 0.7699124217033386, "learning_rate": 2.0193204389376563e-05, "loss": 0.5412, "step": 10560 }, { "epoch": 1.69, "grad_norm": 0.727938175201416, "learning_rate": 2.0172702107359295e-05, "loss": 0.833, "step": 10565 }, { "epoch": 1.69, "grad_norm": 1.0906044244766235, "learning_rate": 2.0152203197121553e-05, "loss": 0.7903, "step": 10570 }, { "epoch": 1.69, "grad_norm": 0.8652496933937073, "learning_rate": 2.0131707672981448e-05, "loss": 0.6883, "step": 10575 }, { "epoch": 1.69, "grad_norm": 1.3314316272735596, "learning_rate": 2.011121554925473e-05, "loss": 0.6133, "step": 10580 }, { "epoch": 1.69, "grad_norm": 0.9101418256759644, "learning_rate": 2.0090726840254768e-05, "loss": 0.5496, "step": 10585 }, { "epoch": 1.69, "grad_norm": 0.730658769607544, "learning_rate": 2.007024156029256e-05, "loss": 0.6801, "step": 10590 }, { "epoch": 1.69, "grad_norm": 0.890649139881134, "learning_rate": 2.0049759723676693e-05, "loss": 0.8252, "step": 10595 }, { "epoch": 1.69, "grad_norm": 1.0672917366027832, "learning_rate": 2.0029281344713363e-05, "loss": 0.6831, "step": 10600 }, { "epoch": 1.69, "eval_loss": 0.6875470876693726, "eval_runtime": 96.3895, "eval_samples_per_second": 7.231, "eval_steps_per_second": 7.231, "step": 10600 }, { "epoch": 1.69, "grad_norm": 1.219469666481018, "learning_rate": 2.000880643770635e-05, "loss": 0.6488, "step": 10605 }, { "epoch": 1.69, "grad_norm": 0.8009731769561768, "learning_rate": 1.9988335016956982e-05, "loss": 0.9493, "step": 10610 }, { "epoch": 1.69, "grad_norm": 1.167803168296814, "learning_rate": 1.9967867096764185e-05, "loss": 0.6855, "step": 10615 }, { "epoch": 1.7, "grad_norm": 0.834506630897522, "learning_rate": 1.9947402691424437e-05, "loss": 0.76, "step": 10620 }, { "epoch": 1.7, "grad_norm": 0.4754391312599182, "learning_rate": 1.9926941815231735e-05, "loss": 0.7065, "step": 10625 }, { "epoch": 1.7, "grad_norm": 0.8641960024833679, "learning_rate": 1.9906484482477637e-05, "loss": 0.8709, "step": 10630 }, { "epoch": 1.7, "grad_norm": 0.6834285259246826, "learning_rate": 1.9886030707451216e-05, "loss": 0.7645, "step": 10635 }, { "epoch": 1.7, "grad_norm": 1.0572384595870972, "learning_rate": 1.9865580504439057e-05, "loss": 0.712, "step": 10640 }, { "epoch": 1.7, "grad_norm": 0.7760884761810303, "learning_rate": 1.9845133887725272e-05, "loss": 0.7224, "step": 10645 }, { "epoch": 1.7, "grad_norm": 0.8341572880744934, "learning_rate": 1.982469087159141e-05, "loss": 0.7119, "step": 10650 }, { "epoch": 1.7, "grad_norm": 0.8425217866897583, "learning_rate": 1.980425147031658e-05, "loss": 0.804, "step": 10655 }, { "epoch": 1.7, 
"grad_norm": 1.1502994298934937, "learning_rate": 1.978381569817733e-05, "loss": 0.6441, "step": 10660 }, { "epoch": 1.7, "grad_norm": 0.7200062870979309, "learning_rate": 1.9763383569447658e-05, "loss": 0.8235, "step": 10665 }, { "epoch": 1.7, "grad_norm": 0.6933602094650269, "learning_rate": 1.9742955098399036e-05, "loss": 0.5782, "step": 10670 }, { "epoch": 1.7, "grad_norm": 0.7740020751953125, "learning_rate": 1.972253029930039e-05, "loss": 0.6943, "step": 10675 }, { "epoch": 1.7, "grad_norm": 0.7301381826400757, "learning_rate": 1.9702109186418054e-05, "loss": 0.8713, "step": 10680 }, { "epoch": 1.71, "grad_norm": 0.805733323097229, "learning_rate": 1.968169177401581e-05, "loss": 0.6041, "step": 10685 }, { "epoch": 1.71, "grad_norm": 0.8800898194313049, "learning_rate": 1.9661278076354854e-05, "loss": 0.9356, "step": 10690 }, { "epoch": 1.71, "grad_norm": 0.8025446534156799, "learning_rate": 1.964086810769377e-05, "loss": 0.839, "step": 10695 }, { "epoch": 1.71, "grad_norm": 0.6204278469085693, "learning_rate": 1.9620461882288572e-05, "loss": 0.4995, "step": 10700 }, { "epoch": 1.71, "eval_loss": 0.6879023909568787, "eval_runtime": 96.4411, "eval_samples_per_second": 7.227, "eval_steps_per_second": 7.227, "step": 10700 }, { "epoch": 1.71, "grad_norm": 1.0396225452423096, "learning_rate": 1.96000594143926e-05, "loss": 0.7388, "step": 10705 }, { "epoch": 1.71, "grad_norm": 0.7833243608474731, "learning_rate": 1.9579660718256626e-05, "loss": 0.6724, "step": 10710 }, { "epoch": 1.71, "grad_norm": 0.9875903129577637, "learning_rate": 1.955926580812878e-05, "loss": 0.577, "step": 10715 }, { "epoch": 1.71, "grad_norm": 0.7285399436950684, "learning_rate": 1.953887469825451e-05, "loss": 0.6292, "step": 10720 }, { "epoch": 1.71, "grad_norm": 1.103204369544983, "learning_rate": 1.9518487402876643e-05, "loss": 0.7344, "step": 10725 }, { "epoch": 1.71, "grad_norm": 0.8033486008644104, "learning_rate": 1.949810393623534e-05, "loss": 0.7313, "step": 10730 }, { "epoch": 1.71, "grad_norm": 0.6469098925590515, "learning_rate": 1.947772431256807e-05, "loss": 0.5896, "step": 10735 }, { "epoch": 1.71, "grad_norm": 0.5645683407783508, "learning_rate": 1.9457348546109638e-05, "loss": 0.7893, "step": 10740 }, { "epoch": 1.72, "grad_norm": 0.5980925559997559, "learning_rate": 1.9436976651092144e-05, "loss": 0.6558, "step": 10745 }, { "epoch": 1.72, "grad_norm": 1.182881236076355, "learning_rate": 1.9416608641744977e-05, "loss": 0.7528, "step": 10750 }, { "epoch": 1.72, "grad_norm": 0.79257732629776, "learning_rate": 1.9396244532294823e-05, "loss": 1.1207, "step": 10755 }, { "epoch": 1.72, "grad_norm": 0.7459089159965515, "learning_rate": 1.9375884336965656e-05, "loss": 0.6823, "step": 10760 }, { "epoch": 1.72, "grad_norm": 0.8486272096633911, "learning_rate": 1.9355528069978675e-05, "loss": 0.7368, "step": 10765 }, { "epoch": 1.72, "grad_norm": 1.2372967004776, "learning_rate": 1.933517574555237e-05, "loss": 0.707, "step": 10770 }, { "epoch": 1.72, "grad_norm": 0.8635643720626831, "learning_rate": 1.931482737790249e-05, "loss": 0.8455, "step": 10775 }, { "epoch": 1.72, "grad_norm": 0.6724494099617004, "learning_rate": 1.929448298124197e-05, "loss": 0.616, "step": 10780 }, { "epoch": 1.72, "grad_norm": 0.951019287109375, "learning_rate": 1.927414256978102e-05, "loss": 0.6461, "step": 10785 }, { "epoch": 1.72, "grad_norm": 0.9326440691947937, "learning_rate": 1.9253806157727027e-05, "loss": 0.6675, "step": 10790 }, { "epoch": 1.72, "grad_norm": 0.6947630047798157, "learning_rate": 1.9233473759284614e-05, 
"loss": 0.6225, "step": 10795 }, { "epoch": 1.72, "grad_norm": 0.9178038239479065, "learning_rate": 1.9213145388655603e-05, "loss": 0.757, "step": 10800 }, { "epoch": 1.72, "eval_loss": 0.687288761138916, "eval_runtime": 96.4345, "eval_samples_per_second": 7.228, "eval_steps_per_second": 7.228, "step": 10800 }, { "epoch": 1.72, "grad_norm": 0.6267134547233582, "learning_rate": 1.9192821060038964e-05, "loss": 0.7879, "step": 10805 }, { "epoch": 1.73, "grad_norm": 1.0207151174545288, "learning_rate": 1.9172500787630886e-05, "loss": 0.5397, "step": 10810 }, { "epoch": 1.73, "grad_norm": 0.8256750702857971, "learning_rate": 1.915218458562472e-05, "loss": 0.735, "step": 10815 }, { "epoch": 1.73, "grad_norm": 1.3011645078659058, "learning_rate": 1.913187246821094e-05, "loss": 0.6131, "step": 10820 }, { "epoch": 1.73, "grad_norm": 0.767971396446228, "learning_rate": 1.911156444957719e-05, "loss": 0.6645, "step": 10825 }, { "epoch": 1.73, "grad_norm": 0.9583250284194946, "learning_rate": 1.909126054390827e-05, "loss": 0.6797, "step": 10830 }, { "epoch": 1.73, "grad_norm": 0.8778277635574341, "learning_rate": 1.9070960765386074e-05, "loss": 0.6432, "step": 10835 }, { "epoch": 1.73, "grad_norm": 0.7784696221351624, "learning_rate": 1.9050665128189622e-05, "loss": 0.6806, "step": 10840 }, { "epoch": 1.73, "grad_norm": 0.9121264219284058, "learning_rate": 1.9030373646495067e-05, "loss": 1.0989, "step": 10845 }, { "epoch": 1.73, "grad_norm": 1.8751630783081055, "learning_rate": 1.9010086334475616e-05, "loss": 0.7361, "step": 10850 }, { "epoch": 1.73, "grad_norm": 0.7736453413963318, "learning_rate": 1.898980320630161e-05, "loss": 0.6987, "step": 10855 }, { "epoch": 1.73, "grad_norm": 0.97115558385849, "learning_rate": 1.8969524276140416e-05, "loss": 0.6707, "step": 10860 }, { "epoch": 1.73, "grad_norm": 0.7803744673728943, "learning_rate": 1.8949249558156506e-05, "loss": 0.8237, "step": 10865 }, { "epoch": 1.74, "grad_norm": 0.9206936359405518, "learning_rate": 1.8928979066511423e-05, "loss": 0.6268, "step": 10870 }, { "epoch": 1.74, "grad_norm": 0.6118232011795044, "learning_rate": 1.890871281536371e-05, "loss": 0.5877, "step": 10875 }, { "epoch": 1.74, "grad_norm": 0.8835501670837402, "learning_rate": 1.888845081886898e-05, "loss": 0.6301, "step": 10880 }, { "epoch": 1.74, "grad_norm": 0.5582643747329712, "learning_rate": 1.8868193091179876e-05, "loss": 0.6052, "step": 10885 }, { "epoch": 1.74, "grad_norm": 0.4886070489883423, "learning_rate": 1.8847939646446038e-05, "loss": 0.7144, "step": 10890 }, { "epoch": 1.74, "grad_norm": 0.6952773928642273, "learning_rate": 1.8827690498814134e-05, "loss": 0.7458, "step": 10895 }, { "epoch": 1.74, "grad_norm": 0.6507378220558167, "learning_rate": 1.8807445662427834e-05, "loss": 0.4664, "step": 10900 }, { "epoch": 1.74, "eval_loss": 0.6876233220100403, "eval_runtime": 96.4641, "eval_samples_per_second": 7.225, "eval_steps_per_second": 7.225, "step": 10900 }, { "epoch": 1.74, "grad_norm": 0.8398704528808594, "learning_rate": 1.8787205151427772e-05, "loss": 0.8266, "step": 10905 }, { "epoch": 1.74, "grad_norm": 0.7542173862457275, "learning_rate": 1.876696897995158e-05, "loss": 0.5439, "step": 10910 }, { "epoch": 1.74, "grad_norm": 0.6264070272445679, "learning_rate": 1.8746737162133872e-05, "loss": 0.7945, "step": 10915 }, { "epoch": 1.74, "grad_norm": 0.7293385863304138, "learning_rate": 1.872650971210617e-05, "loss": 0.6127, "step": 10920 }, { "epoch": 1.74, "grad_norm": 0.7886928915977478, "learning_rate": 1.8706286643996997e-05, "loss": 0.5804, "step": 10925 
}, { "epoch": 1.74, "grad_norm": 0.9824013710021973, "learning_rate": 1.8686067971931815e-05, "loss": 0.6223, "step": 10930 }, { "epoch": 1.75, "grad_norm": 0.8492375016212463, "learning_rate": 1.8665853710032966e-05, "loss": 0.8382, "step": 10935 }, { "epoch": 1.75, "grad_norm": 0.8610935211181641, "learning_rate": 1.8645643872419762e-05, "loss": 0.7523, "step": 10940 }, { "epoch": 1.75, "grad_norm": 0.7715781331062317, "learning_rate": 1.8625438473208397e-05, "loss": 0.6707, "step": 10945 }, { "epoch": 1.75, "grad_norm": 0.8448615670204163, "learning_rate": 1.8605237526511977e-05, "loss": 0.8098, "step": 10950 }, { "epoch": 1.75, "grad_norm": 0.867952823638916, "learning_rate": 1.8585041046440507e-05, "loss": 0.6537, "step": 10955 }, { "epoch": 1.75, "grad_norm": 0.8320883512496948, "learning_rate": 1.8564849047100832e-05, "loss": 0.5736, "step": 10960 }, { "epoch": 1.75, "grad_norm": 0.5284642577171326, "learning_rate": 1.854466154259672e-05, "loss": 0.6359, "step": 10965 }, { "epoch": 1.75, "grad_norm": 0.5998355150222778, "learning_rate": 1.8524478547028773e-05, "loss": 0.7487, "step": 10970 }, { "epoch": 1.75, "grad_norm": 0.9502187371253967, "learning_rate": 1.850430007449443e-05, "loss": 0.7806, "step": 10975 }, { "epoch": 1.75, "grad_norm": 0.6518775224685669, "learning_rate": 1.8484126139087993e-05, "loss": 0.7076, "step": 10980 }, { "epoch": 1.75, "grad_norm": 0.844935953617096, "learning_rate": 1.8463956754900595e-05, "loss": 0.6835, "step": 10985 }, { "epoch": 1.75, "grad_norm": 1.0043857097625732, "learning_rate": 1.8443791936020172e-05, "loss": 0.7642, "step": 10990 }, { "epoch": 1.75, "grad_norm": 0.7905507683753967, "learning_rate": 1.8423631696531492e-05, "loss": 0.7423, "step": 10995 }, { "epoch": 1.76, "grad_norm": 0.7718290090560913, "learning_rate": 1.840347605051611e-05, "loss": 0.78, "step": 11000 }, { "epoch": 1.76, "eval_loss": 0.6864747405052185, "eval_runtime": 96.4267, "eval_samples_per_second": 7.228, "eval_steps_per_second": 7.228, "step": 11000 } ], "logging_steps": 5, "max_steps": 18795, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 500, "total_flos": 4.74708493467648e+17, "train_batch_size": 1, "trial_name": null, "trial_params": null }