|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 1005,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.029850746268656716,
      "grad_norm": 9.961305925410098,
      "learning_rate": 5e-06,
      "loss": 0.8067,
      "step": 10
    },
    {
      "epoch": 0.05970149253731343,
      "grad_norm": 1.0752052367545273,
      "learning_rate": 5e-06,
      "loss": 0.7212,
      "step": 20
    },
    {
      "epoch": 0.08955223880597014,
      "grad_norm": 2.6880106756208657,
      "learning_rate": 5e-06,
      "loss": 0.7037,
      "step": 30
    },
    {
      "epoch": 0.11940298507462686,
      "grad_norm": 2.468677125810121,
      "learning_rate": 5e-06,
      "loss": 0.6808,
      "step": 40
    },
    {
      "epoch": 0.14925373134328357,
      "grad_norm": 1.2259453137557428,
      "learning_rate": 5e-06,
      "loss": 0.6673,
      "step": 50
    },
    {
      "epoch": 0.1791044776119403,
      "grad_norm": 0.8046965924721278,
      "learning_rate": 5e-06,
      "loss": 0.6673,
      "step": 60
    },
    {
      "epoch": 0.208955223880597,
      "grad_norm": 0.8755706624782442,
      "learning_rate": 5e-06,
      "loss": 0.6525,
      "step": 70
    },
    {
      "epoch": 0.23880597014925373,
      "grad_norm": 0.9212844875335406,
      "learning_rate": 5e-06,
      "loss": 0.6468,
      "step": 80
    },
    {
      "epoch": 0.26865671641791045,
      "grad_norm": 1.12468469232501,
      "learning_rate": 5e-06,
      "loss": 0.6393,
      "step": 90
    },
    {
      "epoch": 0.29850746268656714,
      "grad_norm": 0.6121939679063821,
      "learning_rate": 5e-06,
      "loss": 0.6349,
      "step": 100
    },
    {
      "epoch": 0.3283582089552239,
      "grad_norm": 0.6139174959470913,
      "learning_rate": 5e-06,
      "loss": 0.6341,
      "step": 110
    },
    {
      "epoch": 0.3582089552238806,
      "grad_norm": 0.5251117188495569,
      "learning_rate": 5e-06,
      "loss": 0.6331,
      "step": 120
    },
    {
      "epoch": 0.3880597014925373,
      "grad_norm": 0.5494317456200198,
      "learning_rate": 5e-06,
      "loss": 0.6329,
      "step": 130
    },
    {
      "epoch": 0.417910447761194,
      "grad_norm": 0.4941836924989687,
      "learning_rate": 5e-06,
      "loss": 0.6351,
      "step": 140
    },
    {
      "epoch": 0.44776119402985076,
      "grad_norm": 0.47052093951365964,
      "learning_rate": 5e-06,
      "loss": 0.6209,
      "step": 150
    },
    {
      "epoch": 0.47761194029850745,
      "grad_norm": 1.1146519645525566,
      "learning_rate": 5e-06,
      "loss": 0.6305,
      "step": 160
    },
    {
      "epoch": 0.5074626865671642,
      "grad_norm": 0.8473149334181534,
      "learning_rate": 5e-06,
      "loss": 0.629,
      "step": 170
    },
    {
      "epoch": 0.5373134328358209,
      "grad_norm": 0.7499567580667779,
      "learning_rate": 5e-06,
      "loss": 0.63,
      "step": 180
    },
    {
      "epoch": 0.5671641791044776,
      "grad_norm": 0.5639441343205728,
      "learning_rate": 5e-06,
      "loss": 0.6218,
      "step": 190
    },
    {
      "epoch": 0.5970149253731343,
      "grad_norm": 1.1342411031324615,
      "learning_rate": 5e-06,
      "loss": 0.622,
      "step": 200
    },
    {
      "epoch": 0.6268656716417911,
      "grad_norm": 0.6456096595420062,
      "learning_rate": 5e-06,
      "loss": 0.6232,
      "step": 210
    },
    {
      "epoch": 0.6567164179104478,
      "grad_norm": 0.5879709496150657,
      "learning_rate": 5e-06,
      "loss": 0.6236,
      "step": 220
    },
    {
      "epoch": 0.6865671641791045,
      "grad_norm": 0.8978920811236428,
      "learning_rate": 5e-06,
      "loss": 0.6099,
      "step": 230
    },
    {
      "epoch": 0.7164179104477612,
      "grad_norm": 0.8274630792281841,
      "learning_rate": 5e-06,
      "loss": 0.6108,
      "step": 240
    },
    {
      "epoch": 0.746268656716418,
      "grad_norm": 0.6457291165426283,
      "learning_rate": 5e-06,
      "loss": 0.6092,
      "step": 250
    },
    {
      "epoch": 0.7761194029850746,
      "grad_norm": 0.5847180410958434,
      "learning_rate": 5e-06,
      "loss": 0.6118,
      "step": 260
    },
    {
      "epoch": 0.8059701492537313,
      "grad_norm": 0.9144134579449927,
      "learning_rate": 5e-06,
      "loss": 0.6135,
      "step": 270
    },
    {
      "epoch": 0.835820895522388,
      "grad_norm": 1.6213022633122485,
      "learning_rate": 5e-06,
      "loss": 0.6149,
      "step": 280
    },
    {
      "epoch": 0.8656716417910447,
      "grad_norm": 0.5755669090329376,
      "learning_rate": 5e-06,
      "loss": 0.6193,
      "step": 290
    },
    {
      "epoch": 0.8955223880597015,
      "grad_norm": 0.5239632952956513,
      "learning_rate": 5e-06,
      "loss": 0.6152,
      "step": 300
    },
    {
      "epoch": 0.9253731343283582,
      "grad_norm": 0.5078873832450146,
      "learning_rate": 5e-06,
      "loss": 0.6142,
      "step": 310
    },
    {
      "epoch": 0.9552238805970149,
      "grad_norm": 0.7451903752568965,
      "learning_rate": 5e-06,
      "loss": 0.6067,
      "step": 320
    },
    {
      "epoch": 0.9850746268656716,
      "grad_norm": 0.5114709685939808,
      "learning_rate": 5e-06,
      "loss": 0.5999,
      "step": 330
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.6080397367477417,
      "eval_runtime": 59.4897,
      "eval_samples_per_second": 151.673,
      "eval_steps_per_second": 0.605,
      "step": 335
    },
    {
      "epoch": 1.0149253731343284,
      "grad_norm": 0.6428823077249859,
      "learning_rate": 5e-06,
      "loss": 0.5862,
      "step": 340
    },
    {
      "epoch": 1.044776119402985,
      "grad_norm": 0.749643554288035,
      "learning_rate": 5e-06,
      "loss": 0.5592,
      "step": 350
    },
    {
      "epoch": 1.0746268656716418,
      "grad_norm": 0.46129260605818906,
      "learning_rate": 5e-06,
      "loss": 0.5729,
      "step": 360
    },
    {
      "epoch": 1.1044776119402986,
      "grad_norm": 0.6289609209087915,
      "learning_rate": 5e-06,
      "loss": 0.5567,
      "step": 370
    },
    {
      "epoch": 1.1343283582089552,
      "grad_norm": 0.605218538340198,
      "learning_rate": 5e-06,
      "loss": 0.5637,
      "step": 380
    },
    {
      "epoch": 1.164179104477612,
      "grad_norm": 0.5995330578684349,
      "learning_rate": 5e-06,
      "loss": 0.5636,
      "step": 390
    },
    {
      "epoch": 1.1940298507462686,
      "grad_norm": 0.6108058300152777,
      "learning_rate": 5e-06,
      "loss": 0.5511,
      "step": 400
    },
    {
      "epoch": 1.2238805970149254,
      "grad_norm": 0.5091266989811818,
      "learning_rate": 5e-06,
      "loss": 0.5658,
      "step": 410
    },
    {
      "epoch": 1.2537313432835822,
      "grad_norm": 0.497382336824621,
      "learning_rate": 5e-06,
      "loss": 0.5648,
      "step": 420
    },
    {
      "epoch": 1.2835820895522387,
      "grad_norm": 0.6009692943343384,
      "learning_rate": 5e-06,
      "loss": 0.5632,
      "step": 430
    },
    {
      "epoch": 1.3134328358208955,
      "grad_norm": 0.6663676150345131,
      "learning_rate": 5e-06,
      "loss": 0.5539,
      "step": 440
    },
    {
      "epoch": 1.3432835820895521,
      "grad_norm": 0.5153693458150974,
      "learning_rate": 5e-06,
      "loss": 0.5604,
      "step": 450
    },
    {
      "epoch": 1.373134328358209,
      "grad_norm": 0.4650717684546587,
      "learning_rate": 5e-06,
      "loss": 0.5637,
      "step": 460
    },
    {
      "epoch": 1.4029850746268657,
      "grad_norm": 0.5337571428788572,
      "learning_rate": 5e-06,
      "loss": 0.5627,
      "step": 470
    },
    {
      "epoch": 1.4328358208955223,
      "grad_norm": 0.5056863786298517,
      "learning_rate": 5e-06,
      "loss": 0.5634,
      "step": 480
    },
    {
      "epoch": 1.462686567164179,
      "grad_norm": 0.5950623554639968,
      "learning_rate": 5e-06,
      "loss": 0.5642,
      "step": 490
    },
    {
      "epoch": 1.4925373134328357,
      "grad_norm": 0.6247670785942998,
      "learning_rate": 5e-06,
      "loss": 0.5625,
      "step": 500
    },
    {
      "epoch": 1.5223880597014925,
      "grad_norm": 0.4901169936700308,
      "learning_rate": 5e-06,
      "loss": 0.5627,
      "step": 510
    },
    {
      "epoch": 1.5522388059701493,
      "grad_norm": 0.47156500447116634,
      "learning_rate": 5e-06,
      "loss": 0.5687,
      "step": 520
    },
    {
      "epoch": 1.582089552238806,
      "grad_norm": 0.493610926847214,
      "learning_rate": 5e-06,
      "loss": 0.5597,
      "step": 530
    },
    {
      "epoch": 1.6119402985074627,
      "grad_norm": 0.5267761487869876,
      "learning_rate": 5e-06,
      "loss": 0.5616,
      "step": 540
    },
    {
      "epoch": 1.6417910447761193,
      "grad_norm": 0.44433923391234365,
      "learning_rate": 5e-06,
      "loss": 0.5576,
      "step": 550
    },
    {
      "epoch": 1.671641791044776,
      "grad_norm": 0.4991219923642928,
      "learning_rate": 5e-06,
      "loss": 0.5585,
      "step": 560
    },
    {
      "epoch": 1.7014925373134329,
      "grad_norm": 0.4559161332352475,
      "learning_rate": 5e-06,
      "loss": 0.5561,
      "step": 570
    },
    {
      "epoch": 1.7313432835820897,
      "grad_norm": 0.44695659572809443,
      "learning_rate": 5e-06,
      "loss": 0.5558,
      "step": 580
    },
    {
      "epoch": 1.7611940298507462,
      "grad_norm": 0.6551282471596537,
      "learning_rate": 5e-06,
      "loss": 0.5651,
      "step": 590
    },
    {
      "epoch": 1.7910447761194028,
      "grad_norm": 0.590104074031775,
      "learning_rate": 5e-06,
      "loss": 0.5666,
      "step": 600
    },
    {
      "epoch": 1.8208955223880596,
      "grad_norm": 0.5262721423268055,
      "learning_rate": 5e-06,
      "loss": 0.5579,
      "step": 610
    },
    {
      "epoch": 1.8507462686567164,
      "grad_norm": 0.5025495933022038,
      "learning_rate": 5e-06,
      "loss": 0.561,
      "step": 620
    },
    {
      "epoch": 1.8805970149253732,
      "grad_norm": 0.5437285628032003,
      "learning_rate": 5e-06,
      "loss": 0.5671,
      "step": 630
    },
    {
      "epoch": 1.9104477611940298,
      "grad_norm": 0.5830114249621688,
      "learning_rate": 5e-06,
      "loss": 0.5569,
      "step": 640
    },
    {
      "epoch": 1.9402985074626866,
      "grad_norm": 0.5691476967866934,
      "learning_rate": 5e-06,
      "loss": 0.5584,
      "step": 650
    },
    {
      "epoch": 1.9701492537313432,
      "grad_norm": 0.49823732746182914,
      "learning_rate": 5e-06,
      "loss": 0.5571,
      "step": 660
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.4853362249094826,
      "learning_rate": 5e-06,
      "loss": 0.5557,
      "step": 670
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.5995889902114868,
      "eval_runtime": 59.329,
      "eval_samples_per_second": 152.084,
      "eval_steps_per_second": 0.607,
      "step": 670
    },
    {
      "epoch": 2.029850746268657,
      "grad_norm": 0.530250772889861,
      "learning_rate": 5e-06,
      "loss": 0.513,
      "step": 680
    },
    {
      "epoch": 2.0597014925373136,
      "grad_norm": 0.5342911362277831,
      "learning_rate": 5e-06,
      "loss": 0.5108,
      "step": 690
    },
    {
      "epoch": 2.08955223880597,
      "grad_norm": 0.5434518597947492,
      "learning_rate": 5e-06,
      "loss": 0.5105,
      "step": 700
    },
    {
      "epoch": 2.1194029850746268,
      "grad_norm": 0.6286537295202244,
      "learning_rate": 5e-06,
      "loss": 0.5123,
      "step": 710
    },
    {
      "epoch": 2.1492537313432836,
      "grad_norm": 0.5956472591858897,
      "learning_rate": 5e-06,
      "loss": 0.5065,
      "step": 720
    },
    {
      "epoch": 2.1791044776119404,
      "grad_norm": 0.5608264698424108,
      "learning_rate": 5e-06,
      "loss": 0.5112,
      "step": 730
    },
    {
      "epoch": 2.208955223880597,
      "grad_norm": 0.529984658854658,
      "learning_rate": 5e-06,
      "loss": 0.5078,
      "step": 740
    },
    {
      "epoch": 2.2388059701492535,
      "grad_norm": 0.539168879551667,
      "learning_rate": 5e-06,
      "loss": 0.5105,
      "step": 750
    },
    {
      "epoch": 2.2686567164179103,
      "grad_norm": 0.5397218859742641,
      "learning_rate": 5e-06,
      "loss": 0.5098,
      "step": 760
    },
    {
      "epoch": 2.298507462686567,
      "grad_norm": 0.5080172238267691,
      "learning_rate": 5e-06,
      "loss": 0.5094,
      "step": 770
    },
    {
      "epoch": 2.328358208955224,
      "grad_norm": 0.5269231995045524,
      "learning_rate": 5e-06,
      "loss": 0.508,
      "step": 780
    },
    {
      "epoch": 2.3582089552238807,
      "grad_norm": 0.485045053682394,
      "learning_rate": 5e-06,
      "loss": 0.5115,
      "step": 790
    },
    {
      "epoch": 2.388059701492537,
      "grad_norm": 0.6198237538120879,
      "learning_rate": 5e-06,
      "loss": 0.5083,
      "step": 800
    },
    {
      "epoch": 2.417910447761194,
      "grad_norm": 0.602606300035839,
      "learning_rate": 5e-06,
      "loss": 0.5151,
      "step": 810
    },
    {
      "epoch": 2.4477611940298507,
      "grad_norm": 0.5667564011219249,
      "learning_rate": 5e-06,
      "loss": 0.519,
      "step": 820
    },
    {
      "epoch": 2.4776119402985075,
      "grad_norm": 0.5104272153178098,
      "learning_rate": 5e-06,
      "loss": 0.5081,
      "step": 830
    },
    {
      "epoch": 2.5074626865671643,
      "grad_norm": 0.4892216848477563,
      "learning_rate": 5e-06,
      "loss": 0.5082,
      "step": 840
    },
    {
      "epoch": 2.5373134328358207,
      "grad_norm": 0.520250022509386,
      "learning_rate": 5e-06,
      "loss": 0.5148,
      "step": 850
    },
    {
      "epoch": 2.5671641791044775,
      "grad_norm": 0.6047039208024556,
      "learning_rate": 5e-06,
      "loss": 0.5098,
      "step": 860
    },
    {
      "epoch": 2.5970149253731343,
      "grad_norm": 0.4743086712519712,
      "learning_rate": 5e-06,
      "loss": 0.5085,
      "step": 870
    },
    {
      "epoch": 2.626865671641791,
      "grad_norm": 0.5601721301492414,
      "learning_rate": 5e-06,
      "loss": 0.5151,
      "step": 880
    },
    {
      "epoch": 2.656716417910448,
      "grad_norm": 0.5312045710978154,
      "learning_rate": 5e-06,
      "loss": 0.5144,
      "step": 890
    },
    {
      "epoch": 2.6865671641791042,
      "grad_norm": 0.5148689817919981,
      "learning_rate": 5e-06,
      "loss": 0.5104,
      "step": 900
    },
    {
      "epoch": 2.716417910447761,
      "grad_norm": 0.5394739876753407,
      "learning_rate": 5e-06,
      "loss": 0.5168,
      "step": 910
    },
    {
      "epoch": 2.746268656716418,
      "grad_norm": 0.49195076728876497,
      "learning_rate": 5e-06,
      "loss": 0.5124,
      "step": 920
    },
    {
      "epoch": 2.7761194029850746,
      "grad_norm": 0.5138809386584957,
      "learning_rate": 5e-06,
      "loss": 0.5173,
      "step": 930
    },
    {
      "epoch": 2.8059701492537314,
      "grad_norm": 0.48130331745070326,
      "learning_rate": 5e-06,
      "loss": 0.5108,
      "step": 940
    },
    {
      "epoch": 2.835820895522388,
      "grad_norm": 0.4863446722303473,
      "learning_rate": 5e-06,
      "loss": 0.5137,
      "step": 950
    },
    {
      "epoch": 2.8656716417910446,
      "grad_norm": 0.4977491076164347,
      "learning_rate": 5e-06,
      "loss": 0.5156,
      "step": 960
    },
    {
      "epoch": 2.8955223880597014,
      "grad_norm": 0.5201471021840797,
      "learning_rate": 5e-06,
      "loss": 0.5165,
      "step": 970
    },
    {
      "epoch": 2.925373134328358,
      "grad_norm": 0.5049475021594374,
      "learning_rate": 5e-06,
      "loss": 0.5109,
      "step": 980
    },
    {
      "epoch": 2.955223880597015,
      "grad_norm": 0.5648245603044626,
      "learning_rate": 5e-06,
      "loss": 0.518,
      "step": 990
    },
    {
      "epoch": 2.9850746268656714,
      "grad_norm": 0.5144681511781752,
      "learning_rate": 5e-06,
      "loss": 0.504,
      "step": 1000
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.6035678386688232,
      "eval_runtime": 59.1481,
      "eval_samples_per_second": 152.549,
      "eval_steps_per_second": 0.609,
      "step": 1005
    },
    {
      "epoch": 3.0,
      "step": 1005,
      "total_flos": 1682574913044480.0,
      "train_loss": 0.5704716248298759,
      "train_runtime": 8778.2325,
      "train_samples_per_second": 58.585,
      "train_steps_per_second": 0.114
    }
  ],
  "logging_steps": 10,
  "max_steps": 1005,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1682574913044480.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}