{ |
|
"best_metric": 1.5566551155643538e-05, |
|
"best_model_checkpoint": "saves/CADICA_qwenvl_direction_scale4/lora/sft/checkpoint-1150", |
|
"epoch": 0.6185938707185166, |
|
"eval_steps": 50, |
|
"global_step": 1201, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.0025753283543651817, |
|
"grad_norm": 14.230540018977528, |
|
"learning_rate": 2.9411764705882355e-06, |
|
"loss": 1.0954, |
|
"num_input_tokens_seen": 49920, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.0051506567087303634, |
|
"grad_norm": 12.725175128208063, |
|
"learning_rate": 5.882352941176471e-06, |
|
"loss": 0.9793, |
|
"num_input_tokens_seen": 99840, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.007725985063095545, |
|
"grad_norm": 11.908760913870685, |
|
"learning_rate": 8.823529411764707e-06, |
|
"loss": 1.0964, |
|
"num_input_tokens_seen": 149760, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.010301313417460727, |
|
"grad_norm": 7.5714179545602835, |
|
"learning_rate": 1.1764705882352942e-05, |
|
"loss": 0.7079, |
|
"num_input_tokens_seen": 199680, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.012876641771825908, |
|
"grad_norm": 2.273110551179123, |
|
"learning_rate": 1.4705882352941177e-05, |
|
"loss": 0.4213, |
|
"num_input_tokens_seen": 249600, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.01545197012619109, |
|
"grad_norm": 1.4511084067844011, |
|
"learning_rate": 1.7647058823529414e-05, |
|
"loss": 0.3359, |
|
"num_input_tokens_seen": 299520, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.018027298480556272, |
|
"grad_norm": 1.7462649262033438, |
|
"learning_rate": 2.058823529411765e-05, |
|
"loss": 0.3463, |
|
"num_input_tokens_seen": 349440, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.020602626834921454, |
|
"grad_norm": 1.2143595820366577, |
|
"learning_rate": 2.3529411764705884e-05, |
|
"loss": 0.28, |
|
"num_input_tokens_seen": 399360, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.023177955189286635, |
|
"grad_norm": 0.8468719125045373, |
|
"learning_rate": 2.647058823529412e-05, |
|
"loss": 0.3442, |
|
"num_input_tokens_seen": 449280, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.025753283543651816, |
|
"grad_norm": 0.9726867932660042, |
|
"learning_rate": 2.9411764705882354e-05, |
|
"loss": 0.3441, |
|
"num_input_tokens_seen": 499200, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.025753283543651816, |
|
"eval_loss": 0.33834776282310486, |
|
"eval_runtime": 47.4467, |
|
"eval_samples_per_second": 1.265, |
|
"eval_steps_per_second": 0.316, |
|
"num_input_tokens_seen": 499200, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.028328611898016998, |
|
"grad_norm": 1.4016556961184263, |
|
"learning_rate": 3.235294117647059e-05, |
|
"loss": 0.3182, |
|
"num_input_tokens_seen": 549120, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.03090394025238218, |
|
"grad_norm": 0.6437613769459606, |
|
"learning_rate": 3.529411764705883e-05, |
|
"loss": 0.3294, |
|
"num_input_tokens_seen": 599040, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.03347926860674736, |
|
"grad_norm": 0.7389008951321312, |
|
"learning_rate": 3.8235294117647055e-05, |
|
"loss": 0.3097, |
|
"num_input_tokens_seen": 648960, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.036054596961112545, |
|
"grad_norm": 0.771553860801019, |
|
"learning_rate": 4.11764705882353e-05, |
|
"loss": 0.3008, |
|
"num_input_tokens_seen": 698880, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.03862992531547772, |
|
"grad_norm": 0.6965369148334918, |
|
"learning_rate": 4.411764705882353e-05, |
|
"loss": 0.3278, |
|
"num_input_tokens_seen": 748800, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.04120525366984291, |
|
"grad_norm": 0.912943461315541, |
|
"learning_rate": 4.705882352941177e-05, |
|
"loss": 0.3074, |
|
"num_input_tokens_seen": 798720, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.043780582024208085, |
|
"grad_norm": 0.8407481737577445, |
|
"learning_rate": 5e-05, |
|
"loss": 0.3423, |
|
"num_input_tokens_seen": 848640, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.04635591037857327, |
|
"grad_norm": 0.9112879058417015, |
|
"learning_rate": 5.294117647058824e-05, |
|
"loss": 0.3008, |
|
"num_input_tokens_seen": 898560, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.04893123873293845, |
|
"grad_norm": 2.391489040464162, |
|
"learning_rate": 5.588235294117647e-05, |
|
"loss": 0.2815, |
|
"num_input_tokens_seen": 948480, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.05150656708730363, |
|
"grad_norm": 2.155211791607199, |
|
"learning_rate": 5.882352941176471e-05, |
|
"loss": 0.2274, |
|
"num_input_tokens_seen": 998400, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.05150656708730363, |
|
"eval_loss": 0.18663176894187927, |
|
"eval_runtime": 18.9199, |
|
"eval_samples_per_second": 3.171, |
|
"eval_steps_per_second": 0.793, |
|
"num_input_tokens_seen": 998400, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.05408189544166881, |
|
"grad_norm": 2.2181531422996716, |
|
"learning_rate": 6.176470588235295e-05, |
|
"loss": 0.168, |
|
"num_input_tokens_seen": 1048320, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.056657223796033995, |
|
"grad_norm": 3.1829920225573236, |
|
"learning_rate": 6.470588235294118e-05, |
|
"loss": 0.0709, |
|
"num_input_tokens_seen": 1098240, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.05923255215039917, |
|
"grad_norm": 4.337350477588576, |
|
"learning_rate": 6.764705882352942e-05, |
|
"loss": 0.1609, |
|
"num_input_tokens_seen": 1148160, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.06180788050476436, |
|
"grad_norm": 2.1010046045637365, |
|
"learning_rate": 7.058823529411765e-05, |
|
"loss": 0.0354, |
|
"num_input_tokens_seen": 1198080, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.06438320885912954, |
|
"grad_norm": 2.232308844812103, |
|
"learning_rate": 7.352941176470589e-05, |
|
"loss": 0.1133, |
|
"num_input_tokens_seen": 1248000, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.06695853721349472, |
|
"grad_norm": 5.641631090993415, |
|
"learning_rate": 7.647058823529411e-05, |
|
"loss": 0.0867, |
|
"num_input_tokens_seen": 1297920, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.0695338655678599, |
|
"grad_norm": 1.5031437609685787, |
|
"learning_rate": 7.941176470588235e-05, |
|
"loss": 0.1352, |
|
"num_input_tokens_seen": 1347840, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.07210919392222509, |
|
"grad_norm": 3.2992644431188465, |
|
"learning_rate": 8.23529411764706e-05, |
|
"loss": 0.101, |
|
"num_input_tokens_seen": 1397760, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.07468452227659027, |
|
"grad_norm": 3.494236832758233, |
|
"learning_rate": 8.529411764705883e-05, |
|
"loss": 0.0334, |
|
"num_input_tokens_seen": 1447680, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.07725985063095545, |
|
"grad_norm": 0.0602113869322109, |
|
"learning_rate": 8.823529411764706e-05, |
|
"loss": 0.0667, |
|
"num_input_tokens_seen": 1497600, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.07725985063095545, |
|
"eval_loss": 0.09665286540985107, |
|
"eval_runtime": 19.2745, |
|
"eval_samples_per_second": 3.113, |
|
"eval_steps_per_second": 0.778, |
|
"num_input_tokens_seen": 1497600, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.07983517898532062, |
|
"grad_norm": 6.096163706748617, |
|
"learning_rate": 9.11764705882353e-05, |
|
"loss": 0.06, |
|
"num_input_tokens_seen": 1547520, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.08241050733968582, |
|
"grad_norm": 4.278069142242893, |
|
"learning_rate": 9.411764705882353e-05, |
|
"loss": 0.0265, |
|
"num_input_tokens_seen": 1597440, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.08498583569405099, |
|
"grad_norm": 1.7183097652953412, |
|
"learning_rate": 9.705882352941177e-05, |
|
"loss": 0.1743, |
|
"num_input_tokens_seen": 1647360, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.08756116404841617, |
|
"grad_norm": 4.95401899568707, |
|
"learning_rate": 0.0001, |
|
"loss": 0.051, |
|
"num_input_tokens_seen": 1697280, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.09013649240278135, |
|
"grad_norm": 0.34558354886099124, |
|
"learning_rate": 9.999940874631277e-05, |
|
"loss": 0.0584, |
|
"num_input_tokens_seen": 1747200, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.09271182075714654, |
|
"grad_norm": 3.6326401692458146, |
|
"learning_rate": 9.999763499923432e-05, |
|
"loss": 0.0704, |
|
"num_input_tokens_seen": 1797120, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.09528714911151172, |
|
"grad_norm": 0.4999720825488852, |
|
"learning_rate": 9.999467880071402e-05, |
|
"loss": 0.0278, |
|
"num_input_tokens_seen": 1847040, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 0.0978624774658769, |
|
"grad_norm": 0.5211964505880501, |
|
"learning_rate": 9.999054022066641e-05, |
|
"loss": 0.0862, |
|
"num_input_tokens_seen": 1896960, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.10043780582024209, |
|
"grad_norm": 0.8767038751832389, |
|
"learning_rate": 9.998521935696953e-05, |
|
"loss": 0.0565, |
|
"num_input_tokens_seen": 1946880, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 0.10301313417460727, |
|
"grad_norm": 0.1848827361202722, |
|
"learning_rate": 9.997871633546257e-05, |
|
"loss": 0.0459, |
|
"num_input_tokens_seen": 1996800, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.10301313417460727, |
|
"eval_loss": 0.09957947582006454, |
|
"eval_runtime": 19.2651, |
|
"eval_samples_per_second": 3.114, |
|
"eval_steps_per_second": 0.779, |
|
"num_input_tokens_seen": 1996800, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.10558846252897244, |
|
"grad_norm": 1.0969393658164421, |
|
"learning_rate": 9.997103130994296e-05, |
|
"loss": 0.0539, |
|
"num_input_tokens_seen": 2046720, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 0.10816379088333762, |
|
"grad_norm": 2.885869194934028, |
|
"learning_rate": 9.996216446216267e-05, |
|
"loss": 0.0654, |
|
"num_input_tokens_seen": 2096640, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.11073911923770281, |
|
"grad_norm": 0.5225257245731217, |
|
"learning_rate": 9.995211600182397e-05, |
|
"loss": 0.0316, |
|
"num_input_tokens_seen": 2146560, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 0.11331444759206799, |
|
"grad_norm": 2.1553510734212797, |
|
"learning_rate": 9.994088616657444e-05, |
|
"loss": 0.1169, |
|
"num_input_tokens_seen": 2196480, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.11588977594643317, |
|
"grad_norm": 1.1133884703723633, |
|
"learning_rate": 9.992847522200133e-05, |
|
"loss": 0.0382, |
|
"num_input_tokens_seen": 2246400, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.11846510430079835, |
|
"grad_norm": 0.8875243341616034, |
|
"learning_rate": 9.99148834616253e-05, |
|
"loss": 0.0406, |
|
"num_input_tokens_seen": 2296320, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.12104043265516354, |
|
"grad_norm": 1.81283533812695, |
|
"learning_rate": 9.990011120689351e-05, |
|
"loss": 0.0182, |
|
"num_input_tokens_seen": 2346240, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 0.12361576100952872, |
|
"grad_norm": 3.873083258671571, |
|
"learning_rate": 9.988415880717194e-05, |
|
"loss": 0.0881, |
|
"num_input_tokens_seen": 2396160, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.1261910893638939, |
|
"grad_norm": 3.427761103620865, |
|
"learning_rate": 9.986702663973722e-05, |
|
"loss": 0.0565, |
|
"num_input_tokens_seen": 2446080, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 0.12876641771825909, |
|
"grad_norm": 1.531943599765959, |
|
"learning_rate": 9.98487151097676e-05, |
|
"loss": 0.0805, |
|
"num_input_tokens_seen": 2496000, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.12876641771825909, |
|
"eval_loss": 0.055875860154628754, |
|
"eval_runtime": 19.5106, |
|
"eval_samples_per_second": 3.075, |
|
"eval_steps_per_second": 0.769, |
|
"num_input_tokens_seen": 2496000, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.13134174607262425, |
|
"grad_norm": 0.845514751132877, |
|
"learning_rate": 9.98292246503335e-05, |
|
"loss": 0.0235, |
|
"num_input_tokens_seen": 2545920, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 0.13391707442698944, |
|
"grad_norm": 1.7441538057875345, |
|
"learning_rate": 9.980855572238714e-05, |
|
"loss": 0.0137, |
|
"num_input_tokens_seen": 2595840, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.13649240278135463, |
|
"grad_norm": 0.6628270146260998, |
|
"learning_rate": 9.978670881475172e-05, |
|
"loss": 0.0605, |
|
"num_input_tokens_seen": 2645760, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 0.1390677311357198, |
|
"grad_norm": 4.49926360488578, |
|
"learning_rate": 9.976368444410985e-05, |
|
"loss": 0.0585, |
|
"num_input_tokens_seen": 2695680, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.141643059490085, |
|
"grad_norm": 0.7159498856832776, |
|
"learning_rate": 9.973948315499126e-05, |
|
"loss": 0.0096, |
|
"num_input_tokens_seen": 2745600, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 0.14421838784445018, |
|
"grad_norm": 4.09842502301765, |
|
"learning_rate": 9.971410551976002e-05, |
|
"loss": 0.1383, |
|
"num_input_tokens_seen": 2795520, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.14679371619881534, |
|
"grad_norm": 1.0773897808674016, |
|
"learning_rate": 9.968755213860094e-05, |
|
"loss": 0.049, |
|
"num_input_tokens_seen": 2845440, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 0.14936904455318054, |
|
"grad_norm": 1.7224091076306651, |
|
"learning_rate": 9.96598236395054e-05, |
|
"loss": 0.0336, |
|
"num_input_tokens_seen": 2895360, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.1519443729075457, |
|
"grad_norm": 0.24158350585209873, |
|
"learning_rate": 9.96309206782565e-05, |
|
"loss": 0.019, |
|
"num_input_tokens_seen": 2945280, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 0.1545197012619109, |
|
"grad_norm": 0.6614271496466305, |
|
"learning_rate": 9.960084393841355e-05, |
|
"loss": 0.0381, |
|
"num_input_tokens_seen": 2995200, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.1545197012619109, |
|
"eval_loss": 0.030908752232789993, |
|
"eval_runtime": 19.8702, |
|
"eval_samples_per_second": 3.02, |
|
"eval_steps_per_second": 0.755, |
|
"num_input_tokens_seen": 2995200, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.15709502961627608, |
|
"grad_norm": 0.2314383428097956, |
|
"learning_rate": 9.956959413129585e-05, |
|
"loss": 0.0245, |
|
"num_input_tokens_seen": 3045120, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 0.15967035797064125, |
|
"grad_norm": 0.2035043692790888, |
|
"learning_rate": 9.953717199596598e-05, |
|
"loss": 0.0121, |
|
"num_input_tokens_seen": 3095040, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.16224568632500644, |
|
"grad_norm": 0.014152925203425593, |
|
"learning_rate": 9.95035782992122e-05, |
|
"loss": 0.054, |
|
"num_input_tokens_seen": 3144960, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 0.16482101467937163, |
|
"grad_norm": 1.0162363271291994, |
|
"learning_rate": 9.94688138355304e-05, |
|
"loss": 0.0849, |
|
"num_input_tokens_seen": 3194880, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.1673963430337368, |
|
"grad_norm": 3.92714662464831, |
|
"learning_rate": 9.943287942710527e-05, |
|
"loss": 0.0503, |
|
"num_input_tokens_seen": 3244800, |
|
"step": 325 |
|
}, |
|
{ |
|
"epoch": 0.16997167138810199, |
|
"grad_norm": 0.47450325213422423, |
|
"learning_rate": 9.939577592379088e-05, |
|
"loss": 0.0302, |
|
"num_input_tokens_seen": 3294720, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.17254699974246718, |
|
"grad_norm": 1.6706834486788253, |
|
"learning_rate": 9.935750420309055e-05, |
|
"loss": 0.082, |
|
"num_input_tokens_seen": 3344640, |
|
"step": 335 |
|
}, |
|
{ |
|
"epoch": 0.17512232809683234, |
|
"grad_norm": 0.16856461555999938, |
|
"learning_rate": 9.931806517013612e-05, |
|
"loss": 0.0411, |
|
"num_input_tokens_seen": 3394560, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.17769765645119753, |
|
"grad_norm": 8.475859938133665, |
|
"learning_rate": 9.927745975766654e-05, |
|
"loss": 0.1158, |
|
"num_input_tokens_seen": 3444480, |
|
"step": 345 |
|
}, |
|
{ |
|
"epoch": 0.1802729848055627, |
|
"grad_norm": 0.7891476176494399, |
|
"learning_rate": 9.923568892600578e-05, |
|
"loss": 0.1761, |
|
"num_input_tokens_seen": 3494400, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.1802729848055627, |
|
"eval_loss": 0.043948542326688766, |
|
"eval_runtime": 19.3589, |
|
"eval_samples_per_second": 3.099, |
|
"eval_steps_per_second": 0.775, |
|
"num_input_tokens_seen": 3494400, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.1828483131599279, |
|
"grad_norm": 0.3217035938609153, |
|
"learning_rate": 9.91927536630402e-05, |
|
"loss": 0.0267, |
|
"num_input_tokens_seen": 3544320, |
|
"step": 355 |
|
}, |
|
{ |
|
"epoch": 0.18542364151429308, |
|
"grad_norm": 1.9146681345367045, |
|
"learning_rate": 9.91486549841951e-05, |
|
"loss": 0.0642, |
|
"num_input_tokens_seen": 3594240, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.18799896986865824, |
|
"grad_norm": 0.053067921944857786, |
|
"learning_rate": 9.91033939324107e-05, |
|
"loss": 0.0302, |
|
"num_input_tokens_seen": 3644160, |
|
"step": 365 |
|
}, |
|
{ |
|
"epoch": 0.19057429822302344, |
|
"grad_norm": 0.07277339857890938, |
|
"learning_rate": 9.905697157811761e-05, |
|
"loss": 0.0099, |
|
"num_input_tokens_seen": 3694080, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.19314962657738863, |
|
"grad_norm": 4.117705571576552, |
|
"learning_rate": 9.900938901921131e-05, |
|
"loss": 0.0573, |
|
"num_input_tokens_seen": 3744000, |
|
"step": 375 |
|
}, |
|
{ |
|
"epoch": 0.1957249549317538, |
|
"grad_norm": 2.4479374171708304, |
|
"learning_rate": 9.896064738102635e-05, |
|
"loss": 0.0302, |
|
"num_input_tokens_seen": 3793920, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.19830028328611898, |
|
"grad_norm": 2.1751552571638153, |
|
"learning_rate": 9.891074781630966e-05, |
|
"loss": 0.0374, |
|
"num_input_tokens_seen": 3843840, |
|
"step": 385 |
|
}, |
|
{ |
|
"epoch": 0.20087561164048418, |
|
"grad_norm": 1.4138134932494708, |
|
"learning_rate": 9.885969150519331e-05, |
|
"loss": 0.0227, |
|
"num_input_tokens_seen": 3893760, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.20345093999484934, |
|
"grad_norm": 0.3312762825665537, |
|
"learning_rate": 9.88074796551666e-05, |
|
"loss": 0.0104, |
|
"num_input_tokens_seen": 3943680, |
|
"step": 395 |
|
}, |
|
{ |
|
"epoch": 0.20602626834921453, |
|
"grad_norm": 0.06774783590394957, |
|
"learning_rate": 9.875411350104744e-05, |
|
"loss": 0.0146, |
|
"num_input_tokens_seen": 3993600, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.20602626834921453, |
|
"eval_loss": 0.024433813989162445, |
|
"eval_runtime": 18.8143, |
|
"eval_samples_per_second": 3.189, |
|
"eval_steps_per_second": 0.797, |
|
"num_input_tokens_seen": 3993600, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.2086015967035797, |
|
"grad_norm": 0.06416132017781931, |
|
"learning_rate": 9.86995943049533e-05, |
|
"loss": 0.0074, |
|
"num_input_tokens_seen": 4043520, |
|
"step": 405 |
|
}, |
|
{ |
|
"epoch": 0.2111769250579449, |
|
"grad_norm": 0.04809215529224866, |
|
"learning_rate": 9.864392335627117e-05, |
|
"loss": 0.0375, |
|
"num_input_tokens_seen": 4093440, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.21375225341231008, |
|
"grad_norm": 0.40943321947836153, |
|
"learning_rate": 9.858710197162721e-05, |
|
"loss": 0.0217, |
|
"num_input_tokens_seen": 4143360, |
|
"step": 415 |
|
}, |
|
{ |
|
"epoch": 0.21632758176667524, |
|
"grad_norm": 0.08069011480339518, |
|
"learning_rate": 9.852913149485556e-05, |
|
"loss": 0.016, |
|
"num_input_tokens_seen": 4193280, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.21890291012104043, |
|
"grad_norm": 1.806585526467194, |
|
"learning_rate": 9.847001329696653e-05, |
|
"loss": 0.0832, |
|
"num_input_tokens_seen": 4243200, |
|
"step": 425 |
|
}, |
|
{ |
|
"epoch": 0.22147823847540563, |
|
"grad_norm": 3.122914329597603, |
|
"learning_rate": 9.840974877611422e-05, |
|
"loss": 0.0444, |
|
"num_input_tokens_seen": 4293120, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.2240535668297708, |
|
"grad_norm": 1.0101510644418257, |
|
"learning_rate": 9.834833935756344e-05, |
|
"loss": 0.0465, |
|
"num_input_tokens_seen": 4343040, |
|
"step": 435 |
|
}, |
|
{ |
|
"epoch": 0.22662889518413598, |
|
"grad_norm": 1.7770164472545809, |
|
"learning_rate": 9.828578649365601e-05, |
|
"loss": 0.0428, |
|
"num_input_tokens_seen": 4392960, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.22920422353850115, |
|
"grad_norm": 0.9798524063329249, |
|
"learning_rate": 9.822209166377635e-05, |
|
"loss": 0.02, |
|
"num_input_tokens_seen": 4442880, |
|
"step": 445 |
|
}, |
|
{ |
|
"epoch": 0.23177955189286634, |
|
"grad_norm": 0.22305429096692395, |
|
"learning_rate": 9.815725637431662e-05, |
|
"loss": 0.0157, |
|
"num_input_tokens_seen": 4492800, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.23177955189286634, |
|
"eval_loss": 0.006690301466733217, |
|
"eval_runtime": 18.7725, |
|
"eval_samples_per_second": 3.196, |
|
"eval_steps_per_second": 0.799, |
|
"num_input_tokens_seen": 4492800, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.23435488024723153, |
|
"grad_norm": 2.8137229294991064, |
|
"learning_rate": 9.809128215864097e-05, |
|
"loss": 0.0596, |
|
"num_input_tokens_seen": 4542720, |
|
"step": 455 |
|
}, |
|
{ |
|
"epoch": 0.2369302086015967, |
|
"grad_norm": 0.045182426772616975, |
|
"learning_rate": 9.802417057704931e-05, |
|
"loss": 0.0238, |
|
"num_input_tokens_seen": 4592640, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.23950553695596188, |
|
"grad_norm": 2.1659990503080424, |
|
"learning_rate": 9.795592321674045e-05, |
|
"loss": 0.0331, |
|
"num_input_tokens_seen": 4642560, |
|
"step": 465 |
|
}, |
|
{ |
|
"epoch": 0.24208086531032708, |
|
"grad_norm": 0.3208179936186945, |
|
"learning_rate": 9.788654169177453e-05, |
|
"loss": 0.0177, |
|
"num_input_tokens_seen": 4692480, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.24465619366469224, |
|
"grad_norm": 1.4150868022870595, |
|
"learning_rate": 9.781602764303487e-05, |
|
"loss": 0.028, |
|
"num_input_tokens_seen": 4742400, |
|
"step": 475 |
|
}, |
|
{ |
|
"epoch": 0.24723152201905743, |
|
"grad_norm": 0.21015675310797993, |
|
"learning_rate": 9.774438273818911e-05, |
|
"loss": 0.0561, |
|
"num_input_tokens_seen": 4792320, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.24980685037342262, |
|
"grad_norm": 2.6788940269709287, |
|
"learning_rate": 9.767160867164979e-05, |
|
"loss": 0.0242, |
|
"num_input_tokens_seen": 4842240, |
|
"step": 485 |
|
}, |
|
{ |
|
"epoch": 0.2523821787277878, |
|
"grad_norm": 0.03933517764434036, |
|
"learning_rate": 9.759770716453436e-05, |
|
"loss": 0.0286, |
|
"num_input_tokens_seen": 4892160, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.254957507082153, |
|
"grad_norm": 2.640299598300008, |
|
"learning_rate": 9.752267996462434e-05, |
|
"loss": 0.0394, |
|
"num_input_tokens_seen": 4942080, |
|
"step": 495 |
|
}, |
|
{ |
|
"epoch": 0.25753283543651817, |
|
"grad_norm": 0.03143215150024672, |
|
"learning_rate": 9.744652884632406e-05, |
|
"loss": 0.0122, |
|
"num_input_tokens_seen": 4992000, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.25753283543651817, |
|
"eval_loss": 0.007972972467541695, |
|
"eval_runtime": 18.8509, |
|
"eval_samples_per_second": 3.183, |
|
"eval_steps_per_second": 0.796, |
|
"num_input_tokens_seen": 4992000, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.26010816379088336, |
|
"grad_norm": 0.3421379771468272, |
|
"learning_rate": 9.736925561061871e-05, |
|
"loss": 0.0184, |
|
"num_input_tokens_seen": 5041920, |
|
"step": 505 |
|
}, |
|
{ |
|
"epoch": 0.2626834921452485, |
|
"grad_norm": 0.16336037200955245, |
|
"learning_rate": 9.729086208503174e-05, |
|
"loss": 0.0046, |
|
"num_input_tokens_seen": 5091840, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 0.2652588204996137, |
|
"grad_norm": 0.7318312729236729, |
|
"learning_rate": 9.721135012358156e-05, |
|
"loss": 0.0226, |
|
"num_input_tokens_seen": 5141760, |
|
"step": 515 |
|
}, |
|
{ |
|
"epoch": 0.2678341488539789, |
|
"grad_norm": 2.1703341770378333, |
|
"learning_rate": 9.713072160673777e-05, |
|
"loss": 0.0475, |
|
"num_input_tokens_seen": 5191680, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 0.2704094772083441, |
|
"grad_norm": 1.6372493711976897, |
|
"learning_rate": 9.704897844137673e-05, |
|
"loss": 0.0572, |
|
"num_input_tokens_seen": 5241600, |
|
"step": 525 |
|
}, |
|
{ |
|
"epoch": 0.27298480556270927, |
|
"grad_norm": 3.5840623375725804, |
|
"learning_rate": 9.696612256073633e-05, |
|
"loss": 0.025, |
|
"num_input_tokens_seen": 5291520, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 0.2755601339170744, |
|
"grad_norm": 0.06141348661076509, |
|
"learning_rate": 9.688215592437039e-05, |
|
"loss": 0.0122, |
|
"num_input_tokens_seen": 5341440, |
|
"step": 535 |
|
}, |
|
{ |
|
"epoch": 0.2781354622714396, |
|
"grad_norm": 0.04980492840656424, |
|
"learning_rate": 9.679708051810221e-05, |
|
"loss": 0.0045, |
|
"num_input_tokens_seen": 5391360, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 0.2807107906258048, |
|
"grad_norm": 3.153079438616822, |
|
"learning_rate": 9.67108983539777e-05, |
|
"loss": 0.0431, |
|
"num_input_tokens_seen": 5441280, |
|
"step": 545 |
|
}, |
|
{ |
|
"epoch": 0.28328611898017, |
|
"grad_norm": 1.6764625232464418, |
|
"learning_rate": 9.662361147021779e-05, |
|
"loss": 0.0339, |
|
"num_input_tokens_seen": 5491200, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 0.28328611898017, |
|
"eval_loss": 0.0033526704646646976, |
|
"eval_runtime": 18.8387, |
|
"eval_samples_per_second": 3.185, |
|
"eval_steps_per_second": 0.796, |
|
"num_input_tokens_seen": 5491200, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 0.28586144733453517, |
|
"grad_norm": 0.05427488923085365, |
|
"learning_rate": 9.653522193117013e-05, |
|
"loss": 0.0341, |
|
"num_input_tokens_seen": 5541120, |
|
"step": 555 |
|
}, |
|
{ |
|
"epoch": 0.28843677568890036, |
|
"grad_norm": 1.538376531568411, |
|
"learning_rate": 9.644573182726035e-05, |
|
"loss": 0.0341, |
|
"num_input_tokens_seen": 5591040, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 0.2910121040432655, |
|
"grad_norm": 0.04402865983757039, |
|
"learning_rate": 9.63551432749426e-05, |
|
"loss": 0.0453, |
|
"num_input_tokens_seen": 5640960, |
|
"step": 565 |
|
}, |
|
{ |
|
"epoch": 0.2935874323976307, |
|
"grad_norm": 0.1469885874710427, |
|
"learning_rate": 9.626345841664953e-05, |
|
"loss": 0.0227, |
|
"num_input_tokens_seen": 5690880, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 0.2961627607519959, |
|
"grad_norm": 0.3459204695983954, |
|
"learning_rate": 9.617067942074153e-05, |
|
"loss": 0.0135, |
|
"num_input_tokens_seen": 5740800, |
|
"step": 575 |
|
}, |
|
{ |
|
"epoch": 0.29873808910636107, |
|
"grad_norm": 1.7604264611882452, |
|
"learning_rate": 9.607680848145558e-05, |
|
"loss": 0.0439, |
|
"num_input_tokens_seen": 5790720, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 0.30131341746072626, |
|
"grad_norm": 2.1661707703583204, |
|
"learning_rate": 9.598184781885318e-05, |
|
"loss": 0.0344, |
|
"num_input_tokens_seen": 5840640, |
|
"step": 585 |
|
}, |
|
{ |
|
"epoch": 0.3038887458150914, |
|
"grad_norm": 0.19674934066326408, |
|
"learning_rate": 9.588579967876806e-05, |
|
"loss": 0.0244, |
|
"num_input_tokens_seen": 5890560, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 0.3064640741694566, |
|
"grad_norm": 4.463551986915384, |
|
"learning_rate": 9.578866633275288e-05, |
|
"loss": 0.022, |
|
"num_input_tokens_seen": 5940480, |
|
"step": 595 |
|
}, |
|
{ |
|
"epoch": 0.3090394025238218, |
|
"grad_norm": 0.08358626804613765, |
|
"learning_rate": 9.569045007802559e-05, |
|
"loss": 0.0217, |
|
"num_input_tokens_seen": 5990400, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.3090394025238218, |
|
"eval_loss": 0.013290103524923325, |
|
"eval_runtime": 18.8576, |
|
"eval_samples_per_second": 3.182, |
|
"eval_steps_per_second": 0.795, |
|
"num_input_tokens_seen": 5990400, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.311614730878187, |
|
"grad_norm": 0.06328082378687645, |
|
"learning_rate": 9.55911532374151e-05, |
|
"loss": 0.0207, |
|
"num_input_tokens_seen": 6040320, |
|
"step": 605 |
|
}, |
|
{ |
|
"epoch": 0.31419005923255217, |
|
"grad_norm": 1.846961607001629, |
|
"learning_rate": 9.549077815930636e-05, |
|
"loss": 0.023, |
|
"num_input_tokens_seen": 6090240, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 0.31676538758691736, |
|
"grad_norm": 0.07122251919235507, |
|
"learning_rate": 9.538932721758474e-05, |
|
"loss": 0.017, |
|
"num_input_tokens_seen": 6140160, |
|
"step": 615 |
|
}, |
|
{ |
|
"epoch": 0.3193407159412825, |
|
"grad_norm": 1.9564639611342651, |
|
"learning_rate": 9.528680281157999e-05, |
|
"loss": 0.009, |
|
"num_input_tokens_seen": 6190080, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 0.3219160442956477, |
|
"grad_norm": 0.43628623054662674, |
|
"learning_rate": 9.518320736600943e-05, |
|
"loss": 0.0421, |
|
"num_input_tokens_seen": 6240000, |
|
"step": 625 |
|
}, |
|
{ |
|
"epoch": 0.3244913726500129, |
|
"grad_norm": 0.7626652394047067, |
|
"learning_rate": 9.507854333092063e-05, |
|
"loss": 0.0324, |
|
"num_input_tokens_seen": 6289920, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 0.32706670100437807, |
|
"grad_norm": 0.5823990007842583, |
|
"learning_rate": 9.497281318163346e-05, |
|
"loss": 0.0139, |
|
"num_input_tokens_seen": 6339840, |
|
"step": 635 |
|
}, |
|
{ |
|
"epoch": 0.32964202935874326, |
|
"grad_norm": 1.6726620150276597, |
|
"learning_rate": 9.486601941868154e-05, |
|
"loss": 0.0375, |
|
"num_input_tokens_seen": 6389760, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 0.3322173577131084, |
|
"grad_norm": 0.08456122780891835, |
|
"learning_rate": 9.475816456775313e-05, |
|
"loss": 0.0618, |
|
"num_input_tokens_seen": 6439680, |
|
"step": 645 |
|
}, |
|
{ |
|
"epoch": 0.3347926860674736, |
|
"grad_norm": 0.3287256358007373, |
|
"learning_rate": 9.464925117963133e-05, |
|
"loss": 0.0327, |
|
"num_input_tokens_seen": 6489600, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 0.3347926860674736, |
|
"eval_loss": 0.02095886506140232, |
|
"eval_runtime": 19.1993, |
|
"eval_samples_per_second": 3.125, |
|
"eval_steps_per_second": 0.781, |
|
"num_input_tokens_seen": 6489600, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 0.3373680144218388, |
|
"grad_norm": 1.642912250249591, |
|
"learning_rate": 9.453928183013385e-05, |
|
"loss": 0.037, |
|
"num_input_tokens_seen": 6539520, |
|
"step": 655 |
|
}, |
|
{ |
|
"epoch": 0.33994334277620397, |
|
"grad_norm": 1.6829194140719153, |
|
"learning_rate": 9.442825912005202e-05, |
|
"loss": 0.034, |
|
"num_input_tokens_seen": 6589440, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 0.34251867113056916, |
|
"grad_norm": 1.7722553129499397, |
|
"learning_rate": 9.431618567508933e-05, |
|
"loss": 0.0584, |
|
"num_input_tokens_seen": 6639360, |
|
"step": 665 |
|
}, |
|
{ |
|
"epoch": 0.34509399948493436, |
|
"grad_norm": 1.8398383207694327, |
|
"learning_rate": 9.420306414579925e-05, |
|
"loss": 0.059, |
|
"num_input_tokens_seen": 6689280, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 0.3476693278392995, |
|
"grad_norm": 0.2451231053960763, |
|
"learning_rate": 9.408889720752266e-05, |
|
"loss": 0.023, |
|
"num_input_tokens_seen": 6739200, |
|
"step": 675 |
|
}, |
|
{ |
|
"epoch": 0.3502446561936647, |
|
"grad_norm": 0.13166014546250449, |
|
"learning_rate": 9.397368756032445e-05, |
|
"loss": 0.0327, |
|
"num_input_tokens_seen": 6789120, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 0.3528199845480299, |
|
"grad_norm": 1.9392864055798256, |
|
"learning_rate": 9.385743792892982e-05, |
|
"loss": 0.0241, |
|
"num_input_tokens_seen": 6839040, |
|
"step": 685 |
|
}, |
|
{ |
|
"epoch": 0.35539531290239507, |
|
"grad_norm": 0.7574757741852987, |
|
"learning_rate": 9.374015106265968e-05, |
|
"loss": 0.0227, |
|
"num_input_tokens_seen": 6888960, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 0.35797064125676026, |
|
"grad_norm": 1.1649279233149774, |
|
"learning_rate": 9.362182973536569e-05, |
|
"loss": 0.0171, |
|
"num_input_tokens_seen": 6938880, |
|
"step": 695 |
|
}, |
|
{ |
|
"epoch": 0.3605459696111254, |
|
"grad_norm": 0.04561814221227265, |
|
"learning_rate": 9.35024767453647e-05, |
|
"loss": 0.0267, |
|
"num_input_tokens_seen": 6988800, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.3605459696111254, |
|
"eval_loss": 0.005286735948175192, |
|
"eval_runtime": 19.2824, |
|
"eval_samples_per_second": 3.112, |
|
"eval_steps_per_second": 0.778, |
|
"num_input_tokens_seen": 6988800, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.3631212979654906, |
|
"grad_norm": 1.7432145241229078, |
|
"learning_rate": 9.338209491537257e-05, |
|
"loss": 0.0335, |
|
"num_input_tokens_seen": 7038720, |
|
"step": 705 |
|
}, |
|
{ |
|
"epoch": 0.3656966263198558, |
|
"grad_norm": 1.821635752664896, |
|
"learning_rate": 9.326068709243727e-05, |
|
"loss": 0.0108, |
|
"num_input_tokens_seen": 7088640, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 0.36827195467422097, |
|
"grad_norm": 0.5782878283043246, |
|
"learning_rate": 9.313825614787177e-05, |
|
"loss": 0.0296, |
|
"num_input_tokens_seen": 7138560, |
|
"step": 715 |
|
}, |
|
{ |
|
"epoch": 0.37084728302858616, |
|
"grad_norm": 2.0568373189070908, |
|
"learning_rate": 9.301480497718593e-05, |
|
"loss": 0.042, |
|
"num_input_tokens_seen": 7188480, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 0.37342261138295135, |
|
"grad_norm": 0.20623581315280795, |
|
"learning_rate": 9.289033650001817e-05, |
|
"loss": 0.0083, |
|
"num_input_tokens_seen": 7238400, |
|
"step": 725 |
|
}, |
|
{ |
|
"epoch": 0.3759979397373165, |
|
"grad_norm": 1.424505040979842, |
|
"learning_rate": 9.276485366006634e-05, |
|
"loss": 0.0109, |
|
"num_input_tokens_seen": 7288320, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 0.3785732680916817, |
|
"grad_norm": 2.1248350485820584, |
|
"learning_rate": 9.263835942501807e-05, |
|
"loss": 0.0122, |
|
"num_input_tokens_seen": 7338240, |
|
"step": 735 |
|
}, |
|
{ |
|
"epoch": 0.3811485964460469, |
|
"grad_norm": 0.357888255955218, |
|
"learning_rate": 9.251085678648072e-05, |
|
"loss": 0.017, |
|
"num_input_tokens_seen": 7388160, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 0.38372392480041206, |
|
"grad_norm": 3.859418562917624, |
|
"learning_rate": 9.238234875991046e-05, |
|
"loss": 0.0236, |
|
"num_input_tokens_seen": 7438080, |
|
"step": 745 |
|
}, |
|
{ |
|
"epoch": 0.38629925315477726, |
|
"grad_norm": 2.660896728773248, |
|
"learning_rate": 9.225283838454111e-05, |
|
"loss": 0.014, |
|
"num_input_tokens_seen": 7488000, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 0.38629925315477726, |
|
"eval_loss": 0.005334165878593922, |
|
"eval_runtime": 19.3986, |
|
"eval_samples_per_second": 3.093, |
|
"eval_steps_per_second": 0.773, |
|
"num_input_tokens_seen": 7488000, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 0.3888745815091424, |
|
"grad_norm": 3.511127189174474, |
|
"learning_rate": 9.21223287233121e-05, |
|
"loss": 0.0426, |
|
"num_input_tokens_seen": 7537920, |
|
"step": 755 |
|
}, |
|
{ |
|
"epoch": 0.3914499098635076, |
|
"grad_norm": 2.858952603531337, |
|
"learning_rate": 9.199082286279622e-05, |
|
"loss": 0.0094, |
|
"num_input_tokens_seen": 7587840, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 0.3940252382178728, |
|
"grad_norm": 1.9568584051828664, |
|
"learning_rate": 9.185832391312644e-05, |
|
"loss": 0.008, |
|
"num_input_tokens_seen": 7637760, |
|
"step": 765 |
|
}, |
|
{ |
|
"epoch": 0.39660056657223797, |
|
"grad_norm": 1.7485145115139782, |
|
"learning_rate": 9.172483500792244e-05, |
|
"loss": 0.0297, |
|
"num_input_tokens_seen": 7687680, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 0.39917589492660316, |
|
"grad_norm": 0.05161945290023201, |
|
"learning_rate": 9.159035930421658e-05, |
|
"loss": 0.0239, |
|
"num_input_tokens_seen": 7737600, |
|
"step": 775 |
|
}, |
|
{ |
|
"epoch": 0.40175122328096835, |
|
"grad_norm": 5.343080934932771, |
|
"learning_rate": 9.145489998237902e-05, |
|
"loss": 0.0162, |
|
"num_input_tokens_seen": 7787520, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 0.4043265516353335, |
|
"grad_norm": 0.020924688334471742, |
|
"learning_rate": 9.131846024604274e-05, |
|
"loss": 0.0309, |
|
"num_input_tokens_seen": 7837440, |
|
"step": 785 |
|
}, |
|
{ |
|
"epoch": 0.4069018799896987, |
|
"grad_norm": 0.5607043400469908, |
|
"learning_rate": 9.11810433220276e-05, |
|
"loss": 0.0432, |
|
"num_input_tokens_seen": 7887360, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 0.40947720834406387, |
|
"grad_norm": 0.05221845282486231, |
|
"learning_rate": 9.104265246026415e-05, |
|
"loss": 0.0022, |
|
"num_input_tokens_seen": 7937280, |
|
"step": 795 |
|
}, |
|
{ |
|
"epoch": 0.41205253669842906, |
|
"grad_norm": 0.09825330413354653, |
|
"learning_rate": 9.090329093371666e-05, |
|
"loss": 0.0065, |
|
"num_input_tokens_seen": 7987200, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.41205253669842906, |
|
"eval_loss": 0.006797688081860542, |
|
"eval_runtime": 19.3386, |
|
"eval_samples_per_second": 3.103, |
|
"eval_steps_per_second": 0.776, |
|
"num_input_tokens_seen": 7987200, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.41462786505279425, |
|
"grad_norm": 0.016984614729716928, |
|
"learning_rate": 9.076296203830579e-05, |
|
"loss": 0.0025, |
|
"num_input_tokens_seen": 8037120, |
|
"step": 805 |
|
}, |
|
{ |
|
"epoch": 0.4172031934071594, |
|
"grad_norm": 8.87156169406028, |
|
"learning_rate": 9.062166909283062e-05, |
|
"loss": 0.0338, |
|
"num_input_tokens_seen": 8087040, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 0.4197785217615246, |
|
"grad_norm": 0.07123492673307173, |
|
"learning_rate": 9.047941543889014e-05, |
|
"loss": 0.0505, |
|
"num_input_tokens_seen": 8136960, |
|
"step": 815 |
|
}, |
|
{ |
|
"epoch": 0.4223538501158898, |
|
"grad_norm": 0.3486611879887143, |
|
"learning_rate": 9.033620444080428e-05, |
|
"loss": 0.0335, |
|
"num_input_tokens_seen": 8186880, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 0.42492917847025496, |
|
"grad_norm": 0.2658900406741178, |
|
"learning_rate": 9.019203948553422e-05, |
|
"loss": 0.0196, |
|
"num_input_tokens_seen": 8236800, |
|
"step": 825 |
|
}, |
|
{ |
|
"epoch": 0.42750450682462016, |
|
"grad_norm": 0.15327530188348007, |
|
"learning_rate": 9.004692398260244e-05, |
|
"loss": 0.018, |
|
"num_input_tokens_seen": 8286720, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 0.43007983517898535, |
|
"grad_norm": 5.292901102191953, |
|
"learning_rate": 8.9900861364012e-05, |
|
"loss": 0.012, |
|
"num_input_tokens_seen": 8336640, |
|
"step": 835 |
|
}, |
|
{ |
|
"epoch": 0.4326551635333505, |
|
"grad_norm": 3.2113529029102375, |
|
"learning_rate": 8.975385508416532e-05, |
|
"loss": 0.0062, |
|
"num_input_tokens_seen": 8386560, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 0.4352304918877157, |
|
"grad_norm": 0.3685880212421972, |
|
"learning_rate": 8.960590861978265e-05, |
|
"loss": 0.0232, |
|
"num_input_tokens_seen": 8436480, |
|
"step": 845 |
|
}, |
|
{ |
|
"epoch": 0.43780582024208087, |
|
"grad_norm": 1.4858234531184304, |
|
"learning_rate": 8.945702546981969e-05, |
|
"loss": 0.0306, |
|
"num_input_tokens_seen": 8486400, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 0.43780582024208087, |
|
"eval_loss": 0.007198736071586609, |
|
"eval_runtime": 19.7019, |
|
"eval_samples_per_second": 3.045, |
|
"eval_steps_per_second": 0.761, |
|
"num_input_tokens_seen": 8486400, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 0.44038114859644606, |
|
"grad_norm": 0.22602261311014887, |
|
"learning_rate": 8.930720915538487e-05, |
|
"loss": 0.0255, |
|
"num_input_tokens_seen": 8536320, |
|
"step": 855 |
|
}, |
|
{ |
|
"epoch": 0.44295647695081125, |
|
"grad_norm": 0.1515497727795517, |
|
"learning_rate": 8.915646321965614e-05, |
|
"loss": 0.0267, |
|
"num_input_tokens_seen": 8586240, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 0.4455318053051764, |
|
"grad_norm": 0.039053785243136956, |
|
"learning_rate": 8.900479122779712e-05, |
|
"loss": 0.0107, |
|
"num_input_tokens_seen": 8636160, |
|
"step": 865 |
|
}, |
|
{ |
|
"epoch": 0.4481071336595416, |
|
"grad_norm": 0.42882717357803835, |
|
"learning_rate": 8.885219676687277e-05, |
|
"loss": 0.0277, |
|
"num_input_tokens_seen": 8686080, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 0.45068246201390677, |
|
"grad_norm": 5.519940082555776, |
|
"learning_rate": 8.869868344576459e-05, |
|
"loss": 0.0175, |
|
"num_input_tokens_seen": 8736000, |
|
"step": 875 |
|
}, |
|
{ |
|
"epoch": 0.45325779036827196, |
|
"grad_norm": 0.15983443395533375, |
|
"learning_rate": 8.854425489508532e-05, |
|
"loss": 0.0065, |
|
"num_input_tokens_seen": 8785920, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 0.45583311872263715, |
|
"grad_norm": 0.42885948370195226, |
|
"learning_rate": 8.838891476709288e-05, |
|
"loss": 0.0476, |
|
"num_input_tokens_seen": 8835840, |
|
"step": 885 |
|
}, |
|
{ |
|
"epoch": 0.4584084470770023, |
|
"grad_norm": 0.18209352469474613, |
|
"learning_rate": 8.823266673560426e-05, |
|
"loss": 0.018, |
|
"num_input_tokens_seen": 8885760, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 0.4609837754313675, |
|
"grad_norm": 1.7895291298260116, |
|
"learning_rate": 8.807551449590846e-05, |
|
"loss": 0.0337, |
|
"num_input_tokens_seen": 8935680, |
|
"step": 895 |
|
}, |
|
{ |
|
"epoch": 0.4635591037857327, |
|
"grad_norm": 0.11579050055375595, |
|
"learning_rate": 8.791746176467907e-05, |
|
"loss": 0.0063, |
|
"num_input_tokens_seen": 8985600, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.4635591037857327, |
|
"eval_loss": 0.010749292559921741, |
|
"eval_runtime": 19.2918, |
|
"eval_samples_per_second": 3.11, |
|
"eval_steps_per_second": 0.778, |
|
"num_input_tokens_seen": 8985600, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.46613443214009787, |
|
"grad_norm": 2.090931980208863, |
|
"learning_rate": 8.775851227988656e-05, |
|
"loss": 0.0318, |
|
"num_input_tokens_seen": 9035520, |
|
"step": 905 |
|
}, |
|
{ |
|
"epoch": 0.46870976049446306, |
|
"grad_norm": 1.7373295104389102, |
|
"learning_rate": 8.759866980070963e-05, |
|
"loss": 0.0635, |
|
"num_input_tokens_seen": 9085440, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 0.47128508884882825, |
|
"grad_norm": 0.1557668148789241, |
|
"learning_rate": 8.743793810744654e-05, |
|
"loss": 0.0035, |
|
"num_input_tokens_seen": 9135360, |
|
"step": 915 |
|
}, |
|
{ |
|
"epoch": 0.4738604172031934, |
|
"grad_norm": 0.5241949871459053, |
|
"learning_rate": 8.727632100142551e-05, |
|
"loss": 0.0047, |
|
"num_input_tokens_seen": 9185280, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 0.4764357455575586, |
|
"grad_norm": 2.329131853129593, |
|
"learning_rate": 8.711382230491493e-05, |
|
"loss": 0.0194, |
|
"num_input_tokens_seen": 9235200, |
|
"step": 925 |
|
}, |
|
{ |
|
"epoch": 0.47901107391192377, |
|
"grad_norm": 1.0158880737206768, |
|
"learning_rate": 8.695044586103296e-05, |
|
"loss": 0.0234, |
|
"num_input_tokens_seen": 9285120, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 0.48158640226628896, |
|
"grad_norm": 0.016729230547496875, |
|
"learning_rate": 8.678619553365659e-05, |
|
"loss": 0.0253, |
|
"num_input_tokens_seen": 9335040, |
|
"step": 935 |
|
}, |
|
{ |
|
"epoch": 0.48416173062065415, |
|
"grad_norm": 2.937987778998638, |
|
"learning_rate": 8.662107520733027e-05, |
|
"loss": 0.0191, |
|
"num_input_tokens_seen": 9384960, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 0.4867370589750193, |
|
"grad_norm": 0.20640046184009653, |
|
"learning_rate": 8.64550887871741e-05, |
|
"loss": 0.0469, |
|
"num_input_tokens_seen": 9434880, |
|
"step": 945 |
|
}, |
|
{ |
|
"epoch": 0.4893123873293845, |
|
"grad_norm": 1.9952606902760353, |
|
"learning_rate": 8.628824019879137e-05, |
|
"loss": 0.0415, |
|
"num_input_tokens_seen": 9484800, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 0.4893123873293845, |
|
"eval_loss": 0.007216573692858219, |
|
"eval_runtime": 19.2716, |
|
"eval_samples_per_second": 3.113, |
|
"eval_steps_per_second": 0.778, |
|
"num_input_tokens_seen": 9484800, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 0.49188771568374967, |
|
"grad_norm": 3.7704976412587468, |
|
"learning_rate": 8.612053338817581e-05, |
|
"loss": 0.0393, |
|
"num_input_tokens_seen": 9534720, |
|
"step": 955 |
|
}, |
|
{ |
|
"epoch": 0.49446304403811486, |
|
"grad_norm": 0.2205817340051924, |
|
"learning_rate": 8.595197232161824e-05, |
|
"loss": 0.0102, |
|
"num_input_tokens_seen": 9584640, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 0.49703837239248005, |
|
"grad_norm": 1.9646554268692766, |
|
"learning_rate": 8.578256098561275e-05, |
|
"loss": 0.0087, |
|
"num_input_tokens_seen": 9634560, |
|
"step": 965 |
|
}, |
|
{ |
|
"epoch": 0.49961370074684525, |
|
"grad_norm": 2.08637430640156, |
|
"learning_rate": 8.561230338676239e-05, |
|
"loss": 0.0097, |
|
"num_input_tokens_seen": 9684480, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 0.5021890291012104, |
|
"grad_norm": 0.018874732349478156, |
|
"learning_rate": 8.544120355168451e-05, |
|
"loss": 0.0013, |
|
"num_input_tokens_seen": 9734400, |
|
"step": 975 |
|
}, |
|
{ |
|
"epoch": 0.5047643574555756, |
|
"grad_norm": 0.007747713318690538, |
|
"learning_rate": 8.526926552691544e-05, |
|
"loss": 0.0268, |
|
"num_input_tokens_seen": 9784320, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 0.5073396858099408, |
|
"grad_norm": 4.2765032076143585, |
|
"learning_rate": 8.509649337881483e-05, |
|
"loss": 0.0418, |
|
"num_input_tokens_seen": 9834240, |
|
"step": 985 |
|
}, |
|
{ |
|
"epoch": 0.509915014164306, |
|
"grad_norm": 0.016725809588179666, |
|
"learning_rate": 8.492289119346943e-05, |
|
"loss": 0.0011, |
|
"num_input_tokens_seen": 9884160, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 0.5124903425186711, |
|
"grad_norm": 1.5189362175317798, |
|
"learning_rate": 8.474846307659658e-05, |
|
"loss": 0.0058, |
|
"num_input_tokens_seen": 9934080, |
|
"step": 995 |
|
}, |
|
{ |
|
"epoch": 0.5150656708730363, |
|
"grad_norm": 0.5873213891971129, |
|
"learning_rate": 8.457321315344694e-05, |
|
"loss": 0.0547, |
|
"num_input_tokens_seen": 9984000, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.5150656708730363, |
|
"eval_loss": 0.0007357922149822116, |
|
"eval_runtime": 19.1915, |
|
"eval_samples_per_second": 3.126, |
|
"eval_steps_per_second": 0.782, |
|
"num_input_tokens_seen": 9984000, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 0.5176409992274015, |
|
"grad_norm": 0.06909777020752475, |
|
"learning_rate": 8.439714556870704e-05, |
|
"loss": 0.0042, |
|
"num_input_tokens_seen": 10033920, |
|
"step": 1005 |
|
}, |
|
{ |
|
"epoch": 0.5202163275817667, |
|
"grad_norm": 0.15306455110846962, |
|
"learning_rate": 8.422026448640124e-05, |
|
"loss": 0.0419, |
|
"num_input_tokens_seen": 10083840, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 0.5227916559361319, |
|
"grad_norm": 0.06864588320363613, |
|
"learning_rate": 8.40425740897932e-05, |
|
"loss": 0.0313, |
|
"num_input_tokens_seen": 10133760, |
|
"step": 1015 |
|
}, |
|
{ |
|
"epoch": 0.525366984290497, |
|
"grad_norm": 0.041539288845711536, |
|
"learning_rate": 8.386407858128706e-05, |
|
"loss": 0.005, |
|
"num_input_tokens_seen": 10183680, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 0.5279423126448622, |
|
"grad_norm": 3.866462743446183, |
|
"learning_rate": 8.368478218232787e-05, |
|
"loss": 0.0171, |
|
"num_input_tokens_seen": 10233600, |
|
"step": 1025 |
|
}, |
|
{ |
|
"epoch": 0.5305176409992274, |
|
"grad_norm": 0.27036285469370147, |
|
"learning_rate": 8.350468913330192e-05, |
|
"loss": 0.0019, |
|
"num_input_tokens_seen": 10283520, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 0.5330929693535926, |
|
"grad_norm": 0.12025617159059553, |
|
"learning_rate": 8.33238036934364e-05, |
|
"loss": 0.0256, |
|
"num_input_tokens_seen": 10333440, |
|
"step": 1035 |
|
}, |
|
{ |
|
"epoch": 0.5356682977079578, |
|
"grad_norm": 0.013500323699360073, |
|
"learning_rate": 8.31421301406986e-05, |
|
"loss": 0.0019, |
|
"num_input_tokens_seen": 10383360, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 0.5382436260623229, |
|
"grad_norm": 0.10118623585384687, |
|
"learning_rate": 8.29596727716949e-05, |
|
"loss": 0.018, |
|
"num_input_tokens_seen": 10433280, |
|
"step": 1045 |
|
}, |
|
{ |
|
"epoch": 0.5408189544166881, |
|
"grad_norm": 0.23565575572231393, |
|
"learning_rate": 8.277643590156894e-05, |
|
"loss": 0.0007, |
|
"num_input_tokens_seen": 10483200, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 0.5408189544166881, |
|
"eval_loss": 0.05677948147058487, |
|
"eval_runtime": 19.0806, |
|
"eval_samples_per_second": 3.145, |
|
"eval_steps_per_second": 0.786, |
|
"num_input_tokens_seen": 10483200, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 0.5433942827710533, |
|
"grad_norm": 6.026032426572499, |
|
"learning_rate": 8.259242386389973e-05, |
|
"loss": 0.0436, |
|
"num_input_tokens_seen": 10533120, |
|
"step": 1055 |
|
}, |
|
{ |
|
"epoch": 0.5459696111254185, |
|
"grad_norm": 9.168641384066685, |
|
"learning_rate": 8.240764101059912e-05, |
|
"loss": 0.0124, |
|
"num_input_tokens_seen": 10583040, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 0.5485449394797837, |
|
"grad_norm": 0.590720691746965, |
|
"learning_rate": 8.222209171180883e-05, |
|
"loss": 0.0155, |
|
"num_input_tokens_seen": 10632960, |
|
"step": 1065 |
|
}, |
|
{ |
|
"epoch": 0.5511202678341488, |
|
"grad_norm": 8.790606959622112, |
|
"learning_rate": 8.203578035579715e-05, |
|
"loss": 0.0206, |
|
"num_input_tokens_seen": 10682880, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 0.553695596188514, |
|
"grad_norm": 0.11045279468612182, |
|
"learning_rate": 8.184871134885513e-05, |
|
"loss": 0.0237, |
|
"num_input_tokens_seen": 10732800, |
|
"step": 1075 |
|
}, |
|
{ |
|
"epoch": 0.5562709245428792, |
|
"grad_norm": 0.004997644768678168, |
|
"learning_rate": 8.166088911519235e-05, |
|
"loss": 0.0018, |
|
"num_input_tokens_seen": 10782720, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 0.5588462528972444, |
|
"grad_norm": 2.249874392718823, |
|
"learning_rate": 8.147231809683236e-05, |
|
"loss": 0.0296, |
|
"num_input_tokens_seen": 10832640, |
|
"step": 1085 |
|
}, |
|
{ |
|
"epoch": 0.5614215812516096, |
|
"grad_norm": 0.12969877743086014, |
|
"learning_rate": 8.128300275350756e-05, |
|
"loss": 0.0006, |
|
"num_input_tokens_seen": 10882560, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 0.5639969096059748, |
|
"grad_norm": 0.01592203313456207, |
|
"learning_rate": 8.109294756255375e-05, |
|
"loss": 0.0004, |
|
"num_input_tokens_seen": 10932480, |
|
"step": 1095 |
|
}, |
|
{ |
|
"epoch": 0.56657223796034, |
|
"grad_norm": 0.048956329384631035, |
|
"learning_rate": 8.090215701880419e-05, |
|
"loss": 0.0056, |
|
"num_input_tokens_seen": 10982400, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 0.56657223796034, |
|
"eval_loss": 0.0003656313638202846, |
|
"eval_runtime": 19.3051, |
|
"eval_samples_per_second": 3.108, |
|
"eval_steps_per_second": 0.777, |
|
"num_input_tokens_seen": 10982400, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 0.5691475663147051, |
|
"grad_norm": 0.020540774848059624, |
|
"learning_rate": 8.07106356344834e-05, |
|
"loss": 0.0005, |
|
"num_input_tokens_seen": 11032320, |
|
"step": 1105 |
|
}, |
|
{ |
|
"epoch": 0.5717228946690703, |
|
"grad_norm": 0.049646985711380834, |
|
"learning_rate": 8.051838793910038e-05, |
|
"loss": 0.0008, |
|
"num_input_tokens_seen": 11082240, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 0.5742982230234355, |
|
"grad_norm": 0.02410302194894019, |
|
"learning_rate": 8.032541847934146e-05, |
|
"loss": 0.001, |
|
"num_input_tokens_seen": 11132160, |
|
"step": 1115 |
|
}, |
|
{ |
|
"epoch": 0.5768735513778007, |
|
"grad_norm": 0.03404095257940449, |
|
"learning_rate": 8.013173181896283e-05, |
|
"loss": 0.0219, |
|
"num_input_tokens_seen": 11182080, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 0.5794488797321659, |
|
"grad_norm": 0.0005611895912019751, |
|
"learning_rate": 7.993733253868256e-05, |
|
"loss": 0.0, |
|
"num_input_tokens_seen": 11232000, |
|
"step": 1125 |
|
}, |
|
{ |
|
"epoch": 0.582024208086531, |
|
"grad_norm": 0.03678453984585236, |
|
"learning_rate": 7.974222523607236e-05, |
|
"loss": 0.0, |
|
"num_input_tokens_seen": 11281920, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 0.5845995364408962, |
|
"grad_norm": 0.002820815423322981, |
|
"learning_rate": 7.954641452544865e-05, |
|
"loss": 0.0001, |
|
"num_input_tokens_seen": 11331840, |
|
"step": 1135 |
|
}, |
|
{ |
|
"epoch": 0.5871748647952614, |
|
"grad_norm": 0.003003123169716893, |
|
"learning_rate": 7.934990503776363e-05, |
|
"loss": 0.0, |
|
"num_input_tokens_seen": 11381760, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 0.5897501931496266, |
|
"grad_norm": 0.020018776153065302, |
|
"learning_rate": 7.915270142049566e-05, |
|
"loss": 0.0001, |
|
"num_input_tokens_seen": 11431680, |
|
"step": 1145 |
|
}, |
|
{ |
|
"epoch": 0.5923255215039918, |
|
"grad_norm": 0.0005407295497242103, |
|
"learning_rate": 7.89548083375394e-05, |
|
"loss": 0.0127, |
|
"num_input_tokens_seen": 11481600, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 0.5923255215039918, |
|
"eval_loss": 1.5566551155643538e-05, |
|
"eval_runtime": 19.3425, |
|
"eval_samples_per_second": 3.102, |
|
"eval_steps_per_second": 0.775, |
|
"num_input_tokens_seen": 11481600, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 0.5949008498583569, |
|
"grad_norm": 0.00046149068264744525, |
|
"learning_rate": 7.875623046909544e-05, |
|
"loss": 0.0003, |
|
"num_input_tokens_seen": 11531520, |
|
"step": 1155 |
|
}, |
|
{ |
|
"epoch": 0.5974761782127221, |
|
"grad_norm": 0.0007891686440962277, |
|
"learning_rate": 7.855697251155967e-05, |
|
"loss": 0.0, |
|
"num_input_tokens_seen": 11581440, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 0.6000515065670873, |
|
"grad_norm": 6.055667685356653, |
|
"learning_rate": 7.835703917741212e-05, |
|
"loss": 0.0414, |
|
"num_input_tokens_seen": 11631360, |
|
"step": 1165 |
|
}, |
|
{ |
|
"epoch": 0.6026268349214525, |
|
"grad_norm": 3.9367530795344696, |
|
"learning_rate": 7.81564351951057e-05, |
|
"loss": 0.02, |
|
"num_input_tokens_seen": 11681280, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 0.6052021632758177, |
|
"grad_norm": 0.014148361508830113, |
|
"learning_rate": 7.795516530895414e-05, |
|
"loss": 0.0002, |
|
"num_input_tokens_seen": 11731200, |
|
"step": 1175 |
|
}, |
|
{ |
|
"epoch": 0.6077774916301828, |
|
"grad_norm": 0.12219412030414617, |
|
"learning_rate": 7.775323427901993e-05, |
|
"loss": 0.0026, |
|
"num_input_tokens_seen": 11781120, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 0.610352819984548, |
|
"grad_norm": 5.108798582105785, |
|
"learning_rate": 7.755064688100171e-05, |
|
"loss": 0.0168, |
|
"num_input_tokens_seen": 11831040, |
|
"step": 1185 |
|
}, |
|
{ |
|
"epoch": 0.6129281483389132, |
|
"grad_norm": 0.004932933139070266, |
|
"learning_rate": 7.734740790612136e-05, |
|
"loss": 0.0037, |
|
"num_input_tokens_seen": 11880960, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 0.6155034766932784, |
|
"grad_norm": 0.022109282268939556, |
|
"learning_rate": 7.714352216101055e-05, |
|
"loss": 0.0007, |
|
"num_input_tokens_seen": 11930880, |
|
"step": 1195 |
|
}, |
|
{ |
|
"epoch": 0.6180788050476436, |
|
"grad_norm": 0.037426371340479145, |
|
"learning_rate": 7.693899446759727e-05, |
|
"loss": 0.0038, |
|
"num_input_tokens_seen": 11980800, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 0.6180788050476436, |
|
"eval_loss": 0.0022160401567816734, |
|
"eval_runtime": 18.725, |
|
"eval_samples_per_second": 3.204, |
|
"eval_steps_per_second": 0.801, |
|
"num_input_tokens_seen": 11980800, |
|
"step": 1200 |
|
} |
|
], |
|
"logging_steps": 5, |
|
"max_steps": 1200, |
|
"num_input_tokens_seen": 11990784, |
|
"num_train_epochs": 1, |
|
"save_steps": 50, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 791107965943808.0, |
|
"train_batch_size": 1, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |