{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9994869163673679,
  "eval_steps": 500,
  "global_step": 974,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "grad_norm": 0.41478540297292715,
      "learning_rate": 2.040816326530612e-06,
      "loss": 1.0591,
      "step": 1
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.5358244986842327,
      "learning_rate": 1.0204081632653061e-05,
      "loss": 0.9762,
      "step": 5
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.48168298126198883,
      "learning_rate": 2.0408163265306123e-05,
      "loss": 1.0246,
      "step": 10
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.23532241564777356,
      "learning_rate": 3.061224489795919e-05,
      "loss": 1.0114,
      "step": 15
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.2679701566666925,
      "learning_rate": 4.0816326530612245e-05,
      "loss": 0.9971,
      "step": 20
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.1583325918997123,
      "learning_rate": 5.102040816326531e-05,
      "loss": 0.9355,
      "step": 25
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.1840547641781333,
      "learning_rate": 6.122448979591838e-05,
      "loss": 0.943,
      "step": 30
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.239483711202957,
      "learning_rate": 7.142857142857143e-05,
      "loss": 0.8764,
      "step": 35
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.15385235740129172,
      "learning_rate": 8.163265306122449e-05,
      "loss": 0.8824,
      "step": 40
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.13852135103025864,
      "learning_rate": 9.183673469387756e-05,
      "loss": 0.8615,
      "step": 45
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.13036655403253458,
      "learning_rate": 0.00010204081632653062,
      "loss": 0.9202,
      "step": 50
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.138300443551494,
      "learning_rate": 0.00011224489795918367,
      "loss": 0.8849,
      "step": 55
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.12389157364724535,
      "learning_rate": 0.00012244897959183676,
      "loss": 0.862,
      "step": 60
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.1429134980556313,
      "learning_rate": 0.0001326530612244898,
      "loss": 0.8463,
      "step": 65
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.14541426965897125,
      "learning_rate": 0.00014285714285714287,
      "loss": 0.8289,
      "step": 70
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.15560066242946408,
      "learning_rate": 0.0001530612244897959,
      "loss": 0.8285,
      "step": 75
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.1382713303784162,
      "learning_rate": 0.00016326530612244898,
      "loss": 0.8267,
      "step": 80
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.1367658702482613,
      "learning_rate": 0.00017346938775510205,
      "loss": 0.8875,
      "step": 85
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.15684837460586795,
      "learning_rate": 0.00018367346938775512,
      "loss": 0.8758,
      "step": 90
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.13232147433779046,
      "learning_rate": 0.00019387755102040816,
      "loss": 0.8457,
      "step": 95
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.12839583748095243,
      "learning_rate": 0.0001999974277115551,
      "loss": 0.8492,
      "step": 100
    },
    {
      "epoch": 0.11,
      "grad_norm": 0.1321027185844607,
      "learning_rate": 0.00019996849098629418,
      "loss": 0.787,
      "step": 105
    },
    {
      "epoch": 0.11,
      "grad_norm": 0.13562231873522493,
      "learning_rate": 0.00019990741151022301,
      "loss": 0.8034,
      "step": 110
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.1872551577847746,
      "learning_rate": 0.0001998142089221534,
      "loss": 0.8897,
      "step": 115
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.11987471778945524,
      "learning_rate": 0.0001996889131894033,
      "loss": 0.8548,
      "step": 120
    },
    {
      "epoch": 0.13,
      "grad_norm": 0.13758980449343652,
      "learning_rate": 0.00019953156459816179,
      "loss": 0.8126,
      "step": 125
    },
    {
      "epoch": 0.13,
      "grad_norm": 0.12651848093312767,
      "learning_rate": 0.0001993422137405354,
      "loss": 0.8606,
      "step": 130
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.14411919368392379,
      "learning_rate": 0.00019912092149828174,
      "loss": 0.8858,
      "step": 135
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.150512988037676,
      "learning_rate": 0.00019886775902323405,
      "loss": 0.8593,
      "step": 140
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.15943254043396862,
      "learning_rate": 0.00019858280771442385,
      "loss": 0.8577,
      "step": 145
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.1934721467912684,
      "learning_rate": 0.00019826615919190887,
      "loss": 0.8628,
      "step": 150
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.1232771853573132,
      "learning_rate": 0.00019791791526731445,
      "loss": 0.84,
      "step": 155
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.14669498826369695,
      "learning_rate": 0.00019753818791109828,
      "loss": 0.8185,
      "step": 160
    },
    {
      "epoch": 0.17,
      "grad_norm": 0.14085085759954724,
      "learning_rate": 0.0001971270992165486,
      "loss": 0.7795,
      "step": 165
    },
    {
      "epoch": 0.17,
      "grad_norm": 0.1335331389930546,
      "learning_rate": 0.00019668478136052774,
      "loss": 0.8159,
      "step": 170
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.14301329004691762,
      "learning_rate": 0.0001962113765609735,
      "loss": 0.8008,
      "step": 175
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.1349013012428684,
      "learning_rate": 0.0001957070370311717,
      "loss": 0.8523,
      "step": 180
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.15345671624450827,
      "learning_rate": 0.00019517192493081565,
      "loss": 0.827,
      "step": 185
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.13105575744786446,
      "learning_rate": 0.00019460621231386676,
      "loss": 0.8844,
      "step": 190
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.14047662719322554,
      "learning_rate": 0.00019401008107323455,
      "loss": 0.8593,
      "step": 195
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.16716795882975244,
      "learning_rate": 0.0001933837228822925,
      "loss": 0.8105,
      "step": 200
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.14011283820384335,
      "learning_rate": 0.0001927273391332499,
      "loss": 0.8575,
      "step": 205
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.13652402697672666,
      "learning_rate": 0.00019204114087239806,
      "loss": 0.8615,
      "step": 210
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.1366384894487137,
      "learning_rate": 0.00019132534873225323,
      "loss": 0.8373,
      "step": 215
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.1418034030520275,
      "learning_rate": 0.00019058019286061665,
      "loss": 0.8335,
      "step": 220
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.12973981150967331,
      "learning_rate": 0.00018980591284657535,
      "loss": 0.8393,
      "step": 225
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.14098955432684662,
      "learning_rate": 0.00018900275764346768,
      "loss": 0.8324,
      "step": 230
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.1483857739019612,
      "learning_rate": 0.0001881709854888372,
      "loss": 0.8224,
      "step": 235
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.1207564764356093,
      "learning_rate": 0.00018731086382140226,
      "loss": 0.8115,
      "step": 240
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.147034450761397,
      "learning_rate": 0.00018642266919506644,
      "loss": 0.8473,
      "step": 245
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.16090713319886477,
      "learning_rate": 0.00018550668718999872,
      "loss": 0.7954,
      "step": 250
    },
    {
      "epoch": 0.26,
      "grad_norm": 0.12088332198568695,
      "learning_rate": 0.0001845632123208111,
      "loss": 0.8553,
      "step": 255
    },
    {
      "epoch": 0.27,
      "grad_norm": 0.13281843244805125,
      "learning_rate": 0.0001835925479418637,
      "loss": 0.8602,
      "step": 260
    },
    {
      "epoch": 0.27,
      "grad_norm": 0.11395640254482223,
      "learning_rate": 0.0001825950061497276,
      "loss": 0.799,
      "step": 265
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.18338212518218974,
      "learning_rate": 0.00018157090768283678,
      "loss": 0.818,
      "step": 270
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.14005425053437323,
      "learning_rate": 0.00018052058181836151,
      "loss": 0.8221,
      "step": 275
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.15372527780279158,
      "learning_rate": 0.00017944436626633623,
      "loss": 0.8417,
      "step": 280
    },
    {
      "epoch": 0.29,
      "grad_norm": 0.14434072633220638,
      "learning_rate": 0.00017834260706107595,
      "loss": 0.8108,
      "step": 285
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.12627644118944992,
      "learning_rate": 0.00017721565844991643,
      "loss": 0.8068,
      "step": 290
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.14772289738958688,
      "learning_rate": 0.00017606388277931328,
      "loss": 0.8744,
      "step": 295
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.13984571980575156,
      "learning_rate": 0.0001748876503783373,
      "loss": 0.835,
      "step": 300
    },
    {
      "epoch": 0.31,
      "grad_norm": 0.13153373295044746,
      "learning_rate": 0.00017368733943960276,
      "loss": 0.8383,
      "step": 305
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.1291787835295784,
      "learning_rate": 0.00017246333589766787,
      "loss": 0.8532,
      "step": 310
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.1313754669175255,
      "learning_rate": 0.00017121603330494544,
      "loss": 0.8638,
      "step": 315
    },
    {
      "epoch": 0.33,
      "grad_norm": 0.14687623249066953,
      "learning_rate": 0.0001699458327051647,
      "loss": 0.8385,
      "step": 320
    },
    {
      "epoch": 0.33,
      "grad_norm": 0.14850598828123243,
      "learning_rate": 0.00016865314250442398,
      "loss": 0.8169,
      "step": 325
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.14865425552332714,
      "learning_rate": 0.00016733837833987633,
      "loss": 0.8129,
      "step": 330
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.14327447625468243,
      "learning_rate": 0.00016600196294609045,
      "loss": 0.8112,
      "step": 335
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.1422668164659587,
      "learning_rate": 0.00016464432601912912,
      "loss": 0.8306,
      "step": 340
    },
    {
      "epoch": 0.35,
      "grad_norm": 0.12679383458857735,
      "learning_rate": 0.0001632659040783897,
      "loss": 0.8476,
      "step": 345
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.13298721576358818,
      "learning_rate": 0.00016186714032625035,
      "loss": 0.8527,
      "step": 350
    },
    {
      "epoch": 0.36,
      "grad_norm": 0.15009708989503034,
      "learning_rate": 0.00016044848450556787,
      "loss": 0.819,
      "step": 355
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.1535277202050242,
      "learning_rate": 0.00015901039275507245,
      "loss": 0.8305,
      "step": 360
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.13563353212881551,
      "learning_rate": 0.00015755332746270572,
      "loss": 0.8104,
      "step": 365
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.14208140877531353,
      "learning_rate": 0.00015607775711694977,
      "loss": 0.8381,
      "step": 370
    },
    {
      "epoch": 0.38,
      "grad_norm": 0.14084759655058215,
      "learning_rate": 0.00015458415615619484,
      "loss": 0.8186,
      "step": 375
    },
    {
      "epoch": 0.39,
      "grad_norm": 0.1202290236098903,
      "learning_rate": 0.00015307300481619333,
      "loss": 0.8578,
      "step": 380
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.1427466626848493,
      "learning_rate": 0.00015154478897565045,
      "loss": 0.8816,
      "step": 385
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.11439109448818731,
      "learning_rate": 0.00015000000000000001,
      "loss": 0.8142,
      "step": 390
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.1402674024104851,
      "learning_rate": 0.00014843913458341645,
      "loss": 0.8011,
      "step": 395
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.15497764154867305,
      "learning_rate": 0.00014686269458911332,
      "loss": 0.8179,
      "step": 400
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.14725740205615323,
      "learning_rate": 0.00014527118688797963,
      "loss": 0.9044,
      "step": 405
    },
    {
      "epoch": 0.42,
      "grad_norm": 0.1441807780130091,
      "learning_rate": 0.0001436651231956064,
      "loss": 0.8295,
      "step": 410
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.14314161398501463,
      "learning_rate": 0.00014204501990775533,
      "loss": 0.8471,
      "step": 415
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.12813797598238083,
      "learning_rate": 0.00014041139793432274,
      "loss": 0.8262,
      "step": 420
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.14355138774702908,
      "learning_rate": 0.00013876478253185183,
      "loss": 0.8544,
      "step": 425
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.14065230597830594,
      "learning_rate": 0.00013710570313464778,
      "loss": 0.8273,
      "step": 430
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.1502376541192836,
      "learning_rate": 0.0001354346931845492,
      "loss": 0.8629,
      "step": 435
    },
    {
      "epoch": 0.45,
      "grad_norm": 0.11612404557295304,
      "learning_rate": 0.00013375228995941133,
      "loss": 0.8474,
      "step": 440
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.12394764980545889,
      "learning_rate": 0.0001320590344003557,
      "loss": 0.8398,
      "step": 445
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.14219342684795552,
      "learning_rate": 0.00013035547093784186,
      "loss": 0.8386,
      "step": 450
    },
    {
      "epoch": 0.47,
      "grad_norm": 0.13096131144311657,
      "learning_rate": 0.00012864214731661742,
      "loss": 0.8175,
      "step": 455
    },
    {
      "epoch": 0.47,
      "grad_norm": 0.143251617809686,
      "learning_rate": 0.00012691961441960238,
      "loss": 0.8085,
      "step": 460
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.12498048690896839,
      "learning_rate": 0.00012518842609076413,
      "loss": 0.8259,
      "step": 465
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.14752810775417383,
      "learning_rate": 0.00012344913895704097,
      "loss": 0.8157,
      "step": 470
    },
    {
      "epoch": 0.49,
      "grad_norm": 0.13076168345663483,
      "learning_rate": 0.00012170231224937032,
      "loss": 0.8102,
      "step": 475
    },
    {
      "epoch": 0.49,
      "grad_norm": 0.14100703225317754,
      "learning_rate": 0.00011994850762287989,
      "loss": 0.8308,
      "step": 480
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.1406207969502619,
      "learning_rate": 0.0001181882889762994,
      "loss": 0.8538,
      "step": 485
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.12811621773423584,
      "learning_rate": 0.00011642222227065089,
      "loss": 0.8096,
      "step": 490
    },
    {
      "epoch": 0.51,
      "grad_norm": 0.16240280156036038,
      "learning_rate": 0.00011465087534727587,
      "loss": 0.893,
      "step": 495
    },
    {
      "epoch": 0.51,
      "grad_norm": 0.15744615260545805,
      "learning_rate": 0.0001128748177452581,
      "loss": 0.8271,
      "step": 500
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.13453736557228535,
      "learning_rate": 0.00011109462051830017,
      "loss": 0.8932,
      "step": 505
    },
    {
      "epoch": 0.52,
      "grad_norm": 0.13488475951319145,
      "learning_rate": 0.00010931085605111354,
      "loss": 0.805,
      "step": 510
    },
    {
      "epoch": 0.53,
      "grad_norm": 0.13615523867504728,
      "learning_rate": 0.00010752409787538,
      "loss": 0.8358,
      "step": 515
    },
    {
      "epoch": 0.53,
      "grad_norm": 0.11608735125900543,
      "learning_rate": 0.00010573492048534515,
      "loss": 0.7792,
      "step": 520
    },
    {
      "epoch": 0.54,
      "grad_norm": 0.12529530307366807,
      "learning_rate": 0.00010394389915310149,
      "loss": 0.7858,
      "step": 525
    },
    {
      "epoch": 0.54,
      "grad_norm": 0.1477196524951594,
      "learning_rate": 0.00010215160974362223,
      "loss": 0.8949,
      "step": 530
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.13478280062013237,
      "learning_rate": 0.00010035862852960387,
      "loss": 0.8675,
      "step": 535
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.18751542446376887,
      "learning_rate": 9.856553200617805e-05,
      "loss": 0.8002,
      "step": 540
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.13384075191607198,
      "learning_rate": 9.677289670555169e-05,
      "loss": 0.8342,
      "step": 545
    },
    {
      "epoch": 0.56,
      "grad_norm": 0.12830956280090938,
      "learning_rate": 9.49812990116353e-05,
      "loss": 0.8542,
      "step": 550
    },
    {
      "epoch": 0.57,
      "grad_norm": 0.13139317688343669,
      "learning_rate": 9.319131497471894e-05,
      "loss": 0.857,
      "step": 555
    },
    {
      "epoch": 0.57,
      "grad_norm": 0.11825939494831025,
      "learning_rate": 9.140352012625537e-05,
      "loss": 0.8286,
      "step": 560
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.1319552841228723,
      "learning_rate": 8.961848929381026e-05,
      "loss": 0.8513,
      "step": 565
    },
    {
      "epoch": 0.58,
      "grad_norm": 0.13846831656086162,
      "learning_rate": 8.783679641623845e-05,
      "loss": 0.8575,
      "step": 570
    },
    {
      "epoch": 0.59,
      "grad_norm": 0.1565132050678128,
      "learning_rate": 8.605901435914607e-05,
      "loss": 0.8095,
      "step": 575
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.1349070576898933,
      "learning_rate": 8.428571473069775e-05,
      "loss": 0.8364,
      "step": 580
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.1478930655811248,
      "learning_rate": 8.25174676978282e-05,
      "loss": 0.7913,
      "step": 585
    },
    {
      "epoch": 0.61,
      "grad_norm": 0.14739768176324797,
      "learning_rate": 8.075484180291701e-05,
      "loss": 0.8095,
      "step": 590
    },
    {
      "epoch": 0.61,
      "grad_norm": 0.13061570270501272,
      "learning_rate": 7.899840378098588e-05,
      "loss": 0.8456,
      "step": 595
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.1400435628778491,
      "learning_rate": 7.724871837747707e-05,
      "loss": 0.854,
      "step": 600
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.14619473905537722,
      "learning_rate": 7.550634816667142e-05,
      "loss": 0.7878,
      "step": 605
    },
    {
      "epoch": 0.63,
      "grad_norm": 0.1669686546083547,
      "learning_rate": 7.377185337080442e-05,
      "loss": 0.8293,
      "step": 610
    },
    {
      "epoch": 0.63,
      "grad_norm": 0.15161889018158878,
      "learning_rate": 7.204579167993881e-05,
      "loss": 0.8327,
      "step": 615
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.11802481456975004,
      "learning_rate": 7.032871807265096e-05,
      "loss": 0.8549,
      "step": 620
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.1428032348359374,
      "learning_rate": 6.862118463758943e-05,
      "loss": 0.8113,
      "step": 625
    },
    {
      "epoch": 0.65,
      "grad_norm": 0.12867903817761486,
      "learning_rate": 6.69237403959624e-05,
      "loss": 0.8219,
      "step": 630
    },
    {
      "epoch": 0.65,
      "grad_norm": 0.14771825500860036,
      "learning_rate": 6.52369311250116e-05,
      "loss": 0.8042,
      "step": 635
    },
    {
      "epoch": 0.66,
      "grad_norm": 0.13258568806230905,
      "learning_rate": 6.356129918252927e-05,
      "loss": 0.8156,
      "step": 640
    },
    {
      "epoch": 0.66,
      "grad_norm": 0.14732793338159728,
      "learning_rate": 6.189738333247432e-05,
      "loss": 0.8121,
      "step": 645
    },
    {
      "epoch": 0.67,
      "grad_norm": 0.15966142035007472,
      "learning_rate": 6.024571857174443e-05,
      "loss": 0.8595,
      "step": 650
    },
    {
      "epoch": 0.67,
      "grad_norm": 0.13684066043985474,
      "learning_rate": 5.860683595815893e-05,
      "loss": 0.8109,
      "step": 655
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.12490641045647621,
      "learning_rate": 5.698126243970845e-05,
      "loss": 0.8125,
      "step": 660
    },
    {
      "epoch": 0.68,
      "grad_norm": 0.13460863241729326,
      "learning_rate": 5.536952068512608e-05,
      "loss": 0.7947,
      "step": 665
    },
    {
      "epoch": 0.69,
      "grad_norm": 0.13010809900192385,
      "learning_rate": 5.3772128915834184e-05,
      "loss": 0.9054,
      "step": 670
    },
    {
      "epoch": 0.69,
      "grad_norm": 0.12951084999951157,
      "learning_rate": 5.218960073932122e-05,
      "loss": 0.8647,
      "step": 675
    },
    {
      "epoch": 0.7,
      "grad_norm": 0.12953418083912346,
      "learning_rate": 5.062244498400228e-05,
      "loss": 0.8474,
      "step": 680
    },
    {
      "epoch": 0.7,
      "grad_norm": 0.13349564170084294,
      "learning_rate": 4.907116553561607e-05,
      "loss": 0.8127,
      "step": 685
    },
    {
      "epoch": 0.71,
      "grad_norm": 0.11698335940312711,
      "learning_rate": 4.753626117521103e-05,
      "loss": 0.7862,
      "step": 690
    },
    {
      "epoch": 0.71,
      "grad_norm": 0.1417412770863787,
      "learning_rate": 4.601822541877291e-05,
      "loss": 0.7952,
      "step": 695
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.1490752355077661,
      "learning_rate": 4.451754635854517e-05,
      "loss": 0.8458,
      "step": 700
    },
    {
      "epoch": 0.72,
      "grad_norm": 0.1331101949433864,
      "learning_rate": 4.303470650609325e-05,
      "loss": 0.8044,
      "step": 705
    },
    {
      "epoch": 0.73,
      "grad_norm": 0.13611270992724214,
      "learning_rate": 4.1570182637163155e-05,
      "loss": 0.8855,
      "step": 710
    },
    {
      "epoch": 0.73,
      "grad_norm": 0.13998690568497368,
      "learning_rate": 4.0124445638384366e-05,
      "loss": 0.7553,
      "step": 715
    },
    {
      "epoch": 0.74,
      "grad_norm": 0.14235394415159117,
      "learning_rate": 3.869796035586625e-05,
      "loss": 0.8048,
      "step": 720
    },
    {
      "epoch": 0.74,
      "grad_norm": 0.13460415814884583,
      "learning_rate": 3.7291185445736444e-05,
      "loss": 0.808,
      "step": 725
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.13186297670947147,
      "learning_rate": 3.590457322666997e-05,
      "loss": 0.8512,
      "step": 730
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.13821443417948492,
      "learning_rate": 3.453856953445557e-05,
      "loss": 0.8445,
      "step": 735
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.11419625295174221,
      "learning_rate": 3.319361357864663e-05,
      "loss": 0.8169,
      "step": 740
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.12406541104741345,
      "learning_rate": 3.187013780134291e-05,
      "loss": 0.8428,
      "step": 745
    },
    {
      "epoch": 0.77,
      "grad_norm": 0.1286809514714421,
      "learning_rate": 3.05685677381475e-05,
      "loss": 0.85,
      "step": 750
    },
    {
      "epoch": 0.77,
      "grad_norm": 0.1445484766618607,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 0.8106,
      "step": 755
    },
    {
      "epoch": 0.78,
      "grad_norm": 0.14262528146944597,
      "learning_rate": 2.8032811545345294e-05,
      "loss": 0.8609,
      "step": 760
    },
    {
      "epoch": 0.79,
      "grad_norm": 0.1769607359536476,
      "learning_rate": 2.679944073443158e-05,
      "loss": 0.8045,
      "step": 765
    },
    {
      "epoch": 0.79,
      "grad_norm": 0.14280821635903052,
      "learning_rate": 2.5589606012863963e-05,
      "loss": 0.866,
      "step": 770
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.1429520090865704,
      "learning_rate": 2.4403696377371142e-05,
      "loss": 0.8667,
      "step": 775
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.13554177395410102,
      "learning_rate": 2.324209313207736e-05,
      "loss": 0.8577,
      "step": 780
    },
    {
      "epoch": 0.81,
      "grad_norm": 0.1270863540245396,
      "learning_rate": 2.210516976590179e-05,
      "loss": 0.839,
      "step": 785
    },
    {
      "epoch": 0.81,
      "grad_norm": 0.15157612066040596,
      "learning_rate": 2.099329183247126e-05,
      "loss": 0.8193,
      "step": 790
    },
    {
      "epoch": 0.82,
      "grad_norm": 0.13563544257174368,
      "learning_rate": 1.9906816832584253e-05,
      "loss": 0.7841,
      "step": 795
    },
    {
      "epoch": 0.82,
      "grad_norm": 0.1438162068777128,
      "learning_rate": 1.8846094099263912e-05,
      "loss": 0.8317,
      "step": 800
    },
    {
      "epoch": 0.83,
      "grad_norm": 0.14098563226328448,
      "learning_rate": 1.781146468543765e-05,
      "loss": 0.7682,
      "step": 805
    },
    {
      "epoch": 0.83,
      "grad_norm": 0.12904056725011795,
      "learning_rate": 1.6803261254278636e-05,
      "loss": 0.8184,
      "step": 810
    },
    {
      "epoch": 0.84,
      "grad_norm": 0.1577908132849086,
      "learning_rate": 1.582180797224507e-05,
      "loss": 0.8208,
      "step": 815
    },
    {
      "epoch": 0.84,
      "grad_norm": 0.1448578052047199,
      "learning_rate": 1.4867420404851307e-05,
      "loss": 0.828,
      "step": 820
    },
    {
      "epoch": 0.85,
      "grad_norm": 0.11696975769424958,
      "learning_rate": 1.3940405415204416e-05,
      "loss": 0.8405,
      "step": 825
    },
    {
      "epoch": 0.85,
      "grad_norm": 0.12780753501046968,
      "learning_rate": 1.30410610653389e-05,
      "loss": 0.8647,
      "step": 830
    },
    {
      "epoch": 0.86,
      "grad_norm": 0.1401641424372156,
      "learning_rate": 1.2169676520381168e-05,
      "loss": 0.8346,
      "step": 835
    },
    {
      "epoch": 0.86,
      "grad_norm": 0.1507027680390559,
      "learning_rate": 1.1326531955574526e-05,
      "loss": 0.8532,
      "step": 840
    },
    {
      "epoch": 0.87,
      "grad_norm": 0.13417895219140907,
      "learning_rate": 1.0511898466194903e-05,
      "loss": 0.8092,
      "step": 845
    },
    {
      "epoch": 0.87,
      "grad_norm": 0.127954613918298,
      "learning_rate": 9.726037980385738e-06,
      "loss": 0.7773,
      "step": 850
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.1366880841170061,
      "learning_rate": 8.969203174940654e-06,
      "loss": 0.8018,
      "step": 855
    },
    {
      "epoch": 0.88,
      "grad_norm": 0.11768872682875378,
      "learning_rate": 8.24163739406062e-06,
      "loss": 0.8483,
      "step": 860
    },
    {
      "epoch": 0.89,
      "grad_norm": 0.1419611383110849,
      "learning_rate": 7.543574571111655e-06,
      "loss": 0.844,
      "step": 865
    },
    {
      "epoch": 0.89,
      "grad_norm": 0.14220531250615795,
      "learning_rate": 6.875239153408542e-06,
      "loss": 0.7975,
      "step": 870
    },
    {
      "epoch": 0.9,
      "grad_norm": 0.1566277092103407,
      "learning_rate": 6.236846030048604e-06,
      "loss": 0.8063,
      "step": 875
    },
    {
      "epoch": 0.9,
      "grad_norm": 0.1268170098289612,
      "learning_rate": 5.6286004628186675e-06,
      "loss": 0.8484,
      "step": 880
    },
    {
      "epoch": 0.91,
      "grad_norm": 0.13964640969590117,
      "learning_rate": 5.0506980201973974e-06,
      "loss": 0.8284,
      "step": 885
    },
    {
      "epoch": 0.91,
      "grad_norm": 0.14682575127314904,
      "learning_rate": 4.503324514474483e-06,
      "loss": 0.8706,
      "step": 890
    },
    {
      "epoch": 0.92,
      "grad_norm": 0.13996242963540317,
      "learning_rate": 3.986655942006579e-06,
      "loss": 0.8293,
      "step": 895
    },
    {
      "epoch": 0.92,
      "grad_norm": 0.16776823076764466,
      "learning_rate": 3.5008584266294386e-06,
      "loss": 0.8063,
      "step": 900
    },
    {
      "epoch": 0.93,
      "grad_norm": 0.1510152152640099,
      "learning_rate": 3.0460881662442763e-06,
      "loss": 0.8718,
      "step": 905
    },
    {
      "epoch": 0.93,
      "grad_norm": 0.1366248738676145,
      "learning_rate": 2.622491382595693e-06,
      "loss": 0.8484,
      "step": 910
    },
    {
      "epoch": 0.94,
      "grad_norm": 0.1404750930152043,
      "learning_rate": 2.2302042742571193e-06,
      "loss": 0.8313,
      "step": 915
    },
    {
      "epoch": 0.94,
      "grad_norm": 0.1270118228269091,
      "learning_rate": 1.869352972839067e-06,
      "loss": 0.8117,
      "step": 920
    },
    {
      "epoch": 0.95,
      "grad_norm": 0.16190747206009376,
      "learning_rate": 1.5400535024342022e-06,
      "loss": 0.8031,
      "step": 925
    },
    {
      "epoch": 0.95,
      "grad_norm": 0.1529977790553585,
      "learning_rate": 1.2424117423122328e-06,
      "loss": 0.8316,
      "step": 930
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.12937443064788462,
      "learning_rate": 9.765233928766493e-07,
      "loss": 0.7992,
      "step": 935
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.1449373612680855,
      "learning_rate": 7.42473944894384e-07,
      "loss": 0.8107,
      "step": 940
    },
    {
      "epoch": 0.97,
      "grad_norm": 0.12996651495057476,
      "learning_rate": 5.403386520079323e-07,
      "loss": 0.7778,
      "step": 945
    },
    {
      "epoch": 0.97,
      "grad_norm": 0.12904475182773067,
      "learning_rate": 3.701825065392184e-07,
      "loss": 0.8515,
      "step": 950
    },
    {
      "epoch": 0.98,
      "grad_norm": 0.12597952420128744,
      "learning_rate": 2.320602185927001e-07,
      "loss": 0.7737,
      "step": 955
    },
    {
      "epoch": 0.99,
      "grad_norm": 0.1376254333598378,
      "learning_rate": 1.2601619846444035e-07,
      "loss": 0.8671,
      "step": 960
    },
    {
      "epoch": 0.99,
      "grad_norm": 0.13114870010510168,
      "learning_rate": 5.208454236296234e-08,
      "loss": 0.8289,
      "step": 965
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.14712841990455128,
      "learning_rate": 1.0289021446308056e-08,
      "loss": 0.8719,
      "step": 970
    },
    {
      "epoch": 1.0,
      "eval_loss": NaN,
      "eval_runtime": 1840.0201,
      "eval_samples_per_second": 3.767,
      "eval_steps_per_second": 0.942,
      "step": 974
    },
    {
      "epoch": 1.0,
      "step": 974,
      "total_flos": 1.052863723732992e+16,
      "train_loss": 0.838743479834445,
      "train_runtime": 18938.875,
      "train_samples_per_second": 3.293,
      "train_steps_per_second": 0.051
    }
  ],
  "logging_steps": 5,
  "max_steps": 974,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 1.052863723732992e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}