|
{
"best_metric": 0.872858618136751,
"best_model_checkpoint": "D:/00_DATA/02_ELTE/MODELS/hupunct-v02f-01\\checkpoint-371200",
"epoch": 0.887830125951332,
"global_step": 371200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 8.000000000000001e-06,
"loss": 0.9731,
"step": 400
},
{
"epoch": 0.0,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.3913,
"step": 800
},
{
"epoch": 0.0,
"learning_rate": 1.999999717021846e-05,
"loss": 0.3215,
"step": 1200
},
{
"epoch": 0.0,
"learning_rate": 1.999997453197575e-05,
"loss": 0.2949,
"step": 1600
},
{
"epoch": 0.0,
"eval_accuracy": 0.9126170744127641,
"eval_f1": 0.8145480413876998,
"eval_loss": 0.2609182596206665,
"eval_precision": 0.8187897831637073,
"eval_recall": 0.8103500218088356,
"eval_runtime": 534.8153,
"eval_samples_per_second": 31.695,
"eval_steps_per_second": 3.962,
"step": 1600
},
|
{ |
|
"epoch": 0.0, |
|
"learning_rate": 1.9999929255541574e-05, |
|
"loss": 0.2828, |
|
"step": 2000 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 1.9999861341018434e-05, |
|
"loss": 0.2728, |
|
"step": 2400 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 1.9999770788560077e-05, |
|
"loss": 0.266, |
|
"step": 2800 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 1.9999657598371494e-05, |
|
"loss": 0.2623, |
|
"step": 3200 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"eval_accuracy": 0.9194747488767328, |
|
"eval_f1": 0.8286546809878433, |
|
"eval_loss": 0.23863454163074493, |
|
"eval_precision": 0.8377330317376491, |
|
"eval_recall": 0.8197709815005051, |
|
"eval_runtime": 522.5618, |
|
"eval_samples_per_second": 32.438, |
|
"eval_steps_per_second": 4.055, |
|
"step": 3200 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 1.9999521770708934e-05, |
|
"loss": 0.2648, |
|
"step": 3600 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 1.9999363305879883e-05, |
|
"loss": 0.2549, |
|
"step": 4000 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 1.9999182204243077e-05, |
|
"loss": 0.2499, |
|
"step": 4400 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 1.9998978466208495e-05, |
|
"loss": 0.2476, |
|
"step": 4800 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"eval_accuracy": 0.921358067877135, |
|
"eval_f1": 0.8341903407191237, |
|
"eval_loss": 0.22921068966388702, |
|
"eval_precision": 0.8304616814064032, |
|
"eval_recall": 0.8379526333811813, |
|
"eval_runtime": 506.5545, |
|
"eval_samples_per_second": 33.463, |
|
"eval_steps_per_second": 4.183, |
|
"step": 4800 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 1.9998752092237376e-05, |
|
"loss": 0.2524, |
|
"step": 5200 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 1.9998503082842177e-05, |
|
"loss": 0.2478, |
|
"step": 5600 |
|
}, |
|
{ |
|
"epoch": 0.01, |
|
"learning_rate": 1.9998231438586622e-05, |
|
"loss": 0.2421, |
|
"step": 6000 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 1.9997937160085664e-05, |
|
"loss": 0.241, |
|
"step": 6400 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"eval_accuracy": 0.9234379527885445, |
|
"eval_f1": 0.8365269322200962, |
|
"eval_loss": 0.22358419001102448, |
|
"eval_precision": 0.8486267645192161, |
|
"eval_recall": 0.8247672914340725, |
|
"eval_runtime": 514.0292, |
|
"eval_samples_per_second": 32.977, |
|
"eval_steps_per_second": 4.122, |
|
"step": 6400 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 1.999762024800549e-05, |
|
"loss": 0.2421, |
|
"step": 6800 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 1.9997280703063548e-05, |
|
"loss": 0.2372, |
|
"step": 7200 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 1.999691852602849e-05, |
|
"loss": 0.2372, |
|
"step": 7600 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 1.9996533717720243e-05, |
|
"loss": 0.2386, |
|
"step": 8000 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"eval_accuracy": 0.9254024865212617, |
|
"eval_f1": 0.8414120516235785, |
|
"eval_loss": 0.21748805046081543, |
|
"eval_precision": 0.8474481429336846, |
|
"eval_recall": 0.8354613383365032, |
|
"eval_runtime": 516.1733, |
|
"eval_samples_per_second": 32.84, |
|
"eval_steps_per_second": 4.105, |
|
"step": 8000 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 1.9996126279009926e-05, |
|
"loss": 0.2394, |
|
"step": 8400 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 1.9995696210819922e-05, |
|
"loss": 0.238, |
|
"step": 8800 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 1.9995243514123824e-05, |
|
"loss": 0.2349, |
|
"step": 9200 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 1.9994768189946457e-05, |
|
"loss": 0.2349, |
|
"step": 9600 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"eval_accuracy": 0.9247767767129416, |
|
"eval_f1": 0.8413244775987313, |
|
"eval_loss": 0.2197960615158081, |
|
"eval_precision": 0.8403090006181837, |
|
"eval_recall": 0.8423424118685799, |
|
"eval_runtime": 529.8447, |
|
"eval_samples_per_second": 31.992, |
|
"eval_steps_per_second": 3.999, |
|
"step": 9600 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 1.9994270239363875e-05, |
|
"loss": 0.2274, |
|
"step": 10000 |
|
}, |
|
{ |
|
"epoch": 0.02, |
|
"learning_rate": 1.999374966350335e-05, |
|
"loss": 0.23, |
|
"step": 10400 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.9993206463543375e-05, |
|
"loss": 0.229, |
|
"step": 10800 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.999264064071366e-05, |
|
"loss": 0.2333, |
|
"step": 11200 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"eval_accuracy": 0.9262532718002005, |
|
"eval_f1": 0.8427277359860608, |
|
"eval_loss": 0.21289820969104767, |
|
"eval_precision": 0.8513247864743565, |
|
"eval_recall": 0.8343025831608348, |
|
"eval_runtime": 528.0366, |
|
"eval_samples_per_second": 32.102, |
|
"eval_steps_per_second": 4.013, |
|
"step": 11200 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.9992052196295125e-05, |
|
"loss": 0.2319, |
|
"step": 11600 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.999144113161991e-05, |
|
"loss": 0.2332, |
|
"step": 12000 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.9990807448071357e-05, |
|
"loss": 0.2303, |
|
"step": 12400 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.999015114708401e-05, |
|
"loss": 0.2286, |
|
"step": 12800 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"eval_accuracy": 0.9269069284794558, |
|
"eval_f1": 0.8437310209280239, |
|
"eval_loss": 0.21160121262073517, |
|
"eval_precision": 0.8538120476568147, |
|
"eval_recall": 0.8338852712327451, |
|
"eval_runtime": 521.5959, |
|
"eval_samples_per_second": 32.498, |
|
"eval_steps_per_second": 4.063, |
|
"step": 12800 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.9989472230143628e-05, |
|
"loss": 0.2309, |
|
"step": 13200 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.9988770698787157e-05, |
|
"loss": 0.2327, |
|
"step": 13600 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.9988046554602735e-05, |
|
"loss": 0.2299, |
|
"step": 14000 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"learning_rate": 1.9987299799229706e-05, |
|
"loss": 0.2295, |
|
"step": 14400 |
|
}, |
|
{ |
|
"epoch": 0.03, |
|
"eval_accuracy": 0.9272647984777396, |
|
"eval_f1": 0.8454534656124277, |
|
"eval_loss": 0.21050414443016052, |
|
"eval_precision": 0.848673467095452, |
|
"eval_recall": 0.842257806162611, |
|
"eval_runtime": 495.85, |
|
"eval_samples_per_second": 34.186, |
|
"eval_steps_per_second": 4.273, |
|
"step": 14400 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 1.9986530434358588e-05, |
|
"loss": 0.2241, |
|
"step": 14800 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 1.998573846173109e-05, |
|
"loss": 0.2297, |
|
"step": 15200 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 1.99849238831401e-05, |
|
"loss": 0.2201, |
|
"step": 15600 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 1.9984086700429683e-05, |
|
"loss": 0.2256, |
|
"step": 16000 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"eval_accuracy": 0.9279364611946443, |
|
"eval_f1": 0.8463979985331287, |
|
"eval_loss": 0.20822134613990784, |
|
"eval_precision": 0.8561796543632718, |
|
"eval_recall": 0.8368373243788484, |
|
"eval_runtime": 497.8701, |
|
"eval_samples_per_second": 34.047, |
|
"eval_steps_per_second": 4.256, |
|
"step": 16000 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 1.998322691549507e-05, |
|
"loss": 0.2271, |
|
"step": 16400 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 1.9982344530282666e-05, |
|
"loss": 0.2217, |
|
"step": 16800 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 1.998143954679004e-05, |
|
"loss": 0.2204, |
|
"step": 17200 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 1.9980511967065905e-05, |
|
"loss": 0.2238, |
|
"step": 17600 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"eval_accuracy": 0.9277969144028607, |
|
"eval_f1": 0.8467242502560018, |
|
"eval_loss": 0.20991012454032898, |
|
"eval_precision": 0.8475703430285909, |
|
"eval_recall": 0.8458798450343596, |
|
"eval_runtime": 498.3421, |
|
"eval_samples_per_second": 34.015, |
|
"eval_steps_per_second": 4.252, |
|
"step": 17600 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 1.997956179321015e-05, |
|
"loss": 0.2275, |
|
"step": 18000 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 1.99785890273738e-05, |
|
"loss": 0.2226, |
|
"step": 18400 |
|
}, |
|
{ |
|
"epoch": 0.04, |
|
"learning_rate": 1.9977593671759022e-05, |
|
"loss": 0.2251, |
|
"step": 18800 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 1.9976575728619133e-05, |
|
"loss": 0.2264, |
|
"step": 19200 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"eval_accuracy": 0.928742794068136, |
|
"eval_f1": 0.8487050269322584, |
|
"eval_loss": 0.20485791563987732, |
|
"eval_precision": 0.8557066460107723, |
|
"eval_recall": 0.8418170561673273, |
|
"eval_runtime": 497.9006, |
|
"eval_samples_per_second": 34.045, |
|
"eval_steps_per_second": 4.256, |
|
"step": 19200 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 1.9975535200258573e-05, |
|
"loss": 0.2231, |
|
"step": 19600 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 1.997447208903292e-05, |
|
"loss": 0.2219, |
|
"step": 20000 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 1.9973386397348867e-05, |
|
"loss": 0.2215, |
|
"step": 20400 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 1.9972278127664235e-05, |
|
"loss": 0.2234, |
|
"step": 20800 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"eval_accuracy": 0.92921413961619, |
|
"eval_f1": 0.8492843483019629, |
|
"eval_loss": 0.20633253455162048, |
|
"eval_precision": 0.855414881500537, |
|
"eval_recall": 0.8432410616644115, |
|
"eval_runtime": 527.3621, |
|
"eval_samples_per_second": 32.143, |
|
"eval_steps_per_second": 4.018, |
|
"step": 20800 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 1.997114728248795e-05, |
|
"loss": 0.2223, |
|
"step": 21200 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 1.9969993864380048e-05, |
|
"loss": 0.2165, |
|
"step": 21600 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 1.9968817875951666e-05, |
|
"loss": 0.2204, |
|
"step": 22000 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 1.9967619319865032e-05, |
|
"loss": 0.2218, |
|
"step": 22400 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"eval_accuracy": 0.9294052662033238, |
|
"eval_f1": 0.8486742459996486, |
|
"eval_loss": 0.2052079290151596, |
|
"eval_precision": 0.8625646753774255, |
|
"eval_recall": 0.835224099363685, |
|
"eval_runtime": 527.1534, |
|
"eval_samples_per_second": 32.156, |
|
"eval_steps_per_second": 4.02, |
|
"step": 22400 |
|
}, |
|
{ |
|
"epoch": 0.05, |
|
"learning_rate": 1.9966398198833473e-05, |
|
"loss": 0.2218, |
|
"step": 22800 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 1.996515451562139e-05, |
|
"loss": 0.22, |
|
"step": 23200 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 1.9963888273044266e-05, |
|
"loss": 0.2238, |
|
"step": 23600 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 1.9962599473968648e-05, |
|
"loss": 0.2201, |
|
"step": 24000 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"eval_accuracy": 0.9294566584357817, |
|
"eval_f1": 0.8486989268171238, |
|
"eval_loss": 0.20412935316562653, |
|
"eval_precision": 0.8638502628204141, |
|
"eval_recall": 0.8340699174694205, |
|
"eval_runtime": 530.3778, |
|
"eval_samples_per_second": 31.96, |
|
"eval_steps_per_second": 3.995, |
|
"step": 24000 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 1.9961288121312156e-05, |
|
"loss": 0.2199, |
|
"step": 24400 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 1.995995421804346e-05, |
|
"loss": 0.2109, |
|
"step": 24800 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 1.9958597767182288e-05, |
|
"loss": 0.2182, |
|
"step": 25200 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 1.99572187717994e-05, |
|
"loss": 0.2145, |
|
"step": 25600 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"eval_accuracy": 0.9299360691881997, |
|
"eval_f1": 0.8503947108030143, |
|
"eval_loss": 0.20322902500629425, |
|
"eval_precision": 0.8596300224752702, |
|
"eval_recall": 0.8413557264057266, |
|
"eval_runtime": 528.0331, |
|
"eval_samples_per_second": 32.102, |
|
"eval_steps_per_second": 4.013, |
|
"step": 25600 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 1.995581723501661e-05, |
|
"loss": 0.2168, |
|
"step": 26000 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 1.995439316000674e-05, |
|
"loss": 0.2143, |
|
"step": 26400 |
|
}, |
|
{ |
|
"epoch": 0.06, |
|
"learning_rate": 1.9952946549993654e-05, |
|
"loss": 0.215, |
|
"step": 26800 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 1.9951477408252226e-05, |
|
"loss": 0.2191, |
|
"step": 27200 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"eval_accuracy": 0.9298897411538306, |
|
"eval_f1": 0.8502200955614806, |
|
"eval_loss": 0.2037987858057022, |
|
"eval_precision": 0.8568045001480312, |
|
"eval_recall": 0.8437361193763645, |
|
"eval_runtime": 534.3649, |
|
"eval_samples_per_second": 31.722, |
|
"eval_steps_per_second": 3.965, |
|
"step": 27200 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 1.994998573810833e-05, |
|
"loss": 0.2124, |
|
"step": 27600 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 1.9948471542938844e-05, |
|
"loss": 0.2206, |
|
"step": 28000 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 1.994693482617165e-05, |
|
"loss": 0.2181, |
|
"step": 28400 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 1.9945375591285598e-05, |
|
"loss": 0.2166, |
|
"step": 28800 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"eval_accuracy": 0.9298203428837232, |
|
"eval_f1": 0.8498029100498555, |
|
"eval_loss": 0.2016349732875824, |
|
"eval_precision": 0.8582691443207665, |
|
"eval_recall": 0.8415020714106458, |
|
"eval_runtime": 528.1147, |
|
"eval_samples_per_second": 32.097, |
|
"eval_steps_per_second": 4.012, |
|
"step": 28800 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 1.994379384181053e-05, |
|
"loss": 0.219, |
|
"step": 29200 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 1.994218958132724e-05, |
|
"loss": 0.2143, |
|
"step": 29600 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 1.9940562813467498e-05, |
|
"loss": 0.21, |
|
"step": 30000 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 1.9938913541914023e-05, |
|
"loss": 0.2181, |
|
"step": 30400 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"eval_accuracy": 0.9298070259183783, |
|
"eval_f1": 0.8510556795525572, |
|
"eval_loss": 0.20200121402740479, |
|
"eval_precision": 0.8524384543694888, |
|
"eval_recall": 0.8496773835799761, |
|
"eval_runtime": 541.0606, |
|
"eval_samples_per_second": 31.329, |
|
"eval_steps_per_second": 3.916, |
|
"step": 30400 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 1.9937241770400475e-05, |
|
"loss": 0.2101, |
|
"step": 30800 |
|
}, |
|
{ |
|
"epoch": 0.07, |
|
"learning_rate": 1.993554750271145e-05, |
|
"loss": 0.2156, |
|
"step": 31200 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 1.993383074268248e-05, |
|
"loss": 0.2135, |
|
"step": 31600 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 1.9932091494200005e-05, |
|
"loss": 0.2162, |
|
"step": 32000 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"eval_accuracy": 0.9304222322047362, |
|
"eval_f1": 0.8521463432829397, |
|
"eval_loss": 0.20058637857437134, |
|
"eval_precision": 0.8554234912632299, |
|
"eval_recall": 0.8488942091395886, |
|
"eval_runtime": 531.2526, |
|
"eval_samples_per_second": 31.908, |
|
"eval_steps_per_second": 3.989, |
|
"step": 32000 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 1.9930329761201378e-05, |
|
"loss": 0.2166, |
|
"step": 32400 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 1.9928545547674855e-05, |
|
"loss": 0.2184, |
|
"step": 32800 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 1.9926738857659583e-05, |
|
"loss": 0.2129, |
|
"step": 33200 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 1.9924909695245593e-05, |
|
"loss": 0.2132, |
|
"step": 33600 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"eval_accuracy": 0.9308982668250948, |
|
"eval_f1": 0.8532510553958745, |
|
"eval_loss": 0.19807742536067963, |
|
"eval_precision": 0.8564849081088927, |
|
"eval_recall": 0.8500415311117475, |
|
"eval_runtime": 513.6824, |
|
"eval_samples_per_second": 32.999, |
|
"eval_steps_per_second": 4.125, |
|
"step": 33600 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 1.9923058064573785e-05, |
|
"loss": 0.2153, |
|
"step": 34000 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 1.9921183969835935e-05, |
|
"loss": 0.2182, |
|
"step": 34400 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 1.9919287415274652e-05, |
|
"loss": 0.215, |
|
"step": 34800 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"learning_rate": 1.9917368405183417e-05, |
|
"loss": 0.2122, |
|
"step": 35200 |
|
}, |
|
{ |
|
"epoch": 0.08, |
|
"eval_accuracy": 0.9304693104906739, |
|
"eval_f1": 0.8513594685871199, |
|
"eval_loss": 0.20116887986660004, |
|
"eval_precision": 0.8675550803002179, |
|
"eval_recall": 0.8357574583073942, |
|
"eval_runtime": 528.6969, |
|
"eval_samples_per_second": 32.062, |
|
"eval_steps_per_second": 4.008, |
|
"step": 35200 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 1.9915426943906523e-05, |
|
"loss": 0.2107, |
|
"step": 35600 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 1.9913463035839103e-05, |
|
"loss": 0.2173, |
|
"step": 36000 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 1.9911476685427103e-05, |
|
"loss": 0.2162, |
|
"step": 36400 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 1.9909467897167268e-05, |
|
"loss": 0.2144, |
|
"step": 36800 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"eval_accuracy": 0.9311981798896942, |
|
"eval_f1": 0.8535460038652544, |
|
"eval_loss": 0.19797766208648682, |
|
"eval_precision": 0.8594919936426753, |
|
"eval_recall": 0.8476817179074265, |
|
"eval_runtime": 527.1896, |
|
"eval_samples_per_second": 32.154, |
|
"eval_steps_per_second": 4.019, |
|
"step": 36800 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 1.9907436675607143e-05, |
|
"loss": 0.2098, |
|
"step": 37200 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 1.9905383025345063e-05, |
|
"loss": 0.2103, |
|
"step": 37600 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 1.9903306951030127e-05, |
|
"loss": 0.2132, |
|
"step": 38000 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 1.9901208457362206e-05, |
|
"loss": 0.2108, |
|
"step": 38400 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"eval_accuracy": 0.9314845884260566, |
|
"eval_f1": 0.8536932048458404, |
|
"eval_loss": 0.1978066861629486, |
|
"eval_precision": 0.8647678400612767, |
|
"eval_recall": 0.8428986372193077, |
|
"eval_runtime": 511.1837, |
|
"eval_samples_per_second": 33.16, |
|
"eval_steps_per_second": 4.145, |
|
"step": 38400 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 1.9899087549091925e-05, |
|
"loss": 0.2121, |
|
"step": 38800 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 1.9896944231020643e-05, |
|
"loss": 0.2157, |
|
"step": 39200 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 1.989477850800046e-05, |
|
"loss": 0.206, |
|
"step": 39600 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 1.9892590384934197e-05, |
|
"loss": 0.2135, |
|
"step": 40000 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"eval_accuracy": 0.9315256646994445, |
|
"eval_f1": 0.854083963556381, |
|
"eval_loss": 0.19659361243247986, |
|
"eval_precision": 0.861560967665005, |
|
"eval_recall": 0.8467356203170313, |
|
"eval_runtime": 525.1972, |
|
"eval_samples_per_second": 32.275, |
|
"eval_steps_per_second": 4.035, |
|
"step": 40000 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 1.9890379866775375e-05, |
|
"loss": 0.2087, |
|
"step": 40400 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 1.9888146958528226e-05, |
|
"loss": 0.2121, |
|
"step": 40800 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 1.988589166524766e-05, |
|
"loss": 0.2137, |
|
"step": 41200 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 1.9883613992039265e-05, |
|
"loss": 0.2124, |
|
"step": 41600 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"eval_accuracy": 0.9315911241488162, |
|
"eval_f1": 0.8541205712583507, |
|
"eval_loss": 0.1993984431028366, |
|
"eval_precision": 0.8652020353679193, |
|
"eval_recall": 0.8433193791084502, |
|
"eval_runtime": 506.5743, |
|
"eval_samples_per_second": 33.462, |
|
"eval_steps_per_second": 4.183, |
|
"step": 41600 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 1.98813139440593e-05, |
|
"loss": 0.2098, |
|
"step": 42000 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 1.9878991526514663e-05, |
|
"loss": 0.2093, |
|
"step": 42400 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 1.9876646744662905e-05, |
|
"loss": 0.2053, |
|
"step": 42800 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 1.9874279603812207e-05, |
|
"loss": 0.2149, |
|
"step": 43200 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"eval_accuracy": 0.9317447381575135, |
|
"eval_f1": 0.854901810239421, |
|
"eval_loss": 0.19594933092594147, |
|
"eval_precision": 0.8568641650447913, |
|
"eval_recall": 0.852948423103989, |
|
"eval_runtime": 529.9283, |
|
"eval_samples_per_second": 31.987, |
|
"eval_steps_per_second": 3.999, |
|
"step": 43200 |
|
}, |
|
{ |
|
"epoch": 0.1, |
|
"learning_rate": 1.987189010932135e-05, |
|
"loss": 0.2131, |
|
"step": 43600 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 1.986947826659974e-05, |
|
"loss": 0.2121, |
|
"step": 44000 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 1.986704408110736e-05, |
|
"loss": 0.2114, |
|
"step": 44400 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 1.9864587558354785e-05, |
|
"loss": 0.214, |
|
"step": 44800 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"eval_accuracy": 0.9318758446191489, |
|
"eval_f1": 0.8547824642975755, |
|
"eval_loss": 0.19624784588813782, |
|
"eval_precision": 0.8622172747052573, |
|
"eval_recall": 0.847474776923908, |
|
"eval_runtime": 521.3801, |
|
"eval_samples_per_second": 32.512, |
|
"eval_steps_per_second": 4.064, |
|
"step": 44800 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 1.9862108703903152e-05, |
|
"loss": 0.2041, |
|
"step": 45200 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 1.9859607523364156e-05, |
|
"loss": 0.2152, |
|
"step": 45600 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 1.9857084022400023e-05, |
|
"loss": 0.2052, |
|
"step": 46000 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 1.9854538206723522e-05, |
|
"loss": 0.2121, |
|
"step": 46400 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"eval_accuracy": 0.9320234566152963, |
|
"eval_f1": 0.8549453521269698, |
|
"eval_loss": 0.19660033285617828, |
|
"eval_precision": 0.8624304031903434, |
|
"eval_recall": 0.8475891089590011, |
|
"eval_runtime": 558.1559, |
|
"eval_samples_per_second": 30.37, |
|
"eval_steps_per_second": 3.796, |
|
"step": 46400 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 1.9851970082097937e-05, |
|
"loss": 0.2093, |
|
"step": 46800 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 1.9849379654337047e-05, |
|
"loss": 0.2125, |
|
"step": 47200 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 1.9846766929305134e-05, |
|
"loss": 0.2094, |
|
"step": 47600 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"learning_rate": 1.9844131912916942e-05, |
|
"loss": 0.2047, |
|
"step": 48000 |
|
}, |
|
{ |
|
"epoch": 0.11, |
|
"eval_accuracy": 0.9315588633313608, |
|
"eval_f1": 0.854936316200278, |
|
"eval_loss": 0.19534911215305328, |
|
"eval_precision": 0.8586442077488341, |
|
"eval_recall": 0.8512603106058397, |
|
"eval_runtime": 531.9624, |
|
"eval_samples_per_second": 31.865, |
|
"eval_steps_per_second": 3.983, |
|
"step": 48000 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 1.984147461113769e-05, |
|
"loss": 0.2095, |
|
"step": 48400 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 1.9838795029983045e-05, |
|
"loss": 0.2077, |
|
"step": 48800 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 1.983609317551911e-05, |
|
"loss": 0.2098, |
|
"step": 49200 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 1.9833369053862413e-05, |
|
"loss": 0.2067, |
|
"step": 49600 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"eval_accuracy": 0.9323271209377394, |
|
"eval_f1": 0.8558910139962379, |
|
"eval_loss": 0.1953110247850418, |
|
"eval_precision": 0.8618274362928947, |
|
"eval_recall": 0.8500358145099929, |
|
"eval_runtime": 522.8678, |
|
"eval_samples_per_second": 32.419, |
|
"eval_steps_per_second": 4.053, |
|
"step": 49600 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 1.9830622671179878e-05, |
|
"loss": 0.2026, |
|
"step": 50000 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 1.9827854033688846e-05, |
|
"loss": 0.2081, |
|
"step": 50400 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 1.9825063147657023e-05, |
|
"loss": 0.2113, |
|
"step": 50800 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 1.9822250019402484e-05, |
|
"loss": 0.2122, |
|
"step": 51200 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"eval_accuracy": 0.9314024358792807, |
|
"eval_f1": 0.8545864448960637, |
|
"eval_loss": 0.1963811069726944, |
|
"eval_precision": 0.8560482342096463, |
|
"eval_recall": 0.8531296393796115, |
|
"eval_runtime": 560.9821, |
|
"eval_samples_per_second": 30.217, |
|
"eval_steps_per_second": 3.777, |
|
"step": 51200 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 1.9819414655293664e-05, |
|
"loss": 0.2077, |
|
"step": 51600 |
|
}, |
|
{ |
|
"epoch": 0.12, |
|
"learning_rate": 1.9816557061749324e-05, |
|
"loss": 0.2078, |
|
"step": 52000 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 1.9813677245238563e-05, |
|
"loss": 0.2082, |
|
"step": 52400 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 1.9810775212280777e-05, |
|
"loss": 0.2089, |
|
"step": 52800 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"eval_accuracy": 0.932371948468971, |
|
"eval_f1": 0.8555021770451893, |
|
"eval_loss": 0.19524754583835602, |
|
"eval_precision": 0.8634036344497207, |
|
"eval_recall": 0.8477440288665522, |
|
"eval_runtime": 539.6367, |
|
"eval_samples_per_second": 31.412, |
|
"eval_steps_per_second": 3.927, |
|
"step": 52800 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 1.980785096944566e-05, |
|
"loss": 0.2044, |
|
"step": 53200 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 1.980490452335319e-05, |
|
"loss": 0.2103, |
|
"step": 53600 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 1.98019358806736e-05, |
|
"loss": 0.207, |
|
"step": 54000 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 1.979894504812738e-05, |
|
"loss": 0.2082, |
|
"step": 54400 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"eval_accuracy": 0.9324216526353993, |
|
"eval_f1": 0.8556495566699869, |
|
"eval_loss": 0.19236618280410767, |
|
"eval_precision": 0.863101275835435, |
|
"eval_recall": 0.8483254072650005, |
|
"eval_runtime": 529.6871, |
|
"eval_samples_per_second": 32.002, |
|
"eval_steps_per_second": 4.0, |
|
"step": 54400 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 1.979593203248525e-05, |
|
"loss": 0.211, |
|
"step": 54800 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 1.979289684056815e-05, |
|
"loss": 0.2128, |
|
"step": 55200 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 1.9789839479247225e-05, |
|
"loss": 0.2072, |
|
"step": 55600 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 1.9786759955443803e-05, |
|
"loss": 0.2108, |
|
"step": 56000 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"eval_accuracy": 0.9325281883581589, |
|
"eval_f1": 0.8558209667182756, |
|
"eval_loss": 0.19310402870178223, |
|
"eval_precision": 0.8646491603220899, |
|
"eval_recall": 0.847171225370736, |
|
"eval_runtime": 532.8149, |
|
"eval_samples_per_second": 31.814, |
|
"eval_steps_per_second": 3.977, |
|
"step": 56000 |
|
}, |
|
{ |
|
"epoch": 0.13, |
|
"learning_rate": 1.978365827612939e-05, |
|
"loss": 0.2036, |
|
"step": 56400 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 1.9780534448325643e-05, |
|
"loss": 0.2092, |
|
"step": 56800 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 1.977738847910436e-05, |
|
"loss": 0.2069, |
|
"step": 57200 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 1.9774220375587466e-05, |
|
"loss": 0.2132, |
|
"step": 57600 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"eval_accuracy": 0.9326324733262122, |
|
"eval_f1": 0.8566221682929204, |
|
"eval_loss": 0.19397494196891785, |
|
"eval_precision": 0.8605342615742637, |
|
"eval_recall": 0.8527454837416988, |
|
"eval_runtime": 520.0235, |
|
"eval_samples_per_second": 32.597, |
|
"eval_steps_per_second": 4.075, |
|
"step": 57600 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 1.9771030144946995e-05, |
|
"loss": 0.2051, |
|
"step": 58000 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 1.9767817794405063e-05, |
|
"loss": 0.2047, |
|
"step": 58400 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 1.9764583331233878e-05, |
|
"loss": 0.2064, |
|
"step": 58800 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 1.9761326762755694e-05, |
|
"loss": 0.2054, |
|
"step": 59200 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"eval_accuracy": 0.9326658595210207, |
|
"eval_f1": 0.8568344024447003, |
|
"eval_loss": 0.1941342055797577, |
|
"eval_precision": 0.8607349951658717, |
|
"eval_recall": 0.8529690028703057, |
|
"eval_runtime": 535.718, |
|
"eval_samples_per_second": 31.642, |
|
"eval_steps_per_second": 3.955, |
|
"step": 59200 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 1.975804809634281e-05, |
|
"loss": 0.2069, |
|
"step": 59600 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 1.9754747339417558e-05, |
|
"loss": 0.2057, |
|
"step": 60000 |
|
}, |
|
{ |
|
"epoch": 0.14, |
|
"learning_rate": 1.9751424499452267e-05, |
|
"loss": 0.2044, |
|
"step": 60400 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 1.974807958396927e-05, |
|
"loss": 0.2059, |
|
"step": 60800 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"eval_accuracy": 0.9328301646145724, |
|
"eval_f1": 0.8566406503175108, |
|
"eval_loss": 0.19376616179943085, |
|
"eval_precision": 0.8633672116146144, |
|
"eval_recall": 0.8500180930445534, |
|
"eval_runtime": 536.8802, |
|
"eval_samples_per_second": 31.573, |
|
"eval_steps_per_second": 3.947, |
|
"step": 60800 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 1.9744712600540868e-05, |
|
"loss": 0.2043, |
|
"step": 61200 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 1.9741323556789322e-05, |
|
"loss": 0.2084, |
|
"step": 61600 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 1.9737912460386838e-05, |
|
"loss": 0.2027, |
|
"step": 62000 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 1.9734479319055534e-05, |
|
"loss": 0.2067, |
|
"step": 62400 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"eval_accuracy": 0.933122012474808, |
|
"eval_f1": 0.8577661832107555, |
|
"eval_loss": 0.19168977439403534, |
|
"eval_precision": 0.8620395800772849, |
|
"eval_recall": 0.8535349464440165, |
|
"eval_runtime": 534.4114, |
|
"eval_samples_per_second": 31.719, |
|
"eval_steps_per_second": 3.965, |
|
"step": 62400 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 1.9731024140567446e-05, |
|
"loss": 0.2073, |
|
"step": 62800 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 1.972754693274449e-05, |
|
"loss": 0.204, |
|
"step": 63200 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 1.972404770345846e-05, |
|
"loss": 0.2046, |
|
"step": 63600 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 1.972052646063099e-05, |
|
"loss": 0.199, |
|
"step": 64000 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"eval_accuracy": 0.9330196031356764, |
|
"eval_f1": 0.8569652491838428, |
|
"eval_loss": 0.1935248225927353, |
|
"eval_precision": 0.8657376402698408, |
|
"eval_recall": 0.8483688534383359, |
|
"eval_runtime": 524.3065, |
|
"eval_samples_per_second": 32.33, |
|
"eval_steps_per_second": 4.042, |
|
"step": 64000 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 1.9716983212233566e-05, |
|
"loss": 0.2049, |
|
"step": 64400 |
|
}, |
|
{ |
|
"epoch": 0.15, |
|
"learning_rate": 1.9713417966287478e-05, |
|
"loss": 0.206, |
|
"step": 64800 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.970983073086382e-05, |
|
"loss": 0.2094, |
|
"step": 65200 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.970622151408346e-05, |
|
"loss": 0.2013, |
|
"step": 65600 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"eval_accuracy": 0.9325128082009999, |
|
"eval_f1": 0.8558148238957365, |
|
"eval_loss": 0.19440777599811554, |
|
"eval_precision": 0.8647777929522669, |
|
"eval_recall": 0.8470357419091507, |
|
"eval_runtime": 522.3949, |
|
"eval_samples_per_second": 32.449, |
|
"eval_steps_per_second": 4.056, |
|
"step": 65600 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.970259032411704e-05, |
|
"loss": 0.2075, |
|
"step": 66000 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.969893716918494e-05, |
|
"loss": 0.2055, |
|
"step": 66400 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.969526205755725e-05, |
|
"loss": 0.2067, |
|
"step": 66800 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.9691564997553793e-05, |
|
"loss": 0.2065, |
|
"step": 67200 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"eval_accuracy": 0.9328590492999684, |
|
"eval_f1": 0.8572791976520902, |
|
"eval_loss": 0.19182860851287842, |
|
"eval_precision": 0.8606402904681217, |
|
"eval_recall": 0.8539442551296497, |
|
"eval_runtime": 519.5077, |
|
"eval_samples_per_second": 32.629, |
|
"eval_steps_per_second": 4.079, |
|
"step": 67200 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.968784599754406e-05, |
|
"loss": 0.2009, |
|
"step": 67600 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.9684105065947214e-05, |
|
"loss": 0.208, |
|
"step": 68000 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.9680342211232074e-05, |
|
"loss": 0.2064, |
|
"step": 68400 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"learning_rate": 1.9676557441917078e-05, |
|
"loss": 0.2042, |
|
"step": 68800 |
|
}, |
|
{ |
|
"epoch": 0.16, |
|
"eval_accuracy": 0.9334931994384367, |
|
"eval_f1": 0.8582636191226987, |
|
"eval_loss": 0.19114798307418823, |
|
"eval_precision": 0.8652401924816433, |
|
"eval_recall": 0.8513986523683024, |
|
"eval_runtime": 534.6796, |
|
"eval_samples_per_second": 31.703, |
|
"eval_steps_per_second": 3.963, |
|
"step": 68800 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.9672750766570285e-05, |
|
"loss": 0.1997, |
|
"step": 69200 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.9668922193809342e-05, |
|
"loss": 0.2074, |
|
"step": 69600 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.9665071732301467e-05, |
|
"loss": 0.2062, |
|
"step": 70000 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.966119939076343e-05, |
|
"loss": 0.2061, |
|
"step": 70400 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"eval_accuracy": 0.9335400901614823, |
|
"eval_f1": 0.8580545497594751, |
|
"eval_loss": 0.19060759246349335, |
|
"eval_precision": 0.8668960757166553, |
|
"eval_recall": 0.8493915534922434, |
|
"eval_runtime": 536.0524, |
|
"eval_samples_per_second": 31.622, |
|
"eval_steps_per_second": 3.953, |
|
"step": 70400 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.9657305177961533e-05, |
|
"loss": 0.2061, |
|
"step": 70800 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.9653389102711595e-05, |
|
"loss": 0.2009, |
|
"step": 71200 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.964945117387892e-05, |
|
"loss": 0.2056, |
|
"step": 71600 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.9645491400378297e-05, |
|
"loss": 0.2004, |
|
"step": 72000 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"eval_accuracy": 0.9332803155558098, |
|
"eval_f1": 0.856993119884046, |
|
"eval_loss": 0.19279062747955322, |
|
"eval_precision": 0.870760433009192, |
|
"eval_recall": 0.8436543719712729, |
|
"eval_runtime": 555.1185, |
|
"eval_samples_per_second": 30.536, |
|
"eval_steps_per_second": 3.817, |
|
"step": 72000 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.9641509791173953e-05, |
|
"loss": 0.2043, |
|
"step": 72400 |
|
}, |
|
{ |
|
"epoch": 0.17, |
|
"learning_rate": 1.9637506355279556e-05, |
|
"loss": 0.2026, |
|
"step": 72800 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 1.9633481101758184e-05, |
|
"loss": 0.2087, |
|
"step": 73200 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 1.9629434039722306e-05, |
|
"loss": 0.2035, |
|
"step": 73600 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"eval_accuracy": 0.9337994896413704, |
|
"eval_f1": 0.8586298935722808, |
|
"eval_loss": 0.1900637298822403, |
|
"eval_precision": 0.8671817697358678, |
|
"eval_recall": 0.8502450421342133, |
|
"eval_runtime": 588.1093, |
|
"eval_samples_per_second": 28.823, |
|
"eval_steps_per_second": 3.603, |
|
"step": 73600 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 1.9625365178333766e-05, |
|
"loss": 0.2007, |
|
"step": 74000 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 1.9621274526803746e-05, |
|
"loss": 0.2047, |
|
"step": 74400 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 1.961716209439277e-05, |
|
"loss": 0.2058, |
|
"step": 74800 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 1.9613027890410664e-05, |
|
"loss": 0.2024, |
|
"step": 75200 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"eval_accuracy": 0.9331368299432904, |
|
"eval_f1": 0.8577313380902346, |
|
"eval_loss": 0.19313408434391022, |
|
"eval_precision": 0.8586822319658123, |
|
"eval_recall": 0.8567825479008353, |
|
"eval_runtime": 560.4828, |
|
"eval_samples_per_second": 30.244, |
|
"eval_steps_per_second": 3.781, |
|
"step": 75200 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 1.9608871924216543e-05, |
|
"loss": 0.2031, |
|
"step": 75600 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 1.9604694205218786e-05, |
|
"loss": 0.2064, |
|
"step": 76000 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 1.9600494742875017e-05, |
|
"loss": 0.2087, |
|
"step": 76400 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 1.9596273546692083e-05, |
|
"loss": 0.2081, |
|
"step": 76800 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"eval_accuracy": 0.933596359029137, |
|
"eval_f1": 0.8587259463687285, |
|
"eval_loss": 0.18991322815418243, |
|
"eval_precision": 0.8629753346711713, |
|
"eval_recall": 0.854518201945817, |
|
"eval_runtime": 586.3522, |
|
"eval_samples_per_second": 28.909, |
|
"eval_steps_per_second": 3.614, |
|
"step": 76800 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 1.9592030626226027e-05, |
|
"loss": 0.2047, |
|
"step": 77200 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 1.9587765991082094e-05, |
|
"loss": 0.2071, |
|
"step": 77600 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 1.9583479650914658e-05, |
|
"loss": 0.2046, |
|
"step": 78000 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 1.957917161542725e-05, |
|
"loss": 0.2066, |
|
"step": 78400 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"eval_accuracy": 0.9335881062618809, |
|
"eval_f1": 0.858251755880012, |
|
"eval_loss": 0.18930841982364655, |
|
"eval_precision": 0.8665965780515792, |
|
"eval_recall": 0.8500661124992925, |
|
"eval_runtime": 583.568, |
|
"eval_samples_per_second": 29.047, |
|
"eval_steps_per_second": 3.631, |
|
"step": 78400 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 1.9574841894372497e-05, |
|
"loss": 0.2065, |
|
"step": 78800 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 1.9570490497552145e-05, |
|
"loss": 0.2019, |
|
"step": 79200 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 1.956611743481698e-05, |
|
"loss": 0.2016, |
|
"step": 79600 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 1.956172271606686e-05, |
|
"loss": 0.206, |
|
"step": 80000 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"eval_accuracy": 0.9338216220626478, |
|
"eval_f1": 0.8590100009373626, |
|
"eval_loss": 0.18893107771873474, |
|
"eval_precision": 0.8668623725466912, |
|
"eval_recall": 0.8512986118375959, |
|
"eval_runtime": 566.5189, |
|
"eval_samples_per_second": 29.921, |
|
"eval_steps_per_second": 3.74, |
|
"step": 80000 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 1.9557306351250655e-05, |
|
"loss": 0.2043, |
|
"step": 80400 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 1.9552868350366244e-05, |
|
"loss": 0.2038, |
|
"step": 80800 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 1.954840872346048e-05, |
|
"loss": 0.2001, |
|
"step": 81200 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 1.9543927480629183e-05, |
|
"loss": 0.2049, |
|
"step": 81600 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"eval_accuracy": 0.9339795500178654, |
|
"eval_f1": 0.8589639865169605, |
|
"eval_loss": 0.18991310894489288, |
|
"eval_precision": 0.8678010738479005, |
|
"eval_recall": 0.8503050664526371, |
|
"eval_runtime": 557.0448, |
|
"eval_samples_per_second": 30.43, |
|
"eval_steps_per_second": 3.804, |
|
"step": 81600 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 1.9539424632017097e-05, |
|
"loss": 0.2033, |
|
"step": 82000 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 1.9534900187817882e-05, |
|
"loss": 0.203, |
|
"step": 82400 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 1.9530354158274092e-05, |
|
"loss": 0.2052, |
|
"step": 82800 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 1.9525786553677144e-05, |
|
"loss": 0.1982, |
|
"step": 83200 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"eval_accuracy": 0.9339099641848657, |
|
"eval_f1": 0.8592924732031696, |
|
"eval_loss": 0.1890699863433838, |
|
"eval_precision": 0.8662634588192373, |
|
"eval_recall": 0.8524327856257192, |
|
"eval_runtime": 539.2047, |
|
"eval_samples_per_second": 31.437, |
|
"eval_steps_per_second": 3.93, |
|
"step": 83200 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 1.952119738436729e-05, |
|
"loss": 0.2053, |
|
"step": 83600 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 1.9516586660733604e-05, |
|
"loss": 0.1983, |
|
"step": 84000 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 1.951195439321396e-05, |
|
"loss": 0.2066, |
|
"step": 84400 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 1.9507300592295e-05, |
|
"loss": 0.2064, |
|
"step": 84800 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"eval_accuracy": 0.9341284749542581, |
|
"eval_f1": 0.8597960265243075, |
|
"eval_loss": 0.18801522254943848, |
|
"eval_precision": 0.864076778649965, |
|
"eval_recall": 0.8555574801448129, |
|
"eval_runtime": 541.7673, |
|
"eval_samples_per_second": 31.288, |
|
"eval_steps_per_second": 3.911, |
|
"step": 84800 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 1.950262526851212e-05, |
|
"loss": 0.2018, |
|
"step": 85200 |
|
}, |
|
{ |
|
"epoch": 0.2, |
|
"learning_rate": 1.949792843244943e-05, |
|
"loss": 0.2013, |
|
"step": 85600 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 1.949321009473974e-05, |
|
"loss": 0.2012, |
|
"step": 86000 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 1.9488470266064544e-05, |
|
"loss": 0.2024, |
|
"step": 86400 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"eval_accuracy": 0.9340680797029755, |
|
"eval_f1": 0.8598979780117517, |
|
"eval_loss": 0.18994107842445374, |
|
"eval_precision": 0.8619611163413725, |
|
"eval_recall": 0.8578446925068499, |
|
"eval_runtime": 553.5054, |
|
"eval_samples_per_second": 30.625, |
|
"eval_steps_per_second": 3.828, |
|
"step": 86400 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 1.948370895715399e-05, |
|
"loss": 0.2021, |
|
"step": 86800 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 1.9478926178786838e-05, |
|
"loss": 0.2012, |
|
"step": 87200 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 1.9474121941790467e-05, |
|
"loss": 0.2017, |
|
"step": 87600 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 1.9469296257040824e-05, |
|
"loss": 0.197, |
|
"step": 88000 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"eval_accuracy": 0.9335798534946249, |
|
"eval_f1": 0.8583250869787784, |
|
"eval_loss": 0.19300752878189087, |
|
"eval_precision": 0.8641208309240769, |
|
"eval_recall": 0.8526065703190607, |
|
"eval_runtime": 566.2208, |
|
"eval_samples_per_second": 29.937, |
|
"eval_steps_per_second": 3.742, |
|
"step": 88000 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 1.946444913546242e-05, |
|
"loss": 0.1979, |
|
"step": 88400 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 1.9459580588028288e-05, |
|
"loss": 0.2008, |
|
"step": 88800 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 1.9454690625759964e-05, |
|
"loss": 0.2115, |
|
"step": 89200 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"learning_rate": 1.9449779259727465e-05, |
|
"loss": 0.1971, |
|
"step": 89600 |
|
}, |
|
{ |
|
"epoch": 0.21, |
|
"eval_accuracy": 0.9340952763223418, |
|
"eval_f1": 0.8593661543865001, |
|
"eval_loss": 0.1885276883840561, |
|
"eval_precision": 0.8685007738442398, |
|
"eval_recall": 0.8504216851284321, |
|
"eval_runtime": 572.9896, |
|
"eval_samples_per_second": 29.583, |
|
"eval_steps_per_second": 3.698, |
|
"step": 89600 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 1.944484650104927e-05, |
|
"loss": 0.2036, |
|
"step": 90000 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 1.943989236089228e-05, |
|
"loss": 0.2018, |
|
"step": 90400 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 1.9434916850471796e-05, |
|
"loss": 0.2009, |
|
"step": 90800 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 1.9429919981051503e-05, |
|
"loss": 0.2005, |
|
"step": 91200 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"eval_accuracy": 0.9335445916708947, |
|
"eval_f1": 0.8592451467237712, |
|
"eval_loss": 0.19177058339118958, |
|
"eval_precision": 0.8576361421451625, |
|
"eval_recall": 0.8608601999324298, |
|
"eval_runtime": 558.7922, |
|
"eval_samples_per_second": 30.335, |
|
"eval_steps_per_second": 3.792, |
|
"step": 91200 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 1.9424901763943444e-05, |
|
"loss": 0.2029, |
|
"step": 91600 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 1.941986221050798e-05, |
|
"loss": 0.2, |
|
"step": 92000 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 1.9414801332153777e-05, |
|
"loss": 0.2016, |
|
"step": 92400 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 1.940971914033778e-05, |
|
"loss": 0.1998, |
|
"step": 92800 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"eval_accuracy": 0.9339776743889435, |
|
"eval_f1": 0.8600518249254383, |
|
"eval_loss": 0.1904406100511551, |
|
"eval_precision": 0.8604969893824371, |
|
"eval_recall": 0.8596071208278097, |
|
"eval_runtime": 563.7074, |
|
"eval_samples_per_second": 30.071, |
|
"eval_steps_per_second": 3.759, |
|
"step": 92800 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 1.940461564656518e-05, |
|
"loss": 0.1965, |
|
"step": 93200 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 1.9399490862389393e-05, |
|
"loss": 0.1996, |
|
"step": 93600 |
|
}, |
|
{ |
|
"epoch": 0.22, |
|
"learning_rate": 1.9394344799412032e-05, |
|
"loss": 0.2021, |
|
"step": 94000 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 1.9389177469282885e-05, |
|
"loss": 0.1994, |
|
"step": 94400 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"eval_accuracy": 0.9344293258333185, |
|
"eval_f1": 0.8603255176511329, |
|
"eval_loss": 0.18682879209518433, |
|
"eval_precision": 0.866478567477395, |
|
"eval_recall": 0.8542592398863311, |
|
"eval_runtime": 557.36, |
|
"eval_samples_per_second": 30.413, |
|
"eval_steps_per_second": 3.802, |
|
"step": 94400 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 1.9383988883699882e-05, |
|
"loss": 0.1987, |
|
"step": 94800 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 1.937877905440907e-05, |
|
"loss": 0.1985, |
|
"step": 95200 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 1.9373547993204595e-05, |
|
"loss": 0.2011, |
|
"step": 95600 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 1.9368295711928663e-05, |
|
"loss": 0.198, |
|
"step": 96000 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"eval_accuracy": 0.9344143208019439, |
|
"eval_f1": 0.8607285431866443, |
|
"eval_loss": 0.1870521456003189, |
|
"eval_precision": 0.8654424742449163, |
|
"eval_recall": 0.8560656860408017, |
|
"eval_runtime": 571.9595, |
|
"eval_samples_per_second": 29.637, |
|
"eval_steps_per_second": 3.705, |
|
"step": 96000 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 1.9363022222471514e-05, |
|
"loss": 0.1956, |
|
"step": 96400 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 1.935772753677141e-05, |
|
"loss": 0.2051, |
|
"step": 96800 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 1.9352411666814587e-05, |
|
"loss": 0.2025, |
|
"step": 97200 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 1.934707462463525e-05, |
|
"loss": 0.2013, |
|
"step": 97600 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"eval_accuracy": 0.9346378957694251, |
|
"eval_f1": 0.8605651474679509, |
|
"eval_loss": 0.18816794455051422, |
|
"eval_precision": 0.8697104592544158, |
|
"eval_recall": 0.8516101666332245, |
|
"eval_runtime": 586.058, |
|
"eval_samples_per_second": 28.924, |
|
"eval_steps_per_second": 3.616, |
|
"step": 97600 |
|
}, |
|
{ |
|
"epoch": 0.23, |
|
"learning_rate": 1.9341716422315522e-05, |
|
"loss": 0.2001, |
|
"step": 98000 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 1.933633707198544e-05, |
|
"loss": 0.1973, |
|
"step": 98400 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 1.933093658582291e-05, |
|
"loss": 0.2038, |
|
"step": 98800 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 1.932551497605369e-05, |
|
"loss": 0.1986, |
|
"step": 99200 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"eval_accuracy": 0.9345182306442129, |
|
"eval_f1": 0.8603678920111084, |
|
"eval_loss": 0.18777824938297272, |
|
"eval_precision": 0.8690284230703961, |
|
"eval_recall": 0.8518782752555178, |
|
"eval_runtime": 576.3642, |
|
"eval_samples_per_second": 29.41, |
|
"eval_steps_per_second": 3.676, |
|
"step": 99200 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 1.932007225495135e-05, |
|
"loss": 0.2055, |
|
"step": 99600 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 1.9314608434837257e-05, |
|
"loss": 0.2032, |
|
"step": 100000 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 1.9309123528080547e-05, |
|
"loss": 0.1943, |
|
"step": 100400 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 1.9303617547098085e-05, |
|
"loss": 0.1981, |
|
"step": 100800 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"eval_accuracy": 0.9347168597470339, |
|
"eval_f1": 0.8615201794813424, |
|
"eval_loss": 0.18754957616329193, |
|
"eval_precision": 0.8630764076066814, |
|
"eval_recall": 0.8599695533790547, |
|
"eval_runtime": 576.9225, |
|
"eval_samples_per_second": 29.382, |
|
"eval_steps_per_second": 3.673, |
|
"step": 100800 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 1.9298090504354456e-05, |
|
"loss": 0.2044, |
|
"step": 101200 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 1.9292542412361904e-05, |
|
"loss": 0.1962, |
|
"step": 101600 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 1.928697328368035e-05, |
|
"loss": 0.2001, |
|
"step": 102000 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"learning_rate": 1.9281383130917316e-05, |
|
"loss": 0.1989, |
|
"step": 102400 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"eval_accuracy": 0.9346658426403603, |
|
"eval_f1": 0.8603147718806256, |
|
"eval_loss": 0.18593807518482208, |
|
"eval_precision": 0.8706630441336685, |
|
"eval_recall": 0.8502095992033344, |
|
"eval_runtime": 592.5905, |
|
"eval_samples_per_second": 28.605, |
|
"eval_steps_per_second": 3.576, |
|
"step": 102400 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 1.9275771966727935e-05, |
|
"loss": 0.1961, |
|
"step": 102800 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 1.9270139803814904e-05, |
|
"loss": 0.2001, |
|
"step": 103200 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 1.9264486654928446e-05, |
|
"loss": 0.1972, |
|
"step": 103600 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 1.925881253286631e-05, |
|
"loss": 0.2021, |
|
"step": 104000 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"eval_accuracy": 0.9343708062109576, |
|
"eval_f1": 0.8604473028249575, |
|
"eval_loss": 0.18771718442440033, |
|
"eval_precision": 0.8620065474298526, |
|
"eval_recall": 0.8588936889288289, |
|
"eval_runtime": 589.4347, |
|
"eval_samples_per_second": 28.758, |
|
"eval_steps_per_second": 3.595, |
|
"step": 104000 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 1.92531174504737e-05, |
|
"loss": 0.1999, |
|
"step": 104400 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 1.9247401420643298e-05, |
|
"loss": 0.1992, |
|
"step": 104800 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 1.924166445631519e-05, |
|
"loss": 0.2034, |
|
"step": 105200 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 1.9235906570476856e-05, |
|
"loss": 0.1982, |
|
"step": 105600 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"eval_accuracy": 0.9346393962725627, |
|
"eval_f1": 0.8608628343048513, |
|
"eval_loss": 0.18736371397972107, |
|
"eval_precision": 0.8650660885005201, |
|
"eval_recall": 0.8567002288355683, |
|
"eval_runtime": 542.7745, |
|
"eval_samples_per_second": 31.23, |
|
"eval_steps_per_second": 3.904, |
|
"step": 105600 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 1.923012777616315e-05, |
|
"loss": 0.1971, |
|
"step": 106000 |
|
}, |
|
{ |
|
"epoch": 0.25, |
|
"learning_rate": 1.922432808645624e-05, |
|
"loss": 0.2043, |
|
"step": 106400 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 1.9218507514485618e-05, |
|
"loss": 0.2008, |
|
"step": 106800 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 1.9212666073428033e-05, |
|
"loss": 0.1974, |
|
"step": 107200 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"eval_accuracy": 0.9349308690070139, |
|
"eval_f1": 0.8612429075740525, |
|
"eval_loss": 0.18690726161003113, |
|
"eval_precision": 0.8663683718831617, |
|
"eval_recall": 0.8561777314351929, |
|
"eval_runtime": 547.7551, |
|
"eval_samples_per_second": 30.946, |
|
"eval_steps_per_second": 3.869, |
|
"step": 107200 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 1.9206803776507485e-05, |
|
"loss": 0.2005, |
|
"step": 107600 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 1.9200920636995193e-05, |
|
"loss": 0.2061, |
|
"step": 108000 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 1.919501666820955e-05, |
|
"loss": 0.1988, |
|
"step": 108400 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 1.9189091883516107e-05, |
|
"loss": 0.2002, |
|
"step": 108800 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"eval_accuracy": 0.9346275798103552, |
|
"eval_f1": 0.8608065115134624, |
|
"eval_loss": 0.18747952580451965, |
|
"eval_precision": 0.8657082909100581, |
|
"eval_recall": 0.8559599289083406, |
|
"eval_runtime": 548.6018, |
|
"eval_samples_per_second": 30.899, |
|
"eval_steps_per_second": 3.863, |
|
"step": 108800 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 1.9183146296327546e-05, |
|
"loss": 0.1975, |
|
"step": 109200 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 1.9177179920103626e-05, |
|
"loss": 0.2038, |
|
"step": 109600 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 1.9171192768351184e-05, |
|
"loss": 0.2014, |
|
"step": 110000 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"learning_rate": 1.9165184854624087e-05, |
|
"loss": 0.1985, |
|
"step": 110400 |
|
}, |
|
{ |
|
"epoch": 0.26, |
|
"eval_accuracy": 0.9349625671357927, |
|
"eval_f1": 0.8614543710510242, |
|
"eval_loss": 0.1877671778202057, |
|
"eval_precision": 0.8671524027311504, |
|
"eval_recall": 0.8558307337086855, |
|
"eval_runtime": 510.7455, |
|
"eval_samples_per_second": 33.189, |
|
"eval_steps_per_second": 4.149, |
|
"step": 110400 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 1.915915619252319e-05, |
|
"loss": 0.1987, |
|
"step": 110800 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 1.9153106795696335e-05, |
|
"loss": 0.1988, |
|
"step": 111200 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 1.9147036677838292e-05, |
|
"loss": 0.194, |
|
"step": 111600 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 1.9140945852690753e-05, |
|
"loss": 0.1973, |
|
"step": 112000 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"eval_accuracy": 0.9347995749824863, |
|
"eval_f1": 0.8608099277554584, |
|
"eval_loss": 0.18676413595676422, |
|
"eval_precision": 0.8697211122832896, |
|
"eval_recall": 0.8520794996372816, |
|
"eval_runtime": 521.7009, |
|
"eval_samples_per_second": 32.492, |
|
"eval_steps_per_second": 4.062, |
|
"step": 112000 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 1.913483433404227e-05, |
|
"loss": 0.1965, |
|
"step": 112400 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 1.9128702135728262e-05, |
|
"loss": 0.1972, |
|
"step": 112800 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 1.9122549271630943e-05, |
|
"loss": 0.1954, |
|
"step": 113200 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 1.911637575567933e-05, |
|
"loss": 0.1976, |
|
"step": 113600 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"eval_accuracy": 0.9349265550604937, |
|
"eval_f1": 0.8613004235446717, |
|
"eval_loss": 0.1872067153453827, |
|
"eval_precision": 0.86785388869866, |
|
"eval_recall": 0.8548451915661831, |
|
"eval_runtime": 499.7627, |
|
"eval_samples_per_second": 33.918, |
|
"eval_steps_per_second": 4.24, |
|
"step": 113600 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 1.911018160184917e-05, |
|
"loss": 0.1977, |
|
"step": 114000 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 1.9103966824162948e-05, |
|
"loss": 0.1971, |
|
"step": 114400 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 1.9097731436689838e-05, |
|
"loss": 0.1989, |
|
"step": 114800 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 1.9091475453545655e-05, |
|
"loss": 0.2008, |
|
"step": 115200 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"eval_accuracy": 0.9352112755308264, |
|
"eval_f1": 0.8614071533618605, |
|
"eval_loss": 0.1852777600288391, |
|
"eval_precision": 0.8708558956710106, |
|
"eval_recall": 0.8521612470423732, |
|
"eval_runtime": 499.6991, |
|
"eval_samples_per_second": 33.922, |
|
"eval_steps_per_second": 4.241, |
|
"step": 115200 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 1.908519888889286e-05, |
|
"loss": 0.1961, |
|
"step": 115600 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 1.9078901756940487e-05, |
|
"loss": 0.1967, |
|
"step": 116000 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 1.907258407194415e-05, |
|
"loss": 0.1969, |
|
"step": 116400 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 1.9066245848205976e-05, |
|
"loss": 0.1965, |
|
"step": 116800 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"eval_accuracy": 0.9348509672149442, |
|
"eval_f1": 0.8620987333605483, |
|
"eval_loss": 0.18574866652488708, |
|
"eval_precision": 0.8610201753000406, |
|
"eval_recall": 0.8631799969244682, |
|
"eval_runtime": 504.8348, |
|
"eval_samples_per_second": 33.577, |
|
"eval_steps_per_second": 4.197, |
|
"step": 116800 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 1.9059887100074593e-05, |
|
"loss": 0.2024, |
|
"step": 117200 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 1.9053507841945094e-05, |
|
"loss": 0.1945, |
|
"step": 117600 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 1.9047108088259005e-05, |
|
"loss": 0.1964, |
|
"step": 118000 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 1.9040687853504245e-05, |
|
"loss": 0.1978, |
|
"step": 118400 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"eval_accuracy": 0.9351407518833659, |
|
"eval_f1": 0.8617508928903632, |
|
"eval_loss": 0.18537767231464386, |
|
"eval_precision": 0.8696446659137744, |
|
"eval_recall": 0.8539991345064943, |
|
"eval_runtime": 544.2804, |
|
"eval_samples_per_second": 31.144, |
|
"eval_steps_per_second": 3.893, |
|
"step": 118400 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 1.90342471522151e-05, |
|
"loss": 0.1964, |
|
"step": 118800 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 1.9027785998972196e-05, |
|
"loss": 0.1962, |
|
"step": 119200 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 1.902130440840245e-05, |
|
"loss": 0.1959, |
|
"step": 119600 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 1.9014802395179042e-05, |
|
"loss": 0.1959, |
|
"step": 120000 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"eval_accuracy": 0.9354226589103158, |
|
"eval_f1": 0.8624690387137249, |
|
"eval_loss": 0.18383263051509857, |
|
"eval_precision": 0.8679774760483272, |
|
"eval_recall": 0.8570300767568118, |
|
"eval_runtime": 543.7329, |
|
"eval_samples_per_second": 31.175, |
|
"eval_steps_per_second": 3.897, |
|
"step": 120000 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 1.9008279974021402e-05, |
|
"loss": 0.198, |
|
"step": 120400 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 1.9001737159695142e-05, |
|
"loss": 0.1978, |
|
"step": 120800 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 1.8995173967012047e-05, |
|
"loss": 0.1994, |
|
"step": 121200 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 1.8988590410830047e-05, |
|
"loss": 0.2025, |
|
"step": 121600 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"eval_accuracy": 0.9350537227013933, |
|
"eval_f1": 0.8620096371094856, |
|
"eval_loss": 0.1854720115661621, |
|
"eval_precision": 0.8635032267316793, |
|
"eval_recall": 0.8605212054483788, |
|
"eval_runtime": 541.9489, |
|
"eval_samples_per_second": 31.278, |
|
"eval_steps_per_second": 3.91, |
|
"step": 121600 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 1.898198650605315e-05, |
|
"loss": 0.1999, |
|
"step": 122000 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 1.897536226763144e-05, |
|
"loss": 0.1962, |
|
"step": 122400 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 1.896871771056104e-05, |
|
"loss": 0.1951, |
|
"step": 122800 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"learning_rate": 1.8962052849884055e-05, |
|
"loss": 0.1981, |
|
"step": 123200 |
|
}, |
|
{ |
|
"epoch": 0.29, |
|
"eval_accuracy": 0.9353843960803107, |
|
"eval_f1": 0.862056548555178, |
|
"eval_loss": 0.18524692952632904, |
|
"eval_precision": 0.8695834506313151, |
|
"eval_recall": 0.8546588303489814, |
|
"eval_runtime": 517.5866, |
|
"eval_samples_per_second": 32.75, |
|
"eval_steps_per_second": 4.094, |
|
"step": 123200 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 1.8955367700688573e-05, |
|
"loss": 0.1946, |
|
"step": 123600 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 1.8948662278108597e-05, |
|
"loss": 0.1965, |
|
"step": 124000 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 1.8941936597324024e-05, |
|
"loss": 0.1983, |
|
"step": 124400 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 1.893519067356062e-05, |
|
"loss": 0.1964, |
|
"step": 124800 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"eval_accuracy": 0.935124433911746, |
|
"eval_f1": 0.861646976935707, |
|
"eval_loss": 0.18531428277492523, |
|
"eval_precision": 0.8656379544608781, |
|
"eval_recall": 0.8576926309001761, |
|
"eval_runtime": 514.953, |
|
"eval_samples_per_second": 32.918, |
|
"eval_steps_per_second": 4.115, |
|
"step": 124800 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 1.8928424522089983e-05, |
|
"loss": 0.1947, |
|
"step": 125200 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 1.892163815822949e-05, |
|
"loss": 0.1932, |
|
"step": 125600 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 1.8914831597342277e-05, |
|
"loss": 0.1956, |
|
"step": 126000 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 1.890800485483721e-05, |
|
"loss": 0.1956, |
|
"step": 126400 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"eval_accuracy": 0.9353521352628553, |
|
"eval_f1": 0.8630423592505593, |
|
"eval_loss": 0.1859443336725235, |
|
"eval_precision": 0.8653921648081379, |
|
"eval_recall": 0.8607052800248787, |
|
"eval_runtime": 514.6128, |
|
"eval_samples_per_second": 32.939, |
|
"eval_steps_per_second": 4.118, |
|
"step": 126400 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 1.890115794616884e-05, |
|
"loss": 0.1981, |
|
"step": 126800 |
|
}, |
|
{ |
|
"epoch": 0.3, |
|
"learning_rate": 1.8894290886837366e-05, |
|
"loss": 0.1952, |
|
"step": 127200 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 1.888740369238861e-05, |
|
"loss": 0.1936, |
|
"step": 127600 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 1.8880496378413974e-05, |
|
"loss": 0.197, |
|
"step": 128000 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"eval_accuracy": 0.935512313972779, |
|
"eval_f1": 0.8624489085907296, |
|
"eval_loss": 0.18416373431682587, |
|
"eval_precision": 0.8690499890592525, |
|
"eval_recall": 0.8559473523844804, |
|
"eval_runtime": 542.9086, |
|
"eval_samples_per_second": 31.223, |
|
"eval_steps_per_second": 3.903, |
|
"step": 128000 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 1.8873568960550406e-05, |
|
"loss": 0.1995, |
|
"step": 128400 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 1.886662145448037e-05, |
|
"loss": 0.195, |
|
"step": 128800 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 1.8859653875931804e-05, |
|
"loss": 0.1935, |
|
"step": 129200 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 1.885266624067808e-05, |
|
"loss": 0.1943, |
|
"step": 129600 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"eval_accuracy": 0.9352442865998505, |
|
"eval_f1": 0.8617004186586686, |
|
"eval_loss": 0.18642759323120117, |
|
"eval_precision": 0.8679745711447103, |
|
"eval_recall": 0.8555163206121794, |
|
"eval_runtime": 566.0379, |
|
"eval_samples_per_second": 29.947, |
|
"eval_steps_per_second": 3.744, |
|
"step": 129600 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 1.8845658564537986e-05, |
|
"loss": 0.1956, |
|
"step": 130000 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 1.8838630863375674e-05, |
|
"loss": 0.1978, |
|
"step": 130400 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 1.8831583153100627e-05, |
|
"loss": 0.1965, |
|
"step": 130800 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 1.8824515449667623e-05, |
|
"loss": 0.1937, |
|
"step": 131200 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"eval_accuracy": 0.9355049990199839, |
|
"eval_f1": 0.86239672392324, |
|
"eval_loss": 0.18440395593643188, |
|
"eval_precision": 0.8676997452025099, |
|
"eval_recall": 0.857158128636116, |
|
"eval_runtime": 566.8628, |
|
"eval_samples_per_second": 29.903, |
|
"eval_steps_per_second": 3.738, |
|
"step": 131200 |
|
}, |
|
{ |
|
"epoch": 0.31, |
|
"learning_rate": 1.881742776907671e-05, |
|
"loss": 0.1946, |
|
"step": 131600 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 1.8810320127373162e-05, |
|
"loss": 0.1964, |
|
"step": 132000 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 1.8803192540647423e-05, |
|
"loss": 0.2, |
|
"step": 132400 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 1.879604502503511e-05, |
|
"loss": 0.1991, |
|
"step": 132800 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"eval_accuracy": 0.9354054031242351, |
|
"eval_f1": 0.8616404433041611, |
|
"eval_loss": 0.18495428562164307, |
|
"eval_precision": 0.873688299309574, |
|
"eval_recall": 0.8499203391545489, |
|
"eval_runtime": 538.9322, |
|
"eval_samples_per_second": 31.453, |
|
"eval_steps_per_second": 3.932, |
|
"step": 132800 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 1.8788877596716943e-05, |
|
"loss": 0.1996, |
|
"step": 133200 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 1.8781690271918728e-05, |
|
"loss": 0.1899, |
|
"step": 133600 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 1.87744830669113e-05, |
|
"loss": 0.1983, |
|
"step": 134000 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 1.8767255998010525e-05, |
|
"loss": 0.1972, |
|
"step": 134400 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"eval_accuracy": 0.9353163107504485, |
|
"eval_f1": 0.8621394384747701, |
|
"eval_loss": 0.18624624609947205, |
|
"eval_precision": 0.868391169900086, |
|
"eval_recall": 0.8559770787136045, |
|
"eval_runtime": 524.7222, |
|
"eval_samples_per_second": 32.305, |
|
"eval_steps_per_second": 4.038, |
|
"step": 134400 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 1.876000908157721e-05, |
|
"loss": 0.1916, |
|
"step": 134800 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 1.8752742334017108e-05, |
|
"loss": 0.1933, |
|
"step": 135200 |
|
}, |
|
{ |
|
"epoch": 0.32, |
|
"learning_rate": 1.874545577178087e-05, |
|
"loss": 0.1956, |
|
"step": 135600 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 1.873814941136398e-05, |
|
"loss": 0.1995, |
|
"step": 136000 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"eval_accuracy": 0.9355563912524418, |
|
"eval_f1": 0.8629284829048433, |
|
"eval_loss": 0.18578127026557922, |
|
"eval_precision": 0.8657137711865626, |
|
"eval_recall": 0.8601610595378356, |
|
"eval_runtime": 519.4595, |
|
"eval_samples_per_second": 32.632, |
|
"eval_steps_per_second": 4.079, |
|
"step": 136000 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 1.873082326930678e-05, |
|
"loss": 0.1954, |
|
"step": 136400 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 1.8723477362194356e-05, |
|
"loss": 0.1922, |
|
"step": 136800 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 1.8716111706656564e-05, |
|
"loss": 0.1968, |
|
"step": 137200 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 1.8708726319367952e-05, |
|
"loss": 0.1959, |
|
"step": 137600 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"eval_accuracy": 0.9352711080934326, |
|
"eval_f1": 0.8615832044755558, |
|
"eval_loss": 0.18546319007873535, |
|
"eval_precision": 0.8708289927919763, |
|
"eval_recall": 0.8525316828360747, |
|
"eval_runtime": 535.9179, |
|
"eval_samples_per_second": 31.63, |
|
"eval_steps_per_second": 3.954, |
|
"step": 137600 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 1.8701321217047755e-05, |
|
"loss": 0.1959, |
|
"step": 138000 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 1.8693896416459818e-05, |
|
"loss": 0.1957, |
|
"step": 138400 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 1.8686451934412596e-05, |
|
"loss": 0.1944, |
|
"step": 138800 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 1.8678987787759086e-05, |
|
"loss": 0.1963, |
|
"step": 139200 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"eval_accuracy": 0.9354489177152214, |
|
"eval_f1": 0.8628111718275653, |
|
"eval_loss": 0.1849052608013153, |
|
"eval_precision": 0.8645300109220369, |
|
"eval_recall": 0.8610991538857743, |
|
"eval_runtime": 549.6064, |
|
"eval_samples_per_second": 30.842, |
|
"eval_steps_per_second": 3.855, |
|
"step": 139200 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 1.8671503993396812e-05, |
|
"loss": 0.1959, |
|
"step": 139600 |
|
}, |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 1.8664000568267777e-05, |
|
"loss": 0.1968, |
|
"step": 140000 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 1.865647752935842e-05, |
|
"loss": 0.1939, |
|
"step": 140400 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 1.8648934893699584e-05, |
|
"loss": 0.1997, |
|
"step": 140800 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"eval_accuracy": 0.9354712376993911, |
|
"eval_f1": 0.8623423711611862, |
|
"eval_loss": 0.18563467264175415, |
|
"eval_precision": 0.8679296525071037, |
|
"eval_recall": 0.8568265657343461, |
|
"eval_runtime": 542.6057, |
|
"eval_samples_per_second": 31.24, |
|
"eval_steps_per_second": 3.905, |
|
"step": 140800 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 1.8641372678366472e-05, |
|
"loss": 0.1954, |
|
"step": 141200 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 1.8633790900478623e-05, |
|
"loss": 0.1979, |
|
"step": 141600 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 1.862618957719985e-05, |
|
"loss": 0.1928, |
|
"step": 142000 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 1.8618568725738216e-05, |
|
"loss": 0.1924, |
|
"step": 142400 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"eval_accuracy": 0.9357368267547211, |
|
"eval_f1": 0.8631844113251734, |
|
"eval_loss": 0.18370920419692993, |
|
"eval_precision": 0.8660627656525688, |
|
"eval_recall": 0.8603251260081942, |
|
"eval_runtime": 516.7709, |
|
"eval_samples_per_second": 32.802, |
|
"eval_steps_per_second": 4.1, |
|
"step": 142400 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 1.8610928363345996e-05, |
|
"loss": 0.195, |
|
"step": 142800 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 1.8603268507319634e-05, |
|
"loss": 0.1995, |
|
"step": 143200 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 1.8595589174999702e-05, |
|
"loss": 0.1961, |
|
"step": 143600 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"learning_rate": 1.8587890383770867e-05, |
|
"loss": 0.1946, |
|
"step": 144000 |
|
}, |
|
{ |
|
"epoch": 0.34, |
|
"eval_accuracy": 0.935770588075314, |
|
"eval_f1": 0.862975122807687, |
|
"eval_loss": 0.1839868724346161, |
|
"eval_precision": 0.8728422630794482, |
|
"eval_recall": 0.8533285771206734, |
|
"eval_runtime": 525.3673, |
|
"eval_samples_per_second": 32.265, |
|
"eval_steps_per_second": 4.033, |
|
"step": 144000 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 1.858017215106184e-05, |
|
"loss": 0.1932, |
|
"step": 144400 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 1.8572434494345347e-05, |
|
"loss": 0.1942, |
|
"step": 144800 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 1.8564677431138092e-05, |
|
"loss": 0.1928, |
|
"step": 145200 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 1.855690097900071e-05, |
|
"loss": 0.1922, |
|
"step": 145600 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"eval_accuracy": 0.9361505904948754, |
|
"eval_f1": 0.8638582801099173, |
|
"eval_loss": 0.18395419418811798, |
|
"eval_precision": 0.8729281380996128, |
|
"eval_recall": 0.8549749584260138, |
|
"eval_runtime": 534.3444, |
|
"eval_samples_per_second": 31.723, |
|
"eval_steps_per_second": 3.966, |
|
"step": 145600 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 1.854910515553772e-05, |
|
"loss": 0.1954, |
|
"step": 146000 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 1.8541289978397506e-05, |
|
"loss": 0.1972, |
|
"step": 146400 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 1.8533455465272262e-05, |
|
"loss": 0.1946, |
|
"step": 146800 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 1.852560163389795e-05, |
|
"loss": 0.1943, |
|
"step": 147200 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"eval_accuracy": 0.9359800958258816, |
|
"eval_f1": 0.8632278971777607, |
|
"eval_loss": 0.18454664945602417, |
|
"eval_precision": 0.8709464562732244, |
|
"eval_recall": 0.8556449441516591, |
|
"eval_runtime": 534.9156, |
|
"eval_samples_per_second": 31.689, |
|
"eval_steps_per_second": 3.961, |
|
"step": 147200 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 1.8517728502054274e-05, |
|
"loss": 0.1962, |
|
"step": 147600 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 1.850983608756462e-05, |
|
"loss": 0.1966, |
|
"step": 148000 |
|
}, |
|
{ |
|
"epoch": 0.35, |
|
"learning_rate": 1.8501924408296036e-05, |
|
"loss": 0.1988, |
|
"step": 148400 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 1.849399348215918e-05, |
|
"loss": 0.1973, |
|
"step": 148800 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"eval_accuracy": 0.9361468392370317, |
|
"eval_f1": 0.8637009753627363, |
|
"eval_loss": 0.18206927180290222, |
|
"eval_precision": 0.8684571748924279, |
|
"eval_recall": 0.8589965877604127, |
|
"eval_runtime": 536.5063, |
|
"eval_samples_per_second": 31.595, |
|
"eval_steps_per_second": 3.95, |
|
"step": 148800 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 1.8486043327108278e-05, |
|
"loss": 0.1941, |
|
"step": 149200 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 1.8478073961141087e-05, |
|
"loss": 0.1982, |
|
"step": 149600 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 1.847008540229886e-05, |
|
"loss": 0.1915, |
|
"step": 150000 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 1.8462077668666295e-05, |
|
"loss": 0.1945, |
|
"step": 150400 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"eval_accuracy": 0.9361391491584522, |
|
"eval_f1": 0.8639027567995943, |
|
"eval_loss": 0.18260230123996735, |
|
"eval_precision": 0.870098158598503, |
|
"eval_recall": 0.8577949580715845, |
|
"eval_runtime": 505.2518, |
|
"eval_samples_per_second": 33.55, |
|
"eval_steps_per_second": 4.194, |
|
"step": 150400 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 1.84540507783715e-05, |
|
"loss": 0.1902, |
|
"step": 150800 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 1.8446004749585947e-05, |
|
"loss": 0.1917, |
|
"step": 151200 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 1.8437939600524433e-05, |
|
"loss": 0.192, |
|
"step": 151600 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 1.842985534944505e-05, |
|
"loss": 0.191, |
|
"step": 152000 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"eval_accuracy": 0.936032988561477, |
|
"eval_f1": 0.8635728905416703, |
|
"eval_loss": 0.1825074702501297, |
|
"eval_precision": 0.8710259134489294, |
|
"eval_recall": 0.8562463306562488, |
|
"eval_runtime": 530.3094, |
|
"eval_samples_per_second": 31.964, |
|
"eval_steps_per_second": 3.996, |
|
"step": 152000 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 1.8421752014649122e-05, |
|
"loss": 0.1901, |
|
"step": 152400 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 1.8413629614481182e-05, |
|
"loss": 0.194, |
|
"step": 152800 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 1.8405488167328925e-05, |
|
"loss": 0.1908, |
|
"step": 153200 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 1.8397327691623155e-05, |
|
"loss": 0.1966, |
|
"step": 153600 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"eval_accuracy": 0.9357295118019261, |
|
"eval_f1": 0.8634136063216951, |
|
"eval_loss": 0.1827763170003891, |
|
"eval_precision": 0.8659648513467977, |
|
"eval_recall": 0.8608773497376937, |
|
"eval_runtime": 530.9715, |
|
"eval_samples_per_second": 31.925, |
|
"eval_steps_per_second": 3.991, |
|
"step": 153600 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 1.838914820583777e-05, |
|
"loss": 0.1951, |
|
"step": 154000 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 1.838094972848968e-05, |
|
"loss": 0.189, |
|
"step": 154400 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 1.837273227813882e-05, |
|
"loss": 0.1881, |
|
"step": 154800 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 1.836449587338804e-05, |
|
"loss": 0.1917, |
|
"step": 155200 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"eval_accuracy": 0.9357989100720335, |
|
"eval_f1": 0.8633634176002423, |
|
"eval_loss": 0.18302373588085175, |
|
"eval_precision": 0.8659402350091523, |
|
"eval_recall": 0.8608018905945323, |
|
"eval_runtime": 557.0907, |
|
"eval_samples_per_second": 30.428, |
|
"eval_steps_per_second": 3.804, |
|
"step": 155200 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 1.8356240532883133e-05, |
|
"loss": 0.1948, |
|
"step": 155600 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 1.8347966275312733e-05, |
|
"loss": 0.1952, |
|
"step": 156000 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 1.8339673119408318e-05, |
|
"loss": 0.1956, |
|
"step": 156400 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 1.8331361083944142e-05, |
|
"loss": 0.1875, |
|
"step": 156800 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"eval_accuracy": 0.9362734441892547, |
|
"eval_f1": 0.8639983755677102, |
|
"eval_loss": 0.1830715537071228, |
|
"eval_precision": 0.8694183198118101, |
|
"eval_recall": 0.8586455884126769, |
|
"eval_runtime": 551.1816, |
|
"eval_samples_per_second": 30.754, |
|
"eval_steps_per_second": 3.844, |
|
"step": 156800 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 1.8323030187737195e-05, |
|
"loss": 0.1979, |
|
"step": 157200 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 1.8314680449647167e-05, |
|
"loss": 0.1938, |
|
"step": 157600 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 1.8306311888576398e-05, |
|
"loss": 0.1904, |
|
"step": 158000 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 1.8297924523469857e-05, |
|
"loss": 0.1958, |
|
"step": 158400 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"eval_accuracy": 0.9363338394405374, |
|
"eval_f1": 0.8645095772199598, |
|
"eval_loss": 0.18189725279808044, |
|
"eval_precision": 0.8681702593276495, |
|
"eval_recall": 0.8608796363783956, |
|
"eval_runtime": 646.8836, |
|
"eval_samples_per_second": 26.204, |
|
"eval_steps_per_second": 3.276, |
|
"step": 158400 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 1.828951837331506e-05, |
|
"loss": 0.189, |
|
"step": 158800 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 1.8281093457142063e-05, |
|
"loss": 0.1912, |
|
"step": 159200 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 1.8272649794023402e-05, |
|
"loss": 0.1985, |
|
"step": 159600 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 1.826418740307405e-05, |
|
"loss": 0.1941, |
|
"step": 160000 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"eval_accuracy": 0.9358709342226316, |
|
"eval_f1": 0.8637889298690363, |
|
"eval_loss": 0.18294936418533325, |
|
"eval_precision": 0.8642926925865917, |
|
"eval_recall": 0.8632857540569293, |
|
"eval_runtime": 569.8258, |
|
"eval_samples_per_second": 29.748, |
|
"eval_steps_per_second": 3.719, |
|
"step": 160000 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 1.8255706303451378e-05, |
|
"loss": 0.1937, |
|
"step": 160400 |
|
}, |
|
{ |
|
"epoch": 0.38, |
|
"learning_rate": 1.8247206514355116e-05, |
|
"loss": 0.1921, |
|
"step": 160800 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 1.823868805502729e-05, |
|
"loss": 0.1932, |
|
"step": 161200 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 1.8230150944752205e-05, |
|
"loss": 0.1911, |
|
"step": 161600 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"eval_accuracy": 0.9365847985902773, |
|
"eval_f1": 0.864502451122147, |
|
"eval_loss": 0.18319186568260193, |
|
"eval_precision": 0.870321125154688, |
|
"eval_recall": 0.8587610637681209, |
|
"eval_runtime": 567.6286, |
|
"eval_samples_per_second": 29.863, |
|
"eval_steps_per_second": 3.733, |
|
"step": 161600 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 1.8221595202856383e-05, |
|
"loss": 0.1879, |
|
"step": 162000 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 1.821302084870853e-05, |
|
"loss": 0.1944, |
|
"step": 162400 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 1.8204427901719473e-05, |
|
"loss": 0.1942, |
|
"step": 162800 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 1.8195816381342147e-05, |
|
"loss": 0.1906, |
|
"step": 163200 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"eval_accuracy": 0.9366063683228782, |
|
"eval_f1": 0.8649281595185726, |
|
"eval_loss": 0.18157990276813507, |
|
"eval_precision": 0.8704162976085031, |
|
"eval_recall": 0.8595087952776296, |
|
"eval_runtime": 562.2656, |
|
"eval_samples_per_second": 30.148, |
|
"eval_steps_per_second": 3.769, |
|
"step": 163200 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 1.8187186307071527e-05, |
|
"loss": 0.1945, |
|
"step": 163600 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 1.8178537698444585e-05, |
|
"loss": 0.1924, |
|
"step": 164000 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 1.816987057504026e-05, |
|
"loss": 0.1952, |
|
"step": 164400 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"learning_rate": 1.81611849564794e-05, |
|
"loss": 0.1932, |
|
"step": 164800 |
|
}, |
|
{ |
|
"epoch": 0.39, |
|
"eval_accuracy": 0.936222239519689, |
|
"eval_f1": 0.8640641150200202, |
|
"eval_loss": 0.18193493783473969, |
|
"eval_precision": 0.8684520382536728, |
|
"eval_recall": 0.8597203095425519, |
|
"eval_runtime": 547.6597, |
|
"eval_samples_per_second": 30.952, |
|
"eval_steps_per_second": 3.869, |
|
"step": 164800 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 1.815248086242473e-05, |
|
"loss": 0.1947, |
|
"step": 165200 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 1.8143758312580784e-05, |
|
"loss": 0.1908, |
|
"step": 165600 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 1.8135017326693897e-05, |
|
"loss": 0.1924, |
|
"step": 166000 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 1.8126257924552127e-05, |
|
"loss": 0.1936, |
|
"step": 166400 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"eval_accuracy": 0.9360403035142721, |
|
"eval_f1": 0.8639651593480756, |
|
"eval_loss": 0.18314072489738464, |
|
"eval_precision": 0.8639997347284483, |
|
"eval_recall": 0.8639305867348543, |
|
"eval_runtime": 545.0012, |
|
"eval_samples_per_second": 31.103, |
|
"eval_steps_per_second": 3.888, |
|
"step": 166400 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 1.811748012598523e-05, |
|
"loss": 0.1943, |
|
"step": 166800 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 1.8108683950864602e-05, |
|
"loss": 0.1909, |
|
"step": 167200 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 1.8099869419103243e-05, |
|
"loss": 0.192, |
|
"step": 167600 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 1.809103655065571e-05, |
|
"loss": 0.1942, |
|
"step": 168000 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"eval_accuracy": 0.9364471274274155, |
|
"eval_f1": 0.8637520176336846, |
|
"eval_loss": 0.18178768455982208, |
|
"eval_precision": 0.8742628367849934, |
|
"eval_recall": 0.8534909286105056, |
|
"eval_runtime": 542.463, |
|
"eval_samples_per_second": 31.248, |
|
"eval_steps_per_second": 3.906, |
|
"step": 168000 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 1.8082185365518075e-05, |
|
"loss": 0.1882, |
|
"step": 168400 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 1.8073315883727868e-05, |
|
"loss": 0.1957, |
|
"step": 168800 |
|
}, |
|
{ |
|
"epoch": 0.4, |
|
"learning_rate": 1.8064428125364037e-05, |
|
"loss": 0.1957, |
|
"step": 169200 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 1.805552211054692e-05, |
|
"loss": 0.1948, |
|
"step": 169600 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"eval_accuracy": 0.9365275919081617, |
|
"eval_f1": 0.8646494279078834, |
|
"eval_loss": 0.18111450970172882, |
|
"eval_precision": 0.8697243196192007, |
|
"eval_recall": 0.859633417195881, |
|
"eval_runtime": 546.1161, |
|
"eval_samples_per_second": 31.039, |
|
"eval_steps_per_second": 3.88, |
|
"step": 169600 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 1.8046597859438174e-05, |
|
"loss": 0.1919, |
|
"step": 170000 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 1.8037655392240733e-05, |
|
"loss": 0.1933, |
|
"step": 170400 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 1.8028694729198788e-05, |
|
"loss": 0.1919, |
|
"step": 170800 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 1.80197158905977e-05, |
|
"loss": 0.1945, |
|
"step": 171200 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"eval_accuracy": 0.9363854192358876, |
|
"eval_f1": 0.8642587219547421, |
|
"eval_loss": 0.18125833570957184, |
|
"eval_precision": 0.8699469003176251, |
|
"eval_recall": 0.858644445092326, |
|
"eval_runtime": 563.7361, |
|
"eval_samples_per_second": 30.069, |
|
"eval_steps_per_second": 3.759, |
|
"step": 171200 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 1.8010718896764e-05, |
|
"loss": 0.1929, |
|
"step": 171600 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 1.8001703768065294e-05, |
|
"loss": 0.1932, |
|
"step": 172000 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 1.7992670524910264e-05, |
|
"loss": 0.1901, |
|
"step": 172400 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 1.798361918774859e-05, |
|
"loss": 0.197, |
|
"step": 172800 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"eval_accuracy": 0.9366748287785248, |
|
"eval_f1": 0.8645392167846061, |
|
"eval_loss": 0.18077276647090912, |
|
"eval_precision": 0.8734207936301429, |
|
"eval_recall": 0.8558364503104401, |
|
"eval_runtime": 532.1714, |
|
"eval_samples_per_second": 31.853, |
|
"eval_steps_per_second": 3.982, |
|
"step": 172800 |
|
}, |
|
{ |
|
"epoch": 0.41, |
|
"learning_rate": 1.7974549777070905e-05, |
|
"loss": 0.1925, |
|
"step": 173200 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 1.7965462313408783e-05, |
|
"loss": 0.1909, |
|
"step": 173600 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 1.7956356817334637e-05, |
|
"loss": 0.1911, |
|
"step": 174000 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 1.7947233309461722e-05, |
|
"loss": 0.1908, |
|
"step": 174400 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"eval_accuracy": 0.936513524691248, |
|
"eval_f1": 0.8640832130522675, |
|
"eval_loss": 0.18064941465854645, |
|
"eval_precision": 0.8722351266471586, |
|
"eval_recall": 0.8560822641858902, |
|
"eval_runtime": 534.2706, |
|
"eval_samples_per_second": 31.727, |
|
"eval_steps_per_second": 3.966, |
|
"step": 174400 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 1.793809181044406e-05, |
|
"loss": 0.1863, |
|
"step": 174800 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 1.7928932340976407e-05, |
|
"loss": 0.19, |
|
"step": 175200 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 1.7919754921794198e-05, |
|
"loss": 0.1859, |
|
"step": 175600 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 1.79105595736735e-05, |
|
"loss": 0.198, |
|
"step": 176000 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"eval_accuracy": 0.9366624496276408, |
|
"eval_f1": 0.8649571843107045, |
|
"eval_loss": 0.18033598363399506, |
|
"eval_precision": 0.8705677496166515, |
|
"eval_recall": 0.8594184729699061, |
|
"eval_runtime": 538.4171, |
|
"eval_samples_per_second": 31.483, |
|
"eval_steps_per_second": 3.936, |
|
"step": 176000 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 1.790134631743097e-05, |
|
"loss": 0.1888, |
|
"step": 176400 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 1.789211517392381e-05, |
|
"loss": 0.195, |
|
"step": 176800 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 1.788286616404972e-05, |
|
"loss": 0.1925, |
|
"step": 177200 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"learning_rate": 1.7873599308746825e-05, |
|
"loss": 0.1919, |
|
"step": 177600 |
|
}, |
|
{ |
|
"epoch": 0.42, |
|
"eval_accuracy": 0.9364557553204559, |
|
"eval_f1": 0.863988686121461, |
|
"eval_loss": 0.18185955286026, |
|
"eval_precision": 0.8746926616656756, |
|
"eval_recall": 0.8535435213466485, |
|
"eval_runtime": 613.3691, |
|
"eval_samples_per_second": 27.636, |
|
"eval_steps_per_second": 3.455, |
|
"step": 177600 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 1.786431462899367e-05, |
|
"loss": 0.1909, |
|
"step": 178000 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 1.7855012145809146e-05, |
|
"loss": 0.1914, |
|
"step": 178400 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 1.7845691880252437e-05, |
|
"loss": 0.1896, |
|
"step": 178800 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 1.7836353853423007e-05, |
|
"loss": 0.1933, |
|
"step": 179200 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"eval_accuracy": 0.93679130533457, |
|
"eval_f1": 0.8654764033546585, |
|
"eval_loss": 0.17977716028690338, |
|
"eval_precision": 0.8691162737072693, |
|
"eval_recall": 0.8618668935014243, |
|
"eval_runtime": 603.7349, |
|
"eval_samples_per_second": 28.077, |
|
"eval_steps_per_second": 3.51, |
|
"step": 179200 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 1.7826998086460504e-05, |
|
"loss": 0.1934, |
|
"step": 179600 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 1.7817624600544746e-05, |
|
"loss": 0.1917, |
|
"step": 180000 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 1.7808233416895668e-05, |
|
"loss": 0.1922, |
|
"step": 180400 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 1.7798824556773264e-05, |
|
"loss": 0.1936, |
|
"step": 180800 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"eval_accuracy": 0.9367671097214785, |
|
"eval_f1": 0.8649637293034157, |
|
"eval_loss": 0.18049688637256622, |
|
"eval_precision": 0.8702838022862976, |
|
"eval_recall": 0.859708304678867, |
|
"eval_runtime": 588.8741, |
|
"eval_samples_per_second": 28.785, |
|
"eval_steps_per_second": 3.598, |
|
"step": 180800 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 1.7789398041477544e-05, |
|
"loss": 0.1893, |
|
"step": 181200 |
|
}, |
|
{ |
|
"epoch": 0.43, |
|
"learning_rate": 1.7779953892348492e-05, |
|
"loss": 0.1887, |
|
"step": 181600 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 1.7770492130766004e-05, |
|
"loss": 0.1904, |
|
"step": 182000 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 1.7761012778149854e-05, |
|
"loss": 0.1913, |
|
"step": 182400 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"eval_accuracy": 0.9366129330241046, |
|
"eval_f1": 0.8648489691729236, |
|
"eval_loss": 0.18060122430324554, |
|
"eval_precision": 0.8698051251569123, |
|
"eval_recall": 0.8599489736127379, |
|
"eval_runtime": 625.1723, |
|
"eval_samples_per_second": 27.114, |
|
"eval_steps_per_second": 3.389, |
|
"step": 182400 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 1.7751515855959636e-05, |
|
"loss": 0.1908, |
|
"step": 182800 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 1.7742001385694714e-05, |
|
"loss": 0.1893, |
|
"step": 183200 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 1.7732469388894197e-05, |
|
"loss": 0.1935, |
|
"step": 183600 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 1.7722919887136845e-05, |
|
"loss": 0.1918, |
|
"step": 184000 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"eval_accuracy": 0.9365904254770427, |
|
"eval_f1": 0.8649240640386249, |
|
"eval_loss": 0.18266482651233673, |
|
"eval_precision": 0.8677856728538283, |
|
"eval_recall": 0.8620812660672238, |
|
"eval_runtime": 645.6309, |
|
"eval_samples_per_second": 26.255, |
|
"eval_steps_per_second": 3.282, |
|
"step": 184000 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 1.771335290204106e-05, |
|
"loss": 0.1893, |
|
"step": 184400 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 1.770376845526482e-05, |
|
"loss": 0.1918, |
|
"step": 184800 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 1.7694166568505635e-05, |
|
"loss": 0.1941, |
|
"step": 185200 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 1.76845472635005e-05, |
|
"loss": 0.1939, |
|
"step": 185600 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"eval_accuracy": 0.9371011592324552, |
|
"eval_f1": 0.866086878576331, |
|
"eval_loss": 0.1795293688774109, |
|
"eval_precision": 0.8705601112621765, |
|
"eval_recall": 0.8616593808577303, |
|
"eval_runtime": 625.6577, |
|
"eval_samples_per_second": 27.093, |
|
"eval_steps_per_second": 3.387, |
|
"step": 185600 |
|
}, |
|
{ |
|
"epoch": 0.44, |
|
"learning_rate": 1.7674910562025832e-05, |
|
"loss": 0.1885, |
|
"step": 186000 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 1.766525648589744e-05, |
|
"loss": 0.1904, |
|
"step": 186400 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 1.765558505697046e-05, |
|
"loss": 0.1882, |
|
"step": 186800 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 1.7645896297139313e-05, |
|
"loss": 0.1902, |
|
"step": 187200 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"eval_accuracy": 0.936948670601111, |
|
"eval_f1": 0.8656596062001887, |
|
"eval_loss": 0.1790514886379242, |
|
"eval_precision": 0.8710032304025908, |
|
"eval_recall": 0.8603811487053897, |
|
"eval_runtime": 652.9854, |
|
"eval_samples_per_second": 25.959, |
|
"eval_steps_per_second": 3.245, |
|
"step": 187200 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 1.763619022833765e-05, |
|
"loss": 0.1915, |
|
"step": 187600 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 1.7626466872538324e-05, |
|
"loss": 0.1916, |
|
"step": 188000 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 1.76167262517533e-05, |
|
"loss": 0.1922, |
|
"step": 188400 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 1.7606968388033645e-05, |
|
"loss": 0.1911, |
|
"step": 188800 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"eval_accuracy": 0.9366885208696542, |
|
"eval_f1": 0.8642576577531772, |
|
"eval_loss": 0.18099896609783173, |
|
"eval_precision": 0.8760903476191483, |
|
"eval_recall": 0.8527403388001196, |
|
"eval_runtime": 670.0987, |
|
"eval_samples_per_second": 25.296, |
|
"eval_steps_per_second": 3.162, |
|
"step": 188800 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 1.7597193303469444e-05, |
|
"loss": 0.1934, |
|
"step": 189200 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 1.7587401020189786e-05, |
|
"loss": 0.1926, |
|
"step": 189600 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 1.757759156036268e-05, |
|
"loss": 0.1897, |
|
"step": 190000 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 1.7567764946195033e-05, |
|
"loss": 0.1909, |
|
"step": 190400 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"eval_accuracy": 0.9366526963572472, |
|
"eval_f1": 0.86432857336827, |
|
"eval_loss": 0.1799914687871933, |
|
"eval_precision": 0.8736855507385446, |
|
"eval_recall": 0.8551698945458475, |
|
"eval_runtime": 664.8848, |
|
"eval_samples_per_second": 25.495, |
|
"eval_steps_per_second": 3.187, |
|
"step": 190400 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 1.755792119993257e-05, |
|
"loss": 0.1936, |
|
"step": 190800 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 1.7548060343859818e-05, |
|
"loss": 0.1869, |
|
"step": 191200 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 1.7538182400300017e-05, |
|
"loss": 0.1945, |
|
"step": 191600 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 1.7528287391615113e-05, |
|
"loss": 0.1905, |
|
"step": 192000 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"eval_accuracy": 0.9369248501138038, |
|
"eval_f1": 0.8658738945973239, |
|
"eval_loss": 0.1806378960609436, |
|
"eval_precision": 0.8675803751442674, |
|
"eval_recall": 0.8641741139696025, |
|
"eval_runtime": 682.7164, |
|
"eval_samples_per_second": 24.829, |
|
"eval_steps_per_second": 3.104, |
|
"step": 192000 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 1.7518375340205667e-05, |
|
"loss": 0.1918, |
|
"step": 192400 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 1.750844626851083e-05, |
|
"loss": 0.1871, |
|
"step": 192800 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 1.7498500199008282e-05, |
|
"loss": 0.192, |
|
"step": 193200 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 1.748853715421418e-05, |
|
"loss": 0.1878, |
|
"step": 193600 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"eval_accuracy": 0.9369516716073859, |
|
"eval_f1": 0.8655718913158194, |
|
"eval_loss": 0.17844319343566895, |
|
"eval_precision": 0.8713492333368283, |
|
"eval_recall": 0.8598706561686992, |
|
"eval_runtime": 709.4665, |
|
"eval_samples_per_second": 23.893, |
|
"eval_steps_per_second": 2.987, |
|
"step": 193600 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 1.747855715668312e-05, |
|
"loss": 0.1912, |
|
"step": 194000 |
|
}, |
|
{ |
|
"epoch": 0.46, |
|
"learning_rate": 1.746856022900806e-05, |
|
"loss": 0.1878, |
|
"step": 194400 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 1.7458546393820303e-05, |
|
"loss": 0.1892, |
|
"step": 194800 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 1.744851567378941e-05, |
|
"loss": 0.1929, |
|
"step": 195200 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"eval_accuracy": 0.9370535182578408, |
|
"eval_f1": 0.8652750536627823, |
|
"eval_loss": 0.17946113646030426, |
|
"eval_precision": 0.8731046981834436, |
|
"eval_recall": 0.8575845871270131, |
|
"eval_runtime": 728.3318, |
|
"eval_samples_per_second": 23.274, |
|
"eval_steps_per_second": 2.909, |
|
"step": 195200 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 1.7438468091623176e-05, |
|
"loss": 0.1885, |
|
"step": 195600 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 1.7428403670067577e-05, |
|
"loss": 0.1899, |
|
"step": 196000 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 1.741832243190669e-05, |
|
"loss": 0.1841, |
|
"step": 196400 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 1.7408224399962682e-05, |
|
"loss": 0.1923, |
|
"step": 196800 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"eval_accuracy": 0.9366226862944981, |
|
"eval_f1": 0.865599699190374, |
|
"eval_loss": 0.17995527386665344, |
|
"eval_precision": 0.865276816994431, |
|
"eval_recall": 0.8659228224463511, |
|
"eval_runtime": 739.6018, |
|
"eval_samples_per_second": 22.919, |
|
"eval_steps_per_second": 2.865, |
|
"step": 196800 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 1.7398109597095724e-05, |
|
"loss": 0.1878, |
|
"step": 197200 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 1.7387978046203957e-05, |
|
"loss": 0.1901, |
|
"step": 197600 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 1.737782977022344e-05, |
|
"loss": 0.188, |
|
"step": 198000 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 1.7367664792128097e-05, |
|
"loss": 0.1934, |
|
"step": 198400 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"eval_accuracy": 0.936658510806905, |
|
"eval_f1": 0.8654177974587316, |
|
"eval_loss": 0.1802603155374527, |
|
"eval_precision": 0.8659475535042171, |
|
"eval_recall": 0.8648886891889342, |
|
"eval_runtime": 719.4576, |
|
"eval_samples_per_second": 23.561, |
|
"eval_steps_per_second": 2.945, |
|
"step": 198400 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 1.735748313492966e-05, |
|
"loss": 0.1918, |
|
"step": 198800 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 1.7347284821677608e-05, |
|
"loss": 0.1908, |
|
"step": 199200 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 1.733706987545914e-05, |
|
"loss": 0.1893, |
|
"step": 199600 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 1.7326838319399115e-05, |
|
"loss": 0.1896, |
|
"step": 200000 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"eval_accuracy": 0.9369638631953777, |
|
"eval_f1": 0.865953863991919, |
|
"eval_loss": 0.17965461313724518, |
|
"eval_precision": 0.8701607773464061, |
|
"eval_recall": 0.8617874327370346, |
|
"eval_runtime": 772.5348, |
|
"eval_samples_per_second": 21.942, |
|
"eval_steps_per_second": 2.743, |
|
"step": 200000 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 1.731659017665998e-05, |
|
"loss": 0.1943, |
|
"step": 200400 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 1.730632547044173e-05, |
|
"loss": 0.1912, |
|
"step": 200800 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 1.7296044223981863e-05, |
|
"loss": 0.1904, |
|
"step": 201200 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 1.728574646055533e-05, |
|
"loss": 0.1887, |
|
"step": 201600 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"eval_accuracy": 0.9371735585088375, |
|
"eval_f1": 0.8660090435166752, |
|
"eval_loss": 0.17885449528694153, |
|
"eval_precision": 0.8714266282720998, |
|
"eval_recall": 0.8606584038904905, |
|
"eval_runtime": 770.9132, |
|
"eval_samples_per_second": 21.988, |
|
"eval_steps_per_second": 2.749, |
|
"step": 201600 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 1.7275432203474453e-05, |
|
"loss": 0.1899, |
|
"step": 202000 |
|
}, |
|
{ |
|
"epoch": 0.48, |
|
"learning_rate": 1.7265101476088913e-05, |
|
"loss": 0.1905, |
|
"step": 202400 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 1.7254754301785663e-05, |
|
"loss": 0.1882, |
|
"step": 202800 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 1.7244390703988896e-05, |
|
"loss": 0.1866, |
|
"step": 203200 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"eval_accuracy": 0.9372767180995377, |
|
"eval_f1": 0.8662844568320104, |
|
"eval_loss": 0.17921319603919983, |
|
"eval_precision": 0.8716134143426825, |
|
"eval_recall": 0.86102026478156, |
|
"eval_runtime": 806.4299, |
|
"eval_samples_per_second": 21.02, |
|
"eval_steps_per_second": 2.628, |
|
"step": 203200 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 1.723401070615998e-05, |
|
"loss": 0.1922, |
|
"step": 203600 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 1.722361433179742e-05, |
|
"loss": 0.1918, |
|
"step": 204000 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 1.721320160443678e-05, |
|
"loss": 0.189, |
|
"step": 204400 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 1.7202772547650654e-05, |
|
"loss": 0.1902, |
|
"step": 204800 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"eval_accuracy": 0.937223450238158, |
|
"eval_f1": 0.8663150342148309, |
|
"eval_loss": 0.17978140711784363, |
|
"eval_precision": 0.8719854843408215, |
|
"eval_recall": 0.8607178565487389, |
|
"eval_runtime": 795.8867, |
|
"eval_samples_per_second": 21.298, |
|
"eval_steps_per_second": 2.662, |
|
"step": 204800 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 1.71923271850486e-05, |
|
"loss": 0.1889, |
|
"step": 205200 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 1.7181865540277096e-05, |
|
"loss": 0.1862, |
|
"step": 205600 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 1.717138763701946e-05, |
|
"loss": 0.1918, |
|
"step": 206000 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 1.7160893498995845e-05, |
|
"loss": 0.1909, |
|
"step": 206400 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"eval_accuracy": 0.9371424230687352, |
|
"eval_f1": 0.8660603850087962, |
|
"eval_loss": 0.18042324483394623, |
|
"eval_precision": 0.8715986153319735, |
|
"eval_recall": 0.8605920913101365, |
|
"eval_runtime": 808.6294, |
|
"eval_samples_per_second": 20.963, |
|
"eval_steps_per_second": 2.62, |
|
"step": 206400 |
|
}, |
|
{ |
|
"epoch": 0.49, |
|
"learning_rate": 1.7150383149963136e-05, |
|
"loss": 0.1867, |
|
"step": 206800 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 1.713985661371492e-05, |
|
"loss": 0.187, |
|
"step": 207200 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 1.7129313914081437e-05, |
|
"loss": 0.1889, |
|
"step": 207600 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 1.7118755074929513e-05, |
|
"loss": 0.1888, |
|
"step": 208000 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"eval_accuracy": 0.937109224436819, |
|
"eval_f1": 0.8663811783329459, |
|
"eval_loss": 0.1801934689283371, |
|
"eval_precision": 0.8705300872166842, |
|
"eval_recall": 0.8622716289056538, |
|
"eval_runtime": 850.4864, |
|
"eval_samples_per_second": 19.931, |
|
"eval_steps_per_second": 2.492, |
|
"step": 208000 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 1.7108180120162508e-05, |
|
"loss": 0.1858, |
|
"step": 208400 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 1.709758907372027e-05, |
|
"loss": 0.1876, |
|
"step": 208800 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 1.708698195957907e-05, |
|
"loss": 0.19, |
|
"step": 209200 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 1.7076358801751562e-05, |
|
"loss": 0.1937, |
|
"step": 209600 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"eval_accuracy": 0.937145236512118, |
|
"eval_f1": 0.8656761084273905, |
|
"eval_loss": 0.17913849651813507, |
|
"eval_precision": 0.8749678837367915, |
|
"eval_recall": 0.8565796085385451, |
|
"eval_runtime": 879.9677, |
|
"eval_samples_per_second": 19.263, |
|
"eval_steps_per_second": 2.408, |
|
"step": 209600 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 1.7065719624286715e-05, |
|
"loss": 0.1877, |
|
"step": 210000 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 1.7055064451269763e-05, |
|
"loss": 0.1896, |
|
"step": 210400 |
|
}, |
|
{ |
|
"epoch": 0.5, |
|
"learning_rate": 1.704439330682215e-05, |
|
"loss": 0.1867, |
|
"step": 210800 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 1.7033706215101485e-05, |
|
"loss": 0.1883, |
|
"step": 211200 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"eval_accuracy": 0.9368340696739875, |
|
"eval_f1": 0.8663467804242857, |
|
"eval_loss": 0.17967061698436737, |
|
"eval_precision": 0.8647642969633281, |
|
"eval_recall": 0.8679350662639892, |
|
"eval_runtime": 911.8125, |
|
"eval_samples_per_second": 18.59, |
|
"eval_steps_per_second": 2.324, |
|
"step": 211200 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 1.7023003200301463e-05, |
|
"loss": 0.1896, |
|
"step": 211600 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 1.7012284286651842e-05, |
|
"loss": 0.1902, |
|
"step": 212000 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 1.7001549498418364e-05, |
|
"loss": 0.1887, |
|
"step": 212400 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 1.6990798859902713e-05, |
|
"loss": 0.1901, |
|
"step": 212800 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"eval_accuracy": 0.9370465784308301, |
|
"eval_f1": 0.8653579989788354, |
|
"eval_loss": 0.17951498925685883, |
|
"eval_precision": 0.8734082205085891, |
|
"eval_recall": 0.8574548202671826, |
|
"eval_runtime": 916.9521, |
|
"eval_samples_per_second": 18.486, |
|
"eval_steps_per_second": 2.311, |
|
"step": 212800 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 1.6980032395442445e-05, |
|
"loss": 0.1894, |
|
"step": 213200 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 1.6969250129410958e-05, |
|
"loss": 0.1874, |
|
"step": 213600 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 1.6958452086217413e-05, |
|
"loss": 0.1854, |
|
"step": 214000 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 1.6947638290306683e-05, |
|
"loss": 0.1847, |
|
"step": 214400 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"eval_accuracy": 0.9374852880356445, |
|
"eval_f1": 0.8669684104342598, |
|
"eval_loss": 0.17802837491035461, |
|
"eval_precision": 0.8702301024002046, |
|
"eval_recall": 0.8637310773336169, |
|
"eval_runtime": 927.1123, |
|
"eval_samples_per_second": 18.284, |
|
"eval_steps_per_second": 2.286, |
|
"step": 214400 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 1.6936808766159318e-05, |
|
"loss": 0.1914, |
|
"step": 214800 |
|
}, |
|
{ |
|
"epoch": 0.51, |
|
"learning_rate": 1.6925963538291456e-05, |
|
"loss": 0.1895, |
|
"step": 215200 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 1.69151026312548e-05, |
|
"loss": 0.1883, |
|
"step": 215600 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 1.6904226069636538e-05, |
|
"loss": 0.189, |
|
"step": 216000 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"eval_accuracy": 0.9375539360541831, |
|
"eval_f1": 0.8663820965537621, |
|
"eval_loss": 0.17785754799842834, |
|
"eval_precision": 0.8747725157672869, |
|
"eval_recall": 0.8581511023608993, |
|
"eval_runtime": 980.242, |
|
"eval_samples_per_second": 17.293, |
|
"eval_steps_per_second": 2.162, |
|
"step": 216000 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 1.6893333878059303e-05, |
|
"loss": 0.1941, |
|
"step": 216400 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 1.6882426081181112e-05, |
|
"loss": 0.1939, |
|
"step": 216800 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 1.6871502703695303e-05, |
|
"loss": 0.1829, |
|
"step": 217200 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 1.686056377033049e-05, |
|
"loss": 0.1865, |
|
"step": 217600 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"eval_accuracy": 0.9374036981775452, |
|
"eval_f1": 0.8663718082660126, |
|
"eval_loss": 0.17854823172092438, |
|
"eval_precision": 0.8707146024546055, |
|
"eval_recall": 0.8620721195044163, |
|
"eval_runtime": 997.5773, |
|
"eval_samples_per_second": 16.992, |
|
"eval_steps_per_second": 2.124, |
|
"step": 217600 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 1.684960930585051e-05, |
|
"loss": 0.1893, |
|
"step": 218000 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 1.683863933505434e-05, |
|
"loss": 0.1868, |
|
"step": 218400 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 1.6827653882776087e-05, |
|
"loss": 0.1907, |
|
"step": 218800 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"learning_rate": 1.6816652973884882e-05, |
|
"loss": 0.188, |
|
"step": 219200 |
|
}, |
|
{ |
|
"epoch": 0.52, |
|
"eval_accuracy": 0.9375256140574636, |
|
"eval_f1": 0.8670440634281203, |
|
"eval_loss": 0.1784038245677948, |
|
"eval_precision": 0.8698775326077199, |
|
"eval_recall": 0.8642289933464472, |
|
"eval_runtime": 974.6353, |
|
"eval_samples_per_second": 17.392, |
|
"eval_steps_per_second": 2.174, |
|
"step": 219200 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 1.680563663328486e-05, |
|
"loss": 0.1914, |
|
"step": 219600 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 1.6794604885915088e-05, |
|
"loss": 0.187, |
|
"step": 220000 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 1.678355775674951e-05, |
|
"loss": 0.1845, |
|
"step": 220400 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 1.6772495270796886e-05, |
|
"loss": 0.1895, |
|
"step": 220800 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"eval_accuracy": 0.937413826573723, |
|
"eval_f1": 0.8668241753012625, |
|
"eval_loss": 0.17992745339870453, |
|
"eval_precision": 0.8693401257478316, |
|
"eval_recall": 0.8643227456152235, |
|
"eval_runtime": 1079.3638, |
|
"eval_samples_per_second": 15.705, |
|
"eval_steps_per_second": 1.963, |
|
"step": 220800 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 1.6761417453100758e-05, |
|
"loss": 0.1858, |
|
"step": 221200 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 1.675032432873936e-05, |
|
"loss": 0.1861, |
|
"step": 221600 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 1.6739215922825582e-05, |
|
"loss": 0.1891, |
|
"step": 222000 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 1.6728092260506916e-05, |
|
"loss": 0.1863, |
|
"step": 222400 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"eval_accuracy": 0.9374770352683884, |
|
"eval_f1": 0.8666634278465134, |
|
"eval_loss": 0.18013346195220947, |
|
"eval_precision": 0.8716621697732432, |
|
"eval_recall": 0.8617216918168561, |
|
"eval_runtime": 1165.6574, |
|
"eval_samples_per_second": 14.542, |
|
"eval_steps_per_second": 1.818, |
|
"step": 222400 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 1.6716953366965377e-05, |
|
"loss": 0.1906, |
|
"step": 222800 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 1.670579926741748e-05, |
|
"loss": 0.1879, |
|
"step": 223200 |
|
}, |
|
{ |
|
"epoch": 0.53, |
|
"learning_rate": 1.6694629987114145e-05, |
|
"loss": 0.1915, |
|
"step": 223600 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 1.668344555134067e-05, |
|
"loss": 0.1903, |
|
"step": 224000 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"eval_accuracy": 0.9370158181165122, |
|
"eval_f1": 0.864867511175318, |
|
"eval_loss": 0.18029174208641052, |
|
"eval_precision": 0.8766908082051474, |
|
"eval_recall": 0.8533588751099731, |
|
"eval_runtime": 1162.7051, |
|
"eval_samples_per_second": 14.579, |
|
"eval_steps_per_second": 1.822, |
|
"step": 224000 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 1.6672245985416656e-05, |
|
"loss": 0.1882, |
|
"step": 224400 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 1.6661031314695966e-05, |
|
"loss": 0.1873, |
|
"step": 224800 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 1.6649801564566647e-05, |
|
"loss": 0.1898, |
|
"step": 225200 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 1.663855676045089e-05, |
|
"loss": 0.1875, |
|
"step": 225600 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"eval_accuracy": 0.9376805410064062, |
|
"eval_f1": 0.8673769183029066, |
|
"eval_loss": 0.1786891371011734, |
|
"eval_precision": 0.8699072647233351, |
|
"eval_recall": 0.8648612495005119, |
|
"eval_runtime": 1219.9375, |
|
"eval_samples_per_second": 13.895, |
|
"eval_steps_per_second": 1.737, |
|
"step": 225600 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 1.6627296927804954e-05, |
|
"loss": 0.1902, |
|
"step": 226000 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 1.661602209211914e-05, |
|
"loss": 0.1876, |
|
"step": 226400 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 1.6604732278917695e-05, |
|
"loss": 0.1859, |
|
"step": 226800 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 1.659342751375878e-05, |
|
"loss": 0.188, |
|
"step": 227200 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"eval_accuracy": 0.9376726633649346, |
|
"eval_f1": 0.8671941772296538, |
|
"eval_loss": 0.17850236594676971, |
|
"eval_precision": 0.8710525981883218, |
|
"eval_recall": 0.8633697881027228, |
|
"eval_runtime": 1365.932, |
|
"eval_samples_per_second": 12.41, |
|
"eval_steps_per_second": 1.551, |
|
"step": 227200 |
|
}, |
|
{ |
|
"epoch": 0.54, |
|
"learning_rate": 1.658210782223441e-05, |
|
"loss": 0.1919, |
|
"step": 227600 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 1.6570773229970377e-05, |
|
"loss": 0.1852, |
|
"step": 228000 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 1.6559423762626215e-05, |
|
"loss": 0.1873, |
|
"step": 228400 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 1.654805944589513e-05, |
|
"loss": 0.1887, |
|
"step": 228800 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"eval_accuracy": 0.9378007688202951, |
|
"eval_f1": 0.8676160121238438, |
|
"eval_loss": 0.1776517778635025, |
|
"eval_precision": 0.8705870638290035, |
|
"eval_recall": 0.8646651700603273, |
|
"eval_runtime": 1347.9483, |
|
"eval_samples_per_second": 12.575, |
|
"eval_steps_per_second": 1.572, |
|
"step": 228800 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 1.6536680305503953e-05, |
|
"loss": 0.1871, |
|
"step": 229200 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 1.652528636721306e-05, |
|
"loss": 0.1885, |
|
"step": 229600 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 1.6513877656816325e-05, |
|
"loss": 0.1884, |
|
"step": 230000 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 1.6502454200141086e-05, |
|
"loss": 0.1893, |
|
"step": 230400 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"eval_accuracy": 0.9375102339003047, |
|
"eval_f1": 0.8665494793203288, |
|
"eval_loss": 0.17748743295669556, |
|
"eval_precision": 0.8726716284484248, |
|
"eval_recall": 0.8605126305457468, |
|
"eval_runtime": 1496.3516, |
|
"eval_samples_per_second": 11.328, |
|
"eval_steps_per_second": 1.416, |
|
"step": 230400 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 1.6491016023048036e-05, |
|
"loss": 0.1864, |
|
"step": 230800 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 1.647956315143121e-05, |
|
"loss": 0.1928, |
|
"step": 231200 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 1.6468095611217906e-05, |
|
"loss": 0.1873, |
|
"step": 231600 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 1.6456613428368627e-05, |
|
"loss": 0.1865, |
|
"step": 232000 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"eval_accuracy": 0.937655595141746, |
|
"eval_f1": 0.8666062090876223, |
|
"eval_loss": 0.17793767154216766, |
|
"eval_precision": 0.8748837751237984, |
|
"eval_recall": 0.8584838085830202, |
|
"eval_runtime": 1628.3506, |
|
"eval_samples_per_second": 10.41, |
|
"eval_steps_per_second": 1.301, |
|
"step": 232000 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 1.6445116628877023e-05, |
|
"loss": 0.1853, |
|
"step": 232400 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 1.643360523876983e-05, |
|
"loss": 0.1841, |
|
"step": 232800 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 1.642207928410683e-05, |
|
"loss": 0.192, |
|
"step": 233200 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 1.641053879098076e-05, |
|
"loss": 0.1888, |
|
"step": 233600 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"eval_accuracy": 0.9377015480503307, |
|
"eval_f1": 0.8671411822565678, |
|
"eval_loss": 0.17848214507102966, |
|
"eval_precision": 0.8705092213592871, |
|
"eval_recall": 0.8637991048944973, |
|
"eval_runtime": 2062.075, |
|
"eval_samples_per_second": 8.22, |
|
"eval_steps_per_second": 1.028, |
|
"step": 233600 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 1.6398983785517278e-05, |
|
"loss": 0.1842, |
|
"step": 234000 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 1.638741429387489e-05, |
|
"loss": 0.1867, |
|
"step": 234400 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 1.63758303422449e-05, |
|
"loss": 0.1873, |
|
"step": 234800 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 1.636423195685135e-05, |
|
"loss": 0.1893, |
|
"step": 235200 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"eval_accuracy": 0.9380693588819001, |
|
"eval_f1": 0.8680359208535846, |
|
"eval_loss": 0.176702082157135, |
|
"eval_precision": 0.8732650801346397, |
|
"eval_recall": 0.862869013789015, |
|
"eval_runtime": 2471.7979, |
|
"eval_samples_per_second": 6.858, |
|
"eval_steps_per_second": 0.857, |
|
"step": 235200 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 1.6352619163950954e-05, |
|
"loss": 0.1877, |
|
"step": 235600 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 1.6340991989833036e-05, |
|
"loss": 0.1877, |
|
"step": 236000 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 1.632935046081949e-05, |
|
"loss": 0.1864, |
|
"step": 236400 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 1.631769460326469e-05, |
|
"loss": 0.1876, |
|
"step": 236800 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"eval_accuracy": 0.9378168992290228, |
|
"eval_f1": 0.8676546405447269, |
|
"eval_loss": 0.17699034512043, |
|
"eval_precision": 0.8718895392465856, |
|
"eval_recall": 0.8634606820706218, |
|
"eval_runtime": 1988.0252, |
|
"eval_samples_per_second": 8.527, |
|
"eval_steps_per_second": 1.066, |
|
"step": 236800 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 1.6306024443555462e-05, |
|
"loss": 0.1894, |
|
"step": 237200 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 1.6294340008111008e-05, |
|
"loss": 0.1871, |
|
"step": 237600 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 1.628264132338284e-05, |
|
"loss": 0.1881, |
|
"step": 238000 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 1.627092841585473e-05, |
|
"loss": 0.1853, |
|
"step": 238400 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"eval_accuracy": 0.9376743514309642, |
|
"eval_f1": 0.866771496887475, |
|
"eval_loss": 0.17852920293807983, |
|
"eval_precision": 0.8729003106270632, |
|
"eval_recall": 0.8607281464318972, |
|
"eval_runtime": 2157.4161, |
|
"eval_samples_per_second": 7.857, |
|
"eval_steps_per_second": 0.982, |
|
"step": 238400 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 1.6259201312042653e-05, |
|
"loss": 0.1922, |
|
"step": 238800 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 1.6247460038494717e-05, |
|
"loss": 0.1856, |
|
"step": 239200 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 1.623570462179111e-05, |
|
"loss": 0.1849, |
|
"step": 239600 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 1.6223935088544038e-05, |
|
"loss": 0.1914, |
|
"step": 240000 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"eval_accuracy": 0.9379305623416851, |
|
"eval_f1": 0.8676183575712617, |
|
"eval_loss": 0.17675957083702087, |
|
"eval_precision": 0.8727782087302289, |
|
"eval_recall": 0.8625191577616302, |
|
"eval_runtime": 3524.4264, |
|
"eval_samples_per_second": 4.81, |
|
"eval_steps_per_second": 0.601, |
|
"step": 240000 |
|
}, |
|
{ |
|
"epoch": 0.57, |
|
"learning_rate": 1.6212151465397665e-05, |
|
"loss": 0.1889, |
|
"step": 240400 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 1.620035377902805e-05, |
|
"loss": 0.188, |
|
"step": 240800 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 1.6188542056143083e-05, |
|
"loss": 0.1891, |
|
"step": 241200 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 1.6176716323482446e-05, |
|
"loss": 0.186, |
|
"step": 241600 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"eval_accuracy": 0.938026031853806, |
|
"eval_f1": 0.8677617064417832, |
|
"eval_loss": 0.1765325963497162, |
|
"eval_precision": 0.8744515242966248, |
|
"eval_recall": 0.8611734697085848, |
|
"eval_runtime": 2840.9085, |
|
"eval_samples_per_second": 5.967, |
|
"eval_steps_per_second": 0.746, |
|
"step": 241600 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 1.616487660781752e-05, |
|
"loss": 0.1865, |
|
"step": 242000 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 1.6153022935951352e-05, |
|
"loss": 0.1903, |
|
"step": 242400 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 1.614115533471858e-05, |
|
"loss": 0.1872, |
|
"step": 242800 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 1.6129273830985374e-05, |
|
"loss": 0.1851, |
|
"step": 243200 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"eval_accuracy": 0.9380453508317007, |
|
"eval_f1": 0.8677535835682475, |
|
"eval_loss": 0.17766611278057098, |
|
"eval_precision": 0.8736754209940448, |
|
"eval_recall": 0.8619114829951106, |
|
"eval_runtime": 2728.5566, |
|
"eval_samples_per_second": 6.212, |
|
"eval_steps_per_second": 0.777, |
|
"step": 243200 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 1.6117378451649374e-05, |
|
"loss": 0.1871, |
|
"step": 243600 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 1.610546922363964e-05, |
|
"loss": 0.191, |
|
"step": 244000 |
|
}, |
|
{ |
|
"epoch": 0.58, |
|
"learning_rate": 1.6093546173916586e-05, |
|
"loss": 0.1899, |
|
"step": 244400 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 1.60816093294719e-05, |
|
"loss": 0.1943, |
|
"step": 244800 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"eval_accuracy": 0.9375576873120268, |
|
"eval_f1": 0.8670070484465402, |
|
"eval_loss": 0.17991459369659424, |
|
"eval_precision": 0.8688986737601561, |
|
"eval_recall": 0.8651236415210505, |
|
"eval_runtime": 6003.7404, |
|
"eval_samples_per_second": 2.823, |
|
"eval_steps_per_second": 0.353, |
|
"step": 244800 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 1.6069658717328508e-05, |
|
"loss": 0.1935, |
|
"step": 245200 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 1.6057694364540508e-05, |
|
"loss": 0.1791, |
|
"step": 245600 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 1.6045716298193096e-05, |
|
"loss": 0.1838, |
|
"step": 246000 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 1.6033724545402517e-05, |
|
"loss": 0.1884, |
|
"step": 246400 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"eval_accuracy": 0.9380753608944499, |
|
"eval_f1": 0.8679641821278946, |
|
"eval_loss": 0.1776251196861267, |
|
"eval_precision": 0.8738028433394031, |
|
"eval_recall": 0.8622030296845979, |
|
"eval_runtime": 3004.0762, |
|
"eval_samples_per_second": 5.643, |
|
"eval_steps_per_second": 0.705, |
|
"step": 246400 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 1.6021719133316004e-05, |
|
"loss": 0.1862, |
|
"step": 246800 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 1.6009700089111705e-05, |
|
"loss": 0.1883, |
|
"step": 247200 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 1.599766743999863e-05, |
|
"loss": 0.1855, |
|
"step": 247600 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 1.5985621213216593e-05, |
|
"loss": 0.1897, |
|
"step": 248000 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"eval_accuracy": 0.9379052413512405, |
|
"eval_f1": 0.8676452917564406, |
|
"eval_loss": 0.1764792799949646, |
|
"eval_precision": 0.8705365106951102, |
|
"eval_recall": 0.8647732138334903, |
|
"eval_runtime": 3373.4427, |
|
"eval_samples_per_second": 5.025, |
|
"eval_steps_per_second": 0.628, |
|
"step": 248000 |
|
}, |
|
{ |
|
"epoch": 0.59, |
|
"learning_rate": 1.5973561436036138e-05, |
|
"loss": 0.1836, |
|
"step": 248400 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 1.5961488135758494e-05, |
|
"loss": 0.1775, |
|
"step": 248800 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 1.59494013397155e-05, |
|
"loss": 0.1829, |
|
"step": 249200 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 1.5937301075269537e-05, |
|
"loss": 0.1857, |
|
"step": 249600 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"eval_accuracy": 0.938110435155288, |
|
"eval_f1": 0.8678919680622887, |
|
"eval_loss": 0.17743539810180664, |
|
"eval_precision": 0.8754202625124966, |
|
"eval_recall": 0.86049205077943, |
|
"eval_runtime": 569.8141, |
|
"eval_samples_per_second": 29.748, |
|
"eval_steps_per_second": 3.719, |
|
"step": 249600 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 1.5925187369813496e-05, |
|
"loss": 0.1873, |
|
"step": 250000 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 1.5913060250770676e-05, |
|
"loss": 0.1877, |
|
"step": 250400 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 1.5900919745594756e-05, |
|
"loss": 0.1909, |
|
"step": 250800 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 1.5888765881769718e-05, |
|
"loss": 0.1853, |
|
"step": 251200 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"eval_accuracy": 0.9382769910035459, |
|
"eval_f1": 0.8685278445507639, |
|
"eval_loss": 0.17629072070121765, |
|
"eval_precision": 0.8730131777143226, |
|
"eval_recall": 0.8640883649432828, |
|
"eval_runtime": 561.5572, |
|
"eval_samples_per_second": 30.186, |
|
"eval_steps_per_second": 3.773, |
|
"step": 251200 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 1.587659868680977e-05, |
|
"loss": 0.1878, |
|
"step": 251600 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 1.5864418188259323e-05, |
|
"loss": 0.187, |
|
"step": 252000 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 1.5852224413692886e-05, |
|
"loss": 0.1867, |
|
"step": 252400 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"learning_rate": 1.5840017390715035e-05, |
|
"loss": 0.1885, |
|
"step": 252800 |
|
}, |
|
{ |
|
"epoch": 0.6, |
|
"eval_accuracy": 0.9380318463034636, |
|
"eval_f1": 0.8679062324645087, |
|
"eval_loss": 0.17689689993858337, |
|
"eval_precision": 0.8742666856919459, |
|
"eval_recall": 0.8616376577710627, |
|
"eval_runtime": 562.1728, |
|
"eval_samples_per_second": 30.153, |
|
"eval_steps_per_second": 3.769, |
|
"step": 252800 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 1.5827797146960328e-05, |
|
"loss": 0.187, |
|
"step": 253200 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 1.5815563710093256e-05, |
|
"loss": 0.1829, |
|
"step": 253600 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 1.5803317107808183e-05, |
|
"loss": 0.1834, |
|
"step": 254000 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 1.5791057367829273e-05, |
|
"loss": 0.1867, |
|
"step": 254400 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"eval_accuracy": 0.938133130265242, |
|
"eval_f1": 0.8683492146871279, |
|
"eval_loss": 0.17677941918373108, |
|
"eval_precision": 0.870136385553234, |
|
"eval_recall": 0.8665693701048025, |
|
"eval_runtime": 539.5299, |
|
"eval_samples_per_second": 31.418, |
|
"eval_steps_per_second": 3.927, |
|
"step": 254400 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 1.5778784517910427e-05, |
|
"loss": 0.1843, |
|
"step": 254800 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 1.5766498585835227e-05, |
|
"loss": 0.1852, |
|
"step": 255200 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 1.575419959941687e-05, |
|
"loss": 0.189, |
|
"step": 255600 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 1.5741887586498122e-05, |
|
"loss": 0.1859, |
|
"step": 256000 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"eval_accuracy": 0.9383792127797852, |
|
"eval_f1": 0.8685926920931453, |
|
"eval_loss": 0.17721563577651978, |
|
"eval_precision": 0.8733567973368934, |
|
"eval_recall": 0.8638802806394134, |
|
"eval_runtime": 541.5973, |
|
"eval_samples_per_second": 31.298, |
|
"eval_steps_per_second": 3.913, |
|
"step": 256000 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 1.5729562574951208e-05, |
|
"loss": 0.1854, |
|
"step": 256400 |
|
}, |
|
{ |
|
"epoch": 0.61, |
|
"learning_rate": 1.5717224592677808e-05, |
|
"loss": 0.1861, |
|
"step": 256800 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 1.570487366760895e-05, |
|
"loss": 0.188, |
|
"step": 257200 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 1.569250982770496e-05, |
|
"loss": 0.1836, |
|
"step": 257600 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"eval_accuracy": 0.9382160330635866, |
|
"eval_f1": 0.8683363284140473, |
|
"eval_loss": 0.1770741045475006, |
|
"eval_precision": 0.8726775763825417, |
|
"eval_recall": 0.8640380588478418, |
|
"eval_runtime": 567.0661, |
|
"eval_samples_per_second": 29.892, |
|
"eval_steps_per_second": 3.737, |
|
"step": 257600 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 1.568013310095541e-05, |
|
"loss": 0.1847, |
|
"step": 258000 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 1.5667743515379047e-05, |
|
"loss": 0.1841, |
|
"step": 258400 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 1.5655341099023723e-05, |
|
"loss": 0.1853, |
|
"step": 258800 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 1.5642925879966328e-05, |
|
"loss": 0.1883, |
|
"step": 259200 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"eval_accuracy": 0.9382659247929072, |
|
"eval_f1": 0.8685522914865363, |
|
"eval_loss": 0.17646153271198273, |
|
"eval_precision": 0.8711410785399025, |
|
"eval_recall": 0.8659788451435467, |
|
"eval_runtime": 584.0804, |
|
"eval_samples_per_second": 29.022, |
|
"eval_steps_per_second": 3.628, |
|
"step": 259200 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 1.5630497886312756e-05, |
|
"loss": 0.1883, |
|
"step": 259600 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 1.56180571461978e-05, |
|
"loss": 0.1836, |
|
"step": 260000 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 1.560560368778512e-05, |
|
"loss": 0.1891, |
|
"step": 260400 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 1.5593137539267167e-05, |
|
"loss": 0.1872, |
|
"step": 260800 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"eval_accuracy": 0.9381590139443632, |
|
"eval_f1": 0.8683332486638728, |
|
"eval_loss": 0.17653478682041168, |
|
"eval_precision": 0.8719482882054085, |
|
"eval_recall": 0.8647480607857698, |
|
"eval_runtime": 568.7246, |
|
"eval_samples_per_second": 29.805, |
|
"eval_steps_per_second": 3.726, |
|
"step": 260800 |
|
}, |
|
{ |
|
"epoch": 0.62, |
|
"learning_rate": 1.5580658728865115e-05, |
|
"loss": 0.1898, |
|
"step": 261200 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 1.5568167284828812e-05, |
|
"loss": 0.1811, |
|
"step": 261600 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 1.5555663235436697e-05, |
|
"loss": 0.1855, |
|
"step": 262000 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 1.5543146608995746e-05, |
|
"loss": 0.1814, |
|
"step": 262400 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"eval_accuracy": 0.9382809298242817, |
|
"eval_f1": 0.868037752648405, |
|
"eval_loss": 0.1755363494157791, |
|
"eval_precision": 0.8751227345668181, |
|
"eval_recall": 0.8610665692557727, |
|
"eval_runtime": 568.4875, |
|
"eval_samples_per_second": 29.818, |
|
"eval_steps_per_second": 3.727, |
|
"step": 262400 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 1.5530617433841412e-05, |
|
"loss": 0.1869, |
|
"step": 262800 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 1.5518075738337554e-05, |
|
"loss": 0.1857, |
|
"step": 263200 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 1.550552155087637e-05, |
|
"loss": 0.1817, |
|
"step": 263600 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 1.5492954899878353e-05, |
|
"loss": 0.1851, |
|
"step": 264000 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"eval_accuracy": 0.9380864271050887, |
|
"eval_f1": 0.8681394820209297, |
|
"eval_loss": 0.17657506465911865, |
|
"eval_precision": 0.873961482823805, |
|
"eval_recall": 0.8623945358433789, |
|
"eval_runtime": 556.3354, |
|
"eval_samples_per_second": 30.469, |
|
"eval_steps_per_second": 3.809, |
|
"step": 264000 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 1.5480375813792193e-05, |
|
"loss": 0.1875, |
|
"step": 264400 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 1.5467784321094738e-05, |
|
"loss": 0.1827, |
|
"step": 264800 |
|
}, |
|
{ |
|
"epoch": 0.63, |
|
"learning_rate": 1.5455180450290927e-05, |
|
"loss": 0.1855, |
|
"step": 265200 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 1.5442564229913712e-05, |
|
"loss": 0.1865, |
|
"step": 265600 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"eval_accuracy": 0.9382207221358911, |
|
"eval_f1": 0.8682821817948081, |
|
"eval_loss": 0.17585815489292145, |
|
"eval_precision": 0.8717111195488778, |
|
"eval_recall": 0.8648801142863023, |
|
"eval_runtime": 546.154, |
|
"eval_samples_per_second": 31.037, |
|
"eval_steps_per_second": 3.88, |
|
"step": 265600 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 1.5429935688524008e-05, |
|
"loss": 0.1892, |
|
"step": 266000 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 1.5417294854710626e-05, |
|
"loss": 0.1871, |
|
"step": 266400 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 1.5404641757090195e-05, |
|
"loss": 0.1827, |
|
"step": 266800 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 1.5391976424307117e-05, |
|
"loss": 0.1867, |
|
"step": 267200 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"eval_accuracy": 0.9381501984884306, |
|
"eval_f1": 0.8684828571461304, |
|
"eval_loss": 0.17675794661045074, |
|
"eval_precision": 0.8702520329263176, |
|
"eval_recall": 0.8667208600513008, |
|
"eval_runtime": 545.512, |
|
"eval_samples_per_second": 31.074, |
|
"eval_steps_per_second": 3.884, |
|
"step": 267200 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 1.5379298885033486e-05, |
|
"loss": 0.1874, |
|
"step": 267600 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 1.536660916796903e-05, |
|
"loss": 0.1918, |
|
"step": 268000 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 1.5353907301841046e-05, |
|
"loss": 0.1851, |
|
"step": 268400 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 1.5341193315404338e-05, |
|
"loss": 0.1825, |
|
"step": 268800 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"eval_accuracy": 0.9384647414586204, |
|
"eval_f1": 0.8690078223475308, |
|
"eval_loss": 0.17516086995601654, |
|
"eval_precision": 0.8725653210073282, |
|
"eval_recall": 0.86547921415019, |
|
"eval_runtime": 558.7448, |
|
"eval_samples_per_second": 30.338, |
|
"eval_steps_per_second": 3.792, |
|
"step": 268800 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 1.5328467237441144e-05, |
|
"loss": 0.1859, |
|
"step": 269200 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 1.5315729096761075e-05, |
|
"loss": 0.1852, |
|
"step": 269600 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 1.5302978922201055e-05, |
|
"loss": 0.1859, |
|
"step": 270000 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 1.5290216742625242e-05, |
|
"loss": 0.1862, |
|
"step": 270400 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"eval_accuracy": 0.9383621445565966, |
|
"eval_f1": 0.8681803401401886, |
|
"eval_loss": 0.17722995579242706, |
|
"eval_precision": 0.8748685240093154, |
|
"eval_recall": 0.8615936399375519, |
|
"eval_runtime": 539.8288, |
|
"eval_samples_per_second": 31.401, |
|
"eval_steps_per_second": 3.925, |
|
"step": 270400 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 1.527744258692498e-05, |
|
"loss": 0.1818, |
|
"step": 270800 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 1.526465648401872e-05, |
|
"loss": 0.1848, |
|
"step": 271200 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 1.5251858462851954e-05, |
|
"loss": 0.1868, |
|
"step": 271600 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 1.523904855239717e-05, |
|
"loss": 0.1867, |
|
"step": 272000 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"eval_accuracy": 0.9385112570558816, |
|
"eval_f1": 0.8686622998115704, |
|
"eval_loss": 0.1757044494152069, |
|
"eval_precision": 0.8775867510739054, |
|
"eval_recall": 0.8599175323030873, |
|
"eval_runtime": 545.6567, |
|
"eval_samples_per_second": 31.065, |
|
"eval_steps_per_second": 3.883, |
|
"step": 272000 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 1.5226226781653762e-05, |
|
"loss": 0.1896, |
|
"step": 272400 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 1.5213393179647966e-05, |
|
"loss": 0.1867, |
|
"step": 272800 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 1.5200547775432817e-05, |
|
"loss": 0.1879, |
|
"step": 273200 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 1.5187690598088058e-05, |
|
"loss": 0.1859, |
|
"step": 273600 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"eval_accuracy": 0.9385356402318652, |
|
"eval_f1": 0.8687952957532927, |
|
"eval_loss": 0.17426274716854095, |
|
"eval_precision": 0.8738683406030197, |
|
"eval_recall": 0.8637808117688823, |
|
"eval_runtime": 545.5662, |
|
"eval_samples_per_second": 31.07, |
|
"eval_steps_per_second": 3.884, |
|
"step": 273600 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 1.517482167672009e-05, |
|
"loss": 0.1851, |
|
"step": 274000 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 1.5161941040461897e-05, |
|
"loss": 0.182, |
|
"step": 274400 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 1.514904871847298e-05, |
|
"loss": 0.1862, |
|
"step": 274800 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 1.5136144739939305e-05, |
|
"loss": 0.1845, |
|
"step": 275200 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"eval_accuracy": 0.9385176341942157, |
|
"eval_f1": 0.8692666808093146, |
|
"eval_loss": 0.17514793574810028, |
|
"eval_precision": 0.8706627898307612, |
|
"eval_recall": 0.8678750419455654, |
|
"eval_runtime": 561.3506, |
|
"eval_samples_per_second": 30.197, |
|
"eval_steps_per_second": 3.775, |
|
"step": 275200 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 1.5123229134073212e-05, |
|
"loss": 0.1837, |
|
"step": 275600 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 1.5110301930113382e-05, |
|
"loss": 0.1807, |
|
"step": 276000 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 1.5097363157324734e-05, |
|
"loss": 0.1868, |
|
"step": 276400 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 1.5084412844998388e-05, |
|
"loss": 0.1839, |
|
"step": 276800 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"eval_accuracy": 0.9386039131246195, |
|
"eval_f1": 0.869252198361808, |
|
"eval_loss": 0.17545466125011444, |
|
"eval_precision": 0.872514411732351, |
|
"eval_recall": 0.8660142880744256, |
|
"eval_runtime": 604.8369, |
|
"eval_samples_per_second": 28.026, |
|
"eval_steps_per_second": 3.503, |
|
"step": 276800 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 1.5071451022451578e-05, |
|
"loss": 0.1804, |
|
"step": 277200 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 1.5058477719027607e-05, |
|
"loss": 0.1873, |
|
"step": 277600 |
|
}, |
|
{ |
|
"epoch": 0.66, |
|
"learning_rate": 1.504549296409576e-05, |
|
"loss": 0.1841, |
|
"step": 278000 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 1.5032496787051245e-05, |
|
"loss": 0.1833, |
|
"step": 278400 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"eval_accuracy": 0.938784348626899, |
|
"eval_f1": 0.8692810532908997, |
|
"eval_loss": 0.17319637537002563, |
|
"eval_precision": 0.8770962172215553, |
|
"eval_recall": 0.8616039298207102, |
|
"eval_runtime": 618.6793, |
|
"eval_samples_per_second": 27.399, |
|
"eval_steps_per_second": 3.425, |
|
"step": 278400 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 1.5019489217315138e-05, |
|
"loss": 0.1829, |
|
"step": 278800 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 1.5006470284334294e-05, |
|
"loss": 0.1816, |
|
"step": 279200 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 1.4993440017581304e-05, |
|
"loss": 0.1823, |
|
"step": 279600 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 1.4980398446554402e-05, |
|
"loss": 0.1834, |
|
"step": 280000 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"eval_accuracy": 0.9387138249794384, |
|
"eval_f1": 0.8692215390091689, |
|
"eval_loss": 0.1743682324886322, |
|
"eval_precision": 0.872991133337831, |
|
"eval_recall": 0.8654843590917691, |
|
"eval_runtime": 619.0907, |
|
"eval_samples_per_second": 27.38, |
|
"eval_steps_per_second": 3.423, |
|
"step": 280000 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 1.496734560077743e-05, |
|
"loss": 0.1822, |
|
"step": 280400 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 1.4954281509799742e-05, |
|
"loss": 0.1858, |
|
"step": 280800 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 1.4941206203196148e-05, |
|
"loss": 0.1871, |
|
"step": 281200 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 1.492811971056686e-05, |
|
"loss": 0.1823, |
|
"step": 281600 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"eval_accuracy": 0.9384544254995503, |
|
"eval_f1": 0.869219087148335, |
|
"eval_loss": 0.17768624424934387, |
|
"eval_precision": 0.8707284207482976, |
|
"eval_recall": 0.8677149770964351, |
|
"eval_runtime": 560.6694, |
|
"eval_samples_per_second": 30.234, |
|
"eval_steps_per_second": 3.779, |
|
"step": 281600 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 1.4915022061537402e-05, |
|
"loss": 0.1835, |
|
"step": 282000 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 1.4901913285758558e-05, |
|
"loss": 0.1881, |
|
"step": 282400 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 1.4888793412906303e-05, |
|
"loss": 0.1833, |
|
"step": 282800 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 1.4875662472681729e-05, |
|
"loss": 0.1844, |
|
"step": 283200 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"eval_accuracy": 0.93876896846974, |
|
"eval_f1": 0.8693240819341063, |
|
"eval_loss": 0.17536406219005585, |
|
"eval_precision": 0.8761287048692236, |
|
"eval_recall": 0.8626243432339159, |
|
"eval_runtime": 559.7176, |
|
"eval_samples_per_second": 30.285, |
|
"eval_steps_per_second": 3.786, |
|
"step": 283200 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 1.4862520494810993e-05, |
|
"loss": 0.183, |
|
"step": 283600 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 1.4849367509045223e-05, |
|
"loss": 0.1838, |
|
"step": 284000 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 1.483620354516048e-05, |
|
"loss": 0.1824, |
|
"step": 284400 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 1.4823028632957672e-05, |
|
"loss": 0.1839, |
|
"step": 284800 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"eval_accuracy": 0.9385120073074503, |
|
"eval_f1": 0.8684139374948006, |
|
"eval_loss": 0.17582936584949493, |
|
"eval_precision": 0.8777255169876764, |
|
"eval_recall": 0.859297852672883, |
|
"eval_runtime": 551.0927, |
|
"eval_samples_per_second": 30.759, |
|
"eval_steps_per_second": 3.845, |
|
"step": 284800 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 1.4809842802262497e-05, |
|
"loss": 0.1845, |
|
"step": 285200 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 1.4796646082925363e-05, |
|
"loss": 0.1819, |
|
"step": 285600 |
|
}, |
|
{ |
|
"epoch": 0.68, |
|
"learning_rate": 1.478343850482133e-05, |
|
"loss": 0.1811, |
|
"step": 286000 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 1.4770220097850052e-05, |
|
"loss": 0.1836, |
|
"step": 286400 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"eval_accuracy": 0.9387203896806648, |
|
"eval_f1": 0.8693283022641248, |
|
"eval_loss": 0.17578087747097015, |
|
"eval_precision": 0.8739339668967908, |
|
"eval_recall": 0.8647709271927884, |
|
"eval_runtime": 557.1714, |
|
"eval_samples_per_second": 30.423, |
|
"eval_steps_per_second": 3.803, |
|
"step": 286400 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 1.4756990891935679e-05, |
|
"loss": 0.1867, |
|
"step": 286800 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 1.4743750917026818e-05, |
|
"loss": 0.184, |
|
"step": 287200 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 1.4730500203096458e-05, |
|
"loss": 0.1798, |
|
"step": 287600 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 1.471723878014189e-05, |
|
"loss": 0.1832, |
|
"step": 288000 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"eval_accuracy": 0.9385549592097601, |
|
"eval_f1": 0.8692415066070271, |
|
"eval_loss": 0.17377515137195587, |
|
"eval_precision": 0.8749265219015407, |
|
"eval_recall": 0.8636298934825595, |
|
"eval_runtime": 577.6247, |
|
"eval_samples_per_second": 29.346, |
|
"eval_steps_per_second": 3.668, |
|
"step": 288000 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 1.470396667818466e-05, |
|
"loss": 0.1825, |
|
"step": 288400 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 1.469068392727048e-05, |
|
"loss": 0.1836, |
|
"step": 288800 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 1.467739055746917e-05, |
|
"loss": 0.1827, |
|
"step": 289200 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 1.4664086598874594e-05, |
|
"loss": 0.1858, |
|
"step": 289600 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"eval_accuracy": 0.9387108239731635, |
|
"eval_f1": 0.8691714958668679, |
|
"eval_loss": 0.1739749312400818, |
|
"eval_precision": 0.8744400690142785, |
|
"eval_recall": 0.8639660296657331, |
|
"eval_runtime": 582.35, |
|
"eval_samples_per_second": 29.108, |
|
"eval_steps_per_second": 3.639, |
|
"step": 289600 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 1.4650772081604583e-05, |
|
"loss": 0.1877, |
|
"step": 290000 |
|
}, |
|
{ |
|
"epoch": 0.69, |
|
"learning_rate": 1.4637447035800878e-05, |
|
"loss": 0.1854, |
|
"step": 290400 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 1.4624111491629045e-05, |
|
"loss": 0.1838, |
|
"step": 290800 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 1.4610765479278421e-05, |
|
"loss": 0.1798, |
|
"step": 291200 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"eval_accuracy": 0.938752462935228, |
|
"eval_f1": 0.8692557733778254, |
|
"eval_loss": 0.17498846352100372, |
|
"eval_precision": 0.875637116229374, |
|
"eval_recall": 0.8629667676790197, |
|
"eval_runtime": 596.7225, |
|
"eval_samples_per_second": 28.407, |
|
"eval_steps_per_second": 3.551, |
|
"step": 291200 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 1.4597409028962043e-05, |
|
"loss": 0.1837, |
|
"step": 291600 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 1.4584042170916578e-05, |
|
"loss": 0.1831, |
|
"step": 292000 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 1.4570664935402245e-05, |
|
"loss": 0.1814, |
|
"step": 292400 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 1.4557277352702772e-05, |
|
"loss": 0.1832, |
|
"step": 292800 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"eval_accuracy": 0.9384574265058252, |
|
"eval_f1": 0.8689321028534405, |
|
"eval_loss": 0.17471010982990265, |
|
"eval_precision": 0.8715587202283902, |
|
"eval_recall": 0.8663212695886505, |
|
"eval_runtime": 564.6829, |
|
"eval_samples_per_second": 30.019, |
|
"eval_steps_per_second": 3.753, |
|
"step": 292800 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 1.4543879453125294e-05, |
|
"loss": 0.1842, |
|
"step": 293200 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 1.453047126700032e-05, |
|
"loss": 0.1826, |
|
"step": 293600 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 1.4517052824681624e-05, |
|
"loss": 0.1827, |
|
"step": 294000 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"learning_rate": 1.4503624156546217e-05, |
|
"loss": 0.1868, |
|
"step": 294400 |
|
}, |
|
{ |
|
"epoch": 0.7, |
|
"eval_accuracy": 0.939146532571703, |
|
"eval_f1": 0.8701281629970424, |
|
"eval_loss": 0.17378772795200348, |
|
"eval_precision": 0.877606878758223, |
|
"eval_recall": 0.8627758331804143, |
|
"eval_runtime": 612.6148, |
|
"eval_samples_per_second": 27.67, |
|
"eval_steps_per_second": 3.459, |
|
"step": 294400 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 1.4490185292994253e-05, |
|
"loss": 0.1817, |
|
"step": 294800 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 1.4476736264448961e-05, |
|
"loss": 0.1819, |
|
"step": 295200 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 1.4463277101356592e-05, |
|
"loss": 0.1811, |
|
"step": 295600 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 1.444980783418633e-05, |
|
"loss": 0.1823, |
|
"step": 296000 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"eval_accuracy": 0.9389529676669708, |
|
"eval_f1": 0.8698042419987801, |
|
"eval_loss": 0.17476864159107208, |
|
"eval_precision": 0.875563373877524, |
|
"eval_recall": 0.8641203779131088, |
|
"eval_runtime": 604.1888, |
|
"eval_samples_per_second": 28.056, |
|
"eval_steps_per_second": 3.507, |
|
"step": 296000 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 1.4436328493430242e-05, |
|
"loss": 0.1858, |
|
"step": 296400 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 1.4422839109603196e-05, |
|
"loss": 0.1872, |
|
"step": 296800 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 1.440933971324279e-05, |
|
"loss": 0.1825, |
|
"step": 297200 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 1.43958303349093e-05, |
|
"loss": 0.1851, |
|
"step": 297600 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"eval_accuracy": 0.9387669052779261, |
|
"eval_f1": 0.8692277440556693, |
|
"eval_loss": 0.17473439872264862, |
|
"eval_precision": 0.8749221043072554, |
|
"eval_recall": 0.8636070270755409, |
|
"eval_runtime": 607.4986, |
|
"eval_samples_per_second": 27.903, |
|
"eval_steps_per_second": 3.488, |
|
"step": 297600 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 1.4382311005185584e-05, |
|
"loss": 0.1873, |
|
"step": 298000 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 1.4368781754677049e-05, |
|
"loss": 0.1865, |
|
"step": 298400 |
|
}, |
|
{ |
|
"epoch": 0.71, |
|
"learning_rate": 1.435524261401154e-05, |
|
"loss": 0.1839, |
|
"step": 298800 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 1.4341693613839304e-05, |
|
"loss": 0.1828, |
|
"step": 299200 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"eval_accuracy": 0.9390154261100675, |
|
"eval_f1": 0.8700955369181682, |
|
"eval_loss": 0.1748456209897995, |
|
"eval_precision": 0.8727505924034324, |
|
"eval_recall": 0.8674565866971248, |
|
"eval_runtime": 602.322, |
|
"eval_samples_per_second": 28.143, |
|
"eval_steps_per_second": 3.518, |
|
"step": 299200 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 1.4328134784832908e-05, |
|
"loss": 0.185, |
|
"step": 299600 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 1.4314566157687164e-05, |
|
"loss": 0.1826, |
|
"step": 300000 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 1.4300987763119067e-05, |
|
"loss": 0.1819, |
|
"step": 300400 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 1.428739963186773e-05, |
|
"loss": 0.1825, |
|
"step": 300800 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"eval_accuracy": 0.9388166094443543, |
|
"eval_f1": 0.8697936517670821, |
|
"eval_loss": 0.17547725141048431, |
|
"eval_precision": 0.8720537946624818, |
|
"eval_recall": 0.8675451940243218, |
|
"eval_runtime": 600.8049, |
|
"eval_samples_per_second": 28.214, |
|
"eval_steps_per_second": 3.527, |
|
"step": 300800 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 1.4273801794694295e-05, |
|
"loss": 0.1824, |
|
"step": 301200 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 1.4260194282381892e-05, |
|
"loss": 0.1842, |
|
"step": 301600 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 1.4246577125735543e-05, |
|
"loss": 0.1882, |
|
"step": 302000 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 1.4232950355582108e-05, |
|
"loss": 0.184, |
|
"step": 302400 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"eval_accuracy": 0.9390565023834555, |
|
"eval_f1": 0.8700368872239204, |
|
"eval_loss": 0.17385759949684143, |
|
"eval_precision": 0.8744840721441405, |
|
"eval_recall": 0.8656347057179166, |
|
"eval_runtime": 563.4453, |
|
"eval_samples_per_second": 30.085, |
|
"eval_steps_per_second": 3.761, |
|
"step": 302400 |
|
}, |
|
{ |
|
"epoch": 0.72, |
|
"learning_rate": 1.4219314002770204e-05, |
|
"loss": 0.1828, |
|
"step": 302800 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 1.4205668098170154e-05, |
|
"loss": 0.1856, |
|
"step": 303200 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 1.419201267267389e-05, |
|
"loss": 0.1851, |
|
"step": 303600 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 1.4178347757194906e-05, |
|
"loss": 0.1887, |
|
"step": 304000 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"eval_accuracy": 0.9391493460150857, |
|
"eval_f1": 0.870106505634811, |
|
"eval_loss": 0.17325620353221893, |
|
"eval_precision": 0.8757328642230235, |
|
"eval_recall": 0.8645519813455852, |
|
"eval_runtime": 551.7947, |
|
"eval_samples_per_second": 30.72, |
|
"eval_steps_per_second": 3.84, |
|
"step": 304000 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 1.4164673382668182e-05, |
|
"loss": 0.1856, |
|
"step": 304400 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 1.4150989580050102e-05, |
|
"loss": 0.1832, |
|
"step": 304800 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 1.4137296380318403e-05, |
|
"loss": 0.1802, |
|
"step": 305200 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 1.4123593814472092e-05, |
|
"loss": 0.1847, |
|
"step": 305600 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"eval_accuracy": 0.9392256841122039, |
|
"eval_f1": 0.8701730320570634, |
|
"eval_loss": 0.1737605631351471, |
|
"eval_precision": 0.8781310576469697, |
|
"eval_recall": 0.8623579495921491, |
|
"eval_runtime": 549.6988, |
|
"eval_samples_per_second": 30.837, |
|
"eval_steps_per_second": 3.855, |
|
"step": 305600 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 1.4109881913531378e-05, |
|
"loss": 0.1835, |
|
"step": 306000 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 1.4096160708537603e-05, |
|
"loss": 0.1814, |
|
"step": 306400 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 1.408243023055317e-05, |
|
"loss": 0.1834, |
|
"step": 306800 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 1.4068690510661484e-05, |
|
"loss": 0.1776, |
|
"step": 307200 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"eval_accuracy": 0.9390709447261535, |
|
"eval_f1": 0.8698915645399712, |
|
"eval_loss": 0.1729295551776886, |
|
"eval_precision": 0.8768345562740518, |
|
"eval_recall": 0.8630576616469187, |
|
"eval_runtime": 532.8968, |
|
"eval_samples_per_second": 31.809, |
|
"eval_steps_per_second": 3.976, |
|
"step": 307200 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 1.4054941579966863e-05, |
|
"loss": 0.1848, |
|
"step": 307600 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 1.4041183469594472e-05, |
|
"loss": 0.1832, |
|
"step": 308000 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 1.4027416210690275e-05, |
|
"loss": 0.1777, |
|
"step": 308400 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 1.4013639834420927e-05, |
|
"loss": 0.1863, |
|
"step": 308800 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"eval_accuracy": 0.9390154261100675, |
|
"eval_f1": 0.8698607963742311, |
|
"eval_loss": 0.17361733317375183, |
|
"eval_precision": 0.8749846002395711, |
|
"eval_recall": 0.8647966519006843, |
|
"eval_runtime": 548.7029, |
|
"eval_samples_per_second": 30.893, |
|
"eval_steps_per_second": 3.862, |
|
"step": 308800 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 1.3999854371973735e-05, |
|
"loss": 0.1826, |
|
"step": 309200 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 1.3986059854556573e-05, |
|
"loss": 0.1822, |
|
"step": 309600 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 1.3972256313397811e-05, |
|
"loss": 0.1836, |
|
"step": 310000 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 1.3958443779746254e-05, |
|
"loss": 0.1843, |
|
"step": 310400 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"eval_accuracy": 0.9391253379648864, |
|
"eval_f1": 0.8698062571822835, |
|
"eval_loss": 0.1738242655992508, |
|
"eval_precision": 0.8778375067609936, |
|
"eval_recall": 0.861920629557918, |
|
"eval_runtime": 555.0987, |
|
"eval_samples_per_second": 30.537, |
|
"eval_steps_per_second": 3.817, |
|
"step": 310400 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 1.3944622284871052e-05, |
|
"loss": 0.1822, |
|
"step": 310800 |
|
}, |
|
{ |
|
"epoch": 0.74, |
|
"learning_rate": 1.3930791860061657e-05, |
|
"loss": 0.1831, |
|
"step": 311200 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 1.3916952536627726e-05, |
|
"loss": 0.1817, |
|
"step": 311600 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 1.3903104345899062e-05, |
|
"loss": 0.1764, |
|
"step": 312000 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"eval_accuracy": 0.9390373709684529, |
|
"eval_f1": 0.8693857874808684, |
|
"eval_loss": 0.1739407181739807, |
|
"eval_precision": 0.8786256146074277, |
|
"eval_recall": 0.8603382741922299, |
|
"eval_runtime": 550.6316, |
|
"eval_samples_per_second": 30.785, |
|
"eval_steps_per_second": 3.848, |
|
"step": 312000 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 1.3889247319225548e-05, |
|
"loss": 0.1785, |
|
"step": 312400 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 1.3875381487977062e-05, |
|
"loss": 0.1829, |
|
"step": 312800 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 1.3861506883543422e-05, |
|
"loss": 0.1796, |
|
"step": 313200 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 1.3847623537334306e-05, |
|
"loss": 0.1828, |
|
"step": 313600 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"eval_accuracy": 0.9393299690802572, |
|
"eval_f1": 0.8706478942209294, |
|
"eval_loss": 0.1723974198102951, |
|
"eval_precision": 0.8753774942357367, |
|
"eval_recall": 0.8659691269205638, |
|
"eval_runtime": 548.8692, |
|
"eval_samples_per_second": 30.883, |
|
"eval_steps_per_second": 3.861, |
|
"step": 313600 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 1.3833731480779171e-05, |
|
"loss": 0.18, |
|
"step": 314000 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 1.3819830745327207e-05, |
|
"loss": 0.1851, |
|
"step": 314400 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 1.3805921362447241e-05, |
|
"loss": 0.1823, |
|
"step": 314800 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 1.3792003363627684e-05, |
|
"loss": 0.1816, |
|
"step": 315200 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"eval_accuracy": 0.9393211536243247, |
|
"eval_f1": 0.8700548316726608, |
|
"eval_loss": 0.17294801771640778, |
|
"eval_precision": 0.8798546604916081, |
|
"eval_recall": 0.8604708993529379, |
|
"eval_runtime": 524.8981, |
|
"eval_samples_per_second": 32.294, |
|
"eval_steps_per_second": 4.037, |
|
"step": 315200 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 1.3778076780376444e-05, |
|
"loss": 0.1824, |
|
"step": 315600 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 1.3764141644220872e-05, |
|
"loss": 0.1799, |
|
"step": 316000 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 1.3750197986707673e-05, |
|
"loss": 0.1819, |
|
"step": 316400 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 1.3736245839402846e-05, |
|
"loss": 0.1822, |
|
"step": 316800 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"eval_accuracy": 0.9392564444265218, |
|
"eval_f1": 0.8702477067806627, |
|
"eval_loss": 0.17339299619197845, |
|
"eval_precision": 0.8751859468416066, |
|
"eval_recall": 0.8653648821150969, |
|
"eval_runtime": 544.0862, |
|
"eval_samples_per_second": 31.155, |
|
"eval_steps_per_second": 3.895, |
|
"step": 316800 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 1.3722285233891617e-05, |
|
"loss": 0.1829, |
|
"step": 317200 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 1.3708316201778341e-05, |
|
"loss": 0.1825, |
|
"step": 317600 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 1.3694338774686468e-05, |
|
"loss": 0.1832, |
|
"step": 318000 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 1.3680352984258447e-05, |
|
"loss": 0.183, |
|
"step": 318400 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"eval_accuracy": 0.9390264923207063, |
|
"eval_f1": 0.870117026206737, |
|
"eval_loss": 0.172457754611969, |
|
"eval_precision": 0.8713868822382754, |
|
"eval_recall": 0.8688508658650848, |
|
"eval_runtime": 534.1333, |
|
"eval_samples_per_second": 31.736, |
|
"eval_steps_per_second": 3.967, |
|
"step": 318400 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 1.3666358862155655e-05, |
|
"loss": 0.1806, |
|
"step": 318800 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 1.3652356440058334e-05, |
|
"loss": 0.1842, |
|
"step": 319200 |
|
}, |
|
{ |
|
"epoch": 0.76, |
|
"learning_rate": 1.3638345749665517e-05, |
|
"loss": 0.1867, |
|
"step": 319600 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 1.3624326822694954e-05, |
|
"loss": 0.1817, |
|
"step": 320000 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"eval_accuracy": 0.9393843623189901, |
|
"eval_f1": 0.8704399780476945, |
|
"eval_loss": 0.17297545075416565, |
|
"eval_precision": 0.8759715099022368, |
|
"eval_recall": 0.8649778681763068, |
|
"eval_runtime": 525.7603, |
|
"eval_samples_per_second": 32.241, |
|
"eval_steps_per_second": 4.03, |
|
"step": 320000 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 1.3610299690883041e-05, |
|
"loss": 0.1838, |
|
"step": 320400 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 1.3596264385984749e-05, |
|
"loss": 0.1831, |
|
"step": 320800 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 1.3582220939773555e-05, |
|
"loss": 0.1851, |
|
"step": 321200 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 1.3568169384041352e-05, |
|
"loss": 0.1813, |
|
"step": 321600 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"eval_accuracy": 0.9393303442060416, |
|
"eval_f1": 0.8706009969384383, |
|
"eval_loss": 0.17410622537136078, |
|
"eval_precision": 0.874548043000233, |
|
"eval_recall": 0.8666894187416502, |
|
"eval_runtime": 535.4685, |
|
"eval_samples_per_second": 31.656, |
|
"eval_steps_per_second": 3.957, |
|
"step": 321600 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 1.355410975059841e-05, |
|
"loss": 0.1855, |
|
"step": 322000 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 1.3540042071273276e-05, |
|
"loss": 0.18, |
|
"step": 322400 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 1.3525966377912712e-05, |
|
"loss": 0.1825, |
|
"step": 322800 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 1.3511882702381622e-05, |
|
"loss": 0.1807, |
|
"step": 323200 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"eval_accuracy": 0.9395974337645092, |
|
"eval_f1": 0.8711333905658395, |
|
"eval_loss": 0.1726941466331482, |
|
"eval_precision": 0.8765628671674324, |
|
"eval_recall": 0.8657707608396773, |
|
"eval_runtime": 548.9419, |
|
"eval_samples_per_second": 30.879, |
|
"eval_steps_per_second": 3.86, |
|
"step": 323200 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 1.3497791076562985e-05, |
|
"loss": 0.1849, |
|
"step": 323600 |
|
}, |
|
{ |
|
"epoch": 0.77, |
|
"learning_rate": 1.348369153235777e-05, |
|
"loss": 0.1803, |
|
"step": 324000 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 1.3469584101684882e-05, |
|
"loss": 0.177, |
|
"step": 324400 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 1.3455468816481068e-05, |
|
"loss": 0.1812, |
|
"step": 324800 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"eval_accuracy": 0.9394183112024751, |
|
"eval_f1": 0.8704002592548384, |
|
"eval_loss": 0.17248520255088806, |
|
"eval_precision": 0.8780404523035452, |
|
"eval_recall": 0.8628918801960337, |
|
"eval_runtime": 542.8504, |
|
"eval_samples_per_second": 31.226, |
|
"eval_steps_per_second": 3.903, |
|
"step": 324800 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 1.3441345708700869e-05, |
|
"loss": 0.1794, |
|
"step": 325200 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 1.3427214810316523e-05, |
|
"loss": 0.1833, |
|
"step": 325600 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 1.341307615331791e-05, |
|
"loss": 0.1813, |
|
"step": 326000 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 1.3398929769712477e-05, |
|
"loss": 0.1816, |
|
"step": 326400 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"eval_accuracy": 0.9395171568466552, |
|
"eval_f1": 0.8708799031361509, |
|
"eval_loss": 0.17236380279064178, |
|
"eval_precision": 0.8754663333059898, |
|
"eval_recall": 0.8663412776947917, |
|
"eval_runtime": 538.2723, |
|
"eval_samples_per_second": 31.491, |
|
"eval_steps_per_second": 3.937, |
|
"step": 326400 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 1.3384775691525156e-05, |
|
"loss": 0.1808, |
|
"step": 326800 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 1.3370613950798306e-05, |
|
"loss": 0.1815, |
|
"step": 327200 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 1.3356444579591623e-05, |
|
"loss": 0.1824, |
|
"step": 327600 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"learning_rate": 1.3342267609982089e-05, |
|
"loss": 0.1801, |
|
"step": 328000 |
|
}, |
|
{ |
|
"epoch": 0.78, |
|
"eval_accuracy": 0.9393738587970278, |
|
"eval_f1": 0.870637096744065, |
|
"eval_loss": 0.17385226488113403, |
|
"eval_precision": 0.8753854564451554, |
|
"eval_recall": 0.8659399722516151, |
|
"eval_runtime": 543.1501, |
|
"eval_samples_per_second": 31.209, |
|
"eval_steps_per_second": 3.901, |
|
"step": 328000 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 1.3328083074063875e-05, |
|
"loss": 0.1863, |
|
"step": 328400 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 1.3313891003948292e-05, |
|
"loss": 0.1801, |
|
"step": 328800 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 1.3299691431763697e-05, |
|
"loss": 0.1813, |
|
"step": 329200 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 1.3285484389655437e-05, |
|
"loss": 0.1827, |
|
"step": 329600 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"eval_accuracy": 0.9397857469082602, |
|
"eval_f1": 0.8713272875455851, |
|
"eval_loss": 0.17110231518745422, |
|
"eval_precision": 0.8773277698091885, |
|
"eval_recall": 0.8654083282884323, |
|
"eval_runtime": 529.0966, |
|
"eval_samples_per_second": 32.038, |
|
"eval_steps_per_second": 4.005, |
|
"step": 329600 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 1.3271269909785769e-05, |
|
"loss": 0.1763, |
|
"step": 330000 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 1.325704802433379e-05, |
|
"loss": 0.1786, |
|
"step": 330400 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 1.3242818765495349e-05, |
|
"loss": 0.1785, |
|
"step": 330800 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 1.3228582165483006e-05, |
|
"loss": 0.1806, |
|
"step": 331200 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"eval_accuracy": 0.9397744931347293, |
|
"eval_f1": 0.8715061671049601, |
|
"eval_loss": 0.17200496792793274, |
|
"eval_precision": 0.8764334590560849, |
|
"eval_recall": 0.8666339677046301, |
|
"eval_runtime": 527.1524, |
|
"eval_samples_per_second": 32.156, |
|
"eval_steps_per_second": 4.02, |
|
"step": 331200 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 1.3214338256525924e-05, |
|
"loss": 0.1747, |
|
"step": 331600 |
|
}, |
|
{ |
|
"epoch": 0.79, |
|
"learning_rate": 1.3200087070869824e-05, |
|
"loss": 0.1836, |
|
"step": 332000 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 1.3185828640776892e-05, |
|
"loss": 0.1848, |
|
"step": 332400 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 1.317156299852572e-05, |
|
"loss": 0.1807, |
|
"step": 332800 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"eval_accuracy": 0.9397041570501609, |
|
"eval_f1": 0.8711431708313623, |
|
"eval_loss": 0.1722223460674286, |
|
"eval_precision": 0.8776170872416562, |
|
"eval_recall": 0.8647640672706828, |
|
"eval_runtime": 541.5718, |
|
"eval_samples_per_second": 31.3, |
|
"eval_steps_per_second": 3.913, |
|
"step": 332800 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 1.3157290176411221e-05, |
|
"loss": 0.1763, |
|
"step": 333200 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 1.3143010206744564e-05, |
|
"loss": 0.1822, |
|
"step": 333600 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 1.312872312185311e-05, |
|
"loss": 0.1828, |
|
"step": 334000 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 1.3114428954080305e-05, |
|
"loss": 0.1822, |
|
"step": 334400 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"eval_accuracy": 0.9397529234021282, |
|
"eval_f1": 0.8713556448343224, |
|
"eval_loss": 0.1710449457168579, |
|
"eval_precision": 0.877881166966545, |
|
"eval_recall": 0.864926418760515, |
|
"eval_runtime": 543.6597, |
|
"eval_samples_per_second": 31.179, |
|
"eval_steps_per_second": 3.898, |
|
"step": 334400 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 1.3100127735785648e-05, |
|
"loss": 0.1754, |
|
"step": 334800 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 1.3085819499344594e-05, |
|
"loss": 0.1791, |
|
"step": 335200 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 1.3071504277148485e-05, |
|
"loss": 0.1802, |
|
"step": 335600 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 1.305718210160448e-05, |
|
"loss": 0.1818, |
|
"step": 336000 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"eval_accuracy": 0.9396105631669619, |
|
"eval_f1": 0.8714104520644, |
|
"eval_loss": 0.17099149525165558, |
|
"eval_precision": 0.8750631215600191, |
|
"eval_recall": 0.8677881495988946, |
|
"eval_runtime": 557.871, |
|
"eval_samples_per_second": 30.385, |
|
"eval_steps_per_second": 3.798, |
|
"step": 336000 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"learning_rate": 1.3042853005135467e-05, |
|
"loss": 0.1773, |
|
"step": 336400 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 1.302851702018002e-05, |
|
"loss": 0.1779, |
|
"step": 336800 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 1.3014174179192299e-05, |
|
"loss": 0.1803, |
|
"step": 337200 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 1.2999824514641982e-05, |
|
"loss": 0.1755, |
|
"step": 337600 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"eval_accuracy": 0.9392907684357912, |
|
"eval_f1": 0.8707195476790487, |
|
"eval_loss": 0.17353902757167816, |
|
"eval_precision": 0.8714702109627323, |
|
"eval_recall": 0.8699701764886459, |
|
"eval_runtime": 580.4555, |
|
"eval_samples_per_second": 29.203, |
|
"eval_steps_per_second": 3.651, |
|
"step": 337600 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 1.2985468059014195e-05, |
|
"loss": 0.1817, |
|
"step": 338000 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 1.2971104844809446e-05, |
|
"loss": 0.181, |
|
"step": 338400 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 1.295673490454353e-05, |
|
"loss": 0.1783, |
|
"step": 338800 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 1.2942358270747483e-05, |
|
"loss": 0.1848, |
|
"step": 339200 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"eval_accuracy": 0.9399232305082298, |
|
"eval_f1": 0.8716477972657062, |
|
"eval_loss": 0.1710236817598343, |
|
"eval_precision": 0.8773480387813335, |
|
"eval_recall": 0.8660211479965312, |
|
"eval_runtime": 563.8654, |
|
"eval_samples_per_second": 30.062, |
|
"eval_steps_per_second": 3.758, |
|
"step": 339200 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 1.2927974975967484e-05, |
|
"loss": 0.1823, |
|
"step": 339600 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 1.2913585052764793e-05, |
|
"loss": 0.181, |
|
"step": 340000 |
|
}, |
|
{ |
|
"epoch": 0.81, |
|
"learning_rate": 1.2899188533715672e-05, |
|
"loss": 0.185, |
|
"step": 340400 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 1.2884785451411328e-05, |
|
"loss": 0.1814, |
|
"step": 340800 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"eval_accuracy": 0.9397538612165892, |
|
"eval_f1": 0.8711334640067796, |
|
"eval_loss": 0.1717814952135086, |
|
"eval_precision": 0.8776545006405615, |
|
"eval_recall": 0.8647086162336627, |
|
"eval_runtime": 560.4441, |
|
"eval_samples_per_second": 30.246, |
|
"eval_steps_per_second": 3.781, |
|
"step": 340800 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 1.2870375838457814e-05, |
|
"loss": 0.1809, |
|
"step": 341200 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 1.2855959727475968e-05, |
|
"loss": 0.1768, |
|
"step": 341600 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 1.2841537151101347e-05, |
|
"loss": 0.1819, |
|
"step": 342000 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 1.2827108141984135e-05, |
|
"loss": 0.1826, |
|
"step": 342400 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"eval_accuracy": 0.9397939996755162, |
|
"eval_f1": 0.8714485471548011, |
|
"eval_loss": 0.17301397025585175, |
|
"eval_precision": 0.8759606598735562, |
|
"eval_recall": 0.8669826804116639, |
|
"eval_runtime": 558.6928, |
|
"eval_samples_per_second": 30.34, |
|
"eval_steps_per_second": 3.793, |
|
"step": 342400 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 1.2812672732789082e-05, |
|
"loss": 0.182, |
|
"step": 342800 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 1.2798230956195427e-05, |
|
"loss": 0.1796, |
|
"step": 343200 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 1.2783782844896825e-05, |
|
"loss": 0.1787, |
|
"step": 343600 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 1.2769328431601274e-05, |
|
"loss": 0.1796, |
|
"step": 344000 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"eval_accuracy": 0.9397893106032117, |
|
"eval_f1": 0.8716993505489452, |
|
"eval_loss": 0.17165547609329224, |
|
"eval_precision": 0.8748905892975603, |
|
"eval_recall": 0.8685313078269996, |
|
"eval_runtime": 547.1787, |
|
"eval_samples_per_second": 30.979, |
|
"eval_steps_per_second": 3.873, |
|
"step": 344000 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 1.2754867749031025e-05, |
|
"loss": 0.1819, |
|
"step": 344400 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 1.2740400829922539e-05, |
|
"loss": 0.1804, |
|
"step": 344800 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 1.2725927707026387e-05, |
|
"loss": 0.1793, |
|
"step": 345200 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 1.2711448413107182e-05, |
|
"loss": 0.1743, |
|
"step": 345600 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"eval_accuracy": 0.9396739594245196, |
|
"eval_f1": 0.8712117684995025, |
|
"eval_loss": 0.1720981001853943, |
|
"eval_precision": 0.8760093282126417, |
|
"eval_recall": 0.8664664712732187, |
|
"eval_runtime": 550.0817, |
|
"eval_samples_per_second": 30.815, |
|
"eval_steps_per_second": 3.852, |
|
"step": 345600 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 1.2696962980943513e-05, |
|
"loss": 0.1828, |
|
"step": 346000 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 1.268247144332786e-05, |
|
"loss": 0.1784, |
|
"step": 346400 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 1.2667973833066532e-05, |
|
"loss": 0.1783, |
|
"step": 346800 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 1.2653470182979575e-05, |
|
"loss": 0.1792, |
|
"step": 347200 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"eval_accuracy": 0.9398316998168449, |
|
"eval_f1": 0.8713998637288572, |
|
"eval_loss": 0.17189179360866547, |
|
"eval_precision": 0.8772452644147352, |
|
"eval_recall": 0.8656318474170392, |
|
"eval_runtime": 561.5837, |
|
"eval_samples_per_second": 30.184, |
|
"eval_steps_per_second": 3.773, |
|
"step": 347200 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 1.2638960525900711e-05, |
|
"loss": 0.1786, |
|
"step": 347600 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 1.2624444894677278e-05, |
|
"loss": 0.178, |
|
"step": 348000 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 1.2609923322170102e-05, |
|
"loss": 0.1785, |
|
"step": 348400 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"learning_rate": 1.2595395841253497e-05, |
|
"loss": 0.1816, |
|
"step": 348800 |
|
}, |
|
{ |
|
"epoch": 0.83, |
|
"eval_accuracy": 0.9400790952716332, |
|
"eval_f1": 0.8719731413674832, |
|
"eval_loss": 0.17169509828090668, |
|
"eval_precision": 0.8785204913514486, |
|
"eval_recall": 0.8655226603235253, |
|
"eval_runtime": 553.0508, |
|
"eval_samples_per_second": 30.65, |
|
"eval_steps_per_second": 3.831, |
|
"step": 348800 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 1.2580862484815126e-05, |
|
"loss": 0.179, |
|
"step": 349200 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 1.2566323285755972e-05, |
|
"loss": 0.1794, |
|
"step": 349600 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 1.255177827699023e-05, |
|
"loss": 0.1824, |
|
"step": 350000 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 1.2537227491445255e-05, |
|
"loss": 0.1821, |
|
"step": 350400 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"eval_accuracy": 0.9397842464051227, |
|
"eval_f1": 0.8716801579070432, |
|
"eval_loss": 0.17227433621883392, |
|
"eval_precision": 0.8749401026548347, |
|
"eval_recall": 0.8684444154803289, |
|
"eval_runtime": 547.0914, |
|
"eval_samples_per_second": 30.984, |
|
"eval_steps_per_second": 3.873, |
|
"step": 350400 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 1.2522670962061481e-05, |
|
"loss": 0.1816, |
|
"step": 350800 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 1.2508108721792336e-05, |
|
"loss": 0.178, |
|
"step": 351200 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 1.2493540803604185e-05, |
|
"loss": 0.1778, |
|
"step": 351600 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 1.2478967240476245e-05, |
|
"loss": 0.1803, |
|
"step": 352000 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"eval_accuracy": 0.9398776527254294, |
|
"eval_f1": 0.8714233910656006, |
|
"eval_loss": 0.17196086049079895, |
|
"eval_precision": 0.8774644728171191, |
|
"eval_recall": 0.8654649226458033, |
|
"eval_runtime": 579.9665, |
|
"eval_samples_per_second": 29.228, |
|
"eval_steps_per_second": 3.654, |
|
"step": 352000 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 1.2464388065400513e-05, |
|
"loss": 0.1805, |
|
"step": 352400 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 1.2449803311381684e-05, |
|
"loss": 0.178, |
|
"step": 352800 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 1.2435213011437086e-05, |
|
"loss": 0.1796, |
|
"step": 353200 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 1.2420617198596611e-05, |
|
"loss": 0.1834, |
|
"step": 353600 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"eval_accuracy": 0.9399898153349545, |
|
"eval_f1": 0.8719287157692764, |
|
"eval_loss": 0.17157338559627533, |
|
"eval_precision": 0.876770731459196, |
|
"eval_recall": 0.867139886959917, |
|
"eval_runtime": 567.3199, |
|
"eval_samples_per_second": 29.879, |
|
"eval_steps_per_second": 3.735, |
|
"step": 353600 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 1.240601590590262e-05, |
|
"loss": 0.1786, |
|
"step": 354000 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 1.2391409166409883e-05, |
|
"loss": 0.1783, |
|
"step": 354400 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 1.23767970131855e-05, |
|
"loss": 0.1765, |
|
"step": 354800 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 1.2362179479308829e-05, |
|
"loss": 0.1795, |
|
"step": 355200 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"eval_accuracy": 0.9402058877867485, |
|
"eval_f1": 0.8723043032769202, |
|
"eval_loss": 0.17043296992778778, |
|
"eval_precision": 0.8779126422991831, |
|
"eval_recall": 0.8667671645255135, |
|
"eval_runtime": 568.5227, |
|
"eval_samples_per_second": 29.816, |
|
"eval_steps_per_second": 3.727, |
|
"step": 355200 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 1.2347556597871404e-05, |
|
"loss": 0.1826, |
|
"step": 355600 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 1.2332928401976873e-05, |
|
"loss": 0.1817, |
|
"step": 356000 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 1.231829492474091e-05, |
|
"loss": 0.181, |
|
"step": 356400 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 1.230365619929114e-05, |
|
"loss": 0.1776, |
|
"step": 356800 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"eval_accuracy": 0.9400903490451642, |
|
"eval_f1": 0.8717324992461586, |
|
"eval_loss": 0.17042849957942963, |
|
"eval_precision": 0.8818361545570464, |
|
"eval_recall": 0.8618577469386168, |
|
"eval_runtime": 560.8606, |
|
"eval_samples_per_second": 30.223, |
|
"eval_steps_per_second": 3.778, |
|
"step": 356800 |
|
}, |
|
{ |
|
"epoch": 0.85, |
|
"learning_rate": 1.228901225876708e-05, |
|
"loss": 0.1818, |
|
"step": 357200 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 1.2274363136320042e-05, |
|
"loss": 0.1798, |
|
"step": 357600 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 1.2259708865113083e-05, |
|
"loss": 0.1833, |
|
"step": 358000 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 1.2245049478320899e-05, |
|
"loss": 0.1774, |
|
"step": 358400 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"eval_accuracy": 0.9403002319215162, |
|
"eval_f1": 0.8727868090715237, |
|
"eval_loss": 0.1701146960258484, |
|
"eval_precision": 0.8780376894786384, |
|
"eval_recall": 0.8675983584206401, |
|
"eval_runtime": 553.3462, |
|
"eval_samples_per_second": 30.634, |
|
"eval_steps_per_second": 3.829, |
|
"step": 358400 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 1.2230385009129776e-05, |
|
"loss": 0.1787, |
|
"step": 358800 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 1.221571549073751e-05, |
|
"loss": 0.1829, |
|
"step": 359200 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 1.2201040956353313e-05, |
|
"loss": 0.1781, |
|
"step": 359600 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 1.2186361439197773e-05, |
|
"loss": 0.1803, |
|
"step": 360000 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"eval_accuracy": 0.9401349890135036, |
|
"eval_f1": 0.8720942584265103, |
|
"eval_loss": 0.17127087712287903, |
|
"eval_precision": 0.8783684106146904, |
|
"eval_recall": 0.8659091026021399, |
|
"eval_runtime": 573.0706, |
|
"eval_samples_per_second": 29.579, |
|
"eval_steps_per_second": 3.698, |
|
"step": 360000 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 1.2171676972502737e-05, |
|
"loss": 0.1785, |
|
"step": 360400 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 1.2156987589511275e-05, |
|
"loss": 0.18, |
|
"step": 360800 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 1.214229332347757e-05, |
|
"loss": 0.179, |
|
"step": 361200 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"learning_rate": 1.212759420766687e-05, |
|
"loss": 0.1771, |
|
"step": 361600 |
|
}, |
|
{ |
|
"epoch": 0.86, |
|
"eval_accuracy": 0.9400547120956496, |
|
"eval_f1": 0.8719494827941502, |
|
"eval_loss": 0.172181636095047, |
|
"eval_precision": 0.8767911042161105, |
|
"eval_recall": 0.8671610383864091, |
|
"eval_runtime": 565.297, |
|
"eval_samples_per_second": 29.986, |
|
"eval_steps_per_second": 3.748, |
|
"step": 361600 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 1.2112890275355402e-05, |
|
"loss": 0.179, |
|
"step": 362000 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 1.209818155983029e-05, |
|
"loss": 0.1812, |
|
"step": 362400 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 1.2083468094389499e-05, |
|
"loss": 0.1748, |
|
"step": 362800 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 1.2068749912341726e-05, |
|
"loss": 0.179, |
|
"step": 363200 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"eval_accuracy": 0.9399960049103965, |
|
"eval_f1": 0.8720186269910609, |
|
"eval_loss": 0.17170900106430054, |
|
"eval_precision": 0.8753633406951947, |
|
"eval_recall": 0.8686993759185865, |
|
"eval_runtime": 592.7422, |
|
"eval_samples_per_second": 28.598, |
|
"eval_steps_per_second": 3.575, |
|
"step": 363200 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 1.2054027047006372e-05, |
|
"loss": 0.1836, |
|
"step": 363600 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 1.2039299531713415e-05, |
|
"loss": 0.1825, |
|
"step": 364000 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 1.2024567399803374e-05, |
|
"loss": 0.1837, |
|
"step": 364400 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 1.2009830684627226e-05, |
|
"loss": 0.1813, |
|
"step": 364800 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"eval_accuracy": 0.9399601803979897, |
|
"eval_f1": 0.8717462916578087, |
|
"eval_loss": 0.1712733656167984, |
|
"eval_precision": 0.8774085651512747, |
|
"eval_recall": 0.8661566314581165, |
|
"eval_runtime": 590.2992, |
|
"eval_samples_per_second": 28.716, |
|
"eval_steps_per_second": 3.59, |
|
"step": 364800 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 1.1995089419546298e-05, |
|
"loss": 0.1799, |
|
"step": 365200 |
|
}, |
|
{ |
|
"epoch": 0.87, |
|
"learning_rate": 1.1980343637932243e-05, |
|
"loss": 0.1788, |
|
"step": 365600 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 1.1965593373166926e-05, |
|
"loss": 0.1829, |
|
"step": 366000 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 1.1950838658642365e-05, |
|
"loss": 0.1816, |
|
"step": 366400 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"eval_accuracy": 0.939879153228567, |
|
"eval_f1": 0.8720266197991047, |
|
"eval_loss": 0.17026934027671814, |
|
"eval_precision": 0.8724347768867425, |
|
"eval_recall": 0.8716188444346881, |
|
"eval_runtime": 588.9483, |
|
"eval_samples_per_second": 28.782, |
|
"eval_steps_per_second": 3.598, |
|
"step": 366400 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 1.1936079527760649e-05, |
|
"loss": 0.1788, |
|
"step": 366800 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 1.1921316013933866e-05, |
|
"loss": 0.1804, |
|
"step": 367200 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 1.190654815058403e-05, |
|
"loss": 0.1787, |
|
"step": 367600 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 1.1891775971142993e-05, |
|
"loss": 0.1817, |
|
"step": 368000 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"eval_accuracy": 0.9401548706800749, |
|
"eval_f1": 0.8722032586654064, |
|
"eval_loss": 0.17024698853492737, |
|
"eval_precision": 0.8772418610648981, |
|
"eval_recall": 0.867222206025184, |
|
"eval_runtime": 577.9987, |
|
"eval_samples_per_second": 29.327, |
|
"eval_steps_per_second": 3.666, |
|
"step": 368000 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 1.1876999509052385e-05, |
|
"loss": 0.1803, |
|
"step": 368400 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 1.1862218797763532e-05, |
|
"loss": 0.1797, |
|
"step": 368800 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 1.184743387073737e-05, |
|
"loss": 0.1795, |
|
"step": 369200 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 1.1832644761444393e-05, |
|
"loss": 0.1769, |
|
"step": 369600 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"eval_accuracy": 0.9402340222205758, |
|
"eval_f1": 0.8725280843517379, |
|
"eval_loss": 0.16995348036289215, |
|
"eval_precision": 0.8769948751474103, |
|
"eval_recall": 0.8681065643166288, |
|
"eval_runtime": 579.6787, |
|
"eval_samples_per_second": 29.242, |
|
"eval_steps_per_second": 3.655, |
|
"step": 369600 |
|
}, |
|
{ |
|
"epoch": 0.88, |
|
"learning_rate": 1.1817851503364543e-05, |
|
"loss": 0.173, |
|
"step": 370000 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 1.1803054129987178e-05, |
|
"loss": 0.1792, |
|
"step": 370400 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 1.1788252674810958e-05, |
|
"loss": 0.1767, |
|
"step": 370800 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"learning_rate": 1.177344717134378e-05, |
|
"loss": 0.1796, |
|
"step": 371200 |
|
}, |
|
{ |
|
"epoch": 0.89, |
|
"eval_accuracy": 0.9403489982734836, |
|
"eval_f1": 0.872858618136751, |
|
"eval_loss": 0.17050202190876007, |
|
"eval_precision": 0.8752808058697337, |
|
"eval_recall": 0.8704497993758614, |
|
"eval_runtime": 584.1129, |
|
"eval_samples_per_second": 29.02, |
|
"eval_steps_per_second": 3.628, |
|
"step": 371200 |
|
} |
|
], |
|
"max_steps": 836196, |
|
"num_train_epochs": 2, |
|
"total_flos": 7.760853878812508e+17, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|