{
  "best_metric": 0.8370913190529876,
  "best_model_checkpoint": "vit-large-patch32-384-finetuned-galaxy10-decals/checkpoint-843",
  "epoch": 29.76,
  "eval_steps": 500,
  "global_step": 930,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.32,
      "learning_rate": 1.0752688172043012e-05,
      "loss": 2.2838,
      "step": 10
    },
    {
      "epoch": 0.64,
      "learning_rate": 2.1505376344086024e-05,
      "loss": 1.8298,
      "step": 20
    },
    {
      "epoch": 0.96,
      "learning_rate": 3.2258064516129034e-05,
      "loss": 1.3342,
      "step": 30
    },
    {
      "epoch": 0.99,
      "eval_accuracy": 0.6313416009019166,
      "eval_f1": 0.6051741872345295,
      "eval_loss": 1.0491372346878052,
      "eval_precision": 0.6077378601373529,
      "eval_recall": 0.6313416009019166,
      "eval_runtime": 21.763,
      "eval_samples_per_second": 81.515,
      "eval_steps_per_second": 0.643,
      "step": 31
    },
    {
      "epoch": 1.28,
      "learning_rate": 4.301075268817205e-05,
      "loss": 1.0557,
      "step": 40
    },
    {
      "epoch": 1.6,
      "learning_rate": 5.3763440860215054e-05,
      "loss": 0.8471,
      "step": 50
    },
    {
      "epoch": 1.92,
      "learning_rate": 6.451612903225807e-05,
      "loss": 0.7979,
      "step": 60
    },
    {
      "epoch": 1.98,
      "eval_accuracy": 0.7671927846674182,
      "eval_f1": 0.7652430186918779,
      "eval_loss": 0.6901012659072876,
      "eval_precision": 0.7716921452381017,
      "eval_recall": 0.7671927846674182,
      "eval_runtime": 21.1847,
      "eval_samples_per_second": 83.74,
      "eval_steps_per_second": 0.661,
      "step": 62
    },
    {
      "epoch": 2.24,
      "learning_rate": 7.526881720430108e-05,
      "loss": 0.741,
      "step": 70
    },
    {
      "epoch": 2.56,
      "learning_rate": 8.60215053763441e-05,
      "loss": 0.6928,
      "step": 80
    },
    {
      "epoch": 2.88,
      "learning_rate": 9.677419354838711e-05,
      "loss": 0.7197,
      "step": 90
    },
    {
      "epoch": 2.98,
      "eval_accuracy": 0.7784667418263811,
      "eval_f1": 0.7704940588846539,
      "eval_loss": 0.6200318336486816,
      "eval_precision": 0.7715960207478135,
      "eval_recall": 0.7784667418263811,
      "eval_runtime": 21.4187,
      "eval_samples_per_second": 82.825,
      "eval_steps_per_second": 0.654,
      "step": 93
    },
    {
      "epoch": 3.2,
      "learning_rate": 9.916367980884111e-05,
      "loss": 0.691,
      "step": 100
    },
    {
      "epoch": 3.52,
      "learning_rate": 9.79689366786141e-05,
      "loss": 0.6486,
      "step": 110
    },
    {
      "epoch": 3.84,
      "learning_rate": 9.677419354838711e-05,
      "loss": 0.6321,
      "step": 120
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8060879368658399,
      "eval_f1": 0.7957042518679185,
      "eval_loss": 0.5692886710166931,
      "eval_precision": 0.8035246330156649,
      "eval_recall": 0.8060879368658399,
      "eval_runtime": 21.6457,
      "eval_samples_per_second": 81.956,
      "eval_steps_per_second": 0.647,
      "step": 125
    },
    {
      "epoch": 4.16,
      "learning_rate": 9.55794504181601e-05,
      "loss": 0.5927,
      "step": 130
    },
    {
      "epoch": 4.48,
      "learning_rate": 9.438470728793309e-05,
      "loss": 0.5869,
      "step": 140
    },
    {
      "epoch": 4.8,
      "learning_rate": 9.31899641577061e-05,
      "loss": 0.5768,
      "step": 150
    },
    {
      "epoch": 4.99,
      "eval_accuracy": 0.8111612175873731,
      "eval_f1": 0.8134180707239236,
      "eval_loss": 0.5500875115394592,
      "eval_precision": 0.8213061319477135,
      "eval_recall": 0.8111612175873731,
      "eval_runtime": 21.6943,
      "eval_samples_per_second": 81.773,
      "eval_steps_per_second": 0.645,
      "step": 156
    },
    {
      "epoch": 5.12,
      "learning_rate": 9.199522102747909e-05,
      "loss": 0.5286,
      "step": 160
    },
    {
      "epoch": 5.44,
      "learning_rate": 9.080047789725208e-05,
      "loss": 0.5513,
      "step": 170
    },
    {
      "epoch": 5.76,
      "learning_rate": 8.960573476702509e-05,
      "loss": 0.5173,
      "step": 180
    },
    {
      "epoch": 5.98,
      "eval_accuracy": 0.8213077790304397,
      "eval_f1": 0.8201874264260287,
      "eval_loss": 0.5164684057235718,
      "eval_precision": 0.8305619852770596,
      "eval_recall": 0.8213077790304397,
      "eval_runtime": 21.6677,
      "eval_samples_per_second": 81.873,
      "eval_steps_per_second": 0.646,
      "step": 187
    },
    {
      "epoch": 6.08,
      "learning_rate": 8.84109916367981e-05,
      "loss": 0.5143,
      "step": 190
    },
    {
      "epoch": 6.4,
      "learning_rate": 8.72162485065711e-05,
      "loss": 0.498,
      "step": 200
    },
    {
      "epoch": 6.72,
      "learning_rate": 8.60215053763441e-05,
      "loss": 0.4781,
      "step": 210
    },
    {
      "epoch": 6.98,
      "eval_accuracy": 0.8105975197294251,
      "eval_f1": 0.8089843496723177,
      "eval_loss": 0.5220049023628235,
      "eval_precision": 0.8161270722410587,
      "eval_recall": 0.8105975197294251,
      "eval_runtime": 21.7247,
      "eval_samples_per_second": 81.658,
      "eval_steps_per_second": 0.644,
      "step": 218
    },
    {
      "epoch": 7.04,
      "learning_rate": 8.482676224611709e-05,
      "loss": 0.4999,
      "step": 220
    },
    {
      "epoch": 7.36,
      "learning_rate": 8.363201911589009e-05,
      "loss": 0.4604,
      "step": 230
    },
    {
      "epoch": 7.68,
      "learning_rate": 8.243727598566309e-05,
      "loss": 0.4807,
      "step": 240
    },
    {
      "epoch": 8.0,
      "learning_rate": 8.124253285543608e-05,
      "loss": 0.451,
      "step": 250
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.818489289740699,
      "eval_f1": 0.8152695783103332,
      "eval_loss": 0.5133233666419983,
      "eval_precision": 0.8227391031387269,
      "eval_recall": 0.818489289740699,
      "eval_runtime": 21.9192,
      "eval_samples_per_second": 80.934,
      "eval_steps_per_second": 0.639,
      "step": 250
    },
    {
      "epoch": 8.32,
      "learning_rate": 8.004778972520908e-05,
      "loss": 0.4252,
      "step": 260
    },
    {
      "epoch": 8.64,
      "learning_rate": 7.885304659498209e-05,
      "loss": 0.4359,
      "step": 270
    },
    {
      "epoch": 8.96,
      "learning_rate": 7.765830346475508e-05,
      "loss": 0.4373,
      "step": 280
    },
    {
      "epoch": 8.99,
      "eval_accuracy": 0.8303269447576099,
      "eval_f1": 0.8288008582358363,
      "eval_loss": 0.511769711971283,
      "eval_precision": 0.8325406030306683,
      "eval_recall": 0.8303269447576099,
      "eval_runtime": 21.4401,
      "eval_samples_per_second": 82.742,
      "eval_steps_per_second": 0.653,
      "step": 281
    },
    {
      "epoch": 9.28,
      "learning_rate": 7.646356033452809e-05,
      "loss": 0.4023,
      "step": 290
    },
    {
      "epoch": 9.6,
      "learning_rate": 7.526881720430108e-05,
      "loss": 0.396,
      "step": 300
    },
    {
      "epoch": 9.92,
      "learning_rate": 7.407407407407407e-05,
      "loss": 0.3826,
      "step": 310
    },
    {
      "epoch": 9.98,
      "eval_accuracy": 0.8258173618940248,
      "eval_f1": 0.8243103846150182,
      "eval_loss": 0.5279704928398132,
      "eval_precision": 0.8268898344898333,
      "eval_recall": 0.8258173618940248,
      "eval_runtime": 21.4879,
      "eval_samples_per_second": 82.558,
      "eval_steps_per_second": 0.652,
      "step": 312
    },
    {
      "epoch": 10.24,
      "learning_rate": 7.287933094384708e-05,
      "loss": 0.363,
      "step": 320
    },
    {
      "epoch": 10.56,
      "learning_rate": 7.168458781362007e-05,
      "loss": 0.376,
      "step": 330
    },
    {
      "epoch": 10.88,
      "learning_rate": 7.048984468339306e-05,
      "loss": 0.378,
      "step": 340
    },
    {
      "epoch": 10.98,
      "eval_accuracy": 0.8173618940248027,
      "eval_f1": 0.8141777832297877,
      "eval_loss": 0.5476882457733154,
      "eval_precision": 0.8156045012406116,
      "eval_recall": 0.8173618940248027,
      "eval_runtime": 21.5324,
      "eval_samples_per_second": 82.387,
      "eval_steps_per_second": 0.65,
      "step": 343
    },
    {
      "epoch": 11.2,
      "learning_rate": 6.929510155316607e-05,
      "loss": 0.3416,
      "step": 350
    },
    {
      "epoch": 11.52,
      "learning_rate": 6.810035842293908e-05,
      "loss": 0.337,
      "step": 360
    },
    {
      "epoch": 11.84,
      "learning_rate": 6.690561529271207e-05,
      "loss": 0.3509,
      "step": 370
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.8280721533258174,
      "eval_f1": 0.8244469418718917,
      "eval_loss": 0.5437383055686951,
      "eval_precision": 0.8291545990959505,
      "eval_recall": 0.8280721533258174,
      "eval_runtime": 21.4443,
      "eval_samples_per_second": 82.726,
      "eval_steps_per_second": 0.653,
      "step": 375
    },
    {
      "epoch": 12.16,
      "learning_rate": 6.571087216248507e-05,
      "loss": 0.3258,
      "step": 380
    },
    {
      "epoch": 12.48,
      "learning_rate": 6.451612903225807e-05,
      "loss": 0.3173,
      "step": 390
    },
    {
      "epoch": 12.8,
      "learning_rate": 6.332138590203107e-05,
      "loss": 0.3358,
      "step": 400
    },
    {
      "epoch": 12.99,
      "eval_accuracy": 0.8258173618940248,
      "eval_f1": 0.8240960752396663,
      "eval_loss": 0.5626731514930725,
      "eval_precision": 0.8267626478179836,
      "eval_recall": 0.8258173618940248,
      "eval_runtime": 21.2051,
      "eval_samples_per_second": 83.659,
      "eval_steps_per_second": 0.66,
      "step": 406
    },
    {
      "epoch": 13.12,
      "learning_rate": 6.212664277180407e-05,
      "loss": 0.3247,
      "step": 410
    },
    {
      "epoch": 13.44,
      "learning_rate": 6.093189964157706e-05,
      "loss": 0.3109,
      "step": 420
    },
    {
      "epoch": 13.76,
      "learning_rate": 5.9737156511350064e-05,
      "loss": 0.3027,
      "step": 430
    },
    {
      "epoch": 13.98,
      "eval_accuracy": 0.8325817361894025,
      "eval_f1": 0.8309989322501777,
      "eval_loss": 0.5557559728622437,
      "eval_precision": 0.8341182995121916,
      "eval_recall": 0.8325817361894025,
      "eval_runtime": 21.7522,
      "eval_samples_per_second": 81.555,
      "eval_steps_per_second": 0.644,
      "step": 437
    },
    {
      "epoch": 14.08,
      "learning_rate": 5.8542413381123063e-05,
      "loss": 0.308,
      "step": 440
    },
    {
      "epoch": 14.4,
      "learning_rate": 5.7347670250896056e-05,
      "loss": 0.2783,
      "step": 450
    },
    {
      "epoch": 14.72,
      "learning_rate": 5.615292712066906e-05,
      "loss": 0.3027,
      "step": 460
    },
    {
      "epoch": 14.98,
      "eval_accuracy": 0.8325817361894025,
      "eval_f1": 0.8294913013624391,
      "eval_loss": 0.5702593922615051,
      "eval_precision": 0.835774810800051,
      "eval_recall": 0.8325817361894025,
      "eval_runtime": 21.7146,
      "eval_samples_per_second": 81.696,
      "eval_steps_per_second": 0.645,
      "step": 468
    },
    {
      "epoch": 15.04,
      "learning_rate": 5.495818399044206e-05,
      "loss": 0.2816,
      "step": 470
    },
    {
      "epoch": 15.36,
      "learning_rate": 5.3763440860215054e-05,
      "loss": 0.2662,
      "step": 480
    },
    {
      "epoch": 15.68,
      "learning_rate": 5.256869772998806e-05,
      "loss": 0.2708,
      "step": 490
    },
    {
      "epoch": 16.0,
      "learning_rate": 5.137395459976105e-05,
      "loss": 0.2786,
      "step": 500
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.8280721533258174,
      "eval_f1": 0.8248780707496759,
      "eval_loss": 0.5790648460388184,
      "eval_precision": 0.8268104951659752,
      "eval_recall": 0.8280721533258174,
      "eval_runtime": 21.3703,
      "eval_samples_per_second": 83.012,
      "eval_steps_per_second": 0.655,
      "step": 500
    },
    {
      "epoch": 16.32,
      "learning_rate": 5.017921146953405e-05,
      "loss": 0.2498,
      "step": 510
    },
    {
      "epoch": 16.64,
      "learning_rate": 4.898446833930705e-05,
      "loss": 0.2711,
      "step": 520
    },
    {
      "epoch": 16.96,
      "learning_rate": 4.778972520908005e-05,
      "loss": 0.2379,
      "step": 530
    },
    {
      "epoch": 16.99,
      "eval_accuracy": 0.8275084554678692,
      "eval_f1": 0.825106664588863,
      "eval_loss": 0.5864401459693909,
      "eval_precision": 0.8264428043886305,
      "eval_recall": 0.8275084554678692,
      "eval_runtime": 21.5135,
      "eval_samples_per_second": 82.46,
      "eval_steps_per_second": 0.651,
      "step": 531
    },
    {
      "epoch": 17.28,
      "learning_rate": 4.659498207885305e-05,
      "loss": 0.2566,
      "step": 540
    },
    {
      "epoch": 17.6,
      "learning_rate": 4.540023894862604e-05,
      "loss": 0.2602,
      "step": 550
    },
    {
      "epoch": 17.92,
      "learning_rate": 4.420549581839905e-05,
      "loss": 0.2426,
      "step": 560
    },
    {
      "epoch": 17.98,
      "eval_accuracy": 0.8320180383314544,
      "eval_f1": 0.8305311136335276,
      "eval_loss": 0.5984331369400024,
      "eval_precision": 0.8319552266233834,
      "eval_recall": 0.8320180383314544,
      "eval_runtime": 21.366,
      "eval_samples_per_second": 83.029,
      "eval_steps_per_second": 0.655,
      "step": 562
    },
    {
      "epoch": 18.24,
      "learning_rate": 4.301075268817205e-05,
      "loss": 0.241,
      "step": 570
    },
    {
      "epoch": 18.56,
      "learning_rate": 4.1816009557945046e-05,
      "loss": 0.2346,
      "step": 580
    },
    {
      "epoch": 18.88,
      "learning_rate": 4.062126642771804e-05,
      "loss": 0.2325,
      "step": 590
    },
    {
      "epoch": 18.98,
      "eval_accuracy": 0.826381059751973,
      "eval_f1": 0.8251810645598993,
      "eval_loss": 0.6216564774513245,
      "eval_precision": 0.8280960122554618,
      "eval_recall": 0.826381059751973,
      "eval_runtime": 21.6911,
      "eval_samples_per_second": 81.785,
      "eval_steps_per_second": 0.645,
      "step": 593
    },
    {
      "epoch": 19.2,
      "learning_rate": 3.9426523297491045e-05,
      "loss": 0.2296,
      "step": 600
    },
    {
      "epoch": 19.52,
      "learning_rate": 3.8231780167264044e-05,
      "loss": 0.2296,
      "step": 610
    },
    {
      "epoch": 19.84,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 0.2208,
      "step": 620
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.8258173618940248,
      "eval_f1": 0.8235775028778175,
      "eval_loss": 0.6165610551834106,
      "eval_precision": 0.8229737232245232,
      "eval_recall": 0.8258173618940248,
      "eval_runtime": 22.0389,
      "eval_samples_per_second": 80.494,
      "eval_steps_per_second": 0.635,
      "step": 625
    },
    {
      "epoch": 20.16,
      "learning_rate": 3.5842293906810036e-05,
      "loss": 0.2187,
      "step": 630
    },
    {
      "epoch": 20.48,
      "learning_rate": 3.4647550776583035e-05,
      "loss": 0.2067,
      "step": 640
    },
    {
      "epoch": 20.8,
      "learning_rate": 3.3452807646356034e-05,
      "loss": 0.2196,
      "step": 650
    },
    {
      "epoch": 20.99,
      "eval_accuracy": 0.8286358511837655,
      "eval_f1": 0.825876975856749,
      "eval_loss": 0.6308055520057678,
      "eval_precision": 0.8279920279498516,
      "eval_recall": 0.8286358511837655,
      "eval_runtime": 21.4064,
      "eval_samples_per_second": 82.872,
      "eval_steps_per_second": 0.654,
      "step": 656
    },
    {
      "epoch": 21.12,
      "learning_rate": 3.2258064516129034e-05,
      "loss": 0.2034,
      "step": 660
    },
    {
      "epoch": 21.44,
      "learning_rate": 3.106332138590203e-05,
      "loss": 0.2098,
      "step": 670
    },
    {
      "epoch": 21.76,
      "learning_rate": 2.9868578255675032e-05,
      "loss": 0.2077,
      "step": 680
    },
    {
      "epoch": 21.98,
      "eval_accuracy": 0.8325817361894025,
      "eval_f1": 0.8305408181674179,
      "eval_loss": 0.624207079410553,
      "eval_precision": 0.8307265402722898,
      "eval_recall": 0.8325817361894025,
      "eval_runtime": 21.4301,
      "eval_samples_per_second": 82.781,
      "eval_steps_per_second": 0.653,
      "step": 687
    },
    {
      "epoch": 22.08,
      "learning_rate": 2.8673835125448028e-05,
      "loss": 0.2177,
      "step": 690
    },
    {
      "epoch": 22.4,
      "learning_rate": 2.747909199522103e-05,
      "loss": 0.2023,
      "step": 700
    },
    {
      "epoch": 22.72,
      "learning_rate": 2.628434886499403e-05,
      "loss": 0.2048,
      "step": 710
    },
    {
      "epoch": 22.98,
      "eval_accuracy": 0.8275084554678692,
      "eval_f1": 0.8263134782090512,
      "eval_loss": 0.6801280379295349,
      "eval_precision": 0.8303435791241508,
      "eval_recall": 0.8275084554678692,
      "eval_runtime": 21.9211,
      "eval_samples_per_second": 80.927,
      "eval_steps_per_second": 0.639,
      "step": 718
    },
    {
      "epoch": 23.04,
      "learning_rate": 2.5089605734767026e-05,
      "loss": 0.2072,
      "step": 720
    },
    {
      "epoch": 23.36,
      "learning_rate": 2.3894862604540025e-05,
      "loss": 0.2009,
      "step": 730
    },
    {
      "epoch": 23.68,
      "learning_rate": 2.270011947431302e-05,
      "loss": 0.2083,
      "step": 740
    },
    {
      "epoch": 24.0,
      "learning_rate": 2.1505376344086024e-05,
      "loss": 0.1886,
      "step": 750
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.826381059751973,
      "eval_f1": 0.8255542142311617,
      "eval_loss": 0.6615379452705383,
      "eval_precision": 0.8279561568090393,
      "eval_recall": 0.826381059751973,
      "eval_runtime": 21.4625,
      "eval_samples_per_second": 82.656,
      "eval_steps_per_second": 0.652,
      "step": 750
    },
    {
      "epoch": 24.32,
      "learning_rate": 2.031063321385902e-05,
      "loss": 0.2118,
      "step": 760
    },
    {
      "epoch": 24.64,
      "learning_rate": 1.9115890083632022e-05,
      "loss": 0.1872,
      "step": 770
    },
    {
      "epoch": 24.96,
      "learning_rate": 1.7921146953405018e-05,
      "loss": 0.2007,
      "step": 780
    },
    {
      "epoch": 24.99,
      "eval_accuracy": 0.8275084554678692,
      "eval_f1": 0.8267436914020984,
      "eval_loss": 0.6847040057182312,
      "eval_precision": 0.8279552479088658,
      "eval_recall": 0.8275084554678692,
      "eval_runtime": 21.4978,
      "eval_samples_per_second": 82.52,
      "eval_steps_per_second": 0.651,
      "step": 781
    },
    {
      "epoch": 25.28,
      "learning_rate": 1.6726403823178017e-05,
      "loss": 0.1844,
      "step": 790
    },
    {
      "epoch": 25.6,
      "learning_rate": 1.5531660692951016e-05,
      "loss": 0.1852,
      "step": 800
    },
    {
      "epoch": 25.92,
      "learning_rate": 1.4336917562724014e-05,
      "loss": 0.1815,
      "step": 810
    },
    {
      "epoch": 25.98,
      "eval_accuracy": 0.8325817361894025,
      "eval_f1": 0.830537122348923,
      "eval_loss": 0.6668907999992371,
      "eval_precision": 0.8311252959744954,
      "eval_recall": 0.8325817361894025,
      "eval_runtime": 21.8156,
      "eval_samples_per_second": 81.318,
      "eval_steps_per_second": 0.642,
      "step": 812
    },
    {
      "epoch": 26.24,
      "learning_rate": 1.3142174432497015e-05,
      "loss": 0.197,
      "step": 820
    },
    {
      "epoch": 26.56,
      "learning_rate": 1.1947431302270013e-05,
      "loss": 0.1913,
      "step": 830
    },
    {
      "epoch": 26.88,
      "learning_rate": 1.0752688172043012e-05,
      "loss": 0.1958,
      "step": 840
    },
    {
      "epoch": 26.98,
      "eval_accuracy": 0.8370913190529876,
      "eval_f1": 0.8357334072148467,
      "eval_loss": 0.6766165494918823,
      "eval_precision": 0.8373589976039267,
      "eval_recall": 0.8370913190529876,
      "eval_runtime": 21.0261,
      "eval_samples_per_second": 84.371,
      "eval_steps_per_second": 0.666,
      "step": 843
    },
    {
      "epoch": 27.2,
      "learning_rate": 9.557945041816011e-06,
      "loss": 0.1884,
      "step": 850
    },
    {
      "epoch": 27.52,
      "learning_rate": 8.363201911589009e-06,
      "loss": 0.1969,
      "step": 860
    },
    {
      "epoch": 27.84,
      "learning_rate": 7.168458781362007e-06,
      "loss": 0.1806,
      "step": 870
    },
    {
      "epoch": 28.0,
      "eval_accuracy": 0.8359639233370914,
      "eval_f1": 0.8341906674178488,
      "eval_loss": 0.6678570508956909,
      "eval_precision": 0.8353079467237093,
      "eval_recall": 0.8359639233370914,
      "eval_runtime": 21.3886,
      "eval_samples_per_second": 82.941,
      "eval_steps_per_second": 0.655,
      "step": 875
    },
    {
      "epoch": 28.16,
      "learning_rate": 5.973715651135006e-06,
      "loss": 0.1958,
      "step": 880
    },
    {
      "epoch": 28.48,
      "learning_rate": 4.7789725209080055e-06,
      "loss": 0.1751,
      "step": 890
    },
    {
      "epoch": 28.8,
      "learning_rate": 3.5842293906810035e-06,
      "loss": 0.1835,
      "step": 900
    },
    {
      "epoch": 28.99,
      "eval_accuracy": 0.8348365276211951,
      "eval_f1": 0.8327921869627218,
      "eval_loss": 0.6767047047615051,
      "eval_precision": 0.8333572524880747,
      "eval_recall": 0.8348365276211951,
      "eval_runtime": 21.4548,
      "eval_samples_per_second": 82.685,
      "eval_steps_per_second": 0.653,
      "step": 906
    },
    {
      "epoch": 29.12,
      "learning_rate": 2.3894862604540028e-06,
      "loss": 0.1893,
      "step": 910
    },
    {
      "epoch": 29.44,
      "learning_rate": 1.1947431302270014e-06,
      "loss": 0.1696,
      "step": 920
    },
    {
      "epoch": 29.76,
      "learning_rate": 0.0,
      "loss": 0.1796,
      "step": 930
    },
    {
      "epoch": 29.76,
      "eval_accuracy": 0.8342728297632469,
      "eval_f1": 0.8325855632553308,
      "eval_loss": 0.6786999702453613,
      "eval_precision": 0.8335690065497708,
      "eval_recall": 0.8342728297632469,
      "eval_runtime": 21.4373,
      "eval_samples_per_second": 82.753,
      "eval_steps_per_second": 0.653,
      "step": 930
    },
    {
      "epoch": 29.76,
      "step": 930,
      "total_flos": 3.853544865721595e+20,
      "train_loss": 0.3928444991829575,
      "train_runtime": 10565.0301,
      "train_samples_per_second": 45.325,
      "train_steps_per_second": 0.088
    }
  ],
  "logging_steps": 10,
  "max_steps": 930,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 30,
  "save_steps": 500,
  "total_flos": 3.853544865721595e+20,
  "train_batch_size": 128,
  "trial_name": null,
  "trial_params": null
}