{
"best_metric": 0.8328016996383667,
"best_model_checkpoint": "./vit-base-GTZAN\\checkpoint-550",
"epoch": 16.0,
"eval_steps": 10,
"global_step": 1712,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{ |
|
"epoch": 0.09, |
|
"learning_rate": 0.0001988317757009346, |
|
"loss": 2.3756, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.09, |
|
"eval_accuracy": 0.21164021164021163, |
|
"eval_loss": 2.286055326461792, |
|
"eval_runtime": 3.2931, |
|
"eval_samples_per_second": 57.393, |
|
"eval_steps_per_second": 7.288, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"learning_rate": 0.00019766355140186917, |
|
"loss": 2.3051, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.19, |
|
"eval_accuracy": 0.3439153439153439, |
|
"eval_loss": 2.1907191276550293, |
|
"eval_runtime": 3.5708, |
|
"eval_samples_per_second": 52.929, |
|
"eval_steps_per_second": 6.721, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"learning_rate": 0.00019649532710280373, |
|
"loss": 2.1219, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.28, |
|
"eval_accuracy": 0.31746031746031744, |
|
"eval_loss": 2.021427631378174, |
|
"eval_runtime": 3.5214, |
|
"eval_samples_per_second": 53.671, |
|
"eval_steps_per_second": 6.815, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"learning_rate": 0.00019532710280373834, |
|
"loss": 2.0542, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.37, |
|
"eval_accuracy": 0.4074074074074074, |
|
"eval_loss": 1.905888319015503, |
|
"eval_runtime": 3.4441, |
|
"eval_samples_per_second": 54.877, |
|
"eval_steps_per_second": 6.969, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"learning_rate": 0.00019415887850467292, |
|
"loss": 1.8132, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.47, |
|
"eval_accuracy": 0.3862433862433862, |
|
"eval_loss": 1.8471989631652832, |
|
"eval_runtime": 3.2687, |
|
"eval_samples_per_second": 57.821, |
|
"eval_steps_per_second": 7.342, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"learning_rate": 0.0001929906542056075, |
|
"loss": 1.8854, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.56, |
|
"eval_accuracy": 0.4603174603174603, |
|
"eval_loss": 1.6832294464111328, |
|
"eval_runtime": 3.1893, |
|
"eval_samples_per_second": 59.261, |
|
"eval_steps_per_second": 7.525, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"learning_rate": 0.00019182242990654205, |
|
"loss": 1.6981, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.65, |
|
"eval_accuracy": 0.4973544973544973, |
|
"eval_loss": 1.600830078125, |
|
"eval_runtime": 3.1721, |
|
"eval_samples_per_second": 59.583, |
|
"eval_steps_per_second": 7.566, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"learning_rate": 0.00019065420560747664, |
|
"loss": 1.5251, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.75, |
|
"eval_accuracy": 0.5026455026455027, |
|
"eval_loss": 1.468518614768982, |
|
"eval_runtime": 3.3583, |
|
"eval_samples_per_second": 56.278, |
|
"eval_steps_per_second": 7.146, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"learning_rate": 0.00018948598130841122, |
|
"loss": 1.4463, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.84, |
|
"eval_accuracy": 0.6137566137566137, |
|
"eval_loss": 1.3712642192840576, |
|
"eval_runtime": 3.2333, |
|
"eval_samples_per_second": 58.454, |
|
"eval_steps_per_second": 7.423, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"learning_rate": 0.0001883177570093458, |
|
"loss": 1.4335, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.93, |
|
"eval_accuracy": 0.4973544973544973, |
|
"eval_loss": 1.4269553422927856, |
|
"eval_runtime": 3.7842, |
|
"eval_samples_per_second": 49.944, |
|
"eval_steps_per_second": 6.342, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"learning_rate": 0.00018726635514018693, |
|
"loss": 1.1147, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 1.03, |
|
"eval_accuracy": 0.5925925925925926, |
|
"eval_loss": 1.2793240547180176, |
|
"eval_runtime": 3.3494, |
|
"eval_samples_per_second": 56.427, |
|
"eval_steps_per_second": 7.165, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"learning_rate": 0.0001860981308411215, |
|
"loss": 1.3568, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 1.12, |
|
"eval_accuracy": 0.5661375661375662, |
|
"eval_loss": 1.3360047340393066, |
|
"eval_runtime": 3.3593, |
|
"eval_samples_per_second": 56.261, |
|
"eval_steps_per_second": 7.144, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"learning_rate": 0.0001849299065420561, |
|
"loss": 1.3077, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 1.21, |
|
"eval_accuracy": 0.5079365079365079, |
|
"eval_loss": 1.452016830444336, |
|
"eval_runtime": 3.1904, |
|
"eval_samples_per_second": 59.241, |
|
"eval_steps_per_second": 7.523, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"learning_rate": 0.00018376168224299065, |
|
"loss": 1.2801, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 1.31, |
|
"eval_accuracy": 0.5661375661375662, |
|
"eval_loss": 1.2764976024627686, |
|
"eval_runtime": 3.1251, |
|
"eval_samples_per_second": 60.477, |
|
"eval_steps_per_second": 7.68, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"learning_rate": 0.00018259345794392523, |
|
"loss": 1.2894, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 1.4, |
|
"eval_accuracy": 0.6137566137566137, |
|
"eval_loss": 1.194944977760315, |
|
"eval_runtime": 3.3657, |
|
"eval_samples_per_second": 56.154, |
|
"eval_steps_per_second": 7.131, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"learning_rate": 0.0001814252336448598, |
|
"loss": 1.2657, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 1.5, |
|
"eval_accuracy": 0.6349206349206349, |
|
"eval_loss": 1.1937017440795898, |
|
"eval_runtime": 3.4004, |
|
"eval_samples_per_second": 55.582, |
|
"eval_steps_per_second": 7.058, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"learning_rate": 0.0001802570093457944, |
|
"loss": 0.8784, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 1.59, |
|
"eval_accuracy": 0.6031746031746031, |
|
"eval_loss": 1.2189592123031616, |
|
"eval_runtime": 3.069, |
|
"eval_samples_per_second": 61.583, |
|
"eval_steps_per_second": 7.82, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"learning_rate": 0.000179088785046729, |
|
"loss": 1.1575, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 1.68, |
|
"eval_accuracy": 0.6137566137566137, |
|
"eval_loss": 1.2268363237380981, |
|
"eval_runtime": 3.1612, |
|
"eval_samples_per_second": 59.788, |
|
"eval_steps_per_second": 7.592, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 1.78, |
|
"learning_rate": 0.00017792056074766356, |
|
"loss": 0.9848, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 1.78, |
|
"eval_accuracy": 0.656084656084656, |
|
"eval_loss": 1.0571767091751099, |
|
"eval_runtime": 3.0146, |
|
"eval_samples_per_second": 62.695, |
|
"eval_steps_per_second": 7.961, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"learning_rate": 0.00017675233644859814, |
|
"loss": 0.9409, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 1.87, |
|
"eval_accuracy": 0.6349206349206349, |
|
"eval_loss": 1.160926103591919, |
|
"eval_runtime": 3.1953, |
|
"eval_samples_per_second": 59.149, |
|
"eval_steps_per_second": 7.511, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"learning_rate": 0.00017558411214953272, |
|
"loss": 0.9448, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 1.96, |
|
"eval_accuracy": 0.6084656084656085, |
|
"eval_loss": 1.2327442169189453, |
|
"eval_runtime": 3.0705, |
|
"eval_samples_per_second": 61.554, |
|
"eval_steps_per_second": 7.816, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 2.06, |
|
"learning_rate": 0.0001744158878504673, |
|
"loss": 1.0819, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 2.06, |
|
"eval_accuracy": 0.582010582010582, |
|
"eval_loss": 1.169877290725708, |
|
"eval_runtime": 3.2052, |
|
"eval_samples_per_second": 58.967, |
|
"eval_steps_per_second": 7.488, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"learning_rate": 0.00017324766355140188, |
|
"loss": 0.7485, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 2.15, |
|
"eval_accuracy": 0.6507936507936508, |
|
"eval_loss": 1.1040607690811157, |
|
"eval_runtime": 3.1129, |
|
"eval_samples_per_second": 60.714, |
|
"eval_steps_per_second": 7.71, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"learning_rate": 0.00017207943925233644, |
|
"loss": 0.8934, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 2.24, |
|
"eval_accuracy": 0.5873015873015873, |
|
"eval_loss": 1.1672364473342896, |
|
"eval_runtime": 3.2331, |
|
"eval_samples_per_second": 58.458, |
|
"eval_steps_per_second": 7.423, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"learning_rate": 0.00017091121495327102, |
|
"loss": 0.8609, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 2.34, |
|
"eval_accuracy": 0.6190476190476191, |
|
"eval_loss": 1.1899827718734741, |
|
"eval_runtime": 3.138, |
|
"eval_samples_per_second": 60.23, |
|
"eval_steps_per_second": 7.648, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"learning_rate": 0.00016974299065420563, |
|
"loss": 0.7935, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 2.43, |
|
"eval_accuracy": 0.6402116402116402, |
|
"eval_loss": 1.0623040199279785, |
|
"eval_runtime": 3.1191, |
|
"eval_samples_per_second": 60.594, |
|
"eval_steps_per_second": 7.694, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"learning_rate": 0.0001685747663551402, |
|
"loss": 0.8013, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 2.52, |
|
"eval_accuracy": 0.6878306878306878, |
|
"eval_loss": 0.9872872233390808, |
|
"eval_runtime": 3.0196, |
|
"eval_samples_per_second": 62.59, |
|
"eval_steps_per_second": 7.948, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"learning_rate": 0.00016740654205607477, |
|
"loss": 0.6669, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 2.62, |
|
"eval_accuracy": 0.656084656084656, |
|
"eval_loss": 1.0078377723693848, |
|
"eval_runtime": 3.0773, |
|
"eval_samples_per_second": 61.418, |
|
"eval_steps_per_second": 7.799, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 2.71, |
|
"learning_rate": 0.00016623831775700935, |
|
"loss": 0.7847, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 2.71, |
|
"eval_accuracy": 0.6084656084656085, |
|
"eval_loss": 1.148421049118042, |
|
"eval_runtime": 3.1668, |
|
"eval_samples_per_second": 59.682, |
|
"eval_steps_per_second": 7.579, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"learning_rate": 0.00016507009345794393, |
|
"loss": 0.7222, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 2.8, |
|
"eval_accuracy": 0.6243386243386243, |
|
"eval_loss": 1.1295346021652222, |
|
"eval_runtime": 3.1049, |
|
"eval_samples_per_second": 60.871, |
|
"eval_steps_per_second": 7.73, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"learning_rate": 0.0001639018691588785, |
|
"loss": 0.7844, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 2.9, |
|
"eval_accuracy": 0.7248677248677249, |
|
"eval_loss": 0.9414218068122864, |
|
"eval_runtime": 3.1524, |
|
"eval_samples_per_second": 59.954, |
|
"eval_steps_per_second": 7.613, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 2.99, |
|
"learning_rate": 0.0001627336448598131, |
|
"loss": 0.8057, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 2.99, |
|
"eval_accuracy": 0.6666666666666666, |
|
"eval_loss": 1.050380825996399, |
|
"eval_runtime": 3.1613, |
|
"eval_samples_per_second": 59.785, |
|
"eval_steps_per_second": 7.592, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 3.08, |
|
"learning_rate": 0.00016156542056074767, |
|
"loss": 0.4843, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 3.08, |
|
"eval_accuracy": 0.6507936507936508, |
|
"eval_loss": 0.9873789548873901, |
|
"eval_runtime": 3.1677, |
|
"eval_samples_per_second": 59.664, |
|
"eval_steps_per_second": 7.576, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 3.18, |
|
"learning_rate": 0.00016039719626168226, |
|
"loss": 0.6766, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 3.18, |
|
"eval_accuracy": 0.6507936507936508, |
|
"eval_loss": 1.1496278047561646, |
|
"eval_runtime": 3.0545, |
|
"eval_samples_per_second": 61.877, |
|
"eval_steps_per_second": 7.857, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 3.27, |
|
"learning_rate": 0.00015922897196261684, |
|
"loss": 0.4818, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 3.27, |
|
"eval_accuracy": 0.6878306878306878, |
|
"eval_loss": 1.0967848300933838, |
|
"eval_runtime": 3.1038, |
|
"eval_samples_per_second": 60.892, |
|
"eval_steps_per_second": 7.732, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"learning_rate": 0.00015806074766355142, |
|
"loss": 0.5351, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"eval_accuracy": 0.6296296296296297, |
|
"eval_loss": 1.1393951177597046, |
|
"eval_runtime": 3.0544, |
|
"eval_samples_per_second": 61.877, |
|
"eval_steps_per_second": 7.857, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 3.46, |
|
"learning_rate": 0.000156892523364486, |
|
"loss": 0.5035, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 3.46, |
|
"eval_accuracy": 0.708994708994709, |
|
"eval_loss": 0.9814600944519043, |
|
"eval_runtime": 3.6264, |
|
"eval_samples_per_second": 52.118, |
|
"eval_steps_per_second": 6.618, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"learning_rate": 0.00015572429906542056, |
|
"loss": 0.4032, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"eval_accuracy": 0.6402116402116402, |
|
"eval_loss": 1.0881891250610352, |
|
"eval_runtime": 3.1861, |
|
"eval_samples_per_second": 59.32, |
|
"eval_steps_per_second": 7.533, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"learning_rate": 0.00015455607476635514, |
|
"loss": 0.639, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"eval_accuracy": 0.6084656084656085, |
|
"eval_loss": 1.2611372470855713, |
|
"eval_runtime": 3.2278, |
|
"eval_samples_per_second": 58.554, |
|
"eval_steps_per_second": 7.435, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 3.74, |
|
"learning_rate": 0.00015338785046728972, |
|
"loss": 0.5156, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 3.74, |
|
"eval_accuracy": 0.656084656084656, |
|
"eval_loss": 1.037644624710083, |
|
"eval_runtime": 3.6337, |
|
"eval_samples_per_second": 52.013, |
|
"eval_steps_per_second": 6.605, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"learning_rate": 0.00015221962616822433, |
|
"loss": 0.4884, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 3.83, |
|
"eval_accuracy": 0.6984126984126984, |
|
"eval_loss": 0.950617790222168, |
|
"eval_runtime": 3.4889, |
|
"eval_samples_per_second": 54.172, |
|
"eval_steps_per_second": 6.879, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"learning_rate": 0.00015105140186915888, |
|
"loss": 0.5875, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 3.93, |
|
"eval_accuracy": 0.7513227513227513, |
|
"eval_loss": 0.8479233980178833, |
|
"eval_runtime": 3.0768, |
|
"eval_samples_per_second": 61.428, |
|
"eval_steps_per_second": 7.8, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 4.02, |
|
"learning_rate": 0.00014988317757009346, |
|
"loss": 0.6982, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 4.02, |
|
"eval_accuracy": 0.6825396825396826, |
|
"eval_loss": 1.0894999504089355, |
|
"eval_runtime": 3.1014, |
|
"eval_samples_per_second": 60.941, |
|
"eval_steps_per_second": 7.738, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 4.11, |
|
"learning_rate": 0.00014871495327102805, |
|
"loss": 0.3966, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 4.11, |
|
"eval_accuracy": 0.6984126984126984, |
|
"eval_loss": 0.9708942770957947, |
|
"eval_runtime": 3.0279, |
|
"eval_samples_per_second": 62.42, |
|
"eval_steps_per_second": 7.926, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 4.21, |
|
"learning_rate": 0.00014754672897196263, |
|
"loss": 0.377, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 4.21, |
|
"eval_accuracy": 0.6772486772486772, |
|
"eval_loss": 0.9754199385643005, |
|
"eval_runtime": 3.1346, |
|
"eval_samples_per_second": 60.295, |
|
"eval_steps_per_second": 7.657, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 4.3, |
|
"learning_rate": 0.0001463785046728972, |
|
"loss": 0.3417, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 4.3, |
|
"eval_accuracy": 0.6507936507936508, |
|
"eval_loss": 1.1686595678329468, |
|
"eval_runtime": 3.0945, |
|
"eval_samples_per_second": 61.076, |
|
"eval_steps_per_second": 7.756, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 4.39, |
|
"learning_rate": 0.00014521028037383176, |
|
"loss": 0.336, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 4.39, |
|
"eval_accuracy": 0.6984126984126984, |
|
"eval_loss": 0.9825548529624939, |
|
"eval_runtime": 3.1296, |
|
"eval_samples_per_second": 60.39, |
|
"eval_steps_per_second": 7.669, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 4.49, |
|
"learning_rate": 0.00014404205607476635, |
|
"loss": 0.5201, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 4.49, |
|
"eval_accuracy": 0.6613756613756614, |
|
"eval_loss": 1.177049160003662, |
|
"eval_runtime": 3.0811, |
|
"eval_samples_per_second": 61.341, |
|
"eval_steps_per_second": 7.789, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 4.58, |
|
"learning_rate": 0.00014287383177570095, |
|
"loss": 0.1737, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 4.58, |
|
"eval_accuracy": 0.6878306878306878, |
|
"eval_loss": 1.0491447448730469, |
|
"eval_runtime": 3.1745, |
|
"eval_samples_per_second": 59.537, |
|
"eval_steps_per_second": 7.56, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"learning_rate": 0.00014170560747663554, |
|
"loss": 0.2545, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"eval_accuracy": 0.6984126984126984, |
|
"eval_loss": 1.1352397203445435, |
|
"eval_runtime": 3.0543, |
|
"eval_samples_per_second": 61.879, |
|
"eval_steps_per_second": 7.858, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"learning_rate": 0.0001405373831775701, |
|
"loss": 0.3752, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 4.77, |
|
"eval_accuracy": 0.6931216931216931, |
|
"eval_loss": 1.0300217866897583, |
|
"eval_runtime": 3.2562, |
|
"eval_samples_per_second": 58.043, |
|
"eval_steps_per_second": 7.371, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 4.86, |
|
"learning_rate": 0.00013936915887850467, |
|
"loss": 0.3667, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 4.86, |
|
"eval_accuracy": 0.6825396825396826, |
|
"eval_loss": 1.0355250835418701, |
|
"eval_runtime": 3.0388, |
|
"eval_samples_per_second": 62.196, |
|
"eval_steps_per_second": 7.898, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 4.95, |
|
"learning_rate": 0.00013820093457943925, |
|
"loss": 0.2797, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 4.95, |
|
"eval_accuracy": 0.6984126984126984, |
|
"eval_loss": 0.9882386326789856, |
|
"eval_runtime": 3.3521, |
|
"eval_samples_per_second": 56.383, |
|
"eval_steps_per_second": 7.16, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 5.05, |
|
"learning_rate": 0.00013703271028037384, |
|
"loss": 0.1646, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 5.05, |
|
"eval_accuracy": 0.6984126984126984, |
|
"eval_loss": 1.0728471279144287, |
|
"eval_runtime": 3.1345, |
|
"eval_samples_per_second": 60.297, |
|
"eval_steps_per_second": 7.657, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 5.14, |
|
"learning_rate": 0.00013586448598130842, |
|
"loss": 0.2199, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 5.14, |
|
"eval_accuracy": 0.7566137566137566, |
|
"eval_loss": 0.8328016996383667, |
|
"eval_runtime": 3.3035, |
|
"eval_samples_per_second": 57.213, |
|
"eval_steps_per_second": 7.265, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 5.23, |
|
"learning_rate": 0.000134696261682243, |
|
"loss": 0.2191, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 5.23, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 0.9280108213424683, |
|
"eval_runtime": 3.0545, |
|
"eval_samples_per_second": 61.876, |
|
"eval_steps_per_second": 7.857, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 5.33, |
|
"learning_rate": 0.00013352803738317758, |
|
"loss": 0.12, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 5.33, |
|
"eval_accuracy": 0.7037037037037037, |
|
"eval_loss": 1.0978211164474487, |
|
"eval_runtime": 3.1612, |
|
"eval_samples_per_second": 59.788, |
|
"eval_steps_per_second": 7.592, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 5.42, |
|
"learning_rate": 0.00013235981308411216, |
|
"loss": 0.2608, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 5.42, |
|
"eval_accuracy": 0.6878306878306878, |
|
"eval_loss": 1.1158148050308228, |
|
"eval_runtime": 3.1212, |
|
"eval_samples_per_second": 60.553, |
|
"eval_steps_per_second": 7.689, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 5.51, |
|
"learning_rate": 0.00013119158878504674, |
|
"loss": 0.2, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 5.51, |
|
"eval_accuracy": 0.7354497354497355, |
|
"eval_loss": 1.087318778038025, |
|
"eval_runtime": 3.0411, |
|
"eval_samples_per_second": 62.148, |
|
"eval_steps_per_second": 7.892, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 5.61, |
|
"learning_rate": 0.00013002336448598133, |
|
"loss": 0.1899, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 5.61, |
|
"eval_accuracy": 0.7142857142857143, |
|
"eval_loss": 1.0559666156768799, |
|
"eval_runtime": 3.1128, |
|
"eval_samples_per_second": 60.718, |
|
"eval_steps_per_second": 7.71, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 5.7, |
|
"learning_rate": 0.00012885514018691588, |
|
"loss": 0.1113, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 5.7, |
|
"eval_accuracy": 0.7037037037037037, |
|
"eval_loss": 1.1143887042999268, |
|
"eval_runtime": 3.038, |
|
"eval_samples_per_second": 62.211, |
|
"eval_steps_per_second": 7.9, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 5.79, |
|
"learning_rate": 0.00012768691588785046, |
|
"loss": 0.2279, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 5.79, |
|
"eval_accuracy": 0.6666666666666666, |
|
"eval_loss": 1.2534881830215454, |
|
"eval_runtime": 3.4146, |
|
"eval_samples_per_second": 55.351, |
|
"eval_steps_per_second": 7.029, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 5.89, |
|
"learning_rate": 0.00012651869158878504, |
|
"loss": 0.1563, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 5.89, |
|
"eval_accuracy": 0.7354497354497355, |
|
"eval_loss": 1.0802773237228394, |
|
"eval_runtime": 3.0678, |
|
"eval_samples_per_second": 61.608, |
|
"eval_steps_per_second": 7.823, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 5.98, |
|
"learning_rate": 0.00012535046728971965, |
|
"loss": 0.2182, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 5.98, |
|
"eval_accuracy": 0.6349206349206349, |
|
"eval_loss": 1.3904473781585693, |
|
"eval_runtime": 3.1818, |
|
"eval_samples_per_second": 59.399, |
|
"eval_steps_per_second": 7.543, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 6.07, |
|
"learning_rate": 0.0001241822429906542, |
|
"loss": 0.1781, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 6.07, |
|
"eval_accuracy": 0.671957671957672, |
|
"eval_loss": 1.3461418151855469, |
|
"eval_runtime": 3.0144, |
|
"eval_samples_per_second": 62.699, |
|
"eval_steps_per_second": 7.962, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 6.17, |
|
"learning_rate": 0.0001230140186915888, |
|
"loss": 0.1395, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 6.17, |
|
"eval_accuracy": 0.6825396825396826, |
|
"eval_loss": 1.276949405670166, |
|
"eval_runtime": 3.1879, |
|
"eval_samples_per_second": 59.287, |
|
"eval_steps_per_second": 7.529, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 6.26, |
|
"learning_rate": 0.00012184579439252337, |
|
"loss": 0.2308, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 6.26, |
|
"eval_accuracy": 0.6931216931216931, |
|
"eval_loss": 1.2213280200958252, |
|
"eval_runtime": 3.1475, |
|
"eval_samples_per_second": 60.047, |
|
"eval_steps_per_second": 7.625, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 6.36, |
|
"learning_rate": 0.00012067757009345795, |
|
"loss": 0.1899, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 6.36, |
|
"eval_accuracy": 0.7142857142857143, |
|
"eval_loss": 1.0947775840759277, |
|
"eval_runtime": 3.3614, |
|
"eval_samples_per_second": 56.226, |
|
"eval_steps_per_second": 7.14, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 6.45, |
|
"learning_rate": 0.00011950934579439252, |
|
"loss": 0.1702, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 6.45, |
|
"eval_accuracy": 0.6931216931216931, |
|
"eval_loss": 1.2383404970169067, |
|
"eval_runtime": 3.5678, |
|
"eval_samples_per_second": 52.974, |
|
"eval_steps_per_second": 6.727, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 6.54, |
|
"learning_rate": 0.0001183411214953271, |
|
"loss": 0.1055, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 6.54, |
|
"eval_accuracy": 0.6349206349206349, |
|
"eval_loss": 1.4009695053100586, |
|
"eval_runtime": 3.1702, |
|
"eval_samples_per_second": 59.618, |
|
"eval_steps_per_second": 7.571, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 6.64, |
|
"learning_rate": 0.00011717289719626168, |
|
"loss": 0.1151, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 6.64, |
|
"eval_accuracy": 0.671957671957672, |
|
"eval_loss": 1.2606865167617798, |
|
"eval_runtime": 3.2176, |
|
"eval_samples_per_second": 58.74, |
|
"eval_steps_per_second": 7.459, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 6.73, |
|
"learning_rate": 0.00011600467289719628, |
|
"loss": 0.2415, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 6.73, |
|
"eval_accuracy": 0.7301587301587301, |
|
"eval_loss": 1.0520366430282593, |
|
"eval_runtime": 3.1343, |
|
"eval_samples_per_second": 60.3, |
|
"eval_steps_per_second": 7.657, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 6.82, |
|
"learning_rate": 0.00011483644859813085, |
|
"loss": 0.117, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 6.82, |
|
"eval_accuracy": 0.7354497354497355, |
|
"eval_loss": 1.0547845363616943, |
|
"eval_runtime": 3.1212, |
|
"eval_samples_per_second": 60.554, |
|
"eval_steps_per_second": 7.689, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 6.92, |
|
"learning_rate": 0.00011366822429906543, |
|
"loss": 0.184, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 6.92, |
|
"eval_accuracy": 0.6984126984126984, |
|
"eval_loss": 1.1872239112854004, |
|
"eval_runtime": 3.2412, |
|
"eval_samples_per_second": 58.311, |
|
"eval_steps_per_second": 7.405, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 7.01, |
|
"learning_rate": 0.00011250000000000001, |
|
"loss": 0.1997, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 7.01, |
|
"eval_accuracy": 0.7248677248677249, |
|
"eval_loss": 1.1127938032150269, |
|
"eval_runtime": 3.2145, |
|
"eval_samples_per_second": 58.796, |
|
"eval_steps_per_second": 7.466, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 7.1, |
|
"learning_rate": 0.00011133177570093458, |
|
"loss": 0.0645, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 7.1, |
|
"eval_accuracy": 0.6984126984126984, |
|
"eval_loss": 1.1514036655426025, |
|
"eval_runtime": 4.5617, |
|
"eval_samples_per_second": 41.432, |
|
"eval_steps_per_second": 5.261, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 7.2, |
|
"learning_rate": 0.00011016355140186916, |
|
"loss": 0.1025, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 7.2, |
|
"eval_accuracy": 0.7037037037037037, |
|
"eval_loss": 1.2251726388931274, |
|
"eval_runtime": 3.3597, |
|
"eval_samples_per_second": 56.255, |
|
"eval_steps_per_second": 7.143, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 7.29, |
|
"learning_rate": 0.00010899532710280373, |
|
"loss": 0.0407, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 7.29, |
|
"eval_accuracy": 0.7513227513227513, |
|
"eval_loss": 1.0571043491363525, |
|
"eval_runtime": 3.3442, |
|
"eval_samples_per_second": 56.516, |
|
"eval_steps_per_second": 7.177, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 7.38, |
|
"learning_rate": 0.00010782710280373834, |
|
"loss": 0.1752, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 7.38, |
|
"eval_accuracy": 0.7354497354497355, |
|
"eval_loss": 1.081169843673706, |
|
"eval_runtime": 3.2833, |
|
"eval_samples_per_second": 57.564, |
|
"eval_steps_per_second": 7.31, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 7.48, |
|
"learning_rate": 0.0001066588785046729, |
|
"loss": 0.1143, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 7.48, |
|
"eval_accuracy": 0.7142857142857143, |
|
"eval_loss": 1.2182053327560425, |
|
"eval_runtime": 3.3002, |
|
"eval_samples_per_second": 57.269, |
|
"eval_steps_per_second": 7.272, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 7.57, |
|
"learning_rate": 0.00010549065420560749, |
|
"loss": 0.1542, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 7.57, |
|
"eval_accuracy": 0.7142857142857143, |
|
"eval_loss": 1.1789014339447021, |
|
"eval_runtime": 3.2576, |
|
"eval_samples_per_second": 58.018, |
|
"eval_steps_per_second": 7.367, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 7.66, |
|
"learning_rate": 0.00010432242990654207, |
|
"loss": 0.0859, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 7.66, |
|
"eval_accuracy": 0.7195767195767195, |
|
"eval_loss": 1.1392204761505127, |
|
"eval_runtime": 3.3449, |
|
"eval_samples_per_second": 56.505, |
|
"eval_steps_per_second": 7.175, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 7.76, |
|
"learning_rate": 0.00010315420560747664, |
|
"loss": 0.119, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 7.76, |
|
"eval_accuracy": 0.7354497354497355, |
|
"eval_loss": 1.1567529439926147, |
|
"eval_runtime": 3.3747, |
|
"eval_samples_per_second": 56.004, |
|
"eval_steps_per_second": 7.112, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 7.85, |
|
"learning_rate": 0.00010198598130841122, |
|
"loss": 0.0913, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 7.85, |
|
"eval_accuracy": 0.6984126984126984, |
|
"eval_loss": 1.109655499458313, |
|
"eval_runtime": 3.3749, |
|
"eval_samples_per_second": 56.001, |
|
"eval_steps_per_second": 7.111, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 7.94, |
|
"learning_rate": 0.00010081775700934579, |
|
"loss": 0.085, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 7.94, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.118941068649292, |
|
"eval_runtime": 3.2544, |
|
"eval_samples_per_second": 58.076, |
|
"eval_steps_per_second": 7.375, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 8.04, |
|
"learning_rate": 9.964953271028038e-05, |
|
"loss": 0.0201, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 8.04, |
|
"eval_accuracy": 0.7142857142857143, |
|
"eval_loss": 1.1282655000686646, |
|
"eval_runtime": 3.2809, |
|
"eval_samples_per_second": 57.606, |
|
"eval_steps_per_second": 7.315, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 8.13, |
|
"learning_rate": 9.848130841121495e-05, |
|
"loss": 0.0509, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 8.13, |
|
"eval_accuracy": 0.7407407407407407, |
|
"eval_loss": 1.1005280017852783, |
|
"eval_runtime": 3.2115, |
|
"eval_samples_per_second": 58.851, |
|
"eval_steps_per_second": 7.473, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 8.22, |
|
"learning_rate": 9.731308411214953e-05, |
|
"loss": 0.0326, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 8.22, |
|
"eval_accuracy": 0.7301587301587301, |
|
"eval_loss": 1.0489766597747803, |
|
"eval_runtime": 3.6148, |
|
"eval_samples_per_second": 52.285, |
|
"eval_steps_per_second": 6.639, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 8.32, |
|
"learning_rate": 9.614485981308413e-05, |
|
"loss": 0.0728, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 8.32, |
|
"eval_accuracy": 0.7195767195767195, |
|
"eval_loss": 1.2510557174682617, |
|
"eval_runtime": 3.2769, |
|
"eval_samples_per_second": 57.676, |
|
"eval_steps_per_second": 7.324, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 8.41, |
|
"learning_rate": 9.49766355140187e-05, |
|
"loss": 0.0486, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 8.41, |
|
"eval_accuracy": 0.7142857142857143, |
|
"eval_loss": 1.1832597255706787, |
|
"eval_runtime": 3.441, |
|
"eval_samples_per_second": 54.925, |
|
"eval_steps_per_second": 6.975, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"learning_rate": 9.380841121495328e-05, |
|
"loss": 0.0645, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 8.5, |
|
"eval_accuracy": 0.7724867724867724, |
|
"eval_loss": 0.9881472587585449, |
|
"eval_runtime": 3.3474, |
|
"eval_samples_per_second": 56.463, |
|
"eval_steps_per_second": 7.17, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 8.6, |
|
"learning_rate": 9.264018691588785e-05, |
|
"loss": 0.0194, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 8.6, |
|
"eval_accuracy": 0.7566137566137566, |
|
"eval_loss": 1.0412092208862305, |
|
"eval_runtime": 3.305, |
|
"eval_samples_per_second": 57.187, |
|
"eval_steps_per_second": 7.262, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 8.69, |
|
"learning_rate": 9.147196261682244e-05, |
|
"loss": 0.0215, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 8.69, |
|
"eval_accuracy": 0.7195767195767195, |
|
"eval_loss": 1.2484722137451172, |
|
"eval_runtime": 3.5183, |
|
"eval_samples_per_second": 53.719, |
|
"eval_steps_per_second": 6.821, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 8.79, |
|
"learning_rate": 9.030373831775701e-05, |
|
"loss": 0.0853, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 8.79, |
|
"eval_accuracy": 0.7671957671957672, |
|
"eval_loss": 1.0863738059997559, |
|
"eval_runtime": 3.3344, |
|
"eval_samples_per_second": 56.681, |
|
"eval_steps_per_second": 7.198, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 8.88, |
|
"learning_rate": 8.913551401869159e-05, |
|
"loss": 0.0412, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 8.88, |
|
"eval_accuracy": 0.7248677248677249, |
|
"eval_loss": 1.179593801498413, |
|
"eval_runtime": 3.4549, |
|
"eval_samples_per_second": 54.705, |
|
"eval_steps_per_second": 6.947, |
|
"step": 950 |
|
}, |
|
{ |
|
"epoch": 8.97, |
|
"learning_rate": 8.796728971962617e-05, |
|
"loss": 0.0645, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 8.97, |
|
"eval_accuracy": 0.6878306878306878, |
|
"eval_loss": 1.3152216672897339, |
|
"eval_runtime": 3.4461, |
|
"eval_samples_per_second": 54.844, |
|
"eval_steps_per_second": 6.964, |
|
"step": 960 |
|
}, |
|
{ |
|
"epoch": 9.07, |
|
"learning_rate": 8.679906542056075e-05, |
|
"loss": 0.0654, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 9.07, |
|
"eval_accuracy": 0.6931216931216931, |
|
"eval_loss": 1.2788838148117065, |
|
"eval_runtime": 3.1953, |
|
"eval_samples_per_second": 59.15, |
|
"eval_steps_per_second": 7.511, |
|
"step": 970 |
|
}, |
|
{ |
|
"epoch": 9.16, |
|
"learning_rate": 8.563084112149534e-05, |
|
"loss": 0.0352, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 9.16, |
|
"eval_accuracy": 0.7195767195767195, |
|
"eval_loss": 1.1927844285964966, |
|
"eval_runtime": 3.4374, |
|
"eval_samples_per_second": 54.983, |
|
"eval_steps_per_second": 6.982, |
|
"step": 980 |
|
}, |
|
{ |
|
"epoch": 9.25, |
|
"learning_rate": 8.44626168224299e-05, |
|
"loss": 0.0137, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 9.25, |
|
"eval_accuracy": 0.7354497354497355, |
|
"eval_loss": 1.1643190383911133, |
|
"eval_runtime": 3.1879, |
|
"eval_samples_per_second": 59.287, |
|
"eval_steps_per_second": 7.529, |
|
"step": 990 |
|
}, |
|
{ |
|
"epoch": 9.35, |
|
"learning_rate": 8.32943925233645e-05, |
|
"loss": 0.0227, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 9.35, |
|
"eval_accuracy": 0.7142857142857143, |
|
"eval_loss": 1.2255942821502686, |
|
"eval_runtime": 3.4426, |
|
"eval_samples_per_second": 54.901, |
|
"eval_steps_per_second": 6.972, |
|
"step": 1000 |
|
}, |
|
{ |
|
"epoch": 9.44, |
|
"learning_rate": 8.212616822429907e-05, |
|
"loss": 0.0391, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 9.44, |
|
"eval_accuracy": 0.7195767195767195, |
|
"eval_loss": 1.2088606357574463, |
|
"eval_runtime": 3.1411, |
|
"eval_samples_per_second": 60.17, |
|
"eval_steps_per_second": 7.641, |
|
"step": 1010 |
|
}, |
|
{ |
|
"epoch": 9.53, |
|
"learning_rate": 8.095794392523365e-05, |
|
"loss": 0.0163, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 9.53, |
|
"eval_accuracy": 0.6931216931216931, |
|
"eval_loss": 1.3879644870758057, |
|
"eval_runtime": 3.5148, |
|
"eval_samples_per_second": 53.773, |
|
"eval_steps_per_second": 6.828, |
|
"step": 1020 |
|
}, |
|
{ |
|
"epoch": 9.63, |
|
"learning_rate": 7.978971962616823e-05, |
|
"loss": 0.0225, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 9.63, |
|
"eval_accuracy": 0.6931216931216931, |
|
"eval_loss": 1.3944264650344849, |
|
"eval_runtime": 3.2816, |
|
"eval_samples_per_second": 57.594, |
|
"eval_steps_per_second": 7.314, |
|
"step": 1030 |
|
}, |
|
{ |
|
"epoch": 9.72, |
|
"learning_rate": 7.862149532710281e-05, |
|
"loss": 0.0348, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 9.72, |
|
"eval_accuracy": 0.7142857142857143, |
|
"eval_loss": 1.3256537914276123, |
|
"eval_runtime": 3.2964, |
|
"eval_samples_per_second": 57.334, |
|
"eval_steps_per_second": 7.281, |
|
"step": 1040 |
|
}, |
|
{ |
|
"epoch": 9.81, |
|
"learning_rate": 7.74532710280374e-05, |
|
"loss": 0.0354, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 9.81, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.1538487672805786, |
|
"eval_runtime": 3.3622, |
|
"eval_samples_per_second": 56.213, |
|
"eval_steps_per_second": 7.138, |
|
"step": 1050 |
|
}, |
|
{ |
|
"epoch": 9.91, |
|
"learning_rate": 7.628504672897196e-05, |
|
"loss": 0.0412, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 9.91, |
|
"eval_accuracy": 0.7248677248677249, |
|
"eval_loss": 1.237243890762329, |
|
"eval_runtime": 3.4338, |
|
"eval_samples_per_second": 55.041, |
|
"eval_steps_per_second": 6.989, |
|
"step": 1060 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 7.511682242990654e-05, |
|
"loss": 0.055, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_accuracy": 0.708994708994709, |
|
"eval_loss": 1.2265738248825073, |
|
"eval_runtime": 3.4414, |
|
"eval_samples_per_second": 54.919, |
|
"eval_steps_per_second": 6.974, |
|
"step": 1070 |
|
}, |
|
{ |
|
"epoch": 10.09, |
|
"learning_rate": 7.394859813084113e-05, |
|
"loss": 0.0115, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 10.09, |
|
"eval_accuracy": 0.7248677248677249, |
|
"eval_loss": 1.235258936882019, |
|
"eval_runtime": 3.3434, |
|
"eval_samples_per_second": 56.53, |
|
"eval_steps_per_second": 7.178, |
|
"step": 1080 |
|
}, |
|
{ |
|
"epoch": 10.19, |
|
"learning_rate": 7.278037383177571e-05, |
|
"loss": 0.011, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 10.19, |
|
"eval_accuracy": 0.7248677248677249, |
|
"eval_loss": 1.2655125856399536, |
|
"eval_runtime": 3.2569, |
|
"eval_samples_per_second": 58.031, |
|
"eval_steps_per_second": 7.369, |
|
"step": 1090 |
|
}, |
|
{ |
|
"epoch": 10.28, |
|
"learning_rate": 7.161214953271029e-05, |
|
"loss": 0.0105, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 10.28, |
|
"eval_accuracy": 0.7354497354497355, |
|
"eval_loss": 1.2830661535263062, |
|
"eval_runtime": 3.3769, |
|
"eval_samples_per_second": 55.968, |
|
"eval_steps_per_second": 7.107, |
|
"step": 1100 |
|
}, |
|
{ |
|
"epoch": 10.37, |
|
"learning_rate": 7.044392523364486e-05, |
|
"loss": 0.0248, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 10.37, |
|
"eval_accuracy": 0.7142857142857143, |
|
"eval_loss": 1.3138365745544434, |
|
"eval_runtime": 3.2282, |
|
"eval_samples_per_second": 58.547, |
|
"eval_steps_per_second": 7.435, |
|
"step": 1110 |
|
}, |
|
{ |
|
"epoch": 10.47, |
|
"learning_rate": 6.927570093457945e-05, |
|
"loss": 0.0287, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 10.47, |
|
"eval_accuracy": 0.7195767195767195, |
|
"eval_loss": 1.247241735458374, |
|
"eval_runtime": 3.7473, |
|
"eval_samples_per_second": 50.437, |
|
"eval_steps_per_second": 6.405, |
|
"step": 1120 |
|
}, |
|
{ |
|
"epoch": 10.56, |
|
"learning_rate": 6.810747663551402e-05, |
|
"loss": 0.017, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 10.56, |
|
"eval_accuracy": 0.7619047619047619, |
|
"eval_loss": 1.1517435312271118, |
|
"eval_runtime": 3.3479, |
|
"eval_samples_per_second": 56.453, |
|
"eval_steps_per_second": 7.169, |
|
"step": 1130 |
|
}, |
|
{ |
|
"epoch": 10.65, |
|
"learning_rate": 6.69392523364486e-05, |
|
"loss": 0.0326, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 10.65, |
|
"eval_accuracy": 0.7513227513227513, |
|
"eval_loss": 1.1729180812835693, |
|
"eval_runtime": 3.563, |
|
"eval_samples_per_second": 53.045, |
|
"eval_steps_per_second": 6.736, |
|
"step": 1140 |
|
}, |
|
{ |
|
"epoch": 10.75, |
|
"learning_rate": 6.577102803738317e-05, |
|
"loss": 0.0298, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 10.75, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.1990931034088135, |
|
"eval_runtime": 3.1999, |
|
"eval_samples_per_second": 59.065, |
|
"eval_steps_per_second": 7.5, |
|
"step": 1150 |
|
}, |
|
{ |
|
"epoch": 10.84, |
|
"learning_rate": 6.460280373831777e-05, |
|
"loss": 0.0087, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 10.84, |
|
"eval_accuracy": 0.7195767195767195, |
|
"eval_loss": 1.1964682340621948, |
|
"eval_runtime": 3.3211, |
|
"eval_samples_per_second": 56.908, |
|
"eval_steps_per_second": 7.226, |
|
"step": 1160 |
|
}, |
|
{ |
|
"epoch": 10.93, |
|
"learning_rate": 6.343457943925233e-05, |
|
"loss": 0.0104, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 10.93, |
|
"eval_accuracy": 0.7301587301587301, |
|
"eval_loss": 1.200627088546753, |
|
"eval_runtime": 3.2863, |
|
"eval_samples_per_second": 57.511, |
|
"eval_steps_per_second": 7.303, |
|
"step": 1170 |
|
}, |
|
{ |
|
"epoch": 11.03, |
|
"learning_rate": 6.226635514018691e-05, |
|
"loss": 0.0176, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 11.03, |
|
"eval_accuracy": 0.7195767195767195, |
|
"eval_loss": 1.2818912267684937, |
|
"eval_runtime": 3.3882, |
|
"eval_samples_per_second": 55.783, |
|
"eval_steps_per_second": 7.083, |
|
"step": 1180 |
|
}, |
|
{ |
|
"epoch": 11.12, |
|
"learning_rate": 6.109813084112151e-05, |
|
"loss": 0.0088, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 11.12, |
|
"eval_accuracy": 0.7248677248677249, |
|
"eval_loss": 1.2860090732574463, |
|
"eval_runtime": 3.4546, |
|
"eval_samples_per_second": 54.709, |
|
"eval_steps_per_second": 6.947, |
|
"step": 1190 |
|
}, |
|
{ |
|
"epoch": 11.21, |
|
"learning_rate": 5.992990654205608e-05, |
|
"loss": 0.0218, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 11.21, |
|
"eval_accuracy": 0.7407407407407407, |
|
"eval_loss": 1.1996196508407593, |
|
"eval_runtime": 3.4627, |
|
"eval_samples_per_second": 54.582, |
|
"eval_steps_per_second": 6.931, |
|
"step": 1200 |
|
}, |
|
{ |
|
"epoch": 11.31, |
|
"learning_rate": 5.876168224299065e-05, |
|
"loss": 0.011, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 11.31, |
|
"eval_accuracy": 0.7407407407407407, |
|
"eval_loss": 1.1904582977294922, |
|
"eval_runtime": 3.6413, |
|
"eval_samples_per_second": 51.905, |
|
"eval_steps_per_second": 6.591, |
|
"step": 1210 |
|
}, |
|
{ |
|
"epoch": 11.4, |
|
"learning_rate": 5.7593457943925235e-05, |
|
"loss": 0.0195, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 11.4, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.1777358055114746, |
|
"eval_runtime": 3.4353, |
|
"eval_samples_per_second": 55.016, |
|
"eval_steps_per_second": 6.986, |
|
"step": 1220 |
|
}, |
|
{ |
|
"epoch": 11.5, |
|
"learning_rate": 5.6425233644859823e-05, |
|
"loss": 0.012, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 11.5, |
|
"eval_accuracy": 0.7566137566137566, |
|
"eval_loss": 1.1417244672775269, |
|
"eval_runtime": 3.4149, |
|
"eval_samples_per_second": 55.346, |
|
"eval_steps_per_second": 7.028, |
|
"step": 1230 |
|
}, |
|
{ |
|
"epoch": 11.59, |
|
"learning_rate": 5.52570093457944e-05, |
|
"loss": 0.0075, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 11.59, |
|
"eval_accuracy": 0.7619047619047619, |
|
"eval_loss": 1.1428831815719604, |
|
"eval_runtime": 3.4904, |
|
"eval_samples_per_second": 54.148, |
|
"eval_steps_per_second": 6.876, |
|
"step": 1240 |
|
}, |
|
{ |
|
"epoch": 11.68, |
|
"learning_rate": 5.408878504672897e-05, |
|
"loss": 0.0131, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 11.68, |
|
"eval_accuracy": 0.7671957671957672, |
|
"eval_loss": 1.1381064653396606, |
|
"eval_runtime": 3.3671, |
|
"eval_samples_per_second": 56.131, |
|
"eval_steps_per_second": 7.128, |
|
"step": 1250 |
|
}, |
|
{ |
|
"epoch": 11.78, |
|
"learning_rate": 5.292056074766355e-05, |
|
"loss": 0.0078, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 11.78, |
|
"eval_accuracy": 0.7566137566137566, |
|
"eval_loss": 1.156213641166687, |
|
"eval_runtime": 3.7773, |
|
"eval_samples_per_second": 50.036, |
|
"eval_steps_per_second": 6.354, |
|
"step": 1260 |
|
}, |
|
{ |
|
"epoch": 11.87, |
|
"learning_rate": 5.175233644859814e-05, |
|
"loss": 0.0071, |
|
"step": 1270 |
|
}, |
|
{ |
|
"epoch": 11.87, |
|
"eval_accuracy": 0.7619047619047619, |
|
"eval_loss": 1.1708426475524902, |
|
"eval_runtime": 3.4015, |
|
"eval_samples_per_second": 55.564, |
|
"eval_steps_per_second": 7.056, |
|
"step": 1270 |
|
}, |
|
{ |
|
"epoch": 11.96, |
|
"learning_rate": 5.058411214953271e-05, |
|
"loss": 0.04, |
|
"step": 1280 |
|
}, |
|
{ |
|
"epoch": 11.96, |
|
"eval_accuracy": 0.7513227513227513, |
|
"eval_loss": 1.1964821815490723, |
|
"eval_runtime": 3.6594, |
|
"eval_samples_per_second": 51.648, |
|
"eval_steps_per_second": 6.559, |
|
"step": 1280 |
|
}, |
|
{ |
|
"epoch": 12.06, |
|
"learning_rate": 4.941588785046729e-05, |
|
"loss": 0.0066, |
|
"step": 1290 |
|
}, |
|
{ |
|
"epoch": 12.06, |
|
"eval_accuracy": 0.7354497354497355, |
|
"eval_loss": 1.2294511795043945, |
|
"eval_runtime": 3.2967, |
|
"eval_samples_per_second": 57.329, |
|
"eval_steps_per_second": 7.28, |
|
"step": 1290 |
|
}, |
|
{ |
|
"epoch": 12.15, |
|
"learning_rate": 4.8247663551401875e-05, |
|
"loss": 0.0179, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 12.15, |
|
"eval_accuracy": 0.7354497354497355, |
|
"eval_loss": 1.2337099313735962, |
|
"eval_runtime": 3.344, |
|
"eval_samples_per_second": 56.519, |
|
"eval_steps_per_second": 7.177, |
|
"step": 1300 |
|
}, |
|
{ |
|
"epoch": 12.24, |
|
"learning_rate": 4.707943925233645e-05, |
|
"loss": 0.0072, |
|
"step": 1310 |
|
}, |
|
{ |
|
"epoch": 12.24, |
|
"eval_accuracy": 0.7407407407407407, |
|
"eval_loss": 1.2375584840774536, |
|
"eval_runtime": 3.3433, |
|
"eval_samples_per_second": 56.531, |
|
"eval_steps_per_second": 7.179, |
|
"step": 1310 |
|
}, |
|
{ |
|
"epoch": 12.34, |
|
"learning_rate": 4.591121495327103e-05, |
|
"loss": 0.0189, |
|
"step": 1320 |
|
}, |
|
{ |
|
"epoch": 12.34, |
|
"eval_accuracy": 0.7354497354497355, |
|
"eval_loss": 1.2402395009994507, |
|
"eval_runtime": 3.4557, |
|
"eval_samples_per_second": 54.692, |
|
"eval_steps_per_second": 6.945, |
|
"step": 1320 |
|
}, |
|
{ |
|
"epoch": 12.43, |
|
"learning_rate": 4.4742990654205606e-05, |
|
"loss": 0.0067, |
|
"step": 1330 |
|
}, |
|
{ |
|
"epoch": 12.43, |
|
"eval_accuracy": 0.7407407407407407, |
|
"eval_loss": 1.2426130771636963, |
|
"eval_runtime": 3.468, |
|
"eval_samples_per_second": 54.498, |
|
"eval_steps_per_second": 6.92, |
|
"step": 1330 |
|
}, |
|
{ |
|
"epoch": 12.52, |
|
"learning_rate": 4.357476635514019e-05, |
|
"loss": 0.014, |
|
"step": 1340 |
|
}, |
|
{ |
|
"epoch": 12.52, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.219921588897705, |
|
"eval_runtime": 3.4683, |
|
"eval_samples_per_second": 54.494, |
|
"eval_steps_per_second": 6.92, |
|
"step": 1340 |
|
}, |
|
{ |
|
"epoch": 12.62, |
|
"learning_rate": 4.240654205607476e-05, |
|
"loss": 0.0065, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 12.62, |
|
"eval_accuracy": 0.7513227513227513, |
|
"eval_loss": 1.2070165872573853, |
|
"eval_runtime": 3.3345, |
|
"eval_samples_per_second": 56.68, |
|
"eval_steps_per_second": 7.197, |
|
"step": 1350 |
|
}, |
|
{ |
|
"epoch": 12.71, |
|
"learning_rate": 4.123831775700935e-05, |
|
"loss": 0.0119, |
|
"step": 1360 |
|
}, |
|
{ |
|
"epoch": 12.71, |
|
"eval_accuracy": 0.7513227513227513, |
|
"eval_loss": 1.2172141075134277, |
|
"eval_runtime": 3.335, |
|
"eval_samples_per_second": 56.672, |
|
"eval_steps_per_second": 7.196, |
|
"step": 1360 |
|
}, |
|
{ |
|
"epoch": 12.8, |
|
"learning_rate": 4.0070093457943926e-05, |
|
"loss": 0.0065, |
|
"step": 1370 |
|
}, |
|
{ |
|
"epoch": 12.8, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.2299124002456665, |
|
"eval_runtime": 3.4554, |
|
"eval_samples_per_second": 54.697, |
|
"eval_steps_per_second": 6.946, |
|
"step": 1370 |
|
}, |
|
{ |
|
"epoch": 12.9, |
|
"learning_rate": 3.890186915887851e-05, |
|
"loss": 0.0139, |
|
"step": 1380 |
|
}, |
|
{ |
|
"epoch": 12.9, |
|
"eval_accuracy": 0.7513227513227513, |
|
"eval_loss": 1.2095272541046143, |
|
"eval_runtime": 3.5215, |
|
"eval_samples_per_second": 53.67, |
|
"eval_steps_per_second": 6.815, |
|
"step": 1380 |
|
}, |
|
{ |
|
"epoch": 12.99, |
|
"learning_rate": 3.773364485981308e-05, |
|
"loss": 0.0195, |
|
"step": 1390 |
|
}, |
|
{ |
|
"epoch": 12.99, |
|
"eval_accuracy": 0.7513227513227513, |
|
"eval_loss": 1.1914021968841553, |
|
"eval_runtime": 3.3578, |
|
"eval_samples_per_second": 56.287, |
|
"eval_steps_per_second": 7.148, |
|
"step": 1390 |
|
}, |
|
{ |
|
"epoch": 13.08, |
|
"learning_rate": 3.6565420560747665e-05, |
|
"loss": 0.0102, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 13.08, |
|
"eval_accuracy": 0.7513227513227513, |
|
"eval_loss": 1.1972215175628662, |
|
"eval_runtime": 3.4283, |
|
"eval_samples_per_second": 55.13, |
|
"eval_steps_per_second": 7.001, |
|
"step": 1400 |
|
}, |
|
{ |
|
"epoch": 13.18, |
|
"learning_rate": 3.5397196261682246e-05, |
|
"loss": 0.0162, |
|
"step": 1410 |
|
}, |
|
{ |
|
"epoch": 13.18, |
|
"eval_accuracy": 0.7566137566137566, |
|
"eval_loss": 1.2006089687347412, |
|
"eval_runtime": 3.2014, |
|
"eval_samples_per_second": 59.036, |
|
"eval_steps_per_second": 7.497, |
|
"step": 1410 |
|
}, |
|
{ |
|
"epoch": 13.27, |
|
"learning_rate": 3.422897196261682e-05, |
|
"loss": 0.0057, |
|
"step": 1420 |
|
}, |
|
{ |
|
"epoch": 13.27, |
|
"eval_accuracy": 0.7566137566137566, |
|
"eval_loss": 1.213531494140625, |
|
"eval_runtime": 3.2962, |
|
"eval_samples_per_second": 57.34, |
|
"eval_steps_per_second": 7.281, |
|
"step": 1420 |
|
}, |
|
{ |
|
"epoch": 13.36, |
|
"learning_rate": 3.30607476635514e-05, |
|
"loss": 0.0099, |
|
"step": 1430 |
|
}, |
|
{ |
|
"epoch": 13.36, |
|
"eval_accuracy": 0.7566137566137566, |
|
"eval_loss": 1.2060108184814453, |
|
"eval_runtime": 3.4059, |
|
"eval_samples_per_second": 55.493, |
|
"eval_steps_per_second": 7.047, |
|
"step": 1430 |
|
}, |
|
{ |
|
"epoch": 13.46, |
|
"learning_rate": 3.1892523364485985e-05, |
|
"loss": 0.0092, |
|
"step": 1440 |
|
}, |
|
{ |
|
"epoch": 13.46, |
|
"eval_accuracy": 0.7513227513227513, |
|
"eval_loss": 1.209419846534729, |
|
"eval_runtime": 3.3619, |
|
"eval_samples_per_second": 56.218, |
|
"eval_steps_per_second": 7.139, |
|
"step": 1440 |
|
}, |
|
{ |
|
"epoch": 13.55, |
|
"learning_rate": 3.0724299065420566e-05, |
|
"loss": 0.0059, |
|
"step": 1450 |
|
}, |
|
{ |
|
"epoch": 13.55, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.21532142162323, |
|
"eval_runtime": 3.4612, |
|
"eval_samples_per_second": 54.605, |
|
"eval_steps_per_second": 6.934, |
|
"step": 1450 |
|
}, |
|
{ |
|
"epoch": 13.64, |
|
"learning_rate": 2.955607476635514e-05, |
|
"loss": 0.0132, |
|
"step": 1460 |
|
}, |
|
{ |
|
"epoch": 13.64, |
|
"eval_accuracy": 0.7513227513227513, |
|
"eval_loss": 1.2271349430084229, |
|
"eval_runtime": 3.3079, |
|
"eval_samples_per_second": 57.136, |
|
"eval_steps_per_second": 7.255, |
|
"step": 1460 |
|
}, |
|
{ |
|
"epoch": 13.74, |
|
"learning_rate": 2.8387850467289723e-05, |
|
"loss": 0.0224, |
|
"step": 1470 |
|
}, |
|
{ |
|
"epoch": 13.74, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.2394039630889893, |
|
"eval_runtime": 3.2507, |
|
"eval_samples_per_second": 58.141, |
|
"eval_steps_per_second": 7.383, |
|
"step": 1470 |
|
}, |
|
{ |
|
"epoch": 13.83, |
|
"learning_rate": 2.7219626168224298e-05, |
|
"loss": 0.0116, |
|
"step": 1480 |
|
}, |
|
{ |
|
"epoch": 13.83, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.2353821992874146, |
|
"eval_runtime": 3.2965, |
|
"eval_samples_per_second": 57.333, |
|
"eval_steps_per_second": 7.28, |
|
"step": 1480 |
|
}, |
|
{ |
|
"epoch": 13.93, |
|
"learning_rate": 2.605140186915888e-05, |
|
"loss": 0.0096, |
|
"step": 1490 |
|
}, |
|
{ |
|
"epoch": 13.93, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.2316288948059082, |
|
"eval_runtime": 3.263, |
|
"eval_samples_per_second": 57.923, |
|
"eval_steps_per_second": 7.355, |
|
"step": 1490 |
|
}, |
|
{ |
|
"epoch": 14.02, |
|
"learning_rate": 2.488317757009346e-05, |
|
"loss": 0.0055, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 14.02, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.2332398891448975, |
|
"eval_runtime": 3.4255, |
|
"eval_samples_per_second": 55.175, |
|
"eval_steps_per_second": 7.006, |
|
"step": 1500 |
|
}, |
|
{ |
|
"epoch": 14.11, |
|
"learning_rate": 2.371495327102804e-05, |
|
"loss": 0.009, |
|
"step": 1510 |
|
}, |
|
{ |
|
"epoch": 14.11, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.2354868650436401, |
|
"eval_runtime": 3.3723, |
|
"eval_samples_per_second": 56.045, |
|
"eval_steps_per_second": 7.117, |
|
"step": 1510 |
|
}, |
|
{ |
|
"epoch": 14.21, |
|
"learning_rate": 2.2546728971962618e-05, |
|
"loss": 0.0058, |
|
"step": 1520 |
|
}, |
|
{ |
|
"epoch": 14.21, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.2446562051773071, |
|
"eval_runtime": 3.5668, |
|
"eval_samples_per_second": 52.988, |
|
"eval_steps_per_second": 6.729, |
|
"step": 1520 |
|
}, |
|
{ |
|
"epoch": 14.3, |
|
"learning_rate": 2.13785046728972e-05, |
|
"loss": 0.01, |
|
"step": 1530 |
|
}, |
|
{ |
|
"epoch": 14.3, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.2436867952346802, |
|
"eval_runtime": 3.2984, |
|
"eval_samples_per_second": 57.3, |
|
"eval_steps_per_second": 7.276, |
|
"step": 1530 |
|
}, |
|
{ |
|
"epoch": 14.39, |
|
"learning_rate": 2.0210280373831778e-05, |
|
"loss": 0.0055, |
|
"step": 1540 |
|
}, |
|
{ |
|
"epoch": 14.39, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.2422124147415161, |
|
"eval_runtime": 3.3462, |
|
"eval_samples_per_second": 56.481, |
|
"eval_steps_per_second": 7.172, |
|
"step": 1540 |
|
}, |
|
{ |
|
"epoch": 14.49, |
|
"learning_rate": 1.9042056074766356e-05, |
|
"loss": 0.0187, |
|
"step": 1550 |
|
}, |
|
{ |
|
"epoch": 14.49, |
|
"eval_accuracy": 0.7513227513227513, |
|
"eval_loss": 1.221534252166748, |
|
"eval_runtime": 3.4706, |
|
"eval_samples_per_second": 54.457, |
|
"eval_steps_per_second": 6.915, |
|
"step": 1550 |
|
}, |
|
{ |
|
"epoch": 14.58, |
|
"learning_rate": 1.7873831775700935e-05, |
|
"loss": 0.0103, |
|
"step": 1560 |
|
}, |
|
{ |
|
"epoch": 14.58, |
|
"eval_accuracy": 0.7513227513227513, |
|
"eval_loss": 1.217819333076477, |
|
"eval_runtime": 3.2699, |
|
"eval_samples_per_second": 57.799, |
|
"eval_steps_per_second": 7.34, |
|
"step": 1560 |
|
}, |
|
{ |
|
"epoch": 14.67, |
|
"learning_rate": 1.6705607476635516e-05, |
|
"loss": 0.0053, |
|
"step": 1570 |
|
}, |
|
{ |
|
"epoch": 14.67, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.2216895818710327, |
|
"eval_runtime": 3.2903, |
|
"eval_samples_per_second": 57.442, |
|
"eval_steps_per_second": 7.294, |
|
"step": 1570 |
|
}, |
|
{ |
|
"epoch": 14.77, |
|
"learning_rate": 1.5537383177570095e-05, |
|
"loss": 0.01, |
|
"step": 1580 |
|
}, |
|
{ |
|
"epoch": 14.77, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.2266777753829956, |
|
"eval_runtime": 3.2812, |
|
"eval_samples_per_second": 57.602, |
|
"eval_steps_per_second": 7.315, |
|
"step": 1580 |
|
}, |
|
{ |
|
"epoch": 14.86, |
|
"learning_rate": 1.4369158878504675e-05, |
|
"loss": 0.0238, |
|
"step": 1590 |
|
}, |
|
{ |
|
"epoch": 14.86, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.227875828742981, |
|
"eval_runtime": 3.3613, |
|
"eval_samples_per_second": 56.228, |
|
"eval_steps_per_second": 7.14, |
|
"step": 1590 |
|
}, |
|
{ |
|
"epoch": 14.95, |
|
"learning_rate": 1.3200934579439253e-05, |
|
"loss": 0.0091, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 14.95, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.224220871925354, |
|
"eval_runtime": 3.2469, |
|
"eval_samples_per_second": 58.209, |
|
"eval_steps_per_second": 7.392, |
|
"step": 1600 |
|
}, |
|
{ |
|
"epoch": 15.05, |
|
"learning_rate": 1.2032710280373833e-05, |
|
"loss": 0.0053, |
|
"step": 1610 |
|
}, |
|
{ |
|
"epoch": 15.05, |
|
"eval_accuracy": 0.7513227513227513, |
|
"eval_loss": 1.2231720685958862, |
|
"eval_runtime": 3.1879, |
|
"eval_samples_per_second": 59.287, |
|
"eval_steps_per_second": 7.528, |
|
"step": 1610 |
|
}, |
|
{ |
|
"epoch": 15.14, |
|
"learning_rate": 1.0864485981308411e-05, |
|
"loss": 0.0101, |
|
"step": 1620 |
|
}, |
|
{ |
|
"epoch": 15.14, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.2256537675857544, |
|
"eval_runtime": 3.2787, |
|
"eval_samples_per_second": 57.646, |
|
"eval_steps_per_second": 7.32, |
|
"step": 1620 |
|
}, |
|
{ |
|
"epoch": 15.23, |
|
"learning_rate": 9.696261682242991e-06, |
|
"loss": 0.0189, |
|
"step": 1630 |
|
}, |
|
{ |
|
"epoch": 15.23, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.227663278579712, |
|
"eval_runtime": 3.188, |
|
"eval_samples_per_second": 59.286, |
|
"eval_steps_per_second": 7.528, |
|
"step": 1630 |
|
}, |
|
{ |
|
"epoch": 15.33, |
|
"learning_rate": 8.52803738317757e-06, |
|
"loss": 0.0056, |
|
"step": 1640 |
|
}, |
|
{ |
|
"epoch": 15.33, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.233586072921753, |
|
"eval_runtime": 3.2945, |
|
"eval_samples_per_second": 57.368, |
|
"eval_steps_per_second": 7.285, |
|
"step": 1640 |
|
}, |
|
{ |
|
"epoch": 15.42, |
|
"learning_rate": 7.3598130841121496e-06, |
|
"loss": 0.0052, |
|
"step": 1650 |
|
}, |
|
{ |
|
"epoch": 15.42, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.235275149345398, |
|
"eval_runtime": 3.3479, |
|
"eval_samples_per_second": 56.453, |
|
"eval_steps_per_second": 7.169, |
|
"step": 1650 |
|
}, |
|
{ |
|
"epoch": 15.51, |
|
"learning_rate": 6.1915887850467296e-06, |
|
"loss": 0.0054, |
|
"step": 1660 |
|
}, |
|
{ |
|
"epoch": 15.51, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.2359087467193604, |
|
"eval_runtime": 3.3879, |
|
"eval_samples_per_second": 55.786, |
|
"eval_steps_per_second": 7.084, |
|
"step": 1660 |
|
}, |
|
{ |
|
"epoch": 15.61, |
|
"learning_rate": 5.023364485981309e-06, |
|
"loss": 0.0054, |
|
"step": 1670 |
|
}, |
|
{ |
|
"epoch": 15.61, |
|
"eval_accuracy": 0.746031746031746, |
|
"eval_loss": 1.2361844778060913, |
|
"eval_runtime": 3.1737, |
|
"eval_samples_per_second": 59.551, |
|
"eval_steps_per_second": 7.562, |
|
"step": 1670 |
|
}, |
|
{ |
|
"epoch": 15.7, |
|
"learning_rate": 3.855140186915888e-06, |
|
"loss": 0.0102, |
|
"step": 1680 |
|
}, |
|
{ |
|
"epoch": 15.7, |
|
"eval_accuracy": 0.7513227513227513, |
|
"eval_loss": 1.2348004579544067, |
|
"eval_runtime": 3.2806, |
|
"eval_samples_per_second": 57.612, |
|
"eval_steps_per_second": 7.316, |
|
"step": 1680 |
|
}, |
|
{ |
|
"epoch": 15.79, |
|
"learning_rate": 2.6869158878504674e-06, |
|
"loss": 0.0193, |
|
"step": 1690 |
|
}, |
|
{ |
|
"epoch": 15.79, |
|
"eval_accuracy": 0.7513227513227513, |
|
"eval_loss": 1.2326399087905884, |
|
"eval_runtime": 3.2413, |
|
"eval_samples_per_second": 58.309, |
|
"eval_steps_per_second": 7.404, |
|
"step": 1690 |
|
}, |
|
{ |
|
"epoch": 15.89, |
|
"learning_rate": 1.5186915887850468e-06, |
|
"loss": 0.0104, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 15.89, |
|
"eval_accuracy": 0.7513227513227513, |
|
"eval_loss": 1.2315497398376465, |
|
"eval_runtime": 3.3871, |
|
"eval_samples_per_second": 55.8, |
|
"eval_steps_per_second": 7.086, |
|
"step": 1700 |
|
}, |
|
{ |
|
"epoch": 15.98, |
|
"learning_rate": 3.5046728971962617e-07, |
|
"loss": 0.0095, |
|
"step": 1710 |
|
}, |
|
{ |
|
"epoch": 15.98, |
|
"eval_accuracy": 0.7513227513227513, |
|
"eval_loss": 1.2312463521957397, |
|
"eval_runtime": 3.1744, |
|
"eval_samples_per_second": 59.54, |
|
"eval_steps_per_second": 7.561, |
|
"step": 1710 |
|
}, |
{
"epoch": 16.0,
"step": 1712,
"total_flos": 2.1042510824822538e+18,
"train_loss": 0.32865437336057146,
"train_runtime": 1354.021,
"train_samples_per_second": 20.053,
"train_steps_per_second": 1.264
}
],
"logging_steps": 10,
"max_steps": 1712,
"num_train_epochs": 16,
"save_steps": 10,
"total_flos": 2.1042510824822538e+18,
"trial_name": null,
"trial_params": null
}