{
  "best_metric": 0.3279295265674591,
  "best_model_checkpoint": "output/eminem/checkpoint-1365",
  "epoch": 3.0,
  "global_step": 1365,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 1.0194653534426477e-06,
      "loss": 0.6762,
      "step": 5
    },
    {
      "epoch": 0.02,
      "learning_rate": 6.530415424531046e-07,
      "loss": 0.6579,
      "step": 10
    },
    {
      "epoch": 0.03,
      "learning_rate": 3.675914059099763e-07,
      "loss": 0.7016,
      "step": 15
    },
    {
      "epoch": 0.04,
      "learning_rate": 1.6345512013444254e-07,
      "loss": 0.6176,
      "step": 20
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.087595819659287e-08,
      "loss": 0.563,
      "step": 25
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0,
      "loss": 0.689,
      "step": 30
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.0875958196577634e-08,
      "loss": 0.6532,
      "step": 35
    },
    {
      "epoch": 0.09,
      "learning_rate": 1.634551201344197e-07,
      "loss": 0.5922,
      "step": 40
    },
    {
      "epoch": 0.1,
      "learning_rate": 3.6759140590974026e-07,
      "loss": 0.6565,
      "step": 45
    },
    {
      "epoch": 0.11,
      "learning_rate": 6.530415424530588e-07,
      "loss": 0.7043,
      "step": 50
    },
    {
      "epoch": 0.12,
      "learning_rate": 1.0194653534425943e-06,
      "loss": 0.597,
      "step": 55
    },
    {
      "epoch": 0.13,
      "learning_rate": 1.4664261646975495e-06,
      "loss": 0.7246,
      "step": 60
    },
    {
      "epoch": 0.14,
      "learning_rate": 1.993391324572832e-06,
      "loss": 0.639,
      "step": 65
    },
    {
      "epoch": 0.15,
      "learning_rate": 2.5997328387288936e-06,
      "loss": 0.6834,
      "step": 70
    },
    {
      "epoch": 0.16,
      "learning_rate": 3.2847281185253694e-06,
      "loss": 0.6745,
      "step": 75
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.0475608421405796e-06,
      "loss": 0.5851,
      "step": 80
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.887321927404397e-06,
      "loss": 0.6805,
      "step": 85
    },
    {
      "epoch": 0.2,
      "learning_rate": 5.803010615159864e-06,
      "loss": 0.6536,
      "step": 90
    },
    {
      "epoch": 0.21,
      "learning_rate": 6.793535661893871e-06,
      "loss": 0.6119,
      "step": 95
    },
    {
      "epoch": 0.22,
      "learning_rate": 7.857716640189427e-06,
      "loss": 0.6954,
      "step": 100
    },
    {
      "epoch": 0.23,
      "learning_rate": 8.994285345464919e-06,
      "loss": 0.7077,
      "step": 105
    },
    {
      "epoch": 0.24,
      "learning_rate": 1.0201887307313696e-05,
      "loss": 0.6852,
      "step": 110
    },
    {
      "epoch": 0.25,
      "learning_rate": 1.147908340365762e-05,
      "loss": 0.6743,
      "step": 115
    },
    {
      "epoch": 0.26,
      "learning_rate": 1.2824351575772418e-05,
      "loss": 0.7093,
      "step": 120
    },
    {
      "epoch": 0.27,
      "learning_rate": 1.4236088642155879e-05,
      "loss": 0.696,
      "step": 125
    },
    {
      "epoch": 0.29,
      "learning_rate": 1.5712612209063624e-05,
      "loss": 0.6607,
      "step": 130
    },
    {
      "epoch": 0.3,
      "learning_rate": 1.7252162675462687e-05,
      "loss": 0.5847,
      "step": 135
    },
    {
      "epoch": 0.31,
      "learning_rate": 1.8852905329964338e-05,
      "loss": 0.6638,
      "step": 140
    },
    {
      "epoch": 0.32,
      "learning_rate": 2.051293253729783e-05,
      "loss": 0.6221,
      "step": 145
    },
    {
      "epoch": 0.33,
      "learning_rate": 2.2230266011669234e-05,
      "loss": 0.5733,
      "step": 150
    },
    {
      "epoch": 0.34,
      "learning_rate": 2.4002859174324688e-05,
      "loss": 0.6478,
      "step": 155
    },
    {
      "epoch": 0.35,
      "learning_rate": 2.5828599592491126e-05,
      "loss": 0.5237,
      "step": 160
    },
    {
      "epoch": 0.36,
      "learning_rate": 2.770531149681108e-05,
      "loss": 0.6559,
      "step": 165
    },
    {
      "epoch": 0.37,
      "learning_rate": 2.9630758374242324e-05,
      "loss": 0.6755,
      "step": 170
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.1602645633354207e-05,
      "loss": 0.6611,
      "step": 175
    },
    {
      "epoch": 0.4,
      "learning_rate": 3.3618623338835595e-05,
      "loss": 0.651,
      "step": 180
    },
    {
      "epoch": 0.41,
      "learning_rate": 3.5676289011958925e-05,
      "loss": 0.7284,
      "step": 185
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.7773190493652644e-05,
      "loss": 0.6301,
      "step": 190
    },
    {
      "epoch": 0.43,
      "learning_rate": 3.990682886679578e-05,
      "loss": 0.6188,
      "step": 195
    },
    {
      "epoch": 0.44,
      "learning_rate": 4.2074661434217846e-05,
      "loss": 0.586,
      "step": 200
    },
    {
      "epoch": 0.45,
      "learning_rate": 4.427410474888269e-05,
      "loss": 0.6809,
      "step": 205
    },
    {
      "epoch": 0.46,
      "learning_rate": 4.650253769262196e-05,
      "loss": 0.6343,
      "step": 210
    },
    {
      "epoch": 0.47,
      "learning_rate": 4.875730459979135e-05,
      "loss": 0.7333,
      "step": 215
    },
    {
      "epoch": 0.48,
      "learning_rate": 5.103571842205178e-05,
      "loss": 0.6158,
      "step": 220
    },
    {
      "epoch": 0.49,
      "learning_rate": 5.3335063930595955e-05,
      "loss": 0.6216,
      "step": 225
    },
    {
      "epoch": 0.51,
      "learning_rate": 5.565260095192864e-05,
      "loss": 0.7031,
      "step": 230
    },
    {
      "epoch": 0.52,
      "learning_rate": 5.7985567633386964e-05,
      "loss": 0.7186,
      "step": 235
    },
    {
      "epoch": 0.53,
      "learning_rate": 6.033118373448471e-05,
      "loss": 0.6841,
      "step": 240
    },
    {
      "epoch": 0.54,
      "learning_rate": 6.268665394018899e-05,
      "loss": 0.6929,
      "step": 245
    },
    {
      "epoch": 0.55,
      "learning_rate": 6.504917119214232e-05,
      "loss": 0.8001,
      "step": 250
    },
    {
      "epoch": 0.56,
      "learning_rate": 6.741592003389098e-05,
      "loss": 0.6497,
      "step": 255
    },
    {
      "epoch": 0.57,
      "learning_rate": 6.978407996610794e-05,
      "loss": 0.6001,
      "step": 260
    },
    {
      "epoch": 0.58,
      "learning_rate": 7.21508288078566e-05,
      "loss": 0.7792,
      "step": 265
    },
    {
      "epoch": 0.59,
      "learning_rate": 7.451334605980994e-05,
      "loss": 0.6229,
      "step": 270
    },
    {
      "epoch": 0.6,
      "learning_rate": 7.686881626551423e-05,
      "loss": 0.6103,
      "step": 275
    },
    {
      "epoch": 0.62,
      "learning_rate": 7.921443236661197e-05,
      "loss": 0.6509,
      "step": 280
    },
    {
      "epoch": 0.63,
      "learning_rate": 8.15473990480703e-05,
      "loss": 0.7008,
      "step": 285
    },
    {
      "epoch": 0.64,
      "learning_rate": 8.3864936069403e-05,
      "loss": 0.6919,
      "step": 290
    },
    {
      "epoch": 0.65,
      "learning_rate": 8.616428157794718e-05,
      "loss": 0.7347,
      "step": 295
    },
    {
      "epoch": 0.66,
      "learning_rate": 8.844269540020762e-05,
      "loss": 0.7356,
      "step": 300
    },
    {
      "epoch": 0.67,
      "learning_rate": 9.069746230737702e-05,
      "loss": 0.7,
      "step": 305
    },
    {
      "epoch": 0.68,
      "learning_rate": 9.29258952511163e-05,
      "loss": 0.6359,
      "step": 310
    },
    {
      "epoch": 0.69,
      "learning_rate": 9.512533856578116e-05,
      "loss": 0.7165,
      "step": 315
    },
    {
      "epoch": 0.7,
      "learning_rate": 9.729317113320324e-05,
      "loss": 0.7637,
      "step": 320
    },
    {
      "epoch": 0.71,
      "learning_rate": 9.942680950634639e-05,
      "loss": 0.6223,
      "step": 325
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00010152371098804014,
      "loss": 0.5555,
      "step": 330
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.00010358137666116348,
      "loss": 0.7219,
      "step": 335
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.00010559735436664489,
      "loss": 0.7501,
      "step": 340
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.0001075692416257568,
      "loss": 0.7454,
      "step": 345
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.00010949468850318805,
      "loss": 0.6956,
      "step": 350
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.00011137140040750957,
      "loss": 0.7294,
      "step": 355
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.00011319714082567451,
      "loss": 0.7416,
      "step": 360
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.00011496973398832998,
      "loss": 0.6926,
      "step": 365
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.00011668706746270142,
      "loss": 0.7841,
      "step": 370
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.00011834709467003491,
      "loss": 0.7603,
      "step": 375
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.00011994783732453659,
      "loss": 0.7249,
      "step": 380
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.0001214873877909357,
      "loss": 0.7231,
      "step": 385
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.00012296391135784465,
      "loss": 0.7101,
      "step": 390
    },
    {
      "epoch": 0.87,
      "learning_rate": 0.00012437564842422694,
      "loss": 0.7521,
      "step": 395
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.00012572091659634178,
      "loss": 0.6694,
      "step": 400
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.00012699811269268675,
      "loss": 0.6763,
      "step": 405
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.00012820571465453455,
      "loss": 0.7881,
      "step": 410
    },
    {
      "epoch": 0.91,
      "learning_rate": 0.00012934228335981007,
      "loss": 0.7906,
      "step": 415
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00013040646433810568,
      "loss": 0.7662,
      "step": 420
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.00013139698938483972,
      "loss": 0.7165,
      "step": 425
    },
    {
      "epoch": 0.95,
      "learning_rate": 0.00013231267807259521,
      "loss": 0.7159,
      "step": 430
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.00013315243915785907,
      "loss": 0.7796,
      "step": 435
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.0001339152718814749,
      "loss": 0.8222,
      "step": 440
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.0001346002671612708,
      "loss": 0.7428,
      "step": 445
    },
    {
      "epoch": 0.99,
      "learning_rate": 0.00013520660867542692,
      "loss": 0.7597,
      "step": 450
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.00013573357383530262,
      "loss": 0.8769,
      "step": 455
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.36263224482536316,
      "eval_runtime": 29.2619,
      "eval_samples_per_second": 22.008,
      "eval_steps_per_second": 2.768,
      "step": 455
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.0001361805346465572,
      "loss": 0.6009,
      "step": 460
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.00013654695845754679,
      "loss": 0.6886,
      "step": 465
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.00013683240859409016,
      "loss": 0.648,
      "step": 470
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.0001370365448798655,
      "loss": 0.6639,
      "step": 475
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.00013715912404180336,
      "loss": 0.6411,
      "step": 480
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.0001372,
      "loss": 0.5669,
      "step": 485
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.00013715912404180345,
      "loss": 0.7041,
      "step": 490
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.00013703654487986564,
      "loss": 0.7196,
      "step": 495
    },
    {
      "epoch": 1.1,
      "learning_rate": 0.00013683240859409013,
      "loss": 0.6656,
      "step": 500
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.00013654695845754679,
      "loss": 0.7371,
      "step": 505
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.00013618053464655754,
      "loss": 0.6728,
      "step": 510
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.0001357335738353026,
      "loss": 0.663,
      "step": 515
    },
    {
      "epoch": 1.14,
      "learning_rate": 0.00013520660867542687,
      "loss": 0.5962,
      "step": 520
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.0001346002671612713,
      "loss": 0.6391,
      "step": 525
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.00013391527188147485,
      "loss": 0.7195,
      "step": 530
    },
    {
      "epoch": 1.18,
      "learning_rate": 0.00013315243915785902,
      "loss": 0.7751,
      "step": 535
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.0001323126780725959,
      "loss": 0.6413,
      "step": 540
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.00013139698938484045,
      "loss": 0.7913,
      "step": 545
    },
    {
      "epoch": 1.21,
      "learning_rate": 0.0001304064643381056,
      "loss": 0.7761,
      "step": 550
    },
    {
      "epoch": 1.22,
      "learning_rate": 0.00012934228335981002,
      "loss": 0.6472,
      "step": 555
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.00012820571465453544,
      "loss": 0.6222,
      "step": 560
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.0001269981126926867,
      "loss": 0.6736,
      "step": 565
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.00012572091659634172,
      "loss": 0.73,
      "step": 570
    },
    {
      "epoch": 1.26,
      "learning_rate": 0.000124375648424228,
      "loss": 0.6426,
      "step": 575
    },
    {
      "epoch": 1.27,
      "learning_rate": 0.00012296391135784457,
      "loss": 0.7043,
      "step": 580
    },
    {
      "epoch": 1.29,
      "learning_rate": 0.00012148738779093562,
      "loss": 0.7227,
      "step": 585
    },
    {
      "epoch": 1.3,
      "learning_rate": 0.00011994783732453781,
      "loss": 0.7366,
      "step": 590
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.00011834709467003617,
      "loss": 0.7144,
      "step": 595
    },
    {
      "epoch": 1.32,
      "learning_rate": 0.00011668706746270132,
      "loss": 0.6817,
      "step": 600
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.00011496973398833133,
      "loss": 0.6274,
      "step": 605
    },
    {
      "epoch": 1.34,
      "learning_rate": 0.00011319714082567588,
      "loss": 0.6754,
      "step": 610
    },
    {
      "epoch": 1.35,
      "learning_rate": 0.00011137140040750945,
      "loss": 0.6134,
      "step": 615
    },
    {
      "epoch": 1.36,
      "learning_rate": 0.00010949468850318951,
      "loss": 0.698,
      "step": 620
    },
    {
      "epoch": 1.37,
      "learning_rate": 0.00010756924162575829,
      "loss": 0.8638,
      "step": 625
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.00010559735436664478,
      "loss": 0.7123,
      "step": 630
    },
    {
      "epoch": 1.4,
      "learning_rate": 0.00010358137666116336,
      "loss": 0.666,
      "step": 635
    },
    {
      "epoch": 1.41,
      "learning_rate": 0.00010152371098804174,
      "loss": 0.6977,
      "step": 640
    },
    {
      "epoch": 1.42,
      "learning_rate": 9.942680950634801e-05,
      "loss": 0.5854,
      "step": 645
    },
    {
      "epoch": 1.43,
      "learning_rate": 9.729317113320311e-05,
      "loss": 0.6888,
      "step": 650
    },
    {
      "epoch": 1.44,
      "learning_rate": 9.512533856578284e-05,
      "loss": 0.6905,
      "step": 655
    },
    {
      "epoch": 1.45,
      "learning_rate": 9.2925895251118e-05,
      "loss": 0.664,
      "step": 660
    },
    {
      "epoch": 1.46,
      "learning_rate": 9.069746230737689e-05,
      "loss": 0.7013,
      "step": 665
    },
    {
      "epoch": 1.47,
      "learning_rate": 8.844269540020936e-05,
      "loss": 0.6946,
      "step": 670
    },
    {
      "epoch": 1.48,
      "learning_rate": 8.616428157794893e-05,
      "loss": 0.7819,
      "step": 675
    },
    {
      "epoch": 1.49,
      "learning_rate": 8.386493606940288e-05,
      "loss": 0.6662,
      "step": 680
    },
    {
      "epoch": 1.51,
      "learning_rate": 8.154739904807017e-05,
      "loss": 0.5889,
      "step": 685
    },
    {
      "epoch": 1.52,
      "learning_rate": 7.921443236661376e-05,
      "loss": 0.7833,
      "step": 690
    },
    {
      "epoch": 1.53,
      "learning_rate": 7.686881626551602e-05,
      "loss": 0.7548,
      "step": 695
    },
    {
      "epoch": 1.54,
      "learning_rate": 7.45133460598098e-05,
      "loss": 0.6886,
      "step": 700
    },
    {
      "epoch": 1.55,
      "learning_rate": 7.215082880785842e-05,
      "loss": 0.7179,
      "step": 705
    },
    {
      "epoch": 1.56,
      "learning_rate": 6.978407996610975e-05,
      "loss": 0.745,
      "step": 710
    },
    {
      "epoch": 1.57,
      "learning_rate": 6.741592003389085e-05,
      "loss": 0.5811,
      "step": 715
    },
    {
      "epoch": 1.58,
      "learning_rate": 6.504917119214413e-05,
      "loss": 0.6962,
      "step": 720
    },
    {
      "epoch": 1.59,
      "learning_rate": 6.268665394019079e-05,
      "loss": 0.6763,
      "step": 725
    },
    {
      "epoch": 1.6,
      "learning_rate": 6.033118373448457e-05,
      "loss": 0.6247,
      "step": 730
    },
    {
      "epoch": 1.62,
      "learning_rate": 5.7985567633386836e-05,
      "loss": 0.7763,
      "step": 735
    },
    {
      "epoch": 1.63,
      "learning_rate": 5.5652600951930425e-05,
      "loss": 0.6239,
      "step": 740
    },
    {
      "epoch": 1.64,
      "learning_rate": 5.333506393059772e-05,
      "loss": 0.7218,
      "step": 745
    },
    {
      "epoch": 1.65,
      "learning_rate": 5.103571842205165e-05,
      "loss": 0.695,
      "step": 750
    },
    {
      "epoch": 1.66,
      "learning_rate": 4.8757304599793096e-05,
      "loss": 0.7938,
      "step": 755
    },
    {
      "epoch": 1.67,
      "learning_rate": 4.650253769262368e-05,
      "loss": 0.7333,
      "step": 760
    },
    {
      "epoch": 1.68,
      "learning_rate": 4.4274104748882565e-05,
      "loss": 0.7092,
      "step": 765
    },
    {
      "epoch": 1.69,
      "learning_rate": 4.2074661434219527e-05,
      "loss": 0.764,
      "step": 770
    },
    {
      "epoch": 1.7,
      "learning_rate": 3.9906828866797437e-05,
      "loss": 0.7929,
      "step": 775
    },
    {
      "epoch": 1.71,
      "learning_rate": 3.777319049365253e-05,
      "loss": 0.6747,
      "step": 780
    },
    {
      "epoch": 1.73,
      "learning_rate": 3.56762890119588e-05,
      "loss": 0.6921,
      "step": 785
    },
    {
      "epoch": 1.74,
      "learning_rate": 3.361862333883716e-05,
      "loss": 0.7049,
      "step": 790
    },
    {
      "epoch": 1.75,
      "learning_rate": 3.160264563335574e-05,
      "loss": 0.778,
      "step": 795
    },
    {
      "epoch": 1.76,
      "learning_rate": 2.9630758374242215e-05,
      "loss": 0.6684,
      "step": 800
    },
    {
      "epoch": 1.77,
      "learning_rate": 2.7705311496812532e-05,
      "loss": 0.6652,
      "step": 805
    },
    {
      "epoch": 1.78,
      "learning_rate": 2.582859959249102e-05,
      "loss": 0.7384,
      "step": 810
    },
    {
      "epoch": 1.79,
      "learning_rate": 2.400285917432458e-05,
      "loss": 0.6104,
      "step": 815
    },
    {
      "epoch": 1.8,
      "learning_rate": 2.2230266011670566e-05,
      "loss": 0.7144,
      "step": 820
    },
    {
      "epoch": 1.81,
      "learning_rate": 2.0512932537299123e-05,
      "loss": 0.5937,
      "step": 825
    },
    {
      "epoch": 1.82,
      "learning_rate": 1.885290532996424e-05,
      "loss": 0.7779,
      "step": 830
    },
    {
      "epoch": 1.84,
      "learning_rate": 1.7252162675462595e-05,
      "loss": 0.7421,
      "step": 835
    },
    {
      "epoch": 1.85,
      "learning_rate": 1.571261220906478e-05,
      "loss": 0.7221,
      "step": 840
    },
    {
      "epoch": 1.86,
      "learning_rate": 1.4236088642155802e-05,
      "loss": 0.757,
      "step": 845
    },
    {
      "epoch": 1.87,
      "learning_rate": 1.2824351575772341e-05,
      "loss": 0.752,
      "step": 850
    },
    {
      "epoch": 1.88,
      "learning_rate": 1.1479083403658627e-05,
      "loss": 0.743,
      "step": 855
    },
    {
      "epoch": 1.89,
      "learning_rate": 1.0201887307313627e-05,
      "loss": 0.5773,
      "step": 860
    },
    {
      "epoch": 1.9,
      "learning_rate": 8.994285345464858e-06,
      "loss": 0.7125,
      "step": 865
    },
    {
      "epoch": 1.91,
      "learning_rate": 7.857716640190273e-06,
      "loss": 0.6377,
      "step": 870
    },
    {
      "epoch": 1.92,
      "learning_rate": 6.793535661894656e-06,
      "loss": 0.7089,
      "step": 875
    },
    {
      "epoch": 1.93,
      "learning_rate": 5.803010615159811e-06,
      "loss": 0.61,
      "step": 880
    },
    {
      "epoch": 1.95,
      "learning_rate": 4.887321927404351e-06,
      "loss": 0.696,
      "step": 885
    },
    {
      "epoch": 1.96,
      "learning_rate": 4.047560842141189e-06,
      "loss": 0.6485,
      "step": 890
    },
    {
      "epoch": 1.97,
      "learning_rate": 3.2847281185253237e-06,
      "loss": 0.6823,
      "step": 895
    },
    {
      "epoch": 1.98,
      "learning_rate": 2.599732838728863e-06,
      "loss": 0.7038,
      "step": 900
    },
    {
      "epoch": 1.99,
      "learning_rate": 1.993391324573266e-06,
      "loss": 0.6816,
      "step": 905
    },
    {
      "epoch": 2.0,
      "learning_rate": 1.4664261646975266e-06,
      "loss": 0.6658,
      "step": 910
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.3614741861820221,
      "eval_runtime": 29.3257,
      "eval_samples_per_second": 21.96,
      "eval_steps_per_second": 2.762,
      "step": 910
    },
    {
      "epoch": 2.01,
      "learning_rate": 1.0194653534425715e-06,
      "loss": 0.6519,
      "step": 915
    },
    {
      "epoch": 2.02,
      "learning_rate": 6.530415424533102e-07,
      "loss": 0.5683,
      "step": 920
    },
    {
      "epoch": 2.03,
      "learning_rate": 3.6759140590993064e-07,
      "loss": 0.6592,
      "step": 925
    },
    {
      "epoch": 2.04,
      "learning_rate": 1.6345512013440448e-07,
      "loss": 0.5654,
      "step": 930
    },
    {
      "epoch": 2.05,
      "learning_rate": 4.0875958196577634e-08,
      "loss": 0.6149,
      "step": 935
    },
    {
      "epoch": 2.07,
      "learning_rate": 0.0,
      "loss": 0.6412,
      "step": 940
    },
    {
      "epoch": 2.08,
      "learning_rate": 4.0875958196600484e-08,
      "loss": 0.6781,
      "step": 945
    },
    {
      "epoch": 2.09,
      "learning_rate": 1.6345512013445016e-07,
      "loss": 0.6225,
      "step": 950
    },
    {
      "epoch": 2.1,
      "learning_rate": 3.675914059097936e-07,
      "loss": 0.5624,
      "step": 955
    },
    {
      "epoch": 2.11,
      "learning_rate": 6.530415424531273e-07,
      "loss": 0.6362,
      "step": 960
    },
    {
      "epoch": 2.12,
      "learning_rate": 1.0194653534426706e-06,
      "loss": 0.598,
      "step": 965
    },
    {
      "epoch": 2.13,
      "learning_rate": 1.4664261646972448e-06,
      "loss": 0.623,
      "step": 970
    },
    {
      "epoch": 2.14,
      "learning_rate": 1.9933913245729463e-06,
      "loss": 0.6036,
      "step": 975
    },
    {
      "epoch": 2.15,
      "learning_rate": 2.599732838729023e-06,
      "loss": 0.6551,
      "step": 980
    },
    {
      "epoch": 2.16,
      "learning_rate": 3.2847281185249125e-06,
      "loss": 0.674,
      "step": 985
    },
    {
      "epoch": 2.18,
      "learning_rate": 4.047560842140732e-06,
      "loss": 0.6451,
      "step": 990
    },
    {
      "epoch": 2.19,
      "learning_rate": 4.887321927404572e-06,
      "loss": 0.5867,
      "step": 995
    },
    {
      "epoch": 2.2,
      "learning_rate": 5.803010615159263e-06,
      "loss": 0.5721,
      "step": 1000
    },
    {
      "epoch": 2.21,
      "learning_rate": 6.7935356618940695e-06,
      "loss": 0.7243,
      "step": 1005
    },
    {
      "epoch": 2.22,
      "learning_rate": 7.857716640189648e-06,
      "loss": 0.6183,
      "step": 1010
    },
    {
      "epoch": 2.23,
      "learning_rate": 8.994285345464189e-06,
      "loss": 0.6326,
      "step": 1015
    },
    {
      "epoch": 2.24,
      "learning_rate": 1.0201887307312919e-05,
      "loss": 0.6777,
      "step": 1020
    },
    {
      "epoch": 2.25,
      "learning_rate": 1.147908340365788e-05,
      "loss": 0.5987,
      "step": 1025
    },
    {
      "epoch": 2.26,
      "learning_rate": 1.2824351575772692e-05,
      "loss": 0.6252,
      "step": 1030
    },
    {
      "epoch": 2.27,
      "learning_rate": 1.423608864215498e-05,
      "loss": 0.5721,
      "step": 1035
    },
    {
      "epoch": 2.29,
      "learning_rate": 1.571261220906393e-05,
      "loss": 0.5737,
      "step": 1040
    },
    {
      "epoch": 2.3,
      "learning_rate": 1.7252162675463e-05,
      "loss": 0.7001,
      "step": 1045
    },
    {
      "epoch": 2.31,
      "learning_rate": 1.885290532996332e-05,
      "loss": 0.5738,
      "step": 1050
    },
    {
      "epoch": 2.32,
      "learning_rate": 2.0512932537298164e-05,
      "loss": 0.6935,
      "step": 1055
    },
    {
      "epoch": 2.33,
      "learning_rate": 2.2230266011669576e-05,
      "loss": 0.6562,
      "step": 1060
    },
    {
      "epoch": 2.34,
      "learning_rate": 2.400285917432356e-05,
      "loss": 0.6026,
      "step": 1065
    },
    {
      "epoch": 2.35,
      "learning_rate": 2.5828599592489968e-05,
      "loss": 0.5634,
      "step": 1070
    },
    {
      "epoch": 2.36,
      "learning_rate": 2.7705311496811458e-05,
      "loss": 0.6095,
      "step": 1075
    },
    {
      "epoch": 2.37,
      "learning_rate": 2.9630758374242713e-05,
      "loss": 0.6006,
      "step": 1080
    },
    {
      "epoch": 2.38,
      "learning_rate": 3.16026456333546e-05,
      "loss": 0.675,
      "step": 1085
    },
    {
      "epoch": 2.4,
      "learning_rate": 3.3618623338836e-05,
      "loss": 0.6409,
      "step": 1090
    },
    {
      "epoch": 2.41,
      "learning_rate": 3.567628901195933e-05,
      "loss": 0.5407,
      "step": 1095
    },
    {
      "epoch": 2.42,
      "learning_rate": 3.777319049365133e-05,
      "loss": 0.6891,
      "step": 1100
    },
    {
      "epoch": 2.43,
      "learning_rate": 3.990682886679622e-05,
      "loss": 0.6107,
      "step": 1105
    },
    {
      "epoch": 2.44,
      "learning_rate": 4.207466143421829e-05,
      "loss": 0.7285,
      "step": 1110
    },
    {
      "epoch": 2.45,
      "learning_rate": 4.42741047488813e-05,
      "loss": 0.6553,
      "step": 1115
    },
    {
      "epoch": 2.46,
      "learning_rate": 4.650253769262241e-05,
      "loss": 0.5883,
      "step": 1120
    },
    {
      "epoch": 2.47,
      "learning_rate": 4.87573045997918e-05,
      "loss": 0.6072,
      "step": 1125
    },
    {
      "epoch": 2.48,
      "learning_rate": 5.103571842205223e-05,
      "loss": 0.6408,
      "step": 1130
    },
    {
      "epoch": 2.49,
      "learning_rate": 5.333506393059641e-05,
      "loss": 0.6753,
      "step": 1135
    },
    {
      "epoch": 2.51,
      "learning_rate": 5.565260095192911e-05,
      "loss": 0.6189,
      "step": 1140
    },
    {
      "epoch": 2.52,
      "learning_rate": 5.7985567633387425e-05,
      "loss": 0.5952,
      "step": 1145
    },
    {
      "epoch": 2.53,
      "learning_rate": 6.033118373448324e-05,
      "loss": 0.6261,
      "step": 1150
    },
    {
      "epoch": 2.54,
      "learning_rate": 6.268665394018945e-05,
      "loss": 0.6251,
      "step": 1155
    },
    {
      "epoch": 2.55,
      "learning_rate": 6.504917119214278e-05,
      "loss": 0.6952,
      "step": 1160
    },
    {
      "epoch": 2.56,
      "learning_rate": 6.74159200338895e-05,
      "loss": 0.6817,
      "step": 1165
    },
    {
      "epoch": 2.57,
      "learning_rate": 6.978407996610841e-05,
      "loss": 0.7109,
      "step": 1170
    },
    {
      "epoch": 2.58,
      "learning_rate": 7.215082880785708e-05,
      "loss": 0.6167,
      "step": 1175
    },
    {
      "epoch": 2.59,
      "learning_rate": 7.451334605981042e-05,
      "loss": 0.6916,
      "step": 1180
    },
    {
      "epoch": 2.6,
      "learning_rate": 7.686881626551469e-05,
      "loss": 0.717,
      "step": 1185
    },
    {
      "epoch": 2.62,
      "learning_rate": 7.921443236661243e-05,
      "loss": 0.6463,
      "step": 1190
    },
    {
      "epoch": 2.63,
      "learning_rate": 8.154739904807078e-05,
      "loss": 0.6973,
      "step": 1195
    },
    {
      "epoch": 2.64,
      "learning_rate": 8.386493606940157e-05,
      "loss": 0.6248,
      "step": 1200
    },
    {
      "epoch": 2.65,
      "learning_rate": 8.616428157794764e-05,
      "loss": 0.5914,
      "step": 1205
    },
    {
      "epoch": 2.66,
      "learning_rate": 8.844269540020808e-05,
      "loss": 0.7203,
      "step": 1210
    },
    {
      "epoch": 2.67,
      "learning_rate": 9.069746230737563e-05,
      "loss": 0.6267,
      "step": 1215
    },
    {
      "epoch": 2.68,
      "learning_rate": 9.292589525111675e-05,
      "loss": 0.6014,
      "step": 1220
    },
    {
      "epoch": 2.69,
      "learning_rate": 9.512533856578159e-05,
      "loss": 0.648,
      "step": 1225
    },
    {
      "epoch": 2.7,
      "learning_rate": 9.729317113320367e-05,
      "loss": 0.7405,
      "step": 1230
    },
    {
      "epoch": 2.71,
      "learning_rate": 9.942680950634681e-05,
      "loss": 0.6092,
      "step": 1235
    },
    {
      "epoch": 2.73,
      "learning_rate": 0.00010152371098804056,
      "loss": 0.7414,
      "step": 1240
    },
    {
      "epoch": 2.74,
      "learning_rate": 0.0001035813766611639,
      "loss": 0.6629,
      "step": 1245
    },
    {
      "epoch": 2.75,
      "learning_rate": 0.00010559735436664364,
      "loss": 0.6391,
      "step": 1250
    },
    {
      "epoch": 2.76,
      "learning_rate": 0.00010756924162575718,
      "loss": 0.6851,
      "step": 1255
    },
    {
      "epoch": 2.77,
      "learning_rate": 0.00010949468850318844,
      "loss": 0.6383,
      "step": 1260
    },
    {
      "epoch": 2.78,
      "learning_rate": 0.00011137140040750841,
      "loss": 0.6092,
      "step": 1265
    },
    {
      "epoch": 2.79,
      "learning_rate": 0.00011319714082567486,
      "loss": 0.7265,
      "step": 1270
    },
    {
      "epoch": 2.8,
      "learning_rate": 0.00011496973398833032,
      "loss": 0.7022,
      "step": 1275
    },
    {
      "epoch": 2.81,
      "learning_rate": 0.00011668706746270174,
      "loss": 0.6906,
      "step": 1280
    },
    {
      "epoch": 2.82,
      "learning_rate": 0.00011834709467003525,
      "loss": 0.7019,
      "step": 1285
    },
    {
      "epoch": 2.84,
      "learning_rate": 0.0001199478373245369,
      "loss": 0.7799,
      "step": 1290
    },
    {
      "epoch": 2.85,
      "learning_rate": 0.00012148738779093598,
      "loss": 0.6424,
      "step": 1295
    },
    {
      "epoch": 2.86,
      "learning_rate": 0.00012296391135784376,
      "loss": 0.6426,
      "step": 1300
    },
    {
      "epoch": 2.87,
      "learning_rate": 0.00012437564842422724,
      "loss": 0.679,
      "step": 1305
    },
    {
      "epoch": 2.88,
      "learning_rate": 0.00012572091659634205,
      "loss": 0.732,
      "step": 1310
    },
    {
      "epoch": 2.89,
      "learning_rate": 0.00012699811269268599,
      "loss": 0.719,
      "step": 1315
    },
    {
      "epoch": 2.9,
      "learning_rate": 0.00012820571465453477,
      "loss": 0.682,
      "step": 1320
    },
    {
      "epoch": 2.91,
      "learning_rate": 0.0001293422833598103,
      "loss": 0.715,
      "step": 1325
    },
    {
      "epoch": 2.92,
      "learning_rate": 0.00013040646433810587,
      "loss": 0.59,
      "step": 1330
    },
    {
      "epoch": 2.93,
      "learning_rate": 0.00013139698938483988,
      "loss": 0.6825,
      "step": 1335
    },
    {
      "epoch": 2.95,
      "learning_rate": 0.00013231267807259538,
      "loss": 0.7497,
      "step": 1340
    },
    {
      "epoch": 2.96,
      "learning_rate": 0.00013315243915785924,
      "loss": 0.7488,
      "step": 1345
    },
    {
      "epoch": 2.97,
      "learning_rate": 0.00013391527188147444,
      "loss": 0.7079,
      "step": 1350
    },
    {
      "epoch": 2.98,
      "learning_rate": 0.00013460026716127094,
      "loss": 0.6717,
      "step": 1355
    },
    {
      "epoch": 2.99,
      "learning_rate": 0.00013520660867542703,
      "loss": 0.6389,
      "step": 1360
    },
    {
      "epoch": 3.0,
      "learning_rate": 0.00013573357383530232,
      "loss": 0.6948,
      "step": 1365
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.3279295265674591,
      "eval_runtime": 8.375,
      "eval_samples_per_second": 76.179,
      "eval_steps_per_second": 9.552,
      "step": 1365
    }
  ],
  "max_steps": 1365,
  "num_train_epochs": 3,
  "total_flos": 1424956096512000.0,
  "trial_name": null,
  "trial_params": null
}