{ "best_metric": null, "best_model_checkpoint": null, "epoch": 1.9928047404063205, "global_step": 56500, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.02, "learning_rate": 4.955911399548533e-05, "loss": 1.0947, "step": 500 }, { "epoch": 0.04, "learning_rate": 4.911822799097065e-05, "loss": 0.9881, "step": 1000 }, { "epoch": 0.05, "learning_rate": 4.867734198645598e-05, "loss": 0.9375, "step": 1500 }, { "epoch": 0.07, "learning_rate": 4.8236455981941316e-05, "loss": 0.9275, "step": 2000 }, { "epoch": 0.09, "learning_rate": 4.779556997742664e-05, "loss": 0.9047, "step": 2500 }, { "epoch": 0.11, "learning_rate": 4.7354683972911966e-05, "loss": 0.8813, "step": 3000 }, { "epoch": 0.12, "learning_rate": 4.6913797968397295e-05, "loss": 0.8806, "step": 3500 }, { "epoch": 0.14, "learning_rate": 4.647291196388262e-05, "loss": 0.8718, "step": 4000 }, { "epoch": 0.16, "learning_rate": 4.6032025959367945e-05, "loss": 0.8828, "step": 4500 }, { "epoch": 0.18, "learning_rate": 4.559113995485327e-05, "loss": 0.8524, "step": 5000 }, { "epoch": 0.19, "learning_rate": 4.51502539503386e-05, "loss": 0.8464, "step": 5500 }, { "epoch": 0.21, "learning_rate": 4.470936794582393e-05, "loss": 0.8411, "step": 6000 }, { "epoch": 0.23, "learning_rate": 4.426848194130926e-05, "loss": 0.839, "step": 6500 }, { "epoch": 0.25, "learning_rate": 4.382759593679459e-05, "loss": 0.838, "step": 7000 }, { "epoch": 0.26, "learning_rate": 4.338670993227991e-05, "loss": 0.8531, "step": 7500 }, { "epoch": 0.28, "learning_rate": 4.294582392776524e-05, "loss": 0.8109, "step": 8000 }, { "epoch": 0.3, "learning_rate": 4.2504937923250566e-05, "loss": 0.8364, "step": 8500 }, { "epoch": 0.32, "learning_rate": 4.2064051918735894e-05, "loss": 0.8249, "step": 9000 }, { "epoch": 0.34, "learning_rate": 4.1623165914221216e-05, "loss": 0.8349, "step": 9500 }, { "epoch": 0.35, "learning_rate": 4.118227990970655e-05, "loss": 0.8136, "step": 10000 }, { "epoch": 0.37, "learning_rate": 4.074139390519188e-05, "loss": 0.8099, "step": 10500 }, { "epoch": 0.39, "learning_rate": 4.03005079006772e-05, "loss": 0.8054, "step": 11000 }, { "epoch": 0.41, "learning_rate": 3.985962189616253e-05, "loss": 0.8195, "step": 11500 }, { "epoch": 0.42, "learning_rate": 3.941873589164786e-05, "loss": 0.8646, "step": 12000 }, { "epoch": 0.44, "learning_rate": 3.897784988713318e-05, "loss": 0.8038, "step": 12500 }, { "epoch": 0.46, "learning_rate": 3.853696388261851e-05, "loss": 0.7897, "step": 13000 }, { "epoch": 0.48, "learning_rate": 3.8096077878103843e-05, "loss": 0.7991, "step": 13500 }, { "epoch": 0.49, "learning_rate": 3.7655191873589165e-05, "loss": 0.7834, "step": 14000 }, { "epoch": 0.51, "learning_rate": 3.7214305869074494e-05, "loss": 0.8035, "step": 14500 }, { "epoch": 0.53, "learning_rate": 3.677341986455982e-05, "loss": 0.7965, "step": 15000 }, { "epoch": 0.55, "learning_rate": 3.633253386004515e-05, "loss": 0.8022, "step": 15500 }, { "epoch": 0.56, "learning_rate": 3.589164785553047e-05, "loss": 0.7959, "step": 16000 }, { "epoch": 0.58, "learning_rate": 3.54507618510158e-05, "loss": 0.8167, "step": 16500 }, { "epoch": 0.6, "learning_rate": 3.500987584650113e-05, "loss": 0.8003, "step": 17000 }, { "epoch": 0.62, "learning_rate": 3.456898984198646e-05, "loss": 0.8143, "step": 17500 }, { "epoch": 0.63, "learning_rate": 3.4128103837471786e-05, "loss": 0.797, "step": 18000 }, { "epoch": 0.65, "learning_rate": 3.3687217832957114e-05, "loss": 0.7655, "step": 18500 }, { "epoch": 0.67, 
"learning_rate": 3.324633182844244e-05, "loss": 0.7821, "step": 19000 }, { "epoch": 0.69, "learning_rate": 3.2805445823927765e-05, "loss": 0.7721, "step": 19500 }, { "epoch": 0.71, "learning_rate": 3.236455981941309e-05, "loss": 0.835, "step": 20000 }, { "epoch": 0.72, "learning_rate": 3.192367381489842e-05, "loss": 0.7945, "step": 20500 }, { "epoch": 0.74, "learning_rate": 3.148278781038374e-05, "loss": 0.8042, "step": 21000 }, { "epoch": 0.76, "learning_rate": 3.104190180586908e-05, "loss": 0.7661, "step": 21500 }, { "epoch": 0.78, "learning_rate": 3.060101580135441e-05, "loss": 0.7981, "step": 22000 }, { "epoch": 0.79, "learning_rate": 3.0160129796839732e-05, "loss": 0.7588, "step": 22500 }, { "epoch": 0.81, "learning_rate": 2.9719243792325057e-05, "loss": 0.837, "step": 23000 }, { "epoch": 0.83, "learning_rate": 2.9278357787810385e-05, "loss": 0.7686, "step": 23500 }, { "epoch": 0.85, "learning_rate": 2.883747178329571e-05, "loss": 0.7711, "step": 24000 }, { "epoch": 0.86, "learning_rate": 2.839658577878104e-05, "loss": 0.7785, "step": 24500 }, { "epoch": 0.88, "learning_rate": 2.795569977426637e-05, "loss": 0.7949, "step": 25000 }, { "epoch": 0.9, "learning_rate": 2.7514813769751696e-05, "loss": 0.7737, "step": 25500 }, { "epoch": 0.92, "learning_rate": 2.7073927765237024e-05, "loss": 0.7719, "step": 26000 }, { "epoch": 0.93, "learning_rate": 2.663304176072235e-05, "loss": 0.7666, "step": 26500 }, { "epoch": 0.95, "learning_rate": 2.6192155756207674e-05, "loss": 0.7678, "step": 27000 }, { "epoch": 0.97, "learning_rate": 2.5751269751693003e-05, "loss": 0.8034, "step": 27500 }, { "epoch": 0.99, "learning_rate": 2.5310383747178328e-05, "loss": 0.748, "step": 28000 }, { "epoch": 1.0, "eval_accuracy": 0.6928090453147888, "eval_loss": 0.7356609106063843, "eval_runtime": 352.934, "eval_samples_per_second": 66.945, "eval_steps_per_second": 11.158, "step": 28352 }, { "epoch": 1.01, "learning_rate": 2.486949774266366e-05, "loss": 0.7526, "step": 28500 }, { "epoch": 1.02, "learning_rate": 2.4428611738148985e-05, "loss": 0.6793, "step": 29000 }, { "epoch": 1.04, "learning_rate": 2.398772573363431e-05, "loss": 0.7199, "step": 29500 }, { "epoch": 1.06, "learning_rate": 2.3546839729119642e-05, "loss": 0.6832, "step": 30000 }, { "epoch": 1.08, "learning_rate": 2.3105953724604967e-05, "loss": 0.6787, "step": 30500 }, { "epoch": 1.09, "learning_rate": 2.2665067720090295e-05, "loss": 0.6795, "step": 31000 }, { "epoch": 1.11, "learning_rate": 2.222418171557562e-05, "loss": 0.6592, "step": 31500 }, { "epoch": 1.13, "learning_rate": 2.178329571106095e-05, "loss": 0.6692, "step": 32000 }, { "epoch": 1.15, "learning_rate": 2.1342409706546277e-05, "loss": 0.6884, "step": 32500 }, { "epoch": 1.16, "learning_rate": 2.0901523702031602e-05, "loss": 0.6915, "step": 33000 }, { "epoch": 1.18, "learning_rate": 2.046063769751693e-05, "loss": 0.659, "step": 33500 }, { "epoch": 1.2, "learning_rate": 2.001975169300226e-05, "loss": 0.6741, "step": 34000 }, { "epoch": 1.22, "learning_rate": 1.9578865688487584e-05, "loss": 0.6489, "step": 34500 }, { "epoch": 1.23, "learning_rate": 1.9137979683972913e-05, "loss": 0.6568, "step": 35000 }, { "epoch": 1.25, "learning_rate": 1.869709367945824e-05, "loss": 0.6982, "step": 35500 }, { "epoch": 1.27, "learning_rate": 1.8256207674943566e-05, "loss": 0.6673, "step": 36000 }, { "epoch": 1.29, "learning_rate": 1.7815321670428895e-05, "loss": 0.675, "step": 36500 }, { "epoch": 1.31, "learning_rate": 1.737443566591422e-05, "loss": 0.6711, "step": 37000 }, { "epoch": 1.32, "learning_rate": 
1.693354966139955e-05, "loss": 0.6643, "step": 37500 }, { "epoch": 1.34, "learning_rate": 1.6492663656884877e-05, "loss": 0.6788, "step": 38000 }, { "epoch": 1.36, "learning_rate": 1.6051777652370205e-05, "loss": 0.6976, "step": 38500 }, { "epoch": 1.38, "learning_rate": 1.561089164785553e-05, "loss": 0.6543, "step": 39000 }, { "epoch": 1.39, "learning_rate": 1.5170005643340859e-05, "loss": 0.6651, "step": 39500 }, { "epoch": 1.41, "learning_rate": 1.4729119638826185e-05, "loss": 0.6512, "step": 40000 }, { "epoch": 1.43, "learning_rate": 1.4288233634311512e-05, "loss": 0.6417, "step": 40500 }, { "epoch": 1.45, "learning_rate": 1.3847347629796839e-05, "loss": 0.662, "step": 41000 }, { "epoch": 1.46, "learning_rate": 1.3406461625282169e-05, "loss": 0.6715, "step": 41500 }, { "epoch": 1.48, "learning_rate": 1.2965575620767496e-05, "loss": 0.6519, "step": 42000 }, { "epoch": 1.5, "learning_rate": 1.2524689616252821e-05, "loss": 0.627, "step": 42500 }, { "epoch": 1.52, "learning_rate": 1.208380361173815e-05, "loss": 0.6357, "step": 43000 }, { "epoch": 1.53, "learning_rate": 1.1642917607223478e-05, "loss": 0.6612, "step": 43500 }, { "epoch": 1.55, "learning_rate": 1.1202031602708805e-05, "loss": 0.688, "step": 44000 }, { "epoch": 1.57, "learning_rate": 1.0761145598194131e-05, "loss": 0.6556, "step": 44500 }, { "epoch": 1.59, "learning_rate": 1.0320259593679458e-05, "loss": 0.6514, "step": 45000 }, { "epoch": 1.6, "learning_rate": 9.879373589164786e-06, "loss": 0.6476, "step": 45500 }, { "epoch": 1.62, "learning_rate": 9.438487584650113e-06, "loss": 0.6519, "step": 46000 }, { "epoch": 1.64, "learning_rate": 8.997601580135442e-06, "loss": 0.612, "step": 46500 }, { "epoch": 1.66, "learning_rate": 8.556715575620767e-06, "loss": 0.6314, "step": 47000 }, { "epoch": 1.68, "learning_rate": 8.115829571106095e-06, "loss": 0.6279, "step": 47500 }, { "epoch": 1.69, "learning_rate": 7.674943566591422e-06, "loss": 0.6498, "step": 48000 }, { "epoch": 1.71, "learning_rate": 7.23405756207675e-06, "loss": 0.6608, "step": 48500 }, { "epoch": 1.73, "learning_rate": 6.793171557562076e-06, "loss": 0.645, "step": 49000 }, { "epoch": 1.75, "learning_rate": 6.352285553047405e-06, "loss": 0.6612, "step": 49500 }, { "epoch": 1.76, "learning_rate": 5.9113995485327315e-06, "loss": 0.6263, "step": 50000 }, { "epoch": 1.78, "learning_rate": 5.470513544018059e-06, "loss": 0.6374, "step": 50500 }, { "epoch": 1.8, "learning_rate": 5.029627539503387e-06, "loss": 0.6385, "step": 51000 }, { "epoch": 1.82, "learning_rate": 4.5887415349887135e-06, "loss": 0.6362, "step": 51500 }, { "epoch": 1.83, "learning_rate": 4.147855530474041e-06, "loss": 0.6342, "step": 52000 }, { "epoch": 1.85, "learning_rate": 3.7069695259593683e-06, "loss": 0.6714, "step": 52500 }, { "epoch": 1.87, "learning_rate": 3.2660835214446955e-06, "loss": 0.6355, "step": 53000 }, { "epoch": 1.89, "learning_rate": 2.8251975169300226e-06, "loss": 0.6151, "step": 53500 }, { "epoch": 1.9, "learning_rate": 2.38431151241535e-06, "loss": 0.6461, "step": 54000 }, { "epoch": 1.92, "learning_rate": 1.943425507900677e-06, "loss": 0.6533, "step": 54500 }, { "epoch": 1.94, "learning_rate": 1.5025395033860046e-06, "loss": 0.6376, "step": 55000 }, { "epoch": 1.96, "learning_rate": 1.0616534988713318e-06, "loss": 0.5976, "step": 55500 }, { "epoch": 1.98, "learning_rate": 6.207674943566591e-07, "loss": 0.6318, "step": 56000 }, { "epoch": 1.99, "learning_rate": 1.7988148984198645e-07, "loss": 0.6471, "step": 56500 } ], "max_steps": 56704, "num_train_epochs": 2, "total_flos": 
7.589219989624102e+16, "trial_name": null, "trial_params": null }