{ "best_metric": null, "best_model_checkpoint": null, "epoch": 5.0, "eval_steps": 500, "global_step": 1560, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0, "grad_norm": 134.19052124023438, "learning_rate": 4.996794871794872e-05, "loss": 9.1177, "step": 1 }, { "epoch": 0.1, "grad_norm": 46.553192138671875, "learning_rate": 4.9006410256410256e-05, "loss": 2.6116, "step": 31 }, { "epoch": 0.2, "grad_norm": 17.68166732788086, "learning_rate": 4.8012820512820516e-05, "loss": 0.8879, "step": 62 }, { "epoch": 0.3, "grad_norm": 17.172128677368164, "learning_rate": 4.701923076923077e-05, "loss": 1.0567, "step": 93 }, { "epoch": 0.4, "grad_norm": 104.65974426269531, "learning_rate": 4.602564102564102e-05, "loss": 0.9872, "step": 124 }, { "epoch": 0.5, "grad_norm": 16.036008834838867, "learning_rate": 4.503205128205128e-05, "loss": 0.9717, "step": 155 }, { "epoch": 0.6, "grad_norm": 27.394895553588867, "learning_rate": 4.403846153846154e-05, "loss": 0.8679, "step": 186 }, { "epoch": 0.7, "grad_norm": 37.157169342041016, "learning_rate": 4.30448717948718e-05, "loss": 0.8257, "step": 217 }, { "epoch": 0.79, "grad_norm": 8.90817928314209, "learning_rate": 4.205128205128206e-05, "loss": 0.8604, "step": 248 }, { "epoch": 0.89, "grad_norm": 13.38546085357666, "learning_rate": 4.105769230769231e-05, "loss": 0.8587, "step": 279 }, { "epoch": 0.99, "grad_norm": 32.03287124633789, "learning_rate": 4.006410256410257e-05, "loss": 0.8512, "step": 310 }, { "epoch": 1.09, "grad_norm": 20.29245376586914, "learning_rate": 3.9070512820512824e-05, "loss": 0.6728, "step": 341 }, { "epoch": 1.19, "grad_norm": 10.910124778747559, "learning_rate": 3.807692307692308e-05, "loss": 0.611, "step": 372 }, { "epoch": 1.29, "grad_norm": 14.703102111816406, "learning_rate": 3.708333333333334e-05, "loss": 0.6468, "step": 403 }, { "epoch": 1.39, "grad_norm": 9.841747283935547, "learning_rate": 3.608974358974359e-05, "loss": 0.5327, "step": 434 }, { "epoch": 1.49, "grad_norm": 15.09598445892334, "learning_rate": 3.5096153846153845e-05, "loss": 0.5669, "step": 465 }, { "epoch": 1.59, "grad_norm": 18.652040481567383, "learning_rate": 3.4102564102564105e-05, "loss": 0.4854, "step": 496 }, { "epoch": 1.69, "grad_norm": 8.733405113220215, "learning_rate": 3.310897435897436e-05, "loss": 0.5627, "step": 527 }, { "epoch": 1.79, "grad_norm": 21.200965881347656, "learning_rate": 3.211538461538462e-05, "loss": 0.5001, "step": 558 }, { "epoch": 1.89, "grad_norm": 7.368778228759766, "learning_rate": 3.112179487179487e-05, "loss": 0.4897, "step": 589 }, { "epoch": 1.99, "grad_norm": 16.03817367553711, "learning_rate": 3.012820512820513e-05, "loss": 0.5294, "step": 620 }, { "epoch": 2.09, "grad_norm": 35.705631256103516, "learning_rate": 2.913461538461539e-05, "loss": 0.4197, "step": 651 }, { "epoch": 2.19, "grad_norm": 10.462698936462402, "learning_rate": 2.8141025641025643e-05, "loss": 0.3589, "step": 682 }, { "epoch": 2.29, "grad_norm": 7.011005878448486, "learning_rate": 2.7147435897435896e-05, "loss": 0.4396, "step": 713 }, { "epoch": 2.38, "grad_norm": 4.32899284362793, "learning_rate": 2.6153846153846157e-05, "loss": 0.4604, "step": 744 }, { "epoch": 2.48, "grad_norm": 5.010735988616943, "learning_rate": 2.516025641025641e-05, "loss": 0.3764, "step": 775 }, { "epoch": 2.58, "grad_norm": 6.4428582191467285, "learning_rate": 2.4166666666666667e-05, "loss": 0.3035, "step": 806 }, { "epoch": 2.68, "grad_norm": 11.917937278747559, "learning_rate": 
2.3173076923076924e-05, "loss": 0.3287, "step": 837 }, { "epoch": 2.78, "grad_norm": 18.710346221923828, "learning_rate": 2.217948717948718e-05, "loss": 0.3209, "step": 868 }, { "epoch": 2.88, "grad_norm": 5.8296990394592285, "learning_rate": 2.1185897435897437e-05, "loss": 0.329, "step": 899 }, { "epoch": 2.98, "grad_norm": 11.532828330993652, "learning_rate": 2.0192307692307694e-05, "loss": 0.2934, "step": 930 }, { "epoch": 3.08, "grad_norm": 15.149397850036621, "learning_rate": 1.919871794871795e-05, "loss": 0.2682, "step": 961 }, { "epoch": 3.18, "grad_norm": 3.612698793411255, "learning_rate": 1.8205128205128204e-05, "loss": 0.264, "step": 992 }, { "epoch": 3.28, "grad_norm": 1.7986979484558105, "learning_rate": 1.721153846153846e-05, "loss": 0.2085, "step": 1023 }, { "epoch": 3.38, "grad_norm": 3.6060192584991455, "learning_rate": 1.6217948717948718e-05, "loss": 0.247, "step": 1054 }, { "epoch": 3.48, "grad_norm": 2.7116451263427734, "learning_rate": 1.5224358974358973e-05, "loss": 0.2104, "step": 1085 }, { "epoch": 3.58, "grad_norm": 4.838766574859619, "learning_rate": 1.423076923076923e-05, "loss": 0.234, "step": 1116 }, { "epoch": 3.68, "grad_norm": 2.237657070159912, "learning_rate": 1.3237179487179489e-05, "loss": 0.2033, "step": 1147 }, { "epoch": 3.78, "grad_norm": 1.6461944580078125, "learning_rate": 1.2243589743589744e-05, "loss": 0.2098, "step": 1178 }, { "epoch": 3.88, "grad_norm": 6.327276229858398, "learning_rate": 1.125e-05, "loss": 0.2073, "step": 1209 }, { "epoch": 3.97, "grad_norm": 2.9778146743774414, "learning_rate": 1.0256410256410256e-05, "loss": 0.1988, "step": 1240 }, { "epoch": 4.07, "grad_norm": 1.4347281455993652, "learning_rate": 9.262820512820514e-06, "loss": 0.1664, "step": 1271 }, { "epoch": 4.17, "grad_norm": 2.844505786895752, "learning_rate": 8.26923076923077e-06, "loss": 0.1529, "step": 1302 }, { "epoch": 4.27, "grad_norm": 1.985013723373413, "learning_rate": 7.275641025641026e-06, "loss": 0.1447, "step": 1333 }, { "epoch": 4.37, "grad_norm": 2.9127843379974365, "learning_rate": 6.282051282051282e-06, "loss": 0.1375, "step": 1364 }, { "epoch": 4.47, "grad_norm": 2.6174566745758057, "learning_rate": 5.288461538461538e-06, "loss": 0.1515, "step": 1395 }, { "epoch": 4.57, "grad_norm": 1.2411088943481445, "learning_rate": 4.294871794871795e-06, "loss": 0.1408, "step": 1426 }, { "epoch": 4.67, "grad_norm": 1.8333454132080078, "learning_rate": 3.3012820512820517e-06, "loss": 0.1372, "step": 1457 }, { "epoch": 4.77, "grad_norm": 1.785672903060913, "learning_rate": 2.307692307692308e-06, "loss": 0.1409, "step": 1488 }, { "epoch": 4.87, "grad_norm": 3.533236026763916, "learning_rate": 1.3141025641025643e-06, "loss": 0.1276, "step": 1519 }, { "epoch": 4.97, "grad_norm": 1.3145009279251099, "learning_rate": 3.205128205128205e-07, "loss": 0.1329, "step": 1550 }, { "epoch": 5.0, "step": 1560, "total_flos": 1.845867535870722e+19, "train_loss": 0.4759287901413746, "train_runtime": 3877.7535, "train_samples_per_second": 3.216, "train_steps_per_second": 0.402 } ], "logging_steps": 31, "max_steps": 1560, "num_input_tokens_seen": 0, "num_train_epochs": 5, "save_steps": 500, "total_flos": 1.845867535870722e+19, "train_batch_size": 8, "trial_name": null, "trial_params": null }