{ "best_metric": null, "best_model_checkpoint": null, "epoch": 4.0, "eval_steps": 500, "global_step": 808, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.25, "grad_norm": 0.4402466118335724, "learning_rate": 0.00019876883405951377, "loss": 1.5784, "step": 50 }, { "epoch": 0.5, "grad_norm": 0.5029518604278564, "learning_rate": 0.00019510565162951537, "loss": 1.4446, "step": 100 }, { "epoch": 0.74, "grad_norm": 0.5096282958984375, "learning_rate": 0.0001891006524188368, "loss": 1.4047, "step": 150 }, { "epoch": 0.99, "grad_norm": 0.508622407913208, "learning_rate": 0.00018090169943749476, "loss": 1.3806, "step": 200 }, { "epoch": 1.24, "grad_norm": 0.6428921222686768, "learning_rate": 0.00017071067811865476, "loss": 1.2506, "step": 250 }, { "epoch": 1.49, "grad_norm": 0.6709182262420654, "learning_rate": 0.00015877852522924732, "loss": 1.254, "step": 300 }, { "epoch": 1.73, "grad_norm": 0.6514674425125122, "learning_rate": 0.00014539904997395468, "loss": 1.2417, "step": 350 }, { "epoch": 1.98, "grad_norm": 0.7174981236457825, "learning_rate": 0.00013090169943749476, "loss": 1.252, "step": 400 }, { "epoch": 2.23, "grad_norm": 0.8790501952171326, "learning_rate": 0.0001156434465040231, "loss": 1.0917, "step": 450 }, { "epoch": 2.48, "grad_norm": 0.9474716186523438, "learning_rate": 0.0001, "loss": 1.072, "step": 500 }, { "epoch": 2.72, "grad_norm": 0.86595219373703, "learning_rate": 8.435655349597689e-05, "loss": 1.0909, "step": 550 }, { "epoch": 2.97, "grad_norm": 0.8805217742919922, "learning_rate": 6.909830056250527e-05, "loss": 1.0807, "step": 600 }, { "epoch": 3.22, "grad_norm": 0.9318429231643677, "learning_rate": 5.4600950026045326e-05, "loss": 0.9672, "step": 650 }, { "epoch": 3.47, "grad_norm": 1.0167059898376465, "learning_rate": 4.12214747707527e-05, "loss": 0.9215, "step": 700 }, { "epoch": 3.71, "grad_norm": 1.0535578727722168, "learning_rate": 2.9289321881345254e-05, "loss": 0.9481, "step": 750 }, { "epoch": 3.96, "grad_norm": 1.0781499147415161, "learning_rate": 1.9098300562505266e-05, "loss": 0.9456, "step": 800 } ], "logging_steps": 50, "max_steps": 1000, "num_input_tokens_seen": 0, "num_train_epochs": 5, "save_steps": 500, "total_flos": 1.051029971828736e+16, "train_batch_size": 7, "trial_name": null, "trial_params": null }