{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.07155635062611806, "eval_steps": 500, "global_step": 200, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0035778175313059034, "grad_norm": 0.5825825929641724, "learning_rate": 0.00019928443649373882, "loss": 1.248, "num_input_tokens_seen": 6646, "step": 10 }, { "epoch": 0.007155635062611807, "grad_norm": 0.5380188822746277, "learning_rate": 0.00019856887298747765, "loss": 0.5478, "num_input_tokens_seen": 13063, "step": 20 }, { "epoch": 0.01073345259391771, "grad_norm": 0.3872911036014557, "learning_rate": 0.00019785330948121648, "loss": 0.5135, "num_input_tokens_seen": 19512, "step": 30 }, { "epoch": 0.014311270125223614, "grad_norm": 0.4991438686847687, "learning_rate": 0.0001971377459749553, "loss": 0.5092, "num_input_tokens_seen": 26884, "step": 40 }, { "epoch": 0.017889087656529516, "grad_norm": 0.6744784116744995, "learning_rate": 0.0001964221824686941, "loss": 0.4799, "num_input_tokens_seen": 34831, "step": 50 }, { "epoch": 0.02146690518783542, "grad_norm": 0.5413841009140015, "learning_rate": 0.00019570661896243293, "loss": 0.4738, "num_input_tokens_seen": 40074, "step": 60 }, { "epoch": 0.025044722719141325, "grad_norm": 0.33517029881477356, "learning_rate": 0.00019499105545617174, "loss": 0.4907, "num_input_tokens_seen": 47194, "step": 70 }, { "epoch": 0.028622540250447227, "grad_norm": 0.34275758266448975, "learning_rate": 0.00019427549194991057, "loss": 0.4642, "num_input_tokens_seen": 53439, "step": 80 }, { "epoch": 0.03220035778175313, "grad_norm": 0.4074145257472992, "learning_rate": 0.00019355992844364938, "loss": 0.4431, "num_input_tokens_seen": 59366, "step": 90 }, { "epoch": 0.03577817531305903, "grad_norm": 0.372760534286499, "learning_rate": 0.0001928443649373882, "loss": 0.4824, "num_input_tokens_seen": 66414, "step": 100 }, { "epoch": 0.03935599284436494, "grad_norm": 0.35169002413749695, "learning_rate": 0.00019212880143112702, "loss": 0.4863, "num_input_tokens_seen": 73451, "step": 110 }, { "epoch": 0.04293381037567084, "grad_norm": 0.4088020324707031, "learning_rate": 0.00019141323792486585, "loss": 0.4792, "num_input_tokens_seen": 81934, "step": 120 }, { "epoch": 0.046511627906976744, "grad_norm": 0.40062326192855835, "learning_rate": 0.00019069767441860466, "loss": 0.4607, "num_input_tokens_seen": 88335, "step": 130 }, { "epoch": 0.05008944543828265, "grad_norm": 0.5044320225715637, "learning_rate": 0.0001899821109123435, "loss": 0.456, "num_input_tokens_seen": 96192, "step": 140 }, { "epoch": 0.05366726296958855, "grad_norm": 0.4566495418548584, "learning_rate": 0.0001892665474060823, "loss": 0.429, "num_input_tokens_seen": 101609, "step": 150 }, { "epoch": 0.057245080500894455, "grad_norm": 0.4657338559627533, "learning_rate": 0.0001885509838998211, "loss": 0.4445, "num_input_tokens_seen": 107467, "step": 160 }, { "epoch": 0.06082289803220036, "grad_norm": 0.5721924304962158, "learning_rate": 0.00018783542039355994, "loss": 0.4304, "num_input_tokens_seen": 113612, "step": 170 }, { "epoch": 0.06440071556350627, "grad_norm": 0.2883516848087311, "learning_rate": 0.00018711985688729877, "loss": 0.4525, "num_input_tokens_seen": 121416, "step": 180 }, { "epoch": 0.06797853309481217, "grad_norm": 0.5061659216880798, "learning_rate": 0.00018640429338103758, "loss": 0.4439, "num_input_tokens_seen": 128284, "step": 190 }, { "epoch": 0.07155635062611806, "grad_norm": 0.3323754072189331, "learning_rate": 0.00018568872987477638, 
"loss": 0.4489, "num_input_tokens_seen": 135571, "step": 200 } ], "logging_steps": 10, "max_steps": 2795, "num_input_tokens_seen": 135571, "num_train_epochs": 1, "save_steps": 20, "total_flos": 3048513035655168.0, "train_batch_size": 1, "trial_name": null, "trial_params": null }