{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.007058657443354274, "eval_steps": 3, "global_step": 25, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.000282346297734171, "grad_norm": 15.19815444946289, "learning_rate": 2e-05, "loss": 12.8505, "step": 1 }, { "epoch": 0.000282346297734171, "eval_loss": null, "eval_runtime": 129.5074, "eval_samples_per_second": 23.033, "eval_steps_per_second": 11.521, "step": 1 }, { "epoch": 0.000564692595468342, "grad_norm": 14.651299476623535, "learning_rate": 4e-05, "loss": 13.507, "step": 2 }, { "epoch": 0.0008470388932025129, "grad_norm": 17.63684844970703, "learning_rate": 6e-05, "loss": 12.885, "step": 3 }, { "epoch": 0.0008470388932025129, "eval_loss": null, "eval_runtime": 127.7833, "eval_samples_per_second": 23.344, "eval_steps_per_second": 11.676, "step": 3 }, { "epoch": 0.001129385190936684, "grad_norm": 18.415891647338867, "learning_rate": 8e-05, "loss": 14.0113, "step": 4 }, { "epoch": 0.0014117314886708547, "grad_norm": 13.788007736206055, "learning_rate": 0.0001, "loss": 11.6638, "step": 5 }, { "epoch": 0.0016940777864050257, "grad_norm": 8.320622444152832, "learning_rate": 0.00012, "loss": 10.1862, "step": 6 }, { "epoch": 0.0016940777864050257, "eval_loss": null, "eval_runtime": 128.1603, "eval_samples_per_second": 23.276, "eval_steps_per_second": 11.642, "step": 6 }, { "epoch": 0.0019764240841391965, "grad_norm": 15.466882705688477, "learning_rate": 0.00014, "loss": 16.0399, "step": 7 }, { "epoch": 0.002258770381873368, "grad_norm": 8.012770652770996, "learning_rate": 0.00016, "loss": 8.4435, "step": 8 }, { "epoch": 0.0025411166796075386, "grad_norm": 10.118023872375488, "learning_rate": 0.00018, "loss": 8.8868, "step": 9 }, { "epoch": 0.0025411166796075386, "eval_loss": null, "eval_runtime": 127.7698, "eval_samples_per_second": 23.347, "eval_steps_per_second": 11.677, "step": 9 }, { "epoch": 0.0028234629773417094, "grad_norm": 
11.526106834411621, "learning_rate": 0.0002, "loss": 7.4083, "step": 10 }, { "epoch": 0.0031058092750758807, "grad_norm": 29.451435089111328, "learning_rate": 0.00019781476007338058, "loss": 11.2583, "step": 11 }, { "epoch": 0.0033881555728100515, "grad_norm": 20.30539321899414, "learning_rate": 0.0001913545457642601, "loss": 6.8495, "step": 12 }, { "epoch": 0.0033881555728100515, "eval_loss": null, "eval_runtime": 128.0998, "eval_samples_per_second": 23.287, "eval_steps_per_second": 11.647, "step": 12 }, { "epoch": 0.0036705018705442227, "grad_norm": 15.124122619628906, "learning_rate": 0.00018090169943749476, "loss": 5.6325, "step": 13 }, { "epoch": 0.003952848168278393, "grad_norm": 26.152379989624023, "learning_rate": 0.00016691306063588583, "loss": 5.1915, "step": 14 }, { "epoch": 0.004235194466012565, "grad_norm": 10.615568161010742, "learning_rate": 0.00015000000000000001, "loss": 3.4316, "step": 15 }, { "epoch": 0.004235194466012565, "eval_loss": null, "eval_runtime": 127.6488, "eval_samples_per_second": 23.369, "eval_steps_per_second": 11.688, "step": 15 }, { "epoch": 0.004517540763746736, "grad_norm": 17.02305793762207, "learning_rate": 0.00013090169943749476, "loss": 5.1498, "step": 16 }, { "epoch": 0.004799887061480906, "grad_norm": 18.758045196533203, "learning_rate": 0.00011045284632676536, "loss": 2.8793, "step": 17 }, { "epoch": 0.005082233359215077, "grad_norm": 19.13789176940918, "learning_rate": 8.954715367323468e-05, "loss": 3.4841, "step": 18 }, { "epoch": 0.005082233359215077, "eval_loss": null, "eval_runtime": 127.5619, "eval_samples_per_second": 23.385, "eval_steps_per_second": 11.696, "step": 18 }, { "epoch": 0.005364579656949248, "grad_norm": 12.120722770690918, "learning_rate": 6.909830056250527e-05, "loss": 2.6428, "step": 19 }, { "epoch": 0.005646925954683419, "grad_norm": 13.740382194519043, "learning_rate": 5.000000000000002e-05, "loss": 1.3224, "step": 20 }, { "epoch": 0.0059292722524175905, "grad_norm": 19.52411460876465, 
"learning_rate": 3.308693936411421e-05, "loss": 2.2475, "step": 21 }, { "epoch": 0.0059292722524175905, "eval_loss": null, "eval_runtime": 127.529, "eval_samples_per_second": 23.391, "eval_steps_per_second": 11.699, "step": 21 }, { "epoch": 0.006211618550151761, "grad_norm": 15.904952049255371, "learning_rate": 1.9098300562505266e-05, "loss": 2.4317, "step": 22 }, { "epoch": 0.006493964847885932, "grad_norm": 14.597325325012207, "learning_rate": 8.645454235739903e-06, "loss": 1.2465, "step": 23 }, { "epoch": 0.006776311145620103, "grad_norm": 13.051502227783203, "learning_rate": 2.1852399266194314e-06, "loss": 0.67, "step": 24 }, { "epoch": 0.006776311145620103, "eval_loss": null, "eval_runtime": 127.5334, "eval_samples_per_second": 23.39, "eval_steps_per_second": 11.699, "step": 24 }, { "epoch": 0.007058657443354274, "grad_norm": 14.694950103759766, "learning_rate": 0.0, "loss": 2.2391, "step": 25 } ], "logging_steps": 1, "max_steps": 25, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 5, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 6531344184115200.0, "train_batch_size": 2, "trial_name": null, "trial_params": null }