{ "best_metric": null, "best_model_checkpoint": null, "epoch": 2.0, "eval_steps": 5, "global_step": 32, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0625, "grad_norm": 1.8187159299850464, "learning_rate": 0.00019375000000000002, "loss": 10.0878, "step": 1 }, { "epoch": 0.125, "grad_norm": 1.9498610496520996, "learning_rate": 0.0001875, "loss": 9.9916, "step": 2 }, { "epoch": 0.1875, "grad_norm": 2.7474117279052734, "learning_rate": 0.00018125000000000001, "loss": 9.7044, "step": 3 }, { "epoch": 0.25, "grad_norm": 3.391249418258667, "learning_rate": 0.000175, "loss": 8.9738, "step": 4 }, { "epoch": 0.3125, "grad_norm": 3.6622118949890137, "learning_rate": 0.00016875, "loss": 8.2705, "step": 5 }, { "epoch": 0.3125, "eval_clap": 0.31716692447662354, "eval_loss": 3.0108680725097656, "eval_runtime": 265.8708, "eval_samples_per_second": 0.03, "eval_steps_per_second": 0.03, "step": 5 }, { "epoch": 0.375, "grad_norm": 4.368948936462402, "learning_rate": 0.00016250000000000002, "loss": 7.8107, "step": 6 }, { "epoch": 0.4375, "grad_norm": 6.55327033996582, "learning_rate": 0.00015625, "loss": 7.507, "step": 7 }, { "epoch": 0.5, "grad_norm": 5.326369762420654, "learning_rate": 0.00015000000000000001, "loss": 7.0676, "step": 8 }, { "epoch": 0.5625, "grad_norm": 3.2963500022888184, "learning_rate": 0.00014375, "loss": 7.0271, "step": 9 }, { "epoch": 0.625, "grad_norm": 2.0462214946746826, "learning_rate": 0.0001375, "loss": 6.4565, "step": 10 }, { "epoch": 0.625, "eval_clap": 0.20497097074985504, "eval_loss": 3.3557252883911133, "eval_runtime": 503.1747, "eval_samples_per_second": 0.016, "eval_steps_per_second": 0.016, "step": 10 }, { "epoch": 0.6875, "grad_norm": 2.9796876907348633, "learning_rate": 0.00013125000000000002, "loss": 7.025, "step": 11 }, { "epoch": 0.75, "grad_norm": 1.7379363775253296, "learning_rate": 0.000125, "loss": 6.4613, "step": 12 }, { "epoch": 0.8125, "grad_norm": 2.6363821029663086, "learning_rate": 0.00011875, "loss": 6.8828, "step": 13 }, { "epoch": 0.875, "grad_norm": 1.376232624053955, "learning_rate": 0.00011250000000000001, "loss": 6.3572, "step": 14 }, { "epoch": 0.9375, "grad_norm": 1.8433048725128174, "learning_rate": 0.00010625000000000001, "loss": 6.0972, "step": 15 }, { "epoch": 0.9375, "eval_clap": 0.12289813160896301, "eval_loss": 4.520351409912109, "eval_runtime": 513.434, "eval_samples_per_second": 0.016, "eval_steps_per_second": 0.016, "step": 15 }, { "epoch": 1.0, "grad_norm": 2.2514262199401855, "learning_rate": 0.0001, "loss": 6.3686, "step": 16 }, { "epoch": 1.0625, "grad_norm": 0.9838012456893921, "learning_rate": 9.375e-05, "loss": 6.2712, "step": 17 }, { "epoch": 1.125, "grad_norm": 0.983974277973175, "learning_rate": 8.75e-05, "loss": 6.1015, "step": 18 }, { "epoch": 1.1875, "grad_norm": 0.8950835466384888, "learning_rate": 8.125000000000001e-05, "loss": 6.1462, "step": 19 }, { "epoch": 1.25, "grad_norm": 1.661649465560913, "learning_rate": 7.500000000000001e-05, "loss": 6.3825, "step": 20 }, { "epoch": 1.25, "eval_clap": 0.11188629269599915, "eval_loss": 4.979422092437744, "eval_runtime": 516.4364, "eval_samples_per_second": 0.015, "eval_steps_per_second": 0.015, "step": 20 }, { "epoch": 1.3125, "grad_norm": 1.2889989614486694, "learning_rate": 6.875e-05, "loss": 6.2617, "step": 21 }, { "epoch": 1.375, "grad_norm": 0.9493317008018494, "learning_rate": 6.25e-05, "loss": 5.9844, "step": 22 }, { "epoch": 1.4375, "grad_norm": 0.9931596517562866, "learning_rate": 
5.6250000000000005e-05, "loss": 6.2795, "step": 23 }, { "epoch": 1.5, "grad_norm": 1.8261293172836304, "learning_rate": 5e-05, "loss": 5.7055, "step": 24 }, { "epoch": 1.5625, "grad_norm": 1.0377460718154907, "learning_rate": 4.375e-05, "loss": 6.3178, "step": 25 }, { "epoch": 1.5625, "eval_clap": 0.09158371388912201, "eval_loss": 5.108123779296875, "eval_runtime": 516.4832, "eval_samples_per_second": 0.015, "eval_steps_per_second": 0.015, "step": 25 }, { "epoch": 1.625, "grad_norm": 1.5140869617462158, "learning_rate": 3.7500000000000003e-05, "loss": 6.4832, "step": 26 }, { "epoch": 1.6875, "grad_norm": 0.9569382667541504, "learning_rate": 3.125e-05, "loss": 6.3163, "step": 27 }, { "epoch": 1.75, "grad_norm": 1.586517095565796, "learning_rate": 2.5e-05, "loss": 6.2888, "step": 28 }, { "epoch": 1.8125, "grad_norm": 1.238013744354248, "learning_rate": 1.8750000000000002e-05, "loss": 6.1361, "step": 29 }, { "epoch": 1.875, "grad_norm": 1.0142874717712402, "learning_rate": 1.25e-05, "loss": 6.1301, "step": 30 }, { "epoch": 1.875, "eval_clap": 0.08499659597873688, "eval_loss": 5.103597640991211, "eval_runtime": 424.9013, "eval_samples_per_second": 0.019, "eval_steps_per_second": 0.019, "step": 30 }, { "epoch": 1.9375, "grad_norm": 0.9664702415466309, "learning_rate": 6.25e-06, "loss": 6.3296, "step": 31 }, { "epoch": 2.0, "grad_norm": 1.8057743310928345, "learning_rate": 0.0, "loss": 6.544, "step": 32 }, { "epoch": 2.0, "step": 32, "total_flos": 152209974742572.0, "train_loss": 6.930231094360352, "train_runtime": 3130.5046, "train_samples_per_second": 0.077, "train_steps_per_second": 0.01 } ], "logging_steps": 1.0, "max_steps": 32, "num_input_tokens_seen": 0, "num_train_epochs": 2, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": false, "should_training_stop": false }, "attributes": {} } }, "total_flos": 152209974742572.0, "train_batch_size": 8, "trial_name": null, "trial_params": null }