{ "best_metric": 1.1404521465301514, "best_model_checkpoint": "./checkpoints/dpo-mix-7k/phi-2-dpo-mix-7k-ORPO-16-20-9/checkpoint-1232", "epoch": 1.0, "eval_steps": 500, "global_step": 1232, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.04, "grad_norm": 140.0, "learning_rate": 2.5e-06, "loss": 9.455, "step": 50 }, { "epoch": 0.08, "grad_norm": 95.0, "learning_rate": 5e-06, "loss": 7.3937, "step": 100 }, { "epoch": 0.12, "grad_norm": 19.0, "learning_rate": 4.975969658958691e-06, "loss": 1.5555, "step": 150 }, { "epoch": 0.16, "grad_norm": 7.90625, "learning_rate": 4.904340601667208e-06, "loss": 1.2716, "step": 200 }, { "epoch": 0.2, "grad_norm": 9.375, "learning_rate": 4.786489844665701e-06, "loss": 1.1798, "step": 250 }, { "epoch": 0.24, "grad_norm": 38.75, "learning_rate": 4.624682983060346e-06, "loss": 1.2401, "step": 300 }, { "epoch": 0.28, "grad_norm": 6.15625, "learning_rate": 4.422030636104903e-06, "loss": 1.2003, "step": 350 }, { "epoch": 0.32, "grad_norm": 7.46875, "learning_rate": 4.182428647807503e-06, "loss": 1.1913, "step": 400 }, { "epoch": 0.37, "grad_norm": 19.125, "learning_rate": 3.910483192162515e-06, "loss": 1.1505, "step": 450 }, { "epoch": 0.41, "grad_norm": 10.375, "learning_rate": 3.6114222228049657e-06, "loss": 1.1917, "step": 500 }, { "epoch": 0.45, "grad_norm": 10.3125, "learning_rate": 3.2909949694035004e-06, "loss": 1.2068, "step": 550 }, { "epoch": 0.49, "grad_norm": 34.5, "learning_rate": 2.9553614129006543e-06, "loss": 1.1559, "step": 600 }, { "epoch": 0.53, "grad_norm": 7.40625, "learning_rate": 2.610973864358563e-06, "loss": 1.2084, "step": 650 }, { "epoch": 0.57, "grad_norm": 5.25, "learning_rate": 2.2644529239707054e-06, "loss": 1.1095, "step": 700 }, { "epoch": 0.61, "grad_norm": 11.25, "learning_rate": 1.9224602048374618e-06, "loss": 1.127, "step": 750 }, { "epoch": 0.65, "grad_norm": 50.0, "learning_rate": 1.5915702682983657e-06, "loss": 1.1861, "step": 800 }, { "epoch": 0.69, "grad_norm": 9.1875, "learning_rate": 1.278144232771154e-06, "loss": 1.2159, "step": 850 }, { "epoch": 0.73, "grad_norm": 12.3125, "learning_rate": 9.88207485875784e-07, "loss": 1.1351, "step": 900 }, { "epoch": 0.77, "grad_norm": 12.5625, "learning_rate": 7.273338507388969e-07, "loss": 1.1951, "step": 950 }, { "epoch": 0.81, "grad_norm": 11.3125, "learning_rate": 5.005384332973154e-07, "loss": 1.1518, "step": 1000 }, { "epoch": 0.85, "grad_norm": 5.34375, "learning_rate": 3.121812105332203e-07, "loss": 1.1758, "step": 1050 }, { "epoch": 0.89, "grad_norm": 30.0, "learning_rate": 1.6588321308710409e-07, "loss": 1.1458, "step": 1100 }, { "epoch": 0.93, "grad_norm": 7.53125, "learning_rate": 6.44569135767989e-08, "loss": 1.2038, "step": 1150 }, { "epoch": 0.97, "grad_norm": 5.5, "learning_rate": 9.852158856538118e-09, "loss": 1.2168, "step": 1200 }, { "epoch": 1.0, "eval_loss": 1.1404521465301514, "eval_runtime": 2329.2969, "eval_samples_per_second": 2.115, "eval_steps_per_second": 0.529, "step": 1232 } ], "logging_steps": 50, "max_steps": 1232, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 500, "total_flos": 1.6032229924379034e+17, "train_batch_size": 4, "trial_name": null, "trial_params": null }