{ "best_metric": 1.05314302444458, "best_model_checkpoint": "th_cl_33epochs_lora_pos_neg/checkpoint-32", "epoch": 5.0, "eval_steps": 500, "global_step": 160, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.3125, "grad_norm": 8.306446075439453, "learning_rate": 9.375e-05, "loss": 0.164, "step": 10 }, { "epoch": 0.625, "grad_norm": 26.88968849182129, "learning_rate": 8.75e-05, "loss": 0.3455, "step": 20 }, { "epoch": 0.9375, "grad_norm": 10.799789428710938, "learning_rate": 8.125000000000001e-05, "loss": 0.3999, "step": 30 }, { "epoch": 1.0, "eval_accuracy": 0.4888888888888889, "eval_balanced_accuracy": 0.4939271255060729, "eval_loss": 1.05314302444458, "eval_runtime": 88.4283, "eval_samples_per_second": 0.509, "eval_steps_per_second": 0.068, "step": 32 }, { "epoch": 1.25, "grad_norm": 20.668262481689453, "learning_rate": 7.500000000000001e-05, "loss": 0.1668, "step": 40 }, { "epoch": 1.5625, "grad_norm": 19.973417282104492, "learning_rate": 6.875e-05, "loss": 0.1598, "step": 50 }, { "epoch": 1.875, "grad_norm": 21.18987464904785, "learning_rate": 6.25e-05, "loss": 0.2178, "step": 60 }, { "epoch": 2.0, "eval_accuracy": 0.4666666666666667, "eval_balanced_accuracy": 0.4722222222222222, "eval_loss": 1.058489203453064, "eval_runtime": 88.3706, "eval_samples_per_second": 0.509, "eval_steps_per_second": 0.068, "step": 64 }, { "epoch": 2.1875, "grad_norm": 8.074424743652344, "learning_rate": 5.6250000000000005e-05, "loss": 0.1336, "step": 70 }, { "epoch": 2.5, "grad_norm": 7.036731243133545, "learning_rate": 5e-05, "loss": 0.13, "step": 80 }, { "epoch": 2.8125, "grad_norm": 3.6135730743408203, "learning_rate": 4.375e-05, "loss": 0.0794, "step": 90 }, { "epoch": 3.0, "eval_accuracy": 0.5555555555555556, "eval_balanced_accuracy": 0.55, "eval_loss": 1.0634422302246094, "eval_runtime": 88.5007, "eval_samples_per_second": 0.508, "eval_steps_per_second": 0.068, "step": 96 }, { "epoch": 3.125, "grad_norm": 1.4257570505142212, "learning_rate": 3.7500000000000003e-05, "loss": 0.0825, "step": 100 }, { "epoch": 3.4375, "grad_norm": 23.794193267822266, "learning_rate": 3.125e-05, "loss": 0.0622, "step": 110 }, { "epoch": 3.75, "grad_norm": 7.325411796569824, "learning_rate": 2.5e-05, "loss": 0.0271, "step": 120 }, { "epoch": 4.0, "eval_accuracy": 0.5333333333333333, "eval_balanced_accuracy": 0.53, "eval_loss": 1.208985686302185, "eval_runtime": 88.4567, "eval_samples_per_second": 0.509, "eval_steps_per_second": 0.068, "step": 128 }, { "epoch": 4.0625, "grad_norm": 0.5730175971984863, "learning_rate": 1.8750000000000002e-05, "loss": 0.0257, "step": 130 }, { "epoch": 4.375, "grad_norm": 1.101705551147461, "learning_rate": 1.25e-05, "loss": 0.0126, "step": 140 }, { "epoch": 4.6875, "grad_norm": 2.9783823490142822, "learning_rate": 6.25e-06, "loss": 0.0166, "step": 150 }, { "epoch": 5.0, "grad_norm": 9.750654220581055, "learning_rate": 0.0, "loss": 0.0412, "step": 160 }, { "epoch": 5.0, "eval_accuracy": 0.4666666666666667, "eval_balanced_accuracy": 0.47, "eval_loss": 1.2303385734558105, "eval_runtime": 88.5783, "eval_samples_per_second": 0.508, "eval_steps_per_second": 0.068, "step": 160 }, { "epoch": 5.0, "step": 160, "total_flos": 2.696144222158848e+16, "train_loss": 0.12904777154326438, "train_runtime": 8156.5384, "train_samples_per_second": 0.154, "train_steps_per_second": 0.02 } ], "logging_steps": 10, "max_steps": 160, "num_input_tokens_seen": 0, "num_train_epochs": 5, "save_steps": 500, "total_flos": 2.696144222158848e+16, 
"train_batch_size": 8, "trial_name": null, "trial_params": null }