{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 8.04289544235925,
  "eval_steps": 500,
  "global_step": 3000,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "eval_accuracy": 0.7321488261222839,
      "eval_loss": 0.5215846300125122,
      "eval_runtime": 9.3633,
      "eval_samples_per_second": 318.585,
      "eval_steps_per_second": 19.972,
      "step": 373
    },
    {
      "epoch": 1.3404825737265416,
      "grad_norm": 13.217938423156738,
      "learning_rate": 2.418286940875578e-06,
      "loss": 0.5967,
      "step": 500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.7723767757415771,
      "eval_loss": 0.45723703503608704,
      "eval_runtime": 9.2866,
      "eval_samples_per_second": 321.216,
      "eval_steps_per_second": 20.137,
      "step": 746
    },
    {
      "epoch": 2.680965147453083,
      "grad_norm": 15.163392066955566,
      "learning_rate": 2.043939117210628e-06,
      "loss": 0.4683,
      "step": 1000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.796513557434082,
      "eval_loss": 0.4389999508857727,
      "eval_runtime": 9.2689,
      "eval_samples_per_second": 321.829,
      "eval_steps_per_second": 20.175,
      "step": 1119
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8055648803710938,
      "eval_loss": 0.45021912455558777,
      "eval_runtime": 9.4524,
      "eval_samples_per_second": 315.58,
      "eval_steps_per_second": 19.783,
      "step": 1492
    },
    {
      "epoch": 4.021447721179625,
      "grad_norm": 33.55284118652344,
      "learning_rate": 1.6695912935456774e-06,
      "loss": 0.3783,
      "step": 1500
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.8079115152359009,
      "eval_loss": 0.4486864507198334,
      "eval_runtime": 9.329,
      "eval_samples_per_second": 319.756,
      "eval_steps_per_second": 20.045,
      "step": 1865
    },
    {
      "epoch": 5.361930294906166,
      "grad_norm": 17.95193862915039,
      "learning_rate": 1.2952434698807272e-06,
      "loss": 0.3049,
      "step": 2000
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.8139457106590271,
      "eval_loss": 0.44699645042419434,
      "eval_runtime": 9.3232,
      "eval_samples_per_second": 319.955,
      "eval_steps_per_second": 20.058,
      "step": 2238
    },
    {
      "epoch": 6.702412868632708,
      "grad_norm": 19.94598388671875,
      "learning_rate": 9.208956462157772e-07,
      "loss": 0.2663,
      "step": 2500
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.8162923455238342,
      "eval_loss": 0.4586087167263031,
      "eval_runtime": 9.3905,
      "eval_samples_per_second": 317.661,
      "eval_steps_per_second": 19.914,
      "step": 2611
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.8169627785682678,
      "eval_loss": 0.46715453267097473,
      "eval_runtime": 9.458,
      "eval_samples_per_second": 315.395,
      "eval_steps_per_second": 19.772,
      "step": 2984
    },
    {
      "epoch": 8.04289544235925,
      "grad_norm": 21.875141143798828,
      "learning_rate": 5.465478225508271e-07,
      "loss": 0.2394,
      "step": 3000
    }
  ],
  "logging_steps": 500,
  "max_steps": 3730,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 1.6826244219836316e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 2.7926347645405278e-06,
    "per_device_train_batch_size": 32
  }
}