{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.4,
  "eval_steps": 1,
  "global_step": 10,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.64,
      "grad_norm": 436.4122314453125,
      "learning_rate": 2.5e-05,
      "loss": 5.8587,
      "step": 1
    },
    {
      "epoch": 0.64,
      "eval_accuracy": 0.516,
      "eval_loss": 5.887912750244141,
      "eval_runtime": 13.1544,
      "eval_samples_per_second": 19.005,
      "eval_steps_per_second": 0.988,
      "step": 1
    },
    {
      "epoch": 1.28,
      "grad_norm": 448.7404479980469,
      "learning_rate": 5e-05,
      "loss": 5.979,
      "step": 2
    },
    {
      "epoch": 1.28,
      "eval_accuracy": 0.484,
      "eval_loss": 3.829352378845215,
      "eval_runtime": 13.1532,
      "eval_samples_per_second": 19.007,
      "eval_steps_per_second": 0.988,
      "step": 2
    },
    {
      "epoch": 1.92,
      "grad_norm": 345.4527282714844,
      "learning_rate": 4.375e-05,
      "loss": 3.48,
      "step": 3
    },
    {
      "epoch": 1.92,
      "eval_accuracy": 0.616,
      "eval_loss": 0.9900831580162048,
      "eval_runtime": 13.1349,
      "eval_samples_per_second": 19.033,
      "eval_steps_per_second": 0.99,
      "step": 3
    },
    {
      "epoch": 2.56,
      "grad_norm": 34.28495407104492,
      "learning_rate": 3.7500000000000003e-05,
      "loss": 0.9253,
      "step": 4
    },
    {
      "epoch": 2.56,
      "eval_accuracy": 0.484,
      "eval_loss": 2.8530449867248535,
      "eval_runtime": 13.1214,
      "eval_samples_per_second": 19.053,
      "eval_steps_per_second": 0.991,
      "step": 4
    },
    {
      "epoch": 3.2,
      "grad_norm": 369.2248229980469,
      "learning_rate": 3.125e-05,
      "loss": 2.7248,
      "step": 5
    },
    {
      "epoch": 3.2,
      "eval_accuracy": 0.484,
      "eval_loss": 0.8809528946876526,
      "eval_runtime": 13.1143,
      "eval_samples_per_second": 19.063,
      "eval_steps_per_second": 0.991,
      "step": 5
    },
    {
      "epoch": 3.84,
      "grad_norm": 57.836917877197266,
      "learning_rate": 2.5e-05,
      "loss": 0.8653,
      "step": 6
    },
    {
      "epoch": 3.84,
      "eval_accuracy": 0.516,
      "eval_loss": 2.1503427028656006,
      "eval_runtime": 13.1024,
      "eval_samples_per_second": 19.081,
      "eval_steps_per_second": 0.992,
      "step": 6
    },
    {
      "epoch": 4.48,
      "grad_norm": 308.1085205078125,
      "learning_rate": 1.8750000000000002e-05,
      "loss": 2.14,
      "step": 7
    },
    {
      "epoch": 4.48,
      "eval_accuracy": 0.516,
      "eval_loss": 1.6527570486068726,
      "eval_runtime": 13.106,
      "eval_samples_per_second": 19.075,
      "eval_steps_per_second": 0.992,
      "step": 7
    },
    {
      "epoch": 5.12,
      "grad_norm": 301.7503356933594,
      "learning_rate": 1.25e-05,
      "loss": 1.772,
      "step": 8
    },
    {
      "epoch": 5.12,
      "eval_accuracy": 0.644,
      "eval_loss": 0.7859167456626892,
      "eval_runtime": 13.1191,
      "eval_samples_per_second": 19.056,
      "eval_steps_per_second": 0.991,
      "step": 8
    },
    {
      "epoch": 5.76,
      "grad_norm": 123.35485076904297,
      "learning_rate": 6.25e-06,
      "loss": 0.786,
      "step": 9
    },
    {
      "epoch": 5.76,
      "eval_accuracy": 0.628,
      "eval_loss": 0.9045919179916382,
      "eval_runtime": 13.1146,
      "eval_samples_per_second": 19.063,
      "eval_steps_per_second": 0.991,
      "step": 9
    },
    {
      "epoch": 6.4,
      "grad_norm": 109.03207397460938,
      "learning_rate": 0.0,
      "loss": 0.7251,
      "step": 10
    },
    {
      "epoch": 6.4,
      "eval_accuracy": 0.624,
      "eval_loss": 1.0280524492263794,
      "eval_runtime": 13.1179,
      "eval_samples_per_second": 19.058,
      "eval_steps_per_second": 0.991,
      "step": 10
    },
    {
      "epoch": 6.4,
      "step": 10,
      "total_flos": 1.974162432e+16,
      "train_loss": 2.5256052255630492,
      "train_runtime": 1346.5171,
      "train_samples_per_second": 7.427,
      "train_steps_per_second": 0.007
    }
  ],
  "logging_steps": 1,
  "max_steps": 10,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 1.974162432e+16,
  "train_batch_size": 20,
  "trial_name": null,
  "trial_params": null
}