{
  "best_metric": 0.9220191240310669,
  "best_model_checkpoint": "/home/datta0/models/lora_final/Qwen2-7B_magiccoder_default/checkpoint-4",
  "epoch": 0.026101141924959218,
  "eval_steps": 4,
  "global_step": 4,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0065252854812398045,
      "grad_norm": 6.940319538116455,
      "learning_rate": 7.5e-05,
      "loss": 0.8051,
      "step": 1
    },
    {
      "epoch": 0.013050570962479609,
      "grad_norm": 5.597632884979248,
      "learning_rate": 0.00015,
      "loss": 0.9302,
      "step": 2
    },
    {
      "epoch": 0.026101141924959218,
      "grad_norm": 4.383840084075928,
      "learning_rate": 0.0003,
      "loss": 0.8215,
      "step": 4
    },
    {
      "epoch": 0.026101141924959218,
      "eval_loss": 0.9220191240310669,
      "eval_runtime": 24.7526,
      "eval_samples_per_second": 19.756,
      "eval_steps_per_second": 2.505,
      "step": 4
    }
  ],
  "logging_steps": 2,
  "max_steps": 153,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 4,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9312211371294720.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
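
This file has the structure of the trainer_state.json that the Hugging Face Transformers Trainer saves inside each checkpoint directory: training logs live under "log_history" (entries with "loss" are training steps, entries with "eval_loss" are evaluations), and the best evaluation result is tracked in "best_metric" and "best_model_checkpoint". The sketch below shows one way to read those fields back out; it assumes Python with only the standard library, and the checkpoint path is a hypothetical placeholder rather than anything taken from this repository.

import json

# Minimal sketch: load a trainer_state.json like the one above and print the
# logged training losses and evaluation losses. Replace the path with your
# own checkpoint directory.
with open("checkpoint-4/trainer_state.json") as f:
    state = json.load(f)

for record in state["log_history"]:
    if "loss" in record:
        # Training log entry: step, loss, and the learning rate at that step.
        print(f"step {record['step']}: loss={record['loss']:.4f}, "
              f"lr={record['learning_rate']:.2e}")
    elif "eval_loss" in record:
        # Evaluation entry written every eval_steps steps.
        print(f"step {record['step']}: eval_loss={record['eval_loss']:.4f}")

print("best metric:", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])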