{
  "best_metric": 1.2975215911865234,
  "best_model_checkpoint": "/home/datta0/models/lora_final/gemma-2-9b_magiccoder_ortho/checkpoint-4",
  "epoch": 0.026130448096356028,
  "eval_steps": 4,
  "global_step": 4,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.006532612024089007,
      "grad_norm": 5.831324577331543,
      "learning_rate": 2.5e-05,
      "loss": 1.3342,
      "step": 1
    },
    {
      "epoch": 0.013065224048178014,
      "grad_norm": 5.2910685539245605,
      "learning_rate": 5e-05,
      "loss": 1.2199,
      "step": 2
    },
    {
      "epoch": 0.026130448096356028,
      "grad_norm": 3.488215684890747,
      "learning_rate": 0.0001,
      "loss": 1.2682,
      "step": 4
    },
    {
      "epoch": 0.026130448096356028,
      "eval_loss": 1.2975215911865234,
      "eval_runtime": 866.0894,
      "eval_samples_per_second": 0.565,
      "eval_steps_per_second": 0.565,
      "step": 4
    }
  ],
  "logging_steps": 2,
  "max_steps": 153,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 4,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 8159435195842560.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}