{
  "best_metric": 2.2854506969451904,
  "best_model_checkpoint": "/home/datta0/models/lora_final/Mistral-7B-v0.3_pct_ortho/checkpoint-8",
  "epoch": 0.020631850419084462,
  "eval_steps": 8,
  "global_step": 8,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0025789813023855577,
      "grad_norm": 10.031487464904785,
      "learning_rate": 3.75e-05,
      "loss": 2.128,
      "step": 1
    },
    {
      "epoch": 0.010315925209542231,
      "grad_norm": 7.364378929138184,
      "learning_rate": 0.00015,
      "loss": 2.0612,
      "step": 4
    },
    {
      "epoch": 0.020631850419084462,
      "grad_norm": 23.759828567504883,
      "learning_rate": 0.0003,
      "loss": 2.1243,
      "step": 8
    },
    {
      "epoch": 0.020631850419084462,
      "eval_loss": 2.2854506969451904,
      "eval_runtime": 11.1093,
      "eval_samples_per_second": 22.054,
      "eval_steps_per_second": 2.79,
      "step": 8
    }
  ],
  "logging_steps": 4,
  "max_steps": 387,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 8,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.8441287540146176e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}