{
  "best_metric": 1.9983205795288086,
  "best_model_checkpoint": "/home/datta0/models/lora_final/Mistral-7B-v0.3_pct_ortho_r16/checkpoint-16",
  "epoch": 0.04127367996775494,
  "eval_steps": 8,
  "global_step": 16,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0025796049979846837,
      "grad_norm": 7.310375690460205,
      "learning_rate": 1.25e-05,
      "loss": 2.1533,
      "step": 1
    },
    {
      "epoch": 0.010318419991938735,
      "grad_norm": 7.721648216247559,
      "learning_rate": 5e-05,
      "loss": 2.0899,
      "step": 4
    },
    {
      "epoch": 0.02063683998387747,
      "grad_norm": 4.991214275360107,
      "learning_rate": 0.0001,
      "loss": 1.9566,
      "step": 8
    },
    {
      "epoch": 0.02063683998387747,
      "eval_loss": 2.0118165016174316,
      "eval_runtime": 141.9765,
      "eval_samples_per_second": 1.726,
      "eval_steps_per_second": 0.866,
      "step": 8
    },
    {
      "epoch": 0.030955259975816204,
      "grad_norm": 4.564762592315674,
      "learning_rate": 9.997251843068762e-05,
      "loss": 2.0017,
      "step": 12
    },
    {
      "epoch": 0.04127367996775494,
      "grad_norm": 4.318169593811035,
      "learning_rate": 9.989010393221656e-05,
      "loss": 2.0191,
      "step": 16
    },
    {
      "epoch": 0.04127367996775494,
      "eval_loss": 1.9983205795288086,
      "eval_runtime": 142.0598,
      "eval_samples_per_second": 1.725,
      "eval_steps_per_second": 0.866,
      "step": 16
    }
  ],
  "logging_steps": 4,
  "max_steps": 387,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 8,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.4040568375476224e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}