{
"best_metric": 1.9184670448303223,
"best_model_checkpoint": "/home/datta0/models/lora_final/Qwen2-7B_pct_ortho_r16/checkpoint-32",
"epoch": 0.08244766505636071,
"eval_steps": 8,
"global_step": 32,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0025764895330112722,
"grad_norm": 2.5038321018218994,
"learning_rate": 1.25e-05,
"loss": 2.0463,
"step": 1
},
{
"epoch": 0.010305958132045089,
"grad_norm": 2.0884063243865967,
"learning_rate": 5e-05,
"loss": 2.1379,
"step": 4
},
{
"epoch": 0.020611916264090178,
"grad_norm": 2.1778032779693604,
"learning_rate": 0.0001,
"loss": 2.0486,
"step": 8
},
{
"epoch": 0.020611916264090178,
"eval_loss": 1.997605562210083,
"eval_runtime": 130.7432,
"eval_samples_per_second": 1.874,
"eval_steps_per_second": 0.941,
"step": 8
},
{
"epoch": 0.030917874396135265,
"grad_norm": 1.3578734397888184,
"learning_rate": 9.997266286704631e-05,
"loss": 2.0017,
"step": 12
},
{
"epoch": 0.041223832528180356,
"grad_norm": 1.0907150506973267,
"learning_rate": 9.989068136093873e-05,
"loss": 1.9839,
"step": 16
},
{
"epoch": 0.041223832528180356,
"eval_loss": 1.9347867965698242,
"eval_runtime": 133.851,
"eval_samples_per_second": 1.83,
"eval_steps_per_second": 0.919,
"step": 16
},
{
"epoch": 0.05152979066022544,
"grad_norm": 1.1747814416885376,
"learning_rate": 9.975414512725057e-05,
"loss": 1.9421,
"step": 20
},
{
"epoch": 0.06183574879227053,
"grad_norm": 1.2741020917892456,
"learning_rate": 9.956320346634876e-05,
"loss": 2.0083,
"step": 24
},
{
"epoch": 0.06183574879227053,
"eval_loss": 1.9231261014938354,
"eval_runtime": 123.8213,
"eval_samples_per_second": 1.979,
"eval_steps_per_second": 0.993,
"step": 24
},
{
"epoch": 0.07214170692431562,
"grad_norm": 1.1227424144744873,
"learning_rate": 9.931806517013612e-05,
"loss": 1.9946,
"step": 28
},
{
"epoch": 0.08244766505636071,
"grad_norm": 1.14728581905365,
"learning_rate": 9.901899829374047e-05,
"loss": 1.923,
"step": 32
},
{
"epoch": 0.08244766505636071,
"eval_loss": 1.9184670448303223,
"eval_runtime": 236.0583,
"eval_samples_per_second": 1.038,
"eval_steps_per_second": 0.521,
"step": 32
}
],
"logging_steps": 4,
"max_steps": 388,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 8,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.483746134414131e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}