{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9767441860465116,
  "eval_steps": 500,
  "global_step": 192,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.15503875968992248,
      "grad_norm": 27.14871976634167,
      "learning_rate": 5e-06,
      "loss": 0.6931,
      "step": 10
    },
    {
      "epoch": 0.31007751937984496,
      "grad_norm": 14.215725662461802,
      "learning_rate": 1e-05,
      "loss": 0.6074,
      "step": 20
    },
    {
      "epoch": 0.46511627906976744,
      "grad_norm": 8.789668411329039,
      "learning_rate": 9.91682838414733e-06,
      "loss": 0.563,
      "step": 30
    },
    {
      "epoch": 0.6201550387596899,
      "grad_norm": 8.656020918037868,
      "learning_rate": 9.670080543662742e-06,
      "loss": 0.6109,
      "step": 40
    },
    {
      "epoch": 0.7751937984496124,
      "grad_norm": 9.508510861756253,
      "learning_rate": 9.267965445186733e-06,
      "loss": 0.598,
      "step": 50
    },
    {
      "epoch": 0.9302325581395349,
      "grad_norm": 7.438616074276716,
      "learning_rate": 8.72386091371891e-06,
      "loss": 0.5954,
      "step": 60
    },
    {
      "epoch": 1.0852713178294573,
      "grad_norm": 11.43708438878541,
      "learning_rate": 8.055868570489247e-06,
      "loss": 0.3699,
      "step": 70
    },
    {
      "epoch": 1.2403100775193798,
      "grad_norm": 17.52083315465849,
      "learning_rate": 7.286211616523193e-06,
      "loss": 0.1977,
      "step": 80
    },
    {
      "epoch": 1.3953488372093024,
      "grad_norm": 13.876316165599672,
      "learning_rate": 6.440495496826189e-06,
      "loss": 0.2065,
      "step": 90
    },
    {
      "epoch": 1.550387596899225,
      "grad_norm": 8.884685401439247,
      "learning_rate": 5.546856041889374e-06,
      "loss": 0.167,
      "step": 100
    },
    {
      "epoch": 1.7054263565891472,
      "grad_norm": 14.293184788635473,
      "learning_rate": 4.635023426695462e-06,
      "loss": 0.1753,
      "step": 110
    },
    {
      "epoch": 1.8604651162790697,
      "grad_norm": 11.602070326403544,
      "learning_rate": 3.7353330880415963e-06,
      "loss": 0.1473,
      "step": 120
    },
    {
      "epoch": 2.0155038759689923,
      "grad_norm": 5.538956396622884,
      "learning_rate": 2.8777165056209256e-06,
      "loss": 0.1551,
      "step": 130
    },
    {
      "epoch": 2.1705426356589146,
      "grad_norm": 4.859110219903441,
      "learning_rate": 2.0907054222102367e-06,
      "loss": 0.0216,
      "step": 140
    },
    {
      "epoch": 2.3255813953488373,
      "grad_norm": 2.695580515171099,
      "learning_rate": 1.4004826312100218e-06,
      "loss": 0.0013,
      "step": 150
    },
    {
      "epoch": 2.4806201550387597,
      "grad_norm": 13.977813199581801,
      "learning_rate": 8.30010910550611e-07,
      "loss": 0.0114,
      "step": 160
    },
    {
      "epoch": 2.6356589147286824,
      "grad_norm": 0.13568716951643228,
      "learning_rate": 3.9826908215420344e-07,
      "loss": 0.0088,
      "step": 170
    },
    {
      "epoch": 2.7906976744186047,
      "grad_norm": 13.78710482551732,
      "learning_rate": 1.196206122203647e-07,
      "loss": 0.0018,
      "step": 180
    },
    {
      "epoch": 2.945736434108527,
      "grad_norm": 6.559094463464456,
      "learning_rate": 3.3357581488030476e-09,
      "loss": 0.0141,
      "step": 190
    },
    {
      "epoch": 2.9767441860465116,
      "step": 192,
      "total_flos": 0.0,
      "train_loss": 0.26802204532820423,
      "train_runtime": 6969.4008,
      "train_samples_per_second": 3.109,
      "train_steps_per_second": 0.028
    }
  ],
  "logging_steps": 10,
  "max_steps": 192,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}