{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 3175,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.15748031496062992,
      "grad_norm": 5.84375,
      "learning_rate": 0.00031446540880503143,
      "loss": 1.6016,
      "step": 100
    },
    {
      "epoch": 0.31496062992125984,
      "grad_norm": 2.90625,
      "learning_rate": 0.0004997720451762572,
      "loss": 2.0478,
      "step": 200
    },
    {
      "epoch": 0.47244094488188976,
      "grad_norm": 3.0,
      "learning_rate": 0.0004973084374349976,
      "loss": 2.0072,
      "step": 300
    },
    {
      "epoch": 0.6299212598425197,
      "grad_norm": 2.578125,
      "learning_rate": 0.0004921639131931859,
      "loss": 1.8507,
      "step": 400
    },
    {
      "epoch": 0.7874015748031497,
      "grad_norm": 1.6953125,
      "learning_rate": 0.00048439424102900066,
      "loss": 1.7019,
      "step": 500
    },
    {
      "epoch": 0.9448818897637795,
      "grad_norm": 2.6875,
      "learning_rate": 0.00047408364711169396,
      "loss": 1.5988,
      "step": 600
    },
    {
      "epoch": 1.1023622047244095,
      "grad_norm": 1.8203125,
      "learning_rate": 0.00046134390215823,
      "loss": 1.3103,
      "step": 700
    },
    {
      "epoch": 1.2598425196850394,
      "grad_norm": 1.671875,
      "learning_rate": 0.00044631310979666443,
      "loss": 1.237,
      "step": 800
    },
    {
      "epoch": 1.4173228346456692,
      "grad_norm": 1.7421875,
      "learning_rate": 0.0004291542094708612,
      "loss": 1.2112,
      "step": 900
    },
    {
      "epoch": 1.574803149606299,
      "grad_norm": 2.28125,
      "learning_rate": 0.000410053210115622,
      "loss": 1.1948,
      "step": 1000
    },
    {
      "epoch": 1.7322834645669292,
      "grad_norm": 1.5859375,
      "learning_rate": 0.00038921717374985584,
      "loss": 1.1666,
      "step": 1100
    },
    {
      "epoch": 1.889763779527559,
      "grad_norm": 1.59375,
      "learning_rate": 0.0003668719708463959,
      "loss": 1.1001,
      "step": 1200
    },
    {
      "epoch": 2.047244094488189,
      "grad_norm": 1.6171875,
      "learning_rate": 0.00034325983181110047,
      "loss": 0.9377,
      "step": 1300
    },
    {
      "epoch": 2.204724409448819,
      "grad_norm": 1.640625,
      "learning_rate": 0.00031863672111412524,
      "loss": 0.6561,
      "step": 1400
    },
    {
      "epoch": 2.362204724409449,
      "grad_norm": 1.9296875,
      "learning_rate": 0.00029326956253877123,
      "loss": 0.6872,
      "step": 1500
    },
    {
      "epoch": 2.5196850393700787,
      "grad_norm": 1.5859375,
      "learning_rate": 0.00026743334562725617,
      "loss": 0.6448,
      "step": 1600
    },
    {
      "epoch": 2.677165354330709,
      "grad_norm": 1.6484375,
      "learning_rate": 0.00024140814469062377,
      "loss": 0.643,
      "step": 1700
    },
    {
      "epoch": 2.8346456692913384,
      "grad_norm": 1.28125,
      "learning_rate": 0.0002154760826978469,
      "loss": 0.6216,
      "step": 1800
    },
    {
      "epoch": 2.9921259842519685,
      "grad_norm": 1.625,
      "learning_rate": 0.00018991827295670777,
      "loss": 0.5788,
      "step": 1900
    },
    {
      "epoch": 3.1496062992125986,
      "grad_norm": 1.2890625,
      "learning_rate": 0.00016501177173978493,
      "loss": 0.2589,
      "step": 2000
    },
    {
      "epoch": 3.3070866141732282,
      "grad_norm": 1.7734375,
      "learning_rate": 0.00014102657489022886,
      "loss": 0.2383,
      "step": 2100
    },
    {
      "epoch": 3.4645669291338583,
      "grad_norm": 1.1875,
      "learning_rate": 0.00011822269096524812,
      "loss": 0.2227,
      "step": 2200
    },
    {
      "epoch": 3.622047244094488,
      "grad_norm": 1.46875,
      "learning_rate": 9.684732264553247e-05,
      "loss": 0.2168,
      "step": 2300
    },
    {
      "epoch": 3.779527559055118,
      "grad_norm": 1.0390625,
      "learning_rate": 7.713218696519558e-05,
      "loss": 0.2162,
      "step": 2400
    },
    {
      "epoch": 3.937007874015748,
      "grad_norm": 1.609375,
      "learning_rate": 5.929100341195187e-05,
      "loss": 0.1944,
      "step": 2500
    },
    {
      "epoch": 4.094488188976378,
      "grad_norm": 0.45703125,
      "learning_rate": 4.351717712746703e-05,
      "loss": 0.123,
      "step": 2600
    },
    {
      "epoch": 4.251968503937007,
      "grad_norm": 0.4609375,
      "learning_rate": 2.9981702322862735e-05,
      "loss": 0.0768,
      "step": 2700
    },
    {
      "epoch": 4.409448818897638,
      "grad_norm": 0.59375,
      "learning_rate": 1.8831308637139e-05,
      "loss": 0.0761,
      "step": 2800
    },
    {
      "epoch": 4.566929133858268,
      "grad_norm": 0.53515625,
      "learning_rate": 1.0186870532686742e-05,
      "loss": 0.0786,
      "step": 2900
    },
    {
      "epoch": 4.724409448818898,
      "grad_norm": 0.640625,
      "learning_rate": 4.1420969706420505e-06,
      "loss": 0.0758,
      "step": 3000
    },
    {
      "epoch": 4.881889763779528,
      "grad_norm": 0.515625,
      "learning_rate": 7.625155704936715e-07,
      "loss": 0.0792,
      "step": 3100
    },
    {
      "epoch": 5.0,
      "step": 3175,
      "total_flos": 2.254602634660147e+16,
      "train_loss": 0.7783217168792965,
      "train_runtime": 2040.3082,
      "train_samples_per_second": 12.444,
      "train_steps_per_second": 1.556
    }
  ],
  "logging_steps": 100,
  "max_steps": 3175,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.254602634660147e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}