{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.8509367363562315,
  "eval_steps": 500,
  "global_step": 10500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13575889220743959,
      "grad_norm": 0.42541322112083435,
      "learning_rate": 4.775092768576342e-05,
      "loss": 1.6148,
      "step": 500
    },
    {
      "epoch": 0.27151778441487917,
      "grad_norm": 0.3683030903339386,
      "learning_rate": 4.5488279482306095e-05,
      "loss": 1.4559,
      "step": 1000
    },
    {
      "epoch": 0.40727667662231876,
      "grad_norm": 0.2653275728225708,
      "learning_rate": 4.322563127884877e-05,
      "loss": 1.4429,
      "step": 1500
    },
    {
      "epoch": 0.5430355688297583,
      "grad_norm": 0.2617078423500061,
      "learning_rate": 4.096298307539144e-05,
      "loss": 1.4354,
      "step": 2000
    },
    {
      "epoch": 0.678794461037198,
      "grad_norm": 0.2017202079296112,
      "learning_rate": 3.8700334871934116e-05,
      "loss": 1.4308,
      "step": 2500
    },
    {
      "epoch": 0.8145533532446375,
      "grad_norm": 0.20993387699127197,
      "learning_rate": 3.643768666847679e-05,
      "loss": 1.4278,
      "step": 3000
    },
    {
      "epoch": 0.9503122454520772,
      "grad_norm": 0.16242703795433044,
      "learning_rate": 3.417503846501946e-05,
      "loss": 1.4253,
      "step": 3500
    },
    {
      "epoch": 1.0,
      "eval_bleu": 0.0,
      "eval_gen_len": 19.0,
      "eval_loss": 1.4292237758636475,
      "eval_meteor": 0.03936079398500908,
      "eval_rouge1": 0.0,
      "eval_rouge2": 0.0,
      "eval_rougeL": 0.0,
      "eval_rougeLsum": 0.0,
      "eval_runtime": 213.0238,
      "eval_samples_per_second": 7.281,
      "eval_steps_per_second": 1.821,
      "step": 3683
    },
    {
      "epoch": 1.0860711376595167,
      "grad_norm": 0.29393067955970764,
      "learning_rate": 3.1912390261562137e-05,
      "loss": 1.4232,
      "step": 4000
    },
    {
      "epoch": 1.2218300298669562,
      "grad_norm": 0.14881190657615662,
      "learning_rate": 2.9649742058104807e-05,
      "loss": 1.4215,
      "step": 4500
    },
    {
      "epoch": 1.3575889220743957,
      "grad_norm": 0.15426018834114075,
      "learning_rate": 2.7387093854647484e-05,
      "loss": 1.4205,
      "step": 5000
    },
    {
      "epoch": 1.4933478142818355,
      "grad_norm": 0.20635420083999634,
      "learning_rate": 2.5124445651190154e-05,
      "loss": 1.4196,
      "step": 5500
    },
    {
      "epoch": 1.629106706489275,
      "grad_norm": 0.1161305382847786,
      "learning_rate": 2.2861797447732827e-05,
      "loss": 1.4189,
      "step": 6000
    },
    {
      "epoch": 1.7648655986967148,
      "grad_norm": 0.18034473061561584,
      "learning_rate": 2.05991492442755e-05,
      "loss": 1.4181,
      "step": 6500
    },
    {
      "epoch": 1.9006244909041543,
      "grad_norm": 0.12195830792188644,
      "learning_rate": 1.8336501040818175e-05,
      "loss": 1.4177,
      "step": 7000
    },
    {
      "epoch": 2.0,
      "eval_bleu": 0.0,
      "eval_gen_len": 19.0,
      "eval_loss": 1.4223343133926392,
      "eval_meteor": 0.07128909550595973,
      "eval_rouge1": 0.0,
      "eval_rouge2": 0.0,
      "eval_rougeL": 0.0,
      "eval_rougeLsum": 0.0,
      "eval_runtime": 214.8209,
      "eval_samples_per_second": 7.22,
      "eval_steps_per_second": 1.806,
      "step": 7366
    },
    {
      "epoch": 2.036383383111594,
      "grad_norm": 0.09966401755809784,
      "learning_rate": 1.6073852837360848e-05,
      "loss": 1.4171,
      "step": 7500
    },
    {
      "epoch": 2.1721422753190334,
      "grad_norm": 0.12720510363578796,
      "learning_rate": 1.3811204633903522e-05,
      "loss": 1.4165,
      "step": 8000
    },
    {
      "epoch": 2.307901167526473,
      "grad_norm": 0.17217351496219635,
      "learning_rate": 1.1548556430446195e-05,
      "loss": 1.4163,
      "step": 8500
    },
    {
      "epoch": 2.4436600597339124,
      "grad_norm": 0.1286771595478058,
      "learning_rate": 9.285908226988869e-06,
      "loss": 1.4161,
      "step": 9000
    },
    {
      "epoch": 2.579418951941352,
      "grad_norm": 0.12232056260108948,
      "learning_rate": 7.023260023531542e-06,
      "loss": 1.4158,
      "step": 9500
    },
    {
      "epoch": 2.7151778441487915,
      "grad_norm": 0.14986146986484528,
      "learning_rate": 4.760611820074215e-06,
      "loss": 1.4156,
      "step": 10000
    },
    {
      "epoch": 2.8509367363562315,
      "grad_norm": 0.08726570755243301,
      "learning_rate": 2.4979636166168884e-06,
      "loss": 1.4155,
      "step": 10500
    }
  ],
  "logging_steps": 500,
  "max_steps": 11049,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.5158605926308512e+19,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}