{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9898305084745763,
  "eval_steps": 500,
  "global_step": 73,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013559322033898305,
      "grad_norm": 5.050205082877998,
      "learning_rate": 0.000125,
      "loss": 1.2623,
      "step": 1
    },
    {
      "epoch": 0.06779661016949153,
      "grad_norm": 2.9389613294125154,
      "learning_rate": 0.000625,
      "loss": 1.2343,
      "step": 5
    },
    {
      "epoch": 0.13559322033898305,
      "grad_norm": 2.7719726396922333,
      "learning_rate": 0.0009976658173588243,
      "loss": 1.2732,
      "step": 10
    },
    {
      "epoch": 0.2033898305084746,
      "grad_norm": 0.5763036252280294,
      "learning_rate": 0.0009716559066288715,
      "loss": 1.1358,
      "step": 15
    },
    {
      "epoch": 0.2711864406779661,
      "grad_norm": 0.24783483942004164,
      "learning_rate": 0.0009182350690051134,
      "loss": 1.0897,
      "step": 20
    },
    {
      "epoch": 0.3389830508474576,
      "grad_norm": 0.13051456050984434,
      "learning_rate": 0.0008405079293933986,
      "loss": 1.0148,
      "step": 25
    },
    {
      "epoch": 0.4067796610169492,
      "grad_norm": 0.16709779185955242,
      "learning_rate": 0.000742991706621303,
      "loss": 1.0134,
      "step": 30
    },
    {
      "epoch": 0.4745762711864407,
      "grad_norm": 0.09299287343519272,
      "learning_rate": 0.0006313536890992934,
      "loss": 1.0077,
      "step": 35
    },
    {
      "epoch": 0.5423728813559322,
      "grad_norm": 0.08379356751538114,
      "learning_rate": 0.0005120818726180662,
      "loss": 0.9777,
      "step": 40
    },
    {
      "epoch": 0.6101694915254238,
      "grad_norm": 0.06580795358809759,
      "learning_rate": 0.000392107901616097,
      "loss": 0.9547,
      "step": 45
    },
    {
      "epoch": 0.6779661016949152,
      "grad_norm": 0.076526418601839,
      "learning_rate": 0.0002784042272003794,
      "loss": 0.9417,
      "step": 50
    },
    {
      "epoch": 0.7457627118644068,
      "grad_norm": 0.08072786402812815,
      "learning_rate": 0.00017757889363191482,
      "loss": 0.9267,
      "step": 55
    },
    {
      "epoch": 0.8135593220338984,
      "grad_norm": 0.08008172204396788,
      "learning_rate": 9.549150281252633e-05,
      "loss": 0.9646,
      "step": 60
    },
    {
      "epoch": 0.8813559322033898,
      "grad_norm": 0.049972195778438866,
      "learning_rate": 3.691267552111183e-05,
      "loss": 0.9495,
      "step": 65
    },
    {
      "epoch": 0.9491525423728814,
      "grad_norm": 0.05054132063952455,
      "learning_rate": 5.2468002744744395e-06,
      "loss": 0.9527,
      "step": 70
    },
    {
      "epoch": 0.9898305084745763,
      "eval_loss": 1.0899375677108765,
      "eval_runtime": 139.4951,
      "eval_samples_per_second": 20.775,
      "eval_steps_per_second": 0.652,
      "step": 73
    },
    {
      "epoch": 0.9898305084745763,
      "step": 73,
      "total_flos": 91862908010496.0,
      "train_loss": 1.0252333144619041,
      "train_runtime": 1907.7952,
      "train_samples_per_second": 4.937,
      "train_steps_per_second": 0.038
    }
  ],
  "logging_steps": 5,
  "max_steps": 73,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 91862908010496.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}