{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.961038961038961,
  "eval_steps": 3,
  "global_step": 57,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05194805194805195,
      "grad_norm": 0.0,
      "learning_rate": 0.0,
      "loss": 2.1051,
      "step": 1
    },
    {
      "epoch": 0.15584415584415584,
      "grad_norm": 0.0,
      "learning_rate": 0.0,
      "loss": 2.0376,
      "step": 3
    },
    {
      "epoch": 0.15584415584415584,
      "eval_loss": 2.0735840797424316,
      "eval_runtime": 188.468,
      "eval_samples_per_second": 1.443,
      "eval_steps_per_second": 0.027,
      "step": 3
    },
    {
      "epoch": 0.3116883116883117,
      "grad_norm": 0.0,
      "learning_rate": 0.0,
      "loss": 2.0549,
      "step": 6
    },
    {
      "epoch": 0.3116883116883117,
      "eval_loss": 2.0735840797424316,
      "eval_runtime": 183.8886,
      "eval_samples_per_second": 1.479,
      "eval_steps_per_second": 0.027,
      "step": 6
    },
    {
      "epoch": 0.4675324675324675,
      "grad_norm": 1.9069625261641296,
      "learning_rate": 1.8e-06,
      "loss": 2.1192,
      "step": 9
    },
    {
      "epoch": 0.4675324675324675,
      "eval_loss": 2.066406726837158,
      "eval_runtime": 188.4211,
      "eval_samples_per_second": 1.444,
      "eval_steps_per_second": 0.027,
      "step": 9
    },
    {
      "epoch": 0.6233766233766234,
      "grad_norm": 2.195508371268519,
      "learning_rate": 3e-06,
      "loss": 2.1346,
      "step": 12
    },
    {
      "epoch": 0.6233766233766234,
      "eval_loss": 2.055842638015747,
      "eval_runtime": 194.004,
      "eval_samples_per_second": 1.402,
      "eval_steps_per_second": 0.026,
      "step": 12
    },
    {
      "epoch": 0.7792207792207793,
      "grad_norm": 1.6392960084592598,
      "learning_rate": 1.5844124594183009e-06,
      "loss": 2.08,
      "step": 15
    },
    {
      "epoch": 0.7792207792207793,
      "eval_loss": 2.0401382446289062,
      "eval_runtime": 191.3975,
      "eval_samples_per_second": 1.421,
      "eval_steps_per_second": 0.026,
      "step": 15
    },
    {
      "epoch": 0.935064935064935,
      "grad_norm": 1.4536448011292864,
      "learning_rate": 8.15810619632839e-07,
      "loss": 2.0576,
      "step": 18
    },
    {
      "epoch": 0.935064935064935,
      "eval_loss": 2.024768829345703,
      "eval_runtime": 190.2805,
      "eval_samples_per_second": 1.429,
      "eval_steps_per_second": 0.026,
      "step": 18
    },
    {
      "epoch": 1.0909090909090908,
      "grad_norm": 1.5244570928071939,
      "learning_rate": 4.147020859997813e-07,
      "loss": 2.0351,
      "step": 21
    },
    {
      "epoch": 1.0909090909090908,
      "eval_loss": 2.0168356895446777,
      "eval_runtime": 183.6958,
      "eval_samples_per_second": 1.481,
      "eval_steps_per_second": 0.027,
      "step": 21
    },
    {
      "epoch": 1.2467532467532467,
      "grad_norm": 0.9156345951361289,
      "learning_rate": 2.1460580257299744e-07,
      "loss": 1.9633,
      "step": 24
    },
    {
      "epoch": 1.2467532467532467,
      "eval_loss": 2.0134994983673096,
      "eval_runtime": 183.9488,
      "eval_samples_per_second": 1.479,
      "eval_steps_per_second": 0.027,
      "step": 24
    },
    {
      "epoch": 1.4025974025974026,
      "grad_norm": 2.2398676747041684,
      "learning_rate": 1.198239157793215e-07,
      "loss": 2.0389,
      "step": 27
    },
    {
      "epoch": 1.4025974025974026,
      "eval_loss": 2.0117082595825195,
      "eval_runtime": 179.0815,
      "eval_samples_per_second": 1.519,
      "eval_steps_per_second": 0.028,
      "step": 27
    },
    {
      "epoch": 1.5584415584415585,
      "grad_norm": 0.8504588484649336,
      "learning_rate": 7.754566893269768e-08,
      "loss": 1.9637,
      "step": 30
    },
    {
      "epoch": 1.5584415584415585,
      "eval_loss": 2.0106735229492188,
      "eval_runtime": 184.0813,
      "eval_samples_per_second": 1.478,
      "eval_steps_per_second": 0.027,
      "step": 30
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 2.8901234292003775,
      "learning_rate": 5.997157748148484e-08,
      "loss": 1.9749,
      "step": 33
    },
    {
      "epoch": 1.7142857142857144,
      "eval_loss": 2.0099711418151855,
      "eval_runtime": 186.4048,
      "eval_samples_per_second": 1.459,
      "eval_steps_per_second": 0.027,
      "step": 33
    },
    {
      "epoch": 1.87012987012987,
      "grad_norm": 1.81025589751347,
      "learning_rate": 5.3254794554805664e-08,
      "loss": 2.0231,
      "step": 36
    },
    {
      "epoch": 1.87012987012987,
      "eval_loss": 2.0093705654144287,
      "eval_runtime": 181.0103,
      "eval_samples_per_second": 1.503,
      "eval_steps_per_second": 0.028,
      "step": 36
    },
    {
      "epoch": 2.0259740259740258,
      "grad_norm": 0.6589563923312365,
      "learning_rate": 5.093567565825763e-08,
      "loss": 1.9785,
      "step": 39
    },
    {
      "epoch": 2.0259740259740258,
      "eval_loss": 2.0088231563568115,
      "eval_runtime": 180.9094,
      "eval_samples_per_second": 1.504,
      "eval_steps_per_second": 0.028,
      "step": 39
    },
    {
      "epoch": 2.1818181818181817,
      "grad_norm": 0.6589712270301414,
      "learning_rate": 5.022931619396663e-08,
      "loss": 1.9619,
      "step": 42
    },
    {
      "epoch": 2.1818181818181817,
      "eval_loss": 2.008312463760376,
      "eval_runtime": 190.7131,
      "eval_samples_per_second": 1.426,
      "eval_steps_per_second": 0.026,
      "step": 42
    },
    {
      "epoch": 2.3376623376623376,
      "grad_norm": 0.9435038636441478,
      "learning_rate": 5.0045716697115606e-08,
      "loss": 1.9971,
      "step": 45
    },
    {
      "epoch": 2.3376623376623376,
      "eval_loss": 2.007845640182495,
      "eval_runtime": 193.4537,
      "eval_samples_per_second": 1.406,
      "eval_steps_per_second": 0.026,
      "step": 45
    },
    {
      "epoch": 2.4935064935064934,
      "grad_norm": 2.734201702872017,
      "learning_rate": 5.0013450033658545e-08,
      "loss": 1.9992,
      "step": 48
    },
    {
      "epoch": 2.4935064935064934,
      "eval_loss": 2.0075366497039795,
      "eval_runtime": 191.0125,
      "eval_samples_per_second": 1.424,
      "eval_steps_per_second": 0.026,
      "step": 48
    },
    {
      "epoch": 2.6493506493506493,
      "grad_norm": 0.6652092963795725,
      "learning_rate": 5.0001589255153705e-08,
      "loss": 1.9912,
      "step": 51
    },
    {
      "epoch": 2.6493506493506493,
      "eval_loss": 2.007113218307495,
      "eval_runtime": 183.1059,
      "eval_samples_per_second": 1.485,
      "eval_steps_per_second": 0.027,
      "step": 51
    },
    {
      "epoch": 2.8051948051948052,
      "grad_norm": 0.8157759570772763,
      "learning_rate": 5.000011197079783e-08,
      "loss": 1.9894,
      "step": 54
    },
    {
      "epoch": 2.8051948051948052,
      "eval_loss": 2.0066730976104736,
      "eval_runtime": 188.7087,
      "eval_samples_per_second": 1.441,
      "eval_steps_per_second": 0.026,
      "step": 54
    },
    {
      "epoch": 2.961038961038961,
      "grad_norm": 2.3142513279299544,
      "learning_rate": 5.000000337112684e-08,
      "loss": 2.0143,
      "step": 57
    },
    {
      "epoch": 2.961038961038961,
      "eval_loss": 2.006239414215088,
      "eval_runtime": 185.8251,
      "eval_samples_per_second": 1.464,
      "eval_steps_per_second": 0.027,
      "step": 57
    },
    {
      "epoch": 2.961038961038961,
      "step": 57,
      "total_flos": 227154377834496.0,
      "train_loss": 2.023003205918429,
      "train_runtime": 21371.877,
      "train_samples_per_second": 0.343,
      "train_steps_per_second": 0.003
    }
  ],
  "logging_steps": 3,
  "max_steps": 57,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 12,
  "total_flos": 227154377834496.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}