{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.0,
  "global_step": 13096,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.15,
      "learning_rate": 9.618204031765426e-06,
      "loss": 0.3591,
      "step": 500
    },
    {
      "epoch": 0.31,
      "learning_rate": 9.23640806353085e-06,
      "loss": 0.3059,
      "step": 1000
    },
    {
      "epoch": 0.46,
      "learning_rate": 8.854612095296275e-06,
      "loss": 0.2878,
      "step": 1500
    },
    {
      "epoch": 0.61,
      "learning_rate": 8.4728161270617e-06,
      "loss": 0.2803,
      "step": 2000
    },
    {
      "epoch": 0.76,
      "learning_rate": 8.091020158827123e-06,
      "loss": 0.2734,
      "step": 2500
    },
    {
      "epoch": 0.92,
      "learning_rate": 7.709224190592548e-06,
      "loss": 0.2593,
      "step": 3000
    },
    {
      "epoch": 1.07,
      "learning_rate": 7.327428222357973e-06,
      "loss": 0.2356,
      "step": 3500
    },
    {
      "epoch": 1.22,
      "learning_rate": 6.945632254123397e-06,
      "loss": 0.1979,
      "step": 4000
    },
    {
      "epoch": 1.37,
      "learning_rate": 6.563836285888821e-06,
      "loss": 0.1948,
      "step": 4500
    },
    {
      "epoch": 1.53,
      "learning_rate": 6.1820403176542464e-06,
      "loss": 0.1996,
      "step": 5000
    },
    {
      "epoch": 1.68,
      "learning_rate": 5.80024434941967e-06,
      "loss": 0.1996,
      "step": 5500
    },
    {
      "epoch": 1.83,
      "learning_rate": 5.418448381185095e-06,
      "loss": 0.1905,
      "step": 6000
    },
    {
      "epoch": 1.99,
      "learning_rate": 5.03665241295052e-06,
      "loss": 0.1908,
      "step": 6500
    },
    {
      "epoch": 2.14,
      "learning_rate": 4.6548564447159445e-06,
      "loss": 0.1417,
      "step": 7000
    },
    {
      "epoch": 2.29,
      "learning_rate": 4.273060476481369e-06,
      "loss": 0.1369,
      "step": 7500
    },
    {
      "epoch": 2.44,
      "learning_rate": 3.891264508246793e-06,
      "loss": 0.1378,
      "step": 8000
    },
    {
      "epoch": 2.6,
      "learning_rate": 3.5094685400122174e-06,
      "loss": 0.1355,
      "step": 8500
    },
    {
      "epoch": 2.75,
      "learning_rate": 3.1276725717776426e-06,
      "loss": 0.145,
      "step": 9000
    },
    {
      "epoch": 2.9,
      "learning_rate": 2.745876603543067e-06,
      "loss": 0.1411,
      "step": 9500
    },
    {
      "epoch": 3.05,
      "learning_rate": 2.364080635308491e-06,
      "loss": 0.1127,
      "step": 10000
    },
    {
      "epoch": 3.21,
      "learning_rate": 1.982284667073916e-06,
      "loss": 0.0956,
      "step": 10500
    },
    {
      "epoch": 3.36,
      "learning_rate": 1.6004886988393402e-06,
      "loss": 0.0963,
      "step": 11000
    },
    {
      "epoch": 3.51,
      "learning_rate": 1.218692730604765e-06,
      "loss": 0.1004,
      "step": 11500
    },
    {
      "epoch": 3.67,
      "learning_rate": 8.368967623701894e-07,
      "loss": 0.1026,
      "step": 12000
    },
    {
      "epoch": 3.82,
      "learning_rate": 4.55100794135614e-07,
      "loss": 0.1097,
      "step": 12500
    },
    {
      "epoch": 3.97,
      "learning_rate": 7.330482590103849e-08,
      "loss": 0.0936,
      "step": 13000
    },
    {
      "epoch": 4.0,
      "step": 13096,
      "total_flos": 2503158872647680.0,
      "train_loss": 0.1811416630491514,
      "train_runtime": 5121.9571,
      "train_samples_per_second": 81.799,
      "train_steps_per_second": 2.557
    }
  ],
  "max_steps": 13096,
  "num_train_epochs": 4,
  "total_flos": 2503158872647680.0,
  "trial_name": null,
  "trial_params": null
}