{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9929780131230572,
  "eval_steps": 500,
  "global_step": 26000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06,
      "learning_rate": 9.99092041233887e-05,
      "loss": 0.6515,
      "step": 500
    },
    {
      "epoch": 0.12,
      "learning_rate": 9.963714624920319e-05,
      "loss": 0.6374,
      "step": 1000
    },
    {
      "epoch": 0.17,
      "learning_rate": 9.918481444677047e-05,
      "loss": 0.6399,
      "step": 1500
    },
    {
      "epoch": 0.23,
      "learning_rate": 9.855385151059141e-05,
      "loss": 0.6424,
      "step": 2000
    },
    {
      "epoch": 0.29,
      "learning_rate": 9.774654899398198e-05,
      "loss": 0.6391,
      "step": 2500
    },
    {
      "epoch": 0.35,
      "learning_rate": 9.676583888652963e-05,
      "loss": 0.6313,
      "step": 3000
    },
    {
      "epoch": 0.4,
      "learning_rate": 9.561528296559068e-05,
      "loss": 0.6355,
      "step": 3500
    },
    {
      "epoch": 0.46,
      "learning_rate": 9.429905986050238e-05,
      "loss": 0.6304,
      "step": 4000
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.282194987649045e-05,
      "loss": 0.6256,
      "step": 4500
    },
    {
      "epoch": 0.58,
      "learning_rate": 9.118931763338888e-05,
      "loss": 0.6269,
      "step": 5000
    },
    {
      "epoch": 0.63,
      "learning_rate": 8.94070925822255e-05,
      "loss": 0.6306,
      "step": 5500
    },
    {
      "epoch": 0.69,
      "learning_rate": 8.748174747043391e-05,
      "loss": 0.6237,
      "step": 6000
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.542027483390225e-05,
      "loss": 0.6325,
      "step": 6500
    },
    {
      "epoch": 0.81,
      "learning_rate": 8.323016160123629e-05,
      "loss": 0.6266,
      "step": 7000
    },
    {
      "epoch": 0.86,
      "learning_rate": 8.091936190246954e-05,
      "loss": 0.6261,
      "step": 7500
    },
    {
      "epoch": 0.92,
      "learning_rate": 7.849626818097493e-05,
      "loss": 0.6282,
      "step": 8000
    },
    {
      "epoch": 0.98,
      "learning_rate": 7.596968071349464e-05,
      "loss": 0.6271,
      "step": 8500
    },
    {
      "epoch": 1.04,
      "learning_rate": 7.334877564898644e-05,
      "loss": 0.6232,
      "step": 9000
    },
    {
      "epoch": 1.09,
      "learning_rate": 7.064307168236421e-05,
      "loss": 0.6224,
      "step": 9500
    },
    {
      "epoch": 1.15,
      "learning_rate": 6.786239548416799e-05,
      "loss": 0.6247,
      "step": 10000
    },
    {
      "epoch": 1.21,
      "learning_rate": 6.501684601171727e-05,
      "loss": 0.6238,
      "step": 10500
    },
    {
      "epoch": 1.27,
      "learning_rate": 6.21167578313637e-05,
      "loss": 0.628,
      "step": 11000
    },
    {
      "epoch": 1.32,
      "learning_rate": 5.917266358505073e-05,
      "loss": 0.6267,
      "step": 11500
    },
    {
      "epoch": 1.38,
      "learning_rate": 5.6195255737495125e-05,
      "loss": 0.6285,
      "step": 12000
    },
    {
      "epoch": 1.44,
      "learning_rate": 5.319534774291885e-05,
      "loss": 0.6285,
      "step": 12500
    },
    {
      "epoch": 1.5,
      "learning_rate": 5.0183834772366734e-05,
      "loss": 0.6246,
      "step": 13000
    },
    {
      "epoch": 1.55,
      "learning_rate": 4.7171654144242266e-05,
      "loss": 0.628,
      "step": 13500
    },
    {
      "epoch": 1.61,
      "learning_rate": 4.416974560177112e-05,
      "loss": 0.6276,
      "step": 14000
    },
    {
      "epoch": 1.67,
      "learning_rate": 4.1189011581658147e-05,
      "loss": 0.6284,
      "step": 14500
    },
    {
      "epoch": 1.73,
      "learning_rate": 3.82402776182354e-05,
      "loss": 0.6242,
      "step": 15000
    },
    {
      "epoch": 1.78,
      "learning_rate": 3.5334253026906946e-05,
      "loss": 0.6222,
      "step": 15500
    },
    {
      "epoch": 1.84,
      "learning_rate": 3.248149200968176e-05,
      "loss": 0.6241,
      "step": 16000
    },
    {
      "epoch": 1.9,
      "learning_rate": 2.969235532405272e-05,
      "loss": 0.6145,
      "step": 16500
    },
    {
      "epoch": 1.96,
      "learning_rate": 2.6976972654434195e-05,
      "loss": 0.6181,
      "step": 17000
    },
    {
      "epoch": 2.01,
      "learning_rate": 2.434520582281914e-05,
      "loss": 0.6257,
      "step": 17500
    },
    {
      "epoch": 2.07,
      "learning_rate": 2.1806612972268065e-05,
      "loss": 0.6167,
      "step": 18000
    },
    {
      "epoch": 2.13,
      "learning_rate": 1.9370413853309972e-05,
      "loss": 0.6208,
      "step": 18500
    },
    {
      "epoch": 2.19,
      "learning_rate": 1.7045456339329084e-05,
      "loss": 0.6162,
      "step": 19000
    },
    {
      "epoch": 2.24,
      "learning_rate": 1.4840184292548053e-05,
      "loss": 0.6118,
      "step": 19500
    },
    {
      "epoch": 2.3,
      "learning_rate": 1.2762606897313006e-05,
      "loss": 0.6155,
      "step": 20000
    },
    {
      "epoch": 2.36,
      "learning_rate": 1.0820269572057097e-05,
      "loss": 0.6208,
      "step": 20500
    },
    {
      "epoch": 2.42,
      "learning_rate": 9.020226565585154e-06,
      "loss": 0.6152,
      "step": 21000
    },
    {
      "epoch": 2.47,
      "learning_rate": 7.36901533720562e-06,
      "loss": 0.6135,
      "step": 21500
    },
    {
      "epoch": 2.53,
      "learning_rate": 5.87263281375654e-06,
      "loss": 0.6098,
      "step": 22000
    },
    {
      "epoch": 2.59,
      "learning_rate": 4.536513609756426e-06,
      "loss": 0.6157,
      "step": 22500
    },
    {
      "epoch": 2.65,
      "learning_rate": 3.365510289780621e-06,
      "loss": 0.6156,
      "step": 23000
    },
    {
      "epoch": 2.71,
      "learning_rate": 2.3638757447472237e-06,
      "loss": 0.6143,
      "step": 23500
    },
    {
      "epoch": 2.76,
      "learning_rate": 1.535247746118651e-06,
      "loss": 0.6206,
      "step": 24000
    },
    {
      "epoch": 2.82,
      "learning_rate": 8.826357341157054e-07,
      "loss": 0.6161,
      "step": 24500
    },
    {
      "epoch": 2.88,
      "learning_rate": 4.084098879270637e-07,
      "loss": 0.6193,
      "step": 25000
    },
    {
      "epoch": 2.94,
      "learning_rate": 1.1429251760938231e-07,
      "loss": 0.6147,
      "step": 25500
    },
    {
      "epoch": 2.99,
      "learning_rate": 1.3518089412467572e-09,
      "loss": 0.6172,
      "step": 26000
    }
  ],
  "logging_steps": 500,
  "max_steps": 26061,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 3.6163640128943923e+18,
  "trial_name": null,
  "trial_params": null
}