bacolbert-v0.0 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 14064,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.035551763367463025,
"grad_norm": 8.486759185791016,
"learning_rate": 2.8937713310580205e-06,
"loss": 1.3328,
"step": 500
},
{
"epoch": 0.07110352673492605,
"grad_norm": 9.923312187194824,
"learning_rate": 2.7873293515358363e-06,
"loss": 1.0132,
"step": 1000
},
{
"epoch": 0.10665529010238908,
"grad_norm": 8.329161643981934,
"learning_rate": 2.6806740614334473e-06,
"loss": 0.9106,
"step": 1500
},
{
"epoch": 0.1422070534698521,
"grad_norm": 9.29595947265625,
"learning_rate": 2.5740187713310584e-06,
"loss": 0.8662,
"step": 2000
},
{
"epoch": 0.17775881683731512,
"grad_norm": 8.718080520629883,
"learning_rate": 2.4673634812286687e-06,
"loss": 0.835,
"step": 2500
},
{
"epoch": 0.21331058020477817,
"grad_norm": 7.578711032867432,
"learning_rate": 2.360921501706485e-06,
"loss": 0.7989,
"step": 3000
},
{
"epoch": 0.24886234357224118,
"grad_norm": 7.699925422668457,
"learning_rate": 2.2542662116040955e-06,
"loss": 0.7699,
"step": 3500
},
{
"epoch": 0.2844141069397042,
"grad_norm": 9.358419418334961,
"learning_rate": 2.1476109215017066e-06,
"loss": 0.7482,
"step": 4000
},
{
"epoch": 0.3199658703071672,
"grad_norm": 10.0253324508667,
"learning_rate": 2.0409556313993177e-06,
"loss": 0.7231,
"step": 4500
},
{
"epoch": 0.35551763367463024,
"grad_norm": 7.296217918395996,
"learning_rate": 1.9343003412969284e-06,
"loss": 0.7141,
"step": 5000
},
{
"epoch": 0.3910693970420933,
"grad_norm": 8.735479354858398,
"learning_rate": 1.8278583617747441e-06,
"loss": 0.6845,
"step": 5500
},
{
"epoch": 0.42662116040955633,
"grad_norm": 8.95112133026123,
"learning_rate": 1.7212030716723552e-06,
"loss": 0.673,
"step": 6000
},
{
"epoch": 0.46217292377701935,
"grad_norm": 6.7044291496276855,
"learning_rate": 1.614547781569966e-06,
"loss": 0.6734,
"step": 6500
},
{
"epoch": 0.49772468714448237,
"grad_norm": 8.28055477142334,
"learning_rate": 1.5078924914675768e-06,
"loss": 0.6547,
"step": 7000
},
{
"epoch": 0.5332764505119454,
"grad_norm": 8.512009620666504,
"learning_rate": 1.4014505119453925e-06,
"loss": 0.6486,
"step": 7500
},
{
"epoch": 0.5688282138794084,
"grad_norm": 7.365358352661133,
"learning_rate": 1.2947952218430034e-06,
"loss": 0.6417,
"step": 8000
},
{
"epoch": 0.6043799772468714,
"grad_norm": 11.026593208312988,
"learning_rate": 1.1881399317406143e-06,
"loss": 0.629,
"step": 8500
},
{
"epoch": 0.6399317406143344,
"grad_norm": 8.76122760772705,
"learning_rate": 1.0814846416382254e-06,
"loss": 0.6171,
"step": 9000
},
{
"epoch": 0.6754835039817975,
"grad_norm": 9.334900856018066,
"learning_rate": 9.75042662116041e-07,
"loss": 0.6168,
"step": 9500
},
{
"epoch": 0.7110352673492605,
"grad_norm": 9.215682029724121,
"learning_rate": 8.683873720136519e-07,
"loss": 0.6164,
"step": 10000
},
{
"epoch": 0.7465870307167235,
"grad_norm": 7.827558994293213,
"learning_rate": 7.617320819112628e-07,
"loss": 0.6137,
"step": 10500
},
{
"epoch": 0.7821387940841866,
"grad_norm": 7.96988582611084,
"learning_rate": 6.550767918088738e-07,
"loss": 0.607,
"step": 11000
},
{
"epoch": 0.8176905574516496,
"grad_norm": 8.489790916442871,
"learning_rate": 5.486348122866894e-07,
"loss": 0.5998,
"step": 11500
},
{
"epoch": 0.8532423208191127,
"grad_norm": 7.611661434173584,
"learning_rate": 4.4197952218430034e-07,
"loss": 0.5966,
"step": 12000
},
{
"epoch": 0.8887940841865757,
"grad_norm": 9.416797637939453,
"learning_rate": 3.353242320819113e-07,
"loss": 0.5989,
"step": 12500
},
{
"epoch": 0.9243458475540387,
"grad_norm": 7.783421516418457,
"learning_rate": 2.286689419795222e-07,
"loss": 0.593,
"step": 13000
},
{
"epoch": 0.9598976109215017,
"grad_norm": 8.348119735717773,
"learning_rate": 1.2222696245733788e-07,
"loss": 0.5993,
"step": 13500
},
{
"epoch": 0.9954493742889647,
"grad_norm": 9.241561889648438,
"learning_rate": 1.5571672354948806e-08,
"loss": 0.584,
"step": 14000
}
],
"logging_steps": 500,
"max_steps": 14064,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}