{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 27582,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"learning_rate": 4.909361177579581e-05,
"loss": 1.1223,
"step": 500
},
{
"epoch": 0.11,
"learning_rate": 4.818722355159162e-05,
"loss": 1.035,
"step": 1000
},
{
"epoch": 0.16,
"learning_rate": 4.728083532738743e-05,
"loss": 1.0383,
"step": 1500
},
{
"epoch": 0.22,
"learning_rate": 4.637444710318324e-05,
"loss": 0.9976,
"step": 2000
},
{
"epoch": 0.27,
"learning_rate": 4.546805887897905e-05,
"loss": 0.9862,
"step": 2500
},
{
"epoch": 0.33,
"learning_rate": 4.4561670654774854e-05,
"loss": 0.9828,
"step": 3000
},
{
"epoch": 0.38,
"learning_rate": 4.3655282430570665e-05,
"loss": 0.991,
"step": 3500
},
{
"epoch": 0.44,
"learning_rate": 4.2748894206366476e-05,
"loss": 0.9492,
"step": 4000
},
{
"epoch": 0.49,
"learning_rate": 4.184250598216228e-05,
"loss": 0.9492,
"step": 4500
},
{
"epoch": 0.54,
"learning_rate": 4.093611775795809e-05,
"loss": 0.9483,
"step": 5000
},
{
"epoch": 0.6,
"learning_rate": 4.00297295337539e-05,
"loss": 0.9157,
"step": 5500
},
{
"epoch": 0.65,
"learning_rate": 3.912334130954971e-05,
"loss": 0.9092,
"step": 6000
},
{
"epoch": 0.71,
"learning_rate": 3.821695308534552e-05,
"loss": 0.8941,
"step": 6500
},
{
"epoch": 0.76,
"learning_rate": 3.731056486114133e-05,
"loss": 0.9126,
"step": 7000
},
{
"epoch": 0.82,
"learning_rate": 3.640417663693714e-05,
"loss": 0.8869,
"step": 7500
},
{
"epoch": 0.87,
"learning_rate": 3.549778841273294e-05,
"loss": 0.8928,
"step": 8000
},
{
"epoch": 0.92,
"learning_rate": 3.459140018852875e-05,
"loss": 0.8764,
"step": 8500
},
{
"epoch": 0.98,
"learning_rate": 3.368501196432456e-05,
"loss": 0.8788,
"step": 9000
},
{
"epoch": 1.03,
"learning_rate": 3.277862374012037e-05,
"loss": 0.7333,
"step": 9500
},
{
"epoch": 1.09,
"learning_rate": 3.187223551591618e-05,
"loss": 0.6604,
"step": 10000
},
{
"epoch": 1.14,
"learning_rate": 3.0965847291711983e-05,
"loss": 0.6689,
"step": 10500
},
{
"epoch": 1.2,
"learning_rate": 3.0059459067507794e-05,
"loss": 0.6555,
"step": 11000
},
{
"epoch": 1.25,
"learning_rate": 2.9153070843303605e-05,
"loss": 0.658,
"step": 11500
},
{
"epoch": 1.31,
"learning_rate": 2.8246682619099413e-05,
"loss": 0.6627,
"step": 12000
},
{
"epoch": 1.36,
"learning_rate": 2.734029439489522e-05,
"loss": 0.6626,
"step": 12500
},
{
"epoch": 1.41,
"learning_rate": 2.643390617069103e-05,
"loss": 0.6381,
"step": 13000
},
{
"epoch": 1.47,
"learning_rate": 2.552751794648684e-05,
"loss": 0.6319,
"step": 13500
},
{
"epoch": 1.52,
"learning_rate": 2.462112972228265e-05,
"loss": 0.6766,
"step": 14000
},
{
"epoch": 1.58,
"learning_rate": 2.3714741498078457e-05,
"loss": 0.6428,
"step": 14500
},
{
"epoch": 1.63,
"learning_rate": 2.2808353273874268e-05,
"loss": 0.622,
"step": 15000
},
{
"epoch": 1.69,
"learning_rate": 2.1901965049670075e-05,
"loss": 0.6313,
"step": 15500
},
{
"epoch": 1.74,
"learning_rate": 2.0995576825465886e-05,
"loss": 0.6466,
"step": 16000
},
{
"epoch": 1.79,
"learning_rate": 2.0089188601261694e-05,
"loss": 0.6203,
"step": 16500
},
{
"epoch": 1.85,
"learning_rate": 1.91828003770575e-05,
"loss": 0.6103,
"step": 17000
},
{
"epoch": 1.9,
"learning_rate": 1.8276412152853312e-05,
"loss": 0.6085,
"step": 17500
},
{
"epoch": 1.96,
"learning_rate": 1.737002392864912e-05,
"loss": 0.6039,
"step": 18000
},
{
"epoch": 2.01,
"learning_rate": 1.646363570444493e-05,
"loss": 0.5586,
"step": 18500
},
{
"epoch": 2.07,
"learning_rate": 1.5557247480240738e-05,
"loss": 0.4023,
"step": 19000
},
{
"epoch": 2.12,
"learning_rate": 1.4650859256036547e-05,
"loss": 0.39,
"step": 19500
},
{
"epoch": 2.18,
"learning_rate": 1.3744471031832355e-05,
"loss": 0.3777,
"step": 20000
},
{
"epoch": 2.23,
"learning_rate": 1.2838082807628162e-05,
"loss": 0.3958,
"step": 20500
},
{
"epoch": 2.28,
"learning_rate": 1.1931694583423973e-05,
"loss": 0.4169,
"step": 21000
},
{
"epoch": 2.34,
"learning_rate": 1.1025306359219782e-05,
"loss": 0.3802,
"step": 21500
},
{
"epoch": 2.39,
"learning_rate": 1.011891813501559e-05,
"loss": 0.3793,
"step": 22000
},
{
"epoch": 2.45,
"learning_rate": 9.212529910811399e-06,
"loss": 0.4118,
"step": 22500
},
{
"epoch": 2.5,
"learning_rate": 8.306141686607208e-06,
"loss": 0.3776,
"step": 23000
},
{
"epoch": 2.56,
"learning_rate": 7.399753462403016e-06,
"loss": 0.3892,
"step": 23500
},
{
"epoch": 2.61,
"learning_rate": 6.493365238198826e-06,
"loss": 0.3759,
"step": 24000
},
{
"epoch": 2.66,
"learning_rate": 5.586977013994634e-06,
"loss": 0.3663,
"step": 24500
},
{
"epoch": 2.72,
"learning_rate": 4.680588789790443e-06,
"loss": 0.3836,
"step": 25000
},
{
"epoch": 2.77,
"learning_rate": 3.7742005655862523e-06,
"loss": 0.3731,
"step": 25500
},
{
"epoch": 2.83,
"learning_rate": 2.8678123413820606e-06,
"loss": 0.3657,
"step": 26000
},
{
"epoch": 2.88,
"learning_rate": 1.96142411717787e-06,
"loss": 0.3635,
"step": 26500
},
{
"epoch": 2.94,
"learning_rate": 1.0550358929736786e-06,
"loss": 0.3885,
"step": 27000
},
{
"epoch": 2.99,
"learning_rate": 1.4864766876948735e-07,
"loss": 0.3573,
"step": 27500
},
{
"epoch": 3.0,
"step": 27582,
"total_flos": 2.836008195999293e+16,
"train_loss": 0.6582008492790169,
"train_runtime": 15084.1881,
"train_samples_per_second": 14.627,
"train_steps_per_second": 1.829
}
],
"logging_steps": 500,
"max_steps": 27582,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 2.836008195999293e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}