{
  "best_metric": 0.7445234060287476,
  "best_model_checkpoint": "/home/shared/dt01/temutauro/ccasimiro/berta/src/finetuning/tc/roberta-base-ca-cased-tecla/checkpoint-34440",
  "epoch": 10.0,
  "global_step": 34440,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.15,
      "learning_rate": 4.9274099883855984e-05,
      "loss": 1.3202,
      "step": 500
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.8548199767711965e-05,
      "loss": 0.9902,
      "step": 1000
    },
    {
      "epoch": 0.44,
      "learning_rate": 4.782229965156795e-05,
      "loss": 0.9309,
      "step": 1500
    },
    {
      "epoch": 0.58,
      "learning_rate": 4.709639953542393e-05,
      "loss": 0.9006,
      "step": 2000
    },
    {
      "epoch": 0.73,
      "learning_rate": 4.637049941927991e-05,
      "loss": 0.8717,
      "step": 2500
    },
    {
      "epoch": 0.87,
      "learning_rate": 4.564459930313589e-05,
      "loss": 0.8612,
      "step": 3000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.6943275928497314,
      "eval_loss": 0.8295034766197205,
      "eval_runtime": 288.6692,
      "eval_samples_per_second": 47.757,
      "step": 3444
    },
    {
      "epoch": 1.02,
      "learning_rate": 4.491869918699187e-05,
      "loss": 0.8394,
      "step": 3500
    },
    {
      "epoch": 1.16,
      "learning_rate": 4.4192799070847854e-05,
      "loss": 0.7212,
      "step": 4000
    },
    {
      "epoch": 1.31,
      "learning_rate": 4.3466898954703835e-05,
      "loss": 0.7093,
      "step": 4500
    },
    {
      "epoch": 1.45,
      "learning_rate": 4.2740998838559817e-05,
      "loss": 0.6932,
      "step": 5000
    },
    {
      "epoch": 1.6,
      "learning_rate": 4.20150987224158e-05,
      "loss": 0.7032,
      "step": 5500
    },
    {
      "epoch": 1.74,
      "learning_rate": 4.128919860627178e-05,
      "loss": 0.6976,
      "step": 6000
    },
    {
      "epoch": 1.89,
      "learning_rate": 4.056329849012776e-05,
      "loss": 0.6866,
      "step": 6500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.7147105932235718,
      "eval_loss": 0.7905623912811279,
      "eval_runtime": 288.7903,
      "eval_samples_per_second": 47.737,
      "step": 6888
    },
    {
      "epoch": 2.03,
      "learning_rate": 3.983739837398374e-05,
      "loss": 0.648,
      "step": 7000
    },
    {
      "epoch": 2.18,
      "learning_rate": 3.9111498257839724e-05,
      "loss": 0.504,
      "step": 7500
    },
    {
      "epoch": 2.32,
      "learning_rate": 3.8385598141695705e-05,
      "loss": 0.5056,
      "step": 8000
    },
    {
      "epoch": 2.47,
      "learning_rate": 3.7659698025551686e-05,
      "loss": 0.5104,
      "step": 8500
    },
    {
      "epoch": 2.61,
      "learning_rate": 3.693379790940767e-05,
      "loss": 0.5021,
      "step": 9000
    },
    {
      "epoch": 2.76,
      "learning_rate": 3.620789779326365e-05,
      "loss": 0.502,
      "step": 9500
    },
    {
      "epoch": 2.9,
      "learning_rate": 3.548199767711963e-05,
      "loss": 0.501,
      "step": 10000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.7299433946609497,
      "eval_loss": 0.8311841487884521,
      "eval_runtime": 289.0756,
      "eval_samples_per_second": 47.69,
      "step": 10332
    },
    {
      "epoch": 3.05,
      "learning_rate": 3.475609756097561e-05,
      "loss": 0.44,
      "step": 10500
    },
    {
      "epoch": 3.19,
      "learning_rate": 3.4030197444831594e-05,
      "loss": 0.3205,
      "step": 11000
    },
    {
      "epoch": 3.34,
      "learning_rate": 3.3304297328687575e-05,
      "loss": 0.3217,
      "step": 11500
    },
    {
      "epoch": 3.48,
      "learning_rate": 3.2578397212543556e-05,
      "loss": 0.3295,
      "step": 12000
    },
    {
      "epoch": 3.63,
      "learning_rate": 3.185249709639954e-05,
      "loss": 0.3314,
      "step": 12500
    },
    {
      "epoch": 3.77,
      "learning_rate": 3.112659698025552e-05,
      "loss": 0.3384,
      "step": 13000
    },
    {
      "epoch": 3.92,
      "learning_rate": 3.04006968641115e-05,
      "loss": 0.3415,
      "step": 13500
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.722907304763794,
      "eval_loss": 0.9545897245407104,
      "eval_runtime": 288.9468,
      "eval_samples_per_second": 47.711,
      "step": 13776
    },
    {
      "epoch": 4.07,
      "learning_rate": 2.9674796747967482e-05,
      "loss": 0.2816,
      "step": 14000
    },
    {
      "epoch": 4.21,
      "learning_rate": 2.8948896631823464e-05,
      "loss": 0.1995,
      "step": 14500
    },
    {
      "epoch": 4.36,
      "learning_rate": 2.8222996515679445e-05,
      "loss": 0.2125,
      "step": 15000
    },
    {
      "epoch": 4.5,
      "learning_rate": 2.7497096399535426e-05,
      "loss": 0.2144,
      "step": 15500
    },
    {
      "epoch": 4.65,
      "learning_rate": 2.6771196283391408e-05,
      "loss": 0.2109,
      "step": 16000
    },
    {
      "epoch": 4.79,
      "learning_rate": 2.604529616724739e-05,
      "loss": 0.2238,
      "step": 16500
    },
    {
      "epoch": 4.94,
      "learning_rate": 2.5319396051103367e-05,
      "loss": 0.223,
      "step": 17000
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.7305237054824829,
      "eval_loss": 1.0980908870697021,
      "eval_runtime": 288.7866,
      "eval_samples_per_second": 47.738,
      "step": 17220
    },
    {
      "epoch": 5.08,
      "learning_rate": 2.4593495934959352e-05,
      "loss": 0.1694,
      "step": 17500
    },
    {
      "epoch": 5.23,
      "learning_rate": 2.3867595818815333e-05,
      "loss": 0.1347,
      "step": 18000
    },
    {
      "epoch": 5.37,
      "learning_rate": 2.314169570267131e-05,
      "loss": 0.1356,
      "step": 18500
    },
    {
      "epoch": 5.52,
      "learning_rate": 2.2415795586527293e-05,
      "loss": 0.1274,
      "step": 19000
    },
    {
      "epoch": 5.66,
      "learning_rate": 2.1689895470383274e-05,
      "loss": 0.1416,
      "step": 19500
    },
    {
      "epoch": 5.81,
      "learning_rate": 2.096399535423926e-05,
      "loss": 0.1375,
      "step": 20000
    },
    {
      "epoch": 5.95,
      "learning_rate": 2.023809523809524e-05,
      "loss": 0.15,
      "step": 20500
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.7300159335136414,
      "eval_loss": 1.323264479637146,
      "eval_runtime": 289.0229,
      "eval_samples_per_second": 47.699,
      "step": 20664
    },
    {
      "epoch": 6.1,
      "learning_rate": 1.9512195121951222e-05,
      "loss": 0.1056,
      "step": 21000
    },
    {
      "epoch": 6.24,
      "learning_rate": 1.8786295005807203e-05,
      "loss": 0.0862,
      "step": 21500
    },
    {
      "epoch": 6.39,
      "learning_rate": 1.806039488966318e-05,
      "loss": 0.0916,
      "step": 22000
    },
    {
      "epoch": 6.53,
      "learning_rate": 1.7334494773519163e-05,
      "loss": 0.0954,
      "step": 22500
    },
    {
      "epoch": 6.68,
      "learning_rate": 1.6608594657375144e-05,
      "loss": 0.0956,
      "step": 23000
    },
    {
      "epoch": 6.82,
      "learning_rate": 1.5882694541231126e-05,
      "loss": 0.0891,
      "step": 23500
    },
    {
      "epoch": 6.97,
      "learning_rate": 1.5156794425087109e-05,
      "loss": 0.0935,
      "step": 24000
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.7382127046585083,
      "eval_loss": 1.6066738367080688,
      "eval_runtime": 288.9144,
      "eval_samples_per_second": 47.717,
      "step": 24108
    },
    {
      "epoch": 7.11,
      "learning_rate": 1.443089430894309e-05,
      "loss": 0.0559,
      "step": 24500
    },
    {
      "epoch": 7.26,
      "learning_rate": 1.3704994192799073e-05,
      "loss": 0.0581,
      "step": 25000
    },
    {
      "epoch": 7.4,
      "learning_rate": 1.2979094076655051e-05,
      "loss": 0.06,
      "step": 25500
    },
    {
      "epoch": 7.55,
      "learning_rate": 1.2253193960511034e-05,
      "loss": 0.055,
      "step": 26000
    },
    {
      "epoch": 7.69,
      "learning_rate": 1.1527293844367016e-05,
      "loss": 0.0677,
      "step": 26500
    },
    {
      "epoch": 7.84,
      "learning_rate": 1.0801393728222997e-05,
      "loss": 0.0552,
      "step": 27000
    },
    {
      "epoch": 7.98,
      "learning_rate": 1.0075493612078979e-05,
      "loss": 0.0642,
      "step": 27500
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.7361816167831421,
      "eval_loss": 1.801051378250122,
      "eval_runtime": 288.9587,
      "eval_samples_per_second": 47.709,
      "step": 27552
    },
    {
      "epoch": 8.13,
      "learning_rate": 9.34959349593496e-06,
      "loss": 0.0399,
      "step": 28000
    },
    {
      "epoch": 8.28,
      "learning_rate": 8.623693379790942e-06,
      "loss": 0.0324,
      "step": 28500
    },
    {
      "epoch": 8.42,
      "learning_rate": 7.897793263646923e-06,
      "loss": 0.0383,
      "step": 29000
    },
    {
      "epoch": 8.57,
      "learning_rate": 7.1718931475029035e-06,
      "loss": 0.0396,
      "step": 29500
    },
    {
      "epoch": 8.71,
      "learning_rate": 6.445993031358885e-06,
      "loss": 0.038,
      "step": 30000
    },
    {
      "epoch": 8.86,
      "learning_rate": 5.720092915214867e-06,
      "loss": 0.0355,
      "step": 30500
    },
    {
      "epoch": 9.0,
      "eval_accuracy": 0.7436529994010925,
      "eval_loss": 1.9809679985046387,
      "eval_runtime": 288.9869,
      "eval_samples_per_second": 47.705,
      "step": 30996
    },
    {
      "epoch": 9.0,
      "learning_rate": 4.994192799070848e-06,
      "loss": 0.0348,
      "step": 31000
    },
    {
      "epoch": 9.15,
      "learning_rate": 4.26829268292683e-06,
      "loss": 0.0194,
      "step": 31500
    },
    {
      "epoch": 9.29,
      "learning_rate": 3.542392566782811e-06,
      "loss": 0.0181,
      "step": 32000
    },
    {
      "epoch": 9.44,
      "learning_rate": 2.8164924506387924e-06,
      "loss": 0.0158,
      "step": 32500
    },
    {
      "epoch": 9.58,
      "learning_rate": 2.090592334494774e-06,
      "loss": 0.0201,
      "step": 33000
    },
    {
      "epoch": 9.73,
      "learning_rate": 1.364692218350755e-06,
      "loss": 0.0218,
      "step": 33500
    },
    {
      "epoch": 9.87,
      "learning_rate": 6.387921022067365e-07,
      "loss": 0.0203,
      "step": 34000
    },
    {
      "epoch": 10.0,
      "eval_accuracy": 0.7445234060287476,
      "eval_loss": 2.0729739665985107,
      "eval_runtime": 288.9098,
      "eval_samples_per_second": 47.717,
      "step": 34440
    },
    {
      "epoch": 10.0,
      "step": 34440,
      "total_flos": 0,
      "train_runtime": 75009.1041,
      "train_samples_per_second": 0.459
    }
  ],
  "max_steps": 34440,
  "num_train_epochs": 10,
  "total_flos": 0,
  "trial_name": null,
  "trial_params": null
}