{
  "best_metric": 0.9099925797674994,
  "best_model_checkpoint": "./fine-tune/bert-base-uncased/qqp/checkpoint-34113",
  "epoch": 3.0,
  "global_step": 34113,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "learning_rate": 1.9706856623574592e-05,
      "loss": 0.4417,
      "step": 500
    },
    {
      "epoch": 0.09,
      "learning_rate": 1.9413713247149183e-05,
      "loss": 0.3623,
      "step": 1000
    },
    {
      "epoch": 0.13,
      "learning_rate": 1.9120569870723774e-05,
      "loss": 0.3469,
      "step": 1500
    },
    {
      "epoch": 0.18,
      "learning_rate": 1.882742649429836e-05,
      "loss": 0.3275,
      "step": 2000
    },
    {
      "epoch": 0.22,
      "learning_rate": 1.8534283117872952e-05,
      "loss": 0.315,
      "step": 2500
    },
    {
      "epoch": 0.26,
      "learning_rate": 1.8241139741447543e-05,
      "loss": 0.3067,
      "step": 3000
    },
    {
      "epoch": 0.31,
      "learning_rate": 1.7947996365022134e-05,
      "loss": 0.3031,
      "step": 3500
    },
    {
      "epoch": 0.35,
      "learning_rate": 1.7654852988596725e-05,
      "loss": 0.2986,
      "step": 4000
    },
    {
      "epoch": 0.4,
      "learning_rate": 1.7361709612171312e-05,
      "loss": 0.2994,
      "step": 4500
    },
    {
      "epoch": 0.44,
      "learning_rate": 1.7068566235745903e-05,
      "loss": 0.2825,
      "step": 5000
    },
    {
      "epoch": 0.48,
      "learning_rate": 1.6775422859320497e-05,
      "loss": 0.2814,
      "step": 5500
    },
    {
      "epoch": 0.53,
      "learning_rate": 1.6482279482895085e-05,
      "loss": 0.2746,
      "step": 6000
    },
    {
      "epoch": 0.57,
      "learning_rate": 1.6189136106469676e-05,
      "loss": 0.2706,
      "step": 6500
    },
    {
      "epoch": 0.62,
      "learning_rate": 1.5895992730044266e-05,
      "loss": 0.2669,
      "step": 7000
    },
    {
      "epoch": 0.66,
      "learning_rate": 1.5602849353618857e-05,
      "loss": 0.264,
      "step": 7500
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.5309705977193448e-05,
      "loss": 0.2746,
      "step": 8000
    },
    {
      "epoch": 0.75,
      "learning_rate": 1.5016562600768037e-05,
      "loss": 0.2598,
      "step": 8500
    },
    {
      "epoch": 0.79,
      "learning_rate": 1.4723419224342628e-05,
      "loss": 0.2585,
      "step": 9000
    },
    {
      "epoch": 0.84,
      "learning_rate": 1.4430275847917217e-05,
      "loss": 0.2528,
      "step": 9500
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.4137132471491808e-05,
      "loss": 0.2501,
      "step": 10000
    },
    {
      "epoch": 0.92,
      "learning_rate": 1.3843989095066399e-05,
      "loss": 0.2445,
      "step": 10500
    },
    {
      "epoch": 0.97,
      "learning_rate": 1.3550845718640988e-05,
      "loss": 0.2511,
      "step": 11000
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.8968587682414049,
      "eval_combined_score": 0.8805007987294208,
      "eval_f1": 0.8641428292174367,
      "eval_loss": 0.24689364433288574,
      "eval_runtime": 193.2661,
      "eval_samples_per_second": 209.193,
      "eval_steps_per_second": 26.15,
      "step": 11371
    },
    {
      "epoch": 1.01,
      "learning_rate": 1.3257702342215579e-05,
      "loss": 0.2299,
      "step": 11500
    },
    {
      "epoch": 1.06,
      "learning_rate": 1.2964558965790168e-05,
      "loss": 0.1843,
      "step": 12000
    },
    {
      "epoch": 1.1,
      "learning_rate": 1.2671415589364759e-05,
      "loss": 0.1884,
      "step": 12500
    },
    {
      "epoch": 1.14,
      "learning_rate": 1.2378272212939351e-05,
      "loss": 0.1784,
      "step": 13000
    },
    {
      "epoch": 1.19,
      "learning_rate": 1.208512883651394e-05,
      "loss": 0.1761,
      "step": 13500
    },
    {
      "epoch": 1.23,
      "learning_rate": 1.1791985460088531e-05,
      "loss": 0.1846,
      "step": 14000
    },
    {
      "epoch": 1.28,
      "learning_rate": 1.149884208366312e-05,
      "loss": 0.188,
      "step": 14500
    },
    {
      "epoch": 1.32,
      "learning_rate": 1.1205698707237711e-05,
      "loss": 0.1926,
      "step": 15000
    },
    {
      "epoch": 1.36,
      "learning_rate": 1.0912555330812302e-05,
      "loss": 0.1854,
      "step": 15500
    },
    {
      "epoch": 1.41,
      "learning_rate": 1.0619411954386891e-05,
      "loss": 0.1867,
      "step": 16000
    },
    {
      "epoch": 1.45,
      "learning_rate": 1.0326268577961482e-05,
      "loss": 0.1839,
      "step": 16500
    },
    {
      "epoch": 1.5,
      "learning_rate": 1.0033125201536071e-05,
      "loss": 0.1803,
      "step": 17000
    },
    {
      "epoch": 1.54,
      "learning_rate": 9.739981825110662e-06,
      "loss": 0.1841,
      "step": 17500
    },
    {
      "epoch": 1.58,
      "learning_rate": 9.446838448685253e-06,
      "loss": 0.1845,
      "step": 18000
    },
    {
      "epoch": 1.63,
      "learning_rate": 9.153695072259843e-06,
      "loss": 0.1825,
      "step": 18500
    },
    {
      "epoch": 1.67,
      "learning_rate": 8.860551695834433e-06,
      "loss": 0.1796,
      "step": 19000
    },
    {
      "epoch": 1.71,
      "learning_rate": 8.567408319409023e-06,
      "loss": 0.1759,
      "step": 19500
    },
    {
      "epoch": 1.76,
      "learning_rate": 8.274264942983614e-06,
      "loss": 0.1739,
      "step": 20000
    },
    {
      "epoch": 1.8,
      "learning_rate": 7.981121566558205e-06,
      "loss": 0.1713,
      "step": 20500
    },
    {
      "epoch": 1.85,
      "learning_rate": 7.687978190132794e-06,
      "loss": 0.1761,
      "step": 21000
    },
    {
      "epoch": 1.89,
      "learning_rate": 7.394834813707384e-06,
      "loss": 0.1774,
      "step": 21500
    },
    {
      "epoch": 1.93,
      "learning_rate": 7.101691437281976e-06,
      "loss": 0.1792,
      "step": 22000
    },
    {
      "epoch": 1.98,
      "learning_rate": 6.808548060856566e-06,
      "loss": 0.1763,
      "step": 22500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9070986890922582,
      "eval_combined_score": 0.8919997902724007,
      "eval_f1": 0.8769008914525432,
      "eval_loss": 0.2379385530948639,
      "eval_runtime": 192.8495,
      "eval_samples_per_second": 209.645,
      "eval_steps_per_second": 26.207,
      "step": 22742
    },
    {
      "epoch": 2.02,
      "learning_rate": 6.515404684431156e-06,
      "loss": 0.1462,
      "step": 23000
    },
    {
      "epoch": 2.07,
      "learning_rate": 6.222261308005746e-06,
      "loss": 0.1244,
      "step": 23500
    },
    {
      "epoch": 2.11,
      "learning_rate": 5.929117931580336e-06,
      "loss": 0.118,
      "step": 24000
    },
    {
      "epoch": 2.15,
      "learning_rate": 5.6359745551549274e-06,
      "loss": 0.1237,
      "step": 24500
    },
    {
      "epoch": 2.2,
      "learning_rate": 5.3428311787295174e-06,
      "loss": 0.1216,
      "step": 25000
    },
    {
      "epoch": 2.24,
      "learning_rate": 5.049687802304107e-06,
      "loss": 0.1242,
      "step": 25500
    },
    {
      "epoch": 2.29,
      "learning_rate": 4.756544425878697e-06,
      "loss": 0.1199,
      "step": 26000
    },
    {
      "epoch": 2.33,
      "learning_rate": 4.463401049453288e-06,
      "loss": 0.1209,
      "step": 26500
    },
    {
      "epoch": 2.37,
      "learning_rate": 4.170257673027878e-06,
      "loss": 0.1187,
      "step": 27000
    },
    {
      "epoch": 2.42,
      "learning_rate": 3.877114296602469e-06,
      "loss": 0.1149,
      "step": 27500
    },
    {
      "epoch": 2.46,
      "learning_rate": 3.583970920177059e-06,
      "loss": 0.1144,
      "step": 28000
    },
    {
      "epoch": 2.51,
      "learning_rate": 3.290827543751649e-06,
      "loss": 0.1261,
      "step": 28500
    },
    {
      "epoch": 2.55,
      "learning_rate": 2.9976841673262398e-06,
      "loss": 0.1227,
      "step": 29000
    },
    {
      "epoch": 2.59,
      "learning_rate": 2.7045407909008298e-06,
      "loss": 0.1276,
      "step": 29500
    },
    {
      "epoch": 2.64,
      "learning_rate": 2.41139741447542e-06,
      "loss": 0.1181,
      "step": 30000
    },
    {
      "epoch": 2.68,
      "learning_rate": 2.1182540380500106e-06,
      "loss": 0.1184,
      "step": 30500
    },
    {
      "epoch": 2.73,
      "learning_rate": 1.825110661624601e-06,
      "loss": 0.1253,
      "step": 31000
    },
    {
      "epoch": 2.77,
      "learning_rate": 1.531967285199191e-06,
      "loss": 0.1122,
      "step": 31500
    },
    {
      "epoch": 2.81,
      "learning_rate": 1.2388239087737813e-06,
      "loss": 0.1077,
      "step": 32000
    },
    {
      "epoch": 2.86,
      "learning_rate": 9.456805323483717e-07,
      "loss": 0.1211,
      "step": 32500
    },
    {
      "epoch": 2.9,
      "learning_rate": 6.525371559229619e-07,
      "loss": 0.1197,
      "step": 33000
    },
    {
      "epoch": 2.95,
      "learning_rate": 3.5939377949755233e-07,
      "loss": 0.1148,
      "step": 33500
    },
    {
      "epoch": 2.99,
      "learning_rate": 6.62504030721426e-08,
      "loss": 0.1221,
      "step": 34000
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9099925797674994,
      "eval_combined_score": 0.8944088968565445,
      "eval_f1": 0.8788252139455897,
      "eval_loss": 0.2828538715839386,
      "eval_runtime": 194.3457,
      "eval_samples_per_second": 208.031,
      "eval_steps_per_second": 26.005,
      "step": 34113
    },
    {
      "epoch": 3.0,
      "step": 34113,
      "total_flos": 7.179892878638592e+16,
      "train_loss": 0.19735700893404895,
      "train_runtime": 13532.9561,
      "train_samples_per_second": 80.658,
      "train_steps_per_second": 2.521
    }
  ],
  "max_steps": 34113,
  "num_train_epochs": 3,
  "total_flos": 7.179892878638592e+16,
  "trial_name": null,
  "trial_params": null
}