{
  "best_metric": 2.2508649826049805,
  "best_model_checkpoint": "hBERTv1_new_pretrain_w_init_48_ver2_stsb/checkpoint-540",
  "epoch": 11.0,
  "eval_steps": 500,
  "global_step": 990,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "learning_rate": 3.733333333333334e-05,
      "loss": 2.3716,
      "step": 90
    },
    {
      "epoch": 1.0,
      "eval_combined_score": 0.09954876427078817,
      "eval_loss": 2.4198310375213623,
      "eval_pearson": 0.1235081678469039,
      "eval_runtime": 7.919,
      "eval_samples_per_second": 189.417,
      "eval_spearmanr": 0.07558936069467244,
      "eval_steps_per_second": 3.031,
      "step": 90
    },
    {
      "epoch": 2.0,
      "learning_rate": 3.466666666666667e-05,
      "loss": 2.1648,
      "step": 180
    },
    {
      "epoch": 2.0,
      "eval_combined_score": 0.059901129208988935,
      "eval_loss": 2.421804904937744,
      "eval_pearson": 0.05915702497074951,
      "eval_runtime": 7.9228,
      "eval_samples_per_second": 189.328,
      "eval_spearmanr": 0.060645233447228365,
      "eval_steps_per_second": 3.029,
      "step": 180
    },
    {
      "epoch": 3.0,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 2.1915,
      "step": 270
    },
    {
      "epoch": 3.0,
      "eval_combined_score": 0.10509199818892286,
      "eval_loss": 2.5305275917053223,
      "eval_pearson": 0.11429814840247612,
      "eval_runtime": 7.9275,
      "eval_samples_per_second": 189.214,
      "eval_spearmanr": 0.09588584797536959,
      "eval_steps_per_second": 3.027,
      "step": 270
    },
    {
      "epoch": 4.0,
      "learning_rate": 2.9333333333333333e-05,
      "loss": 2.1855,
      "step": 360
    },
    {
      "epoch": 4.0,
      "eval_combined_score": 0.10434278350873159,
      "eval_loss": 2.4912118911743164,
      "eval_pearson": 0.11175276310573931,
      "eval_runtime": 7.9356,
      "eval_samples_per_second": 189.022,
      "eval_spearmanr": 0.09693280391172387,
      "eval_steps_per_second": 3.024,
      "step": 360
    },
    {
      "epoch": 5.0,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 2.1858,
      "step": 450
    },
    {
      "epoch": 5.0,
      "eval_combined_score": 0.10867629190279143,
      "eval_loss": 2.353940963745117,
      "eval_pearson": 0.11303164261998537,
      "eval_runtime": 7.8971,
      "eval_samples_per_second": 189.944,
      "eval_spearmanr": 0.10432094118559751,
      "eval_steps_per_second": 3.039,
      "step": 450
    },
    {
      "epoch": 6.0,
      "learning_rate": 2.4e-05,
      "loss": 2.1818,
      "step": 540
    },
    {
      "epoch": 6.0,
      "eval_combined_score": 0.12661347630524494,
      "eval_loss": 2.2508649826049805,
      "eval_pearson": 0.12848032160953568,
      "eval_runtime": 7.8875,
      "eval_samples_per_second": 190.175,
      "eval_spearmanr": 0.12474663100095418,
      "eval_steps_per_second": 3.043,
      "step": 540
    },
    {
      "epoch": 7.0,
      "learning_rate": 2.1333333333333335e-05,
      "loss": 2.2562,
      "step": 630
    },
    {
      "epoch": 7.0,
      "eval_combined_score": 0.10085775212511634,
      "eval_loss": 2.330181360244751,
      "eval_pearson": 0.10432624437463486,
      "eval_runtime": 7.8945,
      "eval_samples_per_second": 190.005,
      "eval_spearmanr": 0.09738925987559782,
      "eval_steps_per_second": 3.04,
      "step": 630
    },
    {
      "epoch": 8.0,
      "learning_rate": 1.866666666666667e-05,
      "loss": 2.2299,
      "step": 720
    },
    {
      "epoch": 8.0,
      "eval_combined_score": 0.17028955228719306,
      "eval_loss": 2.3748996257781982,
      "eval_pearson": 0.1984074571487062,
      "eval_runtime": 7.8956,
      "eval_samples_per_second": 189.98,
      "eval_spearmanr": 0.14217164742567992,
      "eval_steps_per_second": 3.04,
      "step": 720
    },
    {
      "epoch": 9.0,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 2.0676,
      "step": 810
    },
    {
      "epoch": 9.0,
      "eval_combined_score": 0.13143073261190957,
      "eval_loss": 2.3883156776428223,
      "eval_pearson": 0.12998858260155782,
      "eval_runtime": 7.9009,
      "eval_samples_per_second": 189.851,
      "eval_spearmanr": 0.13287288262226135,
      "eval_steps_per_second": 3.038,
      "step": 810
    },
    {
      "epoch": 10.0,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 1.926,
      "step": 900
    },
    {
      "epoch": 10.0,
      "eval_combined_score": 0.12458804638022844,
      "eval_loss": 2.588383674621582,
      "eval_pearson": 0.12590757541676778,
      "eval_runtime": 7.9206,
      "eval_samples_per_second": 189.38,
      "eval_spearmanr": 0.1232685173436891,
      "eval_steps_per_second": 3.03,
      "step": 900
    },
    {
      "epoch": 11.0,
      "learning_rate": 1.0666666666666667e-05,
      "loss": 1.7701,
      "step": 990
    },
    {
      "epoch": 11.0,
      "eval_combined_score": 0.1984576696603884,
      "eval_loss": 2.3776161670684814,
      "eval_pearson": 0.19105961985541245,
      "eval_runtime": 7.9395,
      "eval_samples_per_second": 188.928,
      "eval_spearmanr": 0.20585571946536435,
      "eval_steps_per_second": 3.023,
      "step": 990
    },
    {
      "epoch": 11.0,
      "step": 990,
      "total_flos": 9365417243967488.0,
      "train_loss": 2.139166490959399,
      "train_runtime": 1123.3813,
      "train_samples_per_second": 76.764,
      "train_steps_per_second": 1.202
    }
  ],
  "logging_steps": 1,
  "max_steps": 1350,
  "num_train_epochs": 15,
  "save_steps": 500,
  "total_flos": 9365417243967488.0,
  "trial_name": null,
  "trial_params": null
}