{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9957679860592482,
  "eval_steps": 250,
  "global_step": 3500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0028450513887407092,
      "grad_norm": 0.2809712886810303,
      "learning_rate": 4.445629945763315e-08,
      "loss": 0.0202,
      "step": 10
    },
    {
      "epoch": 0.0056901027774814185,
      "grad_norm": 0.2773303985595703,
      "learning_rate": 8.89125989152663e-08,
      "loss": 0.0184,
      "step": 20
    },
    {
      "epoch": 0.008535154166222128,
      "grad_norm": 0.28651171922683716,
      "learning_rate": 1.3336889837289946e-07,
      "loss": 0.018,
      "step": 30
    },
    {
      "epoch": 0.011380205554962837,
      "grad_norm": 0.24912181496620178,
      "learning_rate": 1.778251978305326e-07,
      "loss": 0.0173,
      "step": 40
    },
    {
      "epoch": 0.014225256943703546,
      "grad_norm": 0.2654182016849518,
      "learning_rate": 2.2228149728816572e-07,
      "loss": 0.0193,
      "step": 50
    },
    {
      "epoch": 0.017070308332444255,
      "grad_norm": 0.2645716965198517,
      "learning_rate": 2.667377967457989e-07,
      "loss": 0.0158,
      "step": 60
    },
    {
      "epoch": 0.019915359721184963,
      "grad_norm": 0.28719639778137207,
      "learning_rate": 3.1119409620343207e-07,
      "loss": 0.016,
      "step": 70
    },
    {
      "epoch": 0.022760411109925674,
      "grad_norm": 0.24205148220062256,
      "learning_rate": 3.556503956610652e-07,
      "loss": 0.0139,
      "step": 80
    },
    {
      "epoch": 0.02560546249866638,
      "grad_norm": 0.1541828215122223,
      "learning_rate": 4.0010669511869836e-07,
      "loss": 0.0143,
      "step": 90
    },
    {
      "epoch": 0.028450513887407092,
      "grad_norm": 0.24321648478507996,
      "learning_rate": 4.4456299457633145e-07,
      "loss": 0.0138,
      "step": 100
    },
    {
      "epoch": 0.0312955652761478,
      "grad_norm": 0.13496069610118866,
      "learning_rate": 4.890192940339646e-07,
      "loss": 0.0127,
      "step": 110
    },
    {
      "epoch": 0.03414061666488851,
      "grad_norm": 0.1574280858039856,
      "learning_rate": 5.334755934915978e-07,
      "loss": 0.0115,
      "step": 120
    },
    {
      "epoch": 0.03698566805362922,
      "grad_norm": 0.16117185354232788,
      "learning_rate": 5.779318929492309e-07,
      "loss": 0.0117,
      "step": 130
    },
    {
      "epoch": 0.039830719442369926,
      "grad_norm": 0.11417609453201294,
      "learning_rate": 6.223881924068641e-07,
      "loss": 0.0111,
      "step": 140
    },
    {
      "epoch": 0.04267577083111064,
      "grad_norm": 0.13975922763347626,
      "learning_rate": 6.668444918644972e-07,
      "loss": 0.0111,
      "step": 150
    },
    {
      "epoch": 0.04552082221985135,
      "grad_norm": 0.10122673958539963,
      "learning_rate": 7.113007913221304e-07,
      "loss": 0.0106,
      "step": 160
    },
    {
      "epoch": 0.048365873608592055,
      "grad_norm": 0.10620034486055374,
      "learning_rate": 7.557570907797635e-07,
      "loss": 0.01,
      "step": 170
    },
    {
      "epoch": 0.05121092499733276,
      "grad_norm": 0.10404311865568161,
      "learning_rate": 8.002133902373967e-07,
      "loss": 0.0103,
      "step": 180
    },
    {
      "epoch": 0.05405597638607347,
      "grad_norm": 0.09400220960378647,
      "learning_rate": 8.446696896950297e-07,
      "loss": 0.0106,
      "step": 190
    },
    {
      "epoch": 0.056901027774814185,
      "grad_norm": 0.0968412309885025,
      "learning_rate": 8.891259891526629e-07,
      "loss": 0.0102,
      "step": 200
    },
    {
      "epoch": 0.05974607916355489,
      "grad_norm": 0.10527963936328888,
      "learning_rate": 9.335822886102961e-07,
      "loss": 0.0103,
      "step": 210
    },
    {
      "epoch": 0.0625911305522956,
      "grad_norm": 0.11160361021757126,
      "learning_rate": 9.780385880679293e-07,
      "loss": 0.0109,
      "step": 220
    },
    {
      "epoch": 0.06543618194103631,
      "grad_norm": 0.09594683349132538,
      "learning_rate": 1.0224948875255625e-06,
      "loss": 0.0099,
      "step": 230
    },
    {
      "epoch": 0.06828123332977702,
      "grad_norm": 0.09191753715276718,
      "learning_rate": 1.0669511869831957e-06,
      "loss": 0.0086,
      "step": 240
    },
    {
      "epoch": 0.07112628471851773,
      "grad_norm": 0.10616082698106766,
      "learning_rate": 1.1114074864408287e-06,
      "loss": 0.01,
      "step": 250
    },
    {
      "epoch": 0.07112628471851773,
      "eval_loss": 0.04481230676174164,
      "eval_runtime": 8.9152,
      "eval_samples_per_second": 168.253,
      "eval_steps_per_second": 10.544,
      "eval_sts_dev_pearson_cosine": 0.7566652114171531,
      "eval_sts_dev_pearson_dot": 0.6030696212508195,
      "eval_sts_dev_pearson_euclidean": 0.723338411802702,
      "eval_sts_dev_pearson_manhattan": 0.7229820157274984,
      "eval_sts_dev_pearson_max": 0.7566652114171531,
      "eval_sts_dev_spearman_cosine": 0.7641649126837959,
      "eval_sts_dev_spearman_dot": 0.5924781561865081,
      "eval_sts_dev_spearman_euclidean": 0.7161218303280887,
      "eval_sts_dev_spearman_manhattan": 0.7161351439672354,
      "eval_sts_dev_spearman_max": 0.7641649126837959,
      "step": 250
    },
    {
      "epoch": 0.07397133610725844,
      "grad_norm": 0.09024439752101898,
      "learning_rate": 1.1558637858984619e-06,
      "loss": 0.0098,
      "step": 260
    },
    {
      "epoch": 0.07681638749599914,
      "grad_norm": 0.10580305010080338,
      "learning_rate": 1.200320085356095e-06,
      "loss": 0.0094,
      "step": 270
    },
    {
      "epoch": 0.07966143888473985,
      "grad_norm": 0.07469004392623901,
      "learning_rate": 1.2447763848137283e-06,
      "loss": 0.0097,
      "step": 280
    },
    {
      "epoch": 0.08250649027348056,
      "grad_norm": 0.08532268553972244,
      "learning_rate": 1.2892326842713615e-06,
      "loss": 0.0094,
      "step": 290
    },
    {
      "epoch": 0.08535154166222128,
      "grad_norm": 0.11069530993700027,
      "learning_rate": 1.3336889837289944e-06,
      "loss": 0.0095,
      "step": 300
    },
    {
      "epoch": 0.08819659305096199,
      "grad_norm": 0.14181849360466003,
      "learning_rate": 1.3781452831866276e-06,
      "loss": 0.0098,
      "step": 310
    },
    {
      "epoch": 0.0910416444397027,
      "grad_norm": 0.10291895270347595,
      "learning_rate": 1.4226015826442608e-06,
      "loss": 0.0092,
      "step": 320
    },
    {
      "epoch": 0.0938866958284434,
      "grad_norm": 0.09878888726234436,
      "learning_rate": 1.467057882101894e-06,
      "loss": 0.0095,
      "step": 330
    },
    {
      "epoch": 0.09673174721718411,
      "grad_norm": 0.11313822865486145,
      "learning_rate": 1.511514181559527e-06,
      "loss": 0.0103,
      "step": 340
    },
    {
      "epoch": 0.09957679860592482,
      "grad_norm": 0.10922758281230927,
      "learning_rate": 1.5559704810171602e-06,
      "loss": 0.0097,
      "step": 350
    },
    {
      "epoch": 0.10242184999466553,
      "grad_norm": 0.09477540105581284,
      "learning_rate": 1.6004267804747934e-06,
      "loss": 0.0091,
      "step": 360
    },
    {
      "epoch": 0.10526690138340623,
      "grad_norm": 0.11776648461818695,
      "learning_rate": 1.6448830799324264e-06,
      "loss": 0.0094,
      "step": 370
    },
    {
      "epoch": 0.10811195277214694,
      "grad_norm": 0.10813190042972565,
      "learning_rate": 1.6893393793900594e-06,
      "loss": 0.0088,
      "step": 380
    },
    {
      "epoch": 0.11095700416088766,
      "grad_norm": 0.0988766998052597,
      "learning_rate": 1.7337956788476928e-06,
      "loss": 0.009,
      "step": 390
    },
    {
      "epoch": 0.11380205554962837,
      "grad_norm": 0.11297037452459335,
      "learning_rate": 1.7782519783053258e-06,
      "loss": 0.0098,
      "step": 400
    },
    {
      "epoch": 0.11664710693836908,
      "grad_norm": 0.11383142322301865,
      "learning_rate": 1.8227082777629592e-06,
      "loss": 0.0083,
      "step": 410
    },
    {
      "epoch": 0.11949215832710978,
      "grad_norm": 0.09991955757141113,
      "learning_rate": 1.8671645772205922e-06,
      "loss": 0.0099,
      "step": 420
    },
    {
      "epoch": 0.12233720971585049,
      "grad_norm": 0.12355756759643555,
      "learning_rate": 1.911620876678225e-06,
      "loss": 0.0094,
      "step": 430
    },
    {
      "epoch": 0.1251822611045912,
      "grad_norm": 0.13183994591236115,
      "learning_rate": 1.9560771761358586e-06,
      "loss": 0.0092,
      "step": 440
    },
    {
      "epoch": 0.1280273124933319,
      "grad_norm": 0.09731684625148773,
      "learning_rate": 2.0005334755934916e-06,
      "loss": 0.009,
      "step": 450
    },
    {
      "epoch": 0.13087236388207263,
      "grad_norm": 0.10299917310476303,
      "learning_rate": 2.044989775051125e-06,
      "loss": 0.0088,
      "step": 460
    },
    {
      "epoch": 0.13371741527081332,
      "grad_norm": 0.10299093276262283,
      "learning_rate": 2.089446074508758e-06,
      "loss": 0.0092,
      "step": 470
    },
    {
      "epoch": 0.13656246665955404,
      "grad_norm": 0.10356424003839493,
      "learning_rate": 2.1339023739663914e-06,
      "loss": 0.0083,
      "step": 480
    },
    {
      "epoch": 0.13940751804829474,
      "grad_norm": 0.09914368391036987,
      "learning_rate": 2.1783586734240244e-06,
      "loss": 0.0089,
      "step": 490
    },
    {
      "epoch": 0.14225256943703546,
      "grad_norm": 0.09933792054653168,
      "learning_rate": 2.2228149728816573e-06,
      "loss": 0.0089,
      "step": 500
    },
    {
      "epoch": 0.14225256943703546,
      "eval_loss": 0.044373366981744766,
      "eval_runtime": 9.347,
      "eval_samples_per_second": 160.479,
      "eval_steps_per_second": 10.057,
      "eval_sts_dev_pearson_cosine": 0.7648120608553326,
      "eval_sts_dev_pearson_dot": 0.5968017368208518,
      "eval_sts_dev_pearson_euclidean": 0.7279404616700573,
      "eval_sts_dev_pearson_manhattan": 0.7275721595620177,
      "eval_sts_dev_pearson_max": 0.7648120608553326,
      "eval_sts_dev_spearman_cosine": 0.7725220548635785,
      "eval_sts_dev_spearman_dot": 0.5821758472972447,
      "eval_sts_dev_spearman_euclidean": 0.7216429612052142,
      "eval_sts_dev_spearman_manhattan": 0.7215280543286006,
      "eval_sts_dev_spearman_max": 0.7725220548635785,
      "step": 500
    },
    {
      "epoch": 0.14509762082577615,
      "grad_norm": 0.09889407455921173,
      "learning_rate": 2.2672712723392908e-06,
      "loss": 0.0095,
      "step": 510
    },
    {
      "epoch": 0.14794267221451687,
      "grad_norm": 0.08651946485042572,
      "learning_rate": 2.3117275717969237e-06,
      "loss": 0.0095,
      "step": 520
    },
    {
      "epoch": 0.1507877236032576,
      "grad_norm": 0.11702941358089447,
      "learning_rate": 2.356183871254557e-06,
      "loss": 0.0091,
      "step": 530
    },
    {
      "epoch": 0.1536327749919983,
      "grad_norm": 0.0989551842212677,
      "learning_rate": 2.40064017071219e-06,
      "loss": 0.0082,
      "step": 540
    },
    {
      "epoch": 0.156477826380739,
      "grad_norm": 0.13362008333206177,
      "learning_rate": 2.445096470169823e-06,
      "loss": 0.0091,
      "step": 550
    },
    {
      "epoch": 0.1593228777694797,
      "grad_norm": 0.09574282169342041,
      "learning_rate": 2.4895527696274565e-06,
      "loss": 0.0086,
      "step": 560
    },
    {
      "epoch": 0.16216792915822043,
      "grad_norm": 0.10629838705062866,
      "learning_rate": 2.5340090690850895e-06,
      "loss": 0.009,
      "step": 570
    },
    {
      "epoch": 0.16501298054696112,
      "grad_norm": 0.09541832655668259,
      "learning_rate": 2.578465368542723e-06,
      "loss": 0.0088,
      "step": 580
    },
    {
      "epoch": 0.16785803193570184,
      "grad_norm": 0.1058318167924881,
      "learning_rate": 2.622921668000356e-06,
      "loss": 0.0087,
      "step": 590
    },
    {
      "epoch": 0.17070308332444256,
      "grad_norm": 0.09181062877178192,
      "learning_rate": 2.667377967457989e-06,
      "loss": 0.0089,
      "step": 600
    },
    {
      "epoch": 0.17354813471318326,
      "grad_norm": 0.1417740136384964,
      "learning_rate": 2.7118342669156223e-06,
      "loss": 0.009,
      "step": 610
    },
    {
      "epoch": 0.17639318610192398,
      "grad_norm": 0.11063350737094879,
      "learning_rate": 2.7562905663732553e-06,
      "loss": 0.0088,
      "step": 620
    },
    {
      "epoch": 0.17923823749066467,
      "grad_norm": 0.09422960132360458,
      "learning_rate": 2.8007468658308887e-06,
      "loss": 0.0088,
      "step": 630
    },
    {
      "epoch": 0.1820832888794054,
      "grad_norm": 0.08875516802072525,
      "learning_rate": 2.8452031652885217e-06,
      "loss": 0.0081,
      "step": 640
    },
    {
      "epoch": 0.18492834026814609,
      "grad_norm": 0.08624540269374847,
      "learning_rate": 2.8896594647461547e-06,
      "loss": 0.0082,
      "step": 650
    },
    {
      "epoch": 0.1877733916568868,
      "grad_norm": 0.09943191707134247,
      "learning_rate": 2.934115764203788e-06,
      "loss": 0.0088,
      "step": 660
    },
    {
      "epoch": 0.1906184430456275,
      "grad_norm": 0.08673301339149475,
      "learning_rate": 2.978572063661421e-06,
      "loss": 0.0086,
      "step": 670
    },
    {
      "epoch": 0.19346349443436822,
      "grad_norm": 0.090702585875988,
      "learning_rate": 3.023028363119054e-06,
      "loss": 0.0085,
      "step": 680
    },
    {
      "epoch": 0.19630854582310894,
      "grad_norm": 0.08411288261413574,
      "learning_rate": 3.067484662576687e-06,
      "loss": 0.009,
      "step": 690
    },
    {
      "epoch": 0.19915359721184964,
      "grad_norm": 0.09590886533260345,
      "learning_rate": 3.1119409620343205e-06,
      "loss": 0.0083,
      "step": 700
    },
    {
      "epoch": 0.20199864860059036,
      "grad_norm": 0.12336103618144989,
      "learning_rate": 3.1563972614919534e-06,
      "loss": 0.0088,
      "step": 710
    },
    {
      "epoch": 0.20484369998933105,
      "grad_norm": 0.08143921196460724,
      "learning_rate": 3.200853560949587e-06,
      "loss": 0.0088,
      "step": 720
    },
    {
      "epoch": 0.20768875137807177,
      "grad_norm": 0.09164416790008545,
      "learning_rate": 3.24530986040722e-06,
      "loss": 0.0087,
      "step": 730
    },
    {
      "epoch": 0.21053380276681247,
      "grad_norm": 0.1068928986787796,
      "learning_rate": 3.289766159864853e-06,
      "loss": 0.0088,
      "step": 740
    },
    {
      "epoch": 0.2133788541555532,
      "grad_norm": 0.08625414967536926,
      "learning_rate": 3.3342224593224862e-06,
      "loss": 0.008,
      "step": 750
    },
    {
      "epoch": 0.2133788541555532,
      "eval_loss": 0.04651115834712982,
      "eval_runtime": 9.062,
      "eval_samples_per_second": 165.526,
      "eval_steps_per_second": 10.373,
      "eval_sts_dev_pearson_cosine": 0.769180082275811,
      "eval_sts_dev_pearson_dot": 0.5978598585684491,
      "eval_sts_dev_pearson_euclidean": 0.7336764223199792,
      "eval_sts_dev_pearson_manhattan": 0.7333470603433799,
      "eval_sts_dev_pearson_max": 0.769180082275811,
      "eval_sts_dev_spearman_cosine": 0.779812353514179,
      "eval_sts_dev_spearman_dot": 0.5813102081634336,
      "eval_sts_dev_spearman_euclidean": 0.7286880899787377,
      "eval_sts_dev_spearman_manhattan": 0.728602343078262,
      "eval_sts_dev_spearman_max": 0.779812353514179,
      "step": 750
    },
    {
      "epoch": 0.21622390554429388,
      "grad_norm": 0.13600626587867737,
      "learning_rate": 3.378678758780119e-06,
      "loss": 0.0087,
      "step": 760
    },
    {
      "epoch": 0.2190689569330346,
      "grad_norm": 0.14500027894973755,
      "learning_rate": 3.423135058237752e-06,
      "loss": 0.0087,
      "step": 770
    },
    {
      "epoch": 0.22191400832177532,
      "grad_norm": 0.10052921622991562,
      "learning_rate": 3.4675913576953856e-06,
      "loss": 0.009,
      "step": 780
    },
    {
      "epoch": 0.22475905971051602,
      "grad_norm": 0.08056453615427017,
      "learning_rate": 3.512047657153019e-06,
      "loss": 0.0085,
      "step": 790
    },
    {
      "epoch": 0.22760411109925674,
      "grad_norm": 0.08645027875900269,
      "learning_rate": 3.5565039566106516e-06,
      "loss": 0.009,
      "step": 800
    },
    {
      "epoch": 0.23044916248799743,
      "grad_norm": 0.12497828155755997,
      "learning_rate": 3.600960256068285e-06,
      "loss": 0.0082,
      "step": 810
    },
    {
      "epoch": 0.23329421387673815,
      "grad_norm": 0.06854041665792465,
      "learning_rate": 3.6454165555259184e-06,
      "loss": 0.0073,
      "step": 820
    },
    {
      "epoch": 0.23613926526547885,
      "grad_norm": 0.0781393051147461,
      "learning_rate": 3.689872854983551e-06,
      "loss": 0.0078,
      "step": 830
    },
    {
      "epoch": 0.23898431665421957,
      "grad_norm": 0.09048033505678177,
      "learning_rate": 3.7343291544411844e-06,
      "loss": 0.0088,
      "step": 840
    },
    {
      "epoch": 0.24182936804296026,
      "grad_norm": 0.10866343975067139,
      "learning_rate": 3.7787854538988178e-06,
      "loss": 0.0077,
      "step": 850
    },
    {
      "epoch": 0.24467441943170098,
      "grad_norm": 0.08551909774541855,
      "learning_rate": 3.82324175335645e-06,
      "loss": 0.008,
      "step": 860
    },
    {
      "epoch": 0.2475194708204417,
      "grad_norm": 0.10334528237581253,
      "learning_rate": 3.867698052814084e-06,
      "loss": 0.008,
      "step": 870
    },
    {
      "epoch": 0.2503645222091824,
      "grad_norm": 0.10668737441301346,
      "learning_rate": 3.912154352271717e-06,
      "loss": 0.0086,
      "step": 880
    },
    {
      "epoch": 0.2532095735979231,
      "grad_norm": 0.09555123746395111,
      "learning_rate": 3.9566106517293506e-06,
      "loss": 0.0083,
      "step": 890
    },
    {
      "epoch": 0.2560546249866638,
      "grad_norm": 0.09091876447200775,
      "learning_rate": 4.001066951186983e-06,
      "loss": 0.0081,
      "step": 900
    },
    {
      "epoch": 0.2588996763754045,
      "grad_norm": 0.10903967916965485,
      "learning_rate": 4.0455232506446165e-06,
      "loss": 0.0081,
      "step": 910
    },
    {
      "epoch": 0.26174472776414526,
      "grad_norm": 0.0973634123802185,
      "learning_rate": 4.08997955010225e-06,
      "loss": 0.0077,
      "step": 920
    },
    {
      "epoch": 0.26458977915288595,
      "grad_norm": 0.10415139049291611,
      "learning_rate": 4.1344358495598825e-06,
      "loss": 0.0083,
      "step": 930
    },
    {
      "epoch": 0.26743483054162664,
      "grad_norm": 0.07658711075782776,
      "learning_rate": 4.178892149017516e-06,
      "loss": 0.0081,
      "step": 940
    },
    {
      "epoch": 0.2702798819303674,
      "grad_norm": 0.08397387713193893,
      "learning_rate": 4.223348448475149e-06,
      "loss": 0.0069,
      "step": 950
    },
    {
      "epoch": 0.2731249333191081,
      "grad_norm": 0.07437604665756226,
      "learning_rate": 4.267804747932783e-06,
      "loss": 0.0084,
      "step": 960
    },
    {
      "epoch": 0.2759699847078488,
      "grad_norm": 0.08432639390230179,
      "learning_rate": 4.312261047390415e-06,
      "loss": 0.0075,
      "step": 970
    },
    {
      "epoch": 0.2788150360965895,
      "grad_norm": 0.10041897743940353,
      "learning_rate": 4.356717346848049e-06,
      "loss": 0.0081,
      "step": 980
    },
    {
      "epoch": 0.2816600874853302,
      "grad_norm": 0.08802913874387741,
      "learning_rate": 4.401173646305682e-06,
      "loss": 0.0086,
      "step": 990
    },
    {
      "epoch": 0.2845051388740709,
      "grad_norm": 0.10163892060518265,
      "learning_rate": 4.445629945763315e-06,
      "loss": 0.0079,
      "step": 1000
    },
    {
      "epoch": 0.2845051388740709,
      "eval_loss": 0.047278326004743576,
      "eval_runtime": 8.8427,
      "eval_samples_per_second": 169.631,
      "eval_steps_per_second": 10.63,
      "eval_sts_dev_pearson_cosine": 0.7757447848847815,
      "eval_sts_dev_pearson_dot": 0.6005493714862752,
      "eval_sts_dev_pearson_euclidean": 0.7410592947515149,
      "eval_sts_dev_pearson_manhattan": 0.7408111418700313,
      "eval_sts_dev_pearson_max": 0.7757447848847815,
      "eval_sts_dev_spearman_cosine": 0.78547452833307,
      "eval_sts_dev_spearman_dot": 0.582949767142449,
      "eval_sts_dev_spearman_euclidean": 0.7377389195713249,
      "eval_sts_dev_spearman_manhattan": 0.7378881505306938,
      "eval_sts_dev_spearman_max": 0.78547452833307,
      "step": 1000
    },
    {
      "epoch": 0.2873501902628116,
      "grad_norm": 0.08119315654039383,
      "learning_rate": 4.490086245220948e-06,
      "loss": 0.0088,
      "step": 1010
    },
    {
      "epoch": 0.2901952416515523,
      "grad_norm": 0.10737801343202591,
      "learning_rate": 4.5345425446785815e-06,
      "loss": 0.0073,
      "step": 1020
    },
    {
      "epoch": 0.29304029304029305,
      "grad_norm": 0.08908016234636307,
      "learning_rate": 4.578998844136214e-06,
      "loss": 0.008,
      "step": 1030
    },
    {
      "epoch": 0.29588534442903375,
      "grad_norm": 0.08246352523565292,
      "learning_rate": 4.6234551435938475e-06,
      "loss": 0.0073,
      "step": 1040
    },
    {
      "epoch": 0.29873039581777444,
      "grad_norm": 0.08022474497556686,
      "learning_rate": 4.667911443051481e-06,
      "loss": 0.008,
      "step": 1050
    },
    {
      "epoch": 0.3015754472065152,
      "grad_norm": 0.07938782870769501,
      "learning_rate": 4.712367742509114e-06,
      "loss": 0.0074,
      "step": 1060
    },
    {
      "epoch": 0.3044204985952559,
      "grad_norm": 0.07806035876274109,
      "learning_rate": 4.756824041966747e-06,
      "loss": 0.007,
      "step": 1070
    },
    {
      "epoch": 0.3072655499839966,
      "grad_norm": 0.08204073458909988,
      "learning_rate": 4.80128034142438e-06,
      "loss": 0.0075,
      "step": 1080
    },
    {
      "epoch": 0.31011060137273727,
      "grad_norm": 0.07709292322397232,
      "learning_rate": 4.845736640882014e-06,
      "loss": 0.0077,
      "step": 1090
    },
    {
      "epoch": 0.312955652761478,
      "grad_norm": 0.09081903845071793,
      "learning_rate": 4.890192940339646e-06,
      "loss": 0.0076,
      "step": 1100
    },
    {
      "epoch": 0.3158007041502187,
      "grad_norm": 0.0967966839671135,
      "learning_rate": 4.93464923979728e-06,
      "loss": 0.0082,
      "step": 1110
    },
    {
      "epoch": 0.3186457555389594,
      "grad_norm": 0.07934273034334183,
      "learning_rate": 4.979105539254913e-06,
      "loss": 0.0073,
      "step": 1120
    },
    {
      "epoch": 0.32149080692770016,
      "grad_norm": 0.07694579660892487,
      "learning_rate": 5.023561838712546e-06,
      "loss": 0.007,
      "step": 1130
    },
    {
      "epoch": 0.32433585831644085,
      "grad_norm": 0.07895845174789429,
      "learning_rate": 5.068018138170179e-06,
      "loss": 0.0077,
      "step": 1140
    },
    {
      "epoch": 0.32718090970518154,
      "grad_norm": 0.07453285902738571,
      "learning_rate": 5.1124744376278124e-06,
      "loss": 0.0074,
      "step": 1150
    },
    {
      "epoch": 0.33002596109392224,
      "grad_norm": 0.08011069148778915,
      "learning_rate": 5.156930737085446e-06,
      "loss": 0.0076,
      "step": 1160
    },
    {
      "epoch": 0.332871012482663,
      "grad_norm": 0.08325564116239548,
      "learning_rate": 5.201387036543078e-06,
      "loss": 0.0078,
      "step": 1170
    },
    {
      "epoch": 0.3357160638714037,
      "grad_norm": 0.08292325586080551,
      "learning_rate": 5.245843336000712e-06,
      "loss": 0.0073,
      "step": 1180
    },
    {
      "epoch": 0.3385611152601444,
      "grad_norm": 0.08605830371379852,
      "learning_rate": 5.290299635458345e-06,
      "loss": 0.0077,
      "step": 1190
    },
    {
      "epoch": 0.3414061666488851,
      "grad_norm": 0.08384672552347183,
      "learning_rate": 5.334755934915978e-06,
      "loss": 0.0068,
      "step": 1200
    },
    {
      "epoch": 0.3442512180376258,
      "grad_norm": 0.10930886119604111,
      "learning_rate": 5.379212234373611e-06,
      "loss": 0.0079,
      "step": 1210
    },
    {
      "epoch": 0.3470962694263665,
      "grad_norm": 0.09999439120292664,
      "learning_rate": 5.423668533831245e-06,
      "loss": 0.0073,
      "step": 1220
    },
    {
      "epoch": 0.3499413208151072,
      "grad_norm": 0.0711401030421257,
      "learning_rate": 5.468124833288877e-06,
      "loss": 0.0075,
      "step": 1230
    },
    {
      "epoch": 0.35278637220384795,
      "grad_norm": 0.12141191959381104,
      "learning_rate": 5.512581132746511e-06,
      "loss": 0.0078,
      "step": 1240
    },
    {
      "epoch": 0.35563142359258865,
      "grad_norm": 0.08158909529447556,
      "learning_rate": 5.557037432204144e-06,
      "loss": 0.0073,
      "step": 1250
    },
    {
      "epoch": 0.35563142359258865,
      "eval_loss": 0.04716332256793976,
      "eval_runtime": 9.2017,
      "eval_samples_per_second": 163.013,
      "eval_steps_per_second": 10.215,
      "eval_sts_dev_pearson_cosine": 0.7745797315927636,
      "eval_sts_dev_pearson_dot": 0.6045460896217866,
      "eval_sts_dev_pearson_euclidean": 0.7454842399458257,
      "eval_sts_dev_pearson_manhattan": 0.7450723972536097,
      "eval_sts_dev_pearson_max": 0.7745797315927636,
      "eval_sts_dev_spearman_cosine": 0.7854745787288134,
      "eval_sts_dev_spearman_dot": 0.5869807295128947,
      "eval_sts_dev_spearman_euclidean": 0.7432930573697278,
      "eval_sts_dev_spearman_manhattan": 0.7431996603127268,
      "eval_sts_dev_spearman_max": 0.7854745787288134,
      "step": 1250
    },
    {
      "epoch": 0.35847647498132934,
      "grad_norm": 0.07299906015396118,
      "learning_rate": 5.601493731661777e-06,
      "loss": 0.0073,
      "step": 1260
    },
    {
      "epoch": 0.36132152637007003,
      "grad_norm": 0.08581911772489548,
      "learning_rate": 5.64595003111941e-06,
      "loss": 0.007,
      "step": 1270
    },
    {
      "epoch": 0.3641665777588108,
      "grad_norm": 0.08339793235063553,
      "learning_rate": 5.690406330577043e-06,
      "loss": 0.0068,
      "step": 1280
    },
    {
      "epoch": 0.3670116291475515,
      "grad_norm": 0.08051007241010666,
      "learning_rate": 5.734862630034677e-06,
      "loss": 0.0067,
      "step": 1290
    },
    {
      "epoch": 0.36985668053629217,
      "grad_norm": 0.08804050087928772,
      "learning_rate": 5.779318929492309e-06,
      "loss": 0.0078,
      "step": 1300
    },
    {
      "epoch": 0.3727017319250329,
      "grad_norm": 0.07765672355890274,
      "learning_rate": 5.823775228949943e-06,
      "loss": 0.0072,
      "step": 1310
    },
    {
      "epoch": 0.3755467833137736,
      "grad_norm": 0.08375009894371033,
      "learning_rate": 5.868231528407576e-06,
      "loss": 0.0071,
      "step": 1320
    },
    {
      "epoch": 0.3783918347025143,
      "grad_norm": 0.07235526293516159,
      "learning_rate": 5.912687827865209e-06,
      "loss": 0.0068,
      "step": 1330
    },
    {
      "epoch": 0.381236886091255,
      "grad_norm": 0.08521237969398499,
      "learning_rate": 5.957144127322842e-06,
      "loss": 0.0068,
      "step": 1340
    },
    {
      "epoch": 0.38408193747999575,
      "grad_norm": 0.08466946333646774,
      "learning_rate": 6.001600426780475e-06,
      "loss": 0.0074,
      "step": 1350
    },
    {
      "epoch": 0.38692698886873644,
      "grad_norm": 0.08605194091796875,
      "learning_rate": 6.046056726238108e-06,
      "loss": 0.0074,
      "step": 1360
    },
    {
      "epoch": 0.38977204025747714,
      "grad_norm": 0.10898251086473465,
      "learning_rate": 6.0905130256957415e-06,
      "loss": 0.0077,
      "step": 1370
    },
    {
      "epoch": 0.3926170916462179,
      "grad_norm": 0.07759184390306473,
      "learning_rate": 6.134969325153374e-06,
      "loss": 0.0069,
      "step": 1380
    },
    {
      "epoch": 0.3954621430349586,
      "grad_norm": 0.09418889880180359,
      "learning_rate": 6.1794256246110075e-06,
      "loss": 0.0079,
      "step": 1390
    },
    {
      "epoch": 0.3983071944236993,
      "grad_norm": 0.07198388129472733,
      "learning_rate": 6.223881924068641e-06,
      "loss": 0.0066,
      "step": 1400
    },
    {
      "epoch": 0.40115224581243997,
      "grad_norm": 0.1127256527543068,
      "learning_rate": 6.2683382235262735e-06,
      "loss": 0.008,
      "step": 1410
    },
    {
      "epoch": 0.4039972972011807,
      "grad_norm": 0.09208247065544128,
      "learning_rate": 6.312794522983907e-06,
      "loss": 0.008,
      "step": 1420
    },
    {
      "epoch": 0.4068423485899214,
      "grad_norm": 0.08608128875494003,
      "learning_rate": 6.35725082244154e-06,
      "loss": 0.0071,
      "step": 1430
    },
    {
      "epoch": 0.4096873999786621,
      "grad_norm": 0.09031302481889725,
      "learning_rate": 6.401707121899174e-06,
      "loss": 0.0066,
      "step": 1440
    },
    {
      "epoch": 0.4125324513674028,
      "grad_norm": 0.08052125573158264,
      "learning_rate": 6.446163421356806e-06,
      "loss": 0.0079,
      "step": 1450
    },
    {
      "epoch": 0.41537750275614355,
      "grad_norm": 0.07241199910640717,
      "learning_rate": 6.49061972081444e-06,
      "loss": 0.0075,
      "step": 1460
    },
    {
      "epoch": 0.41822255414488424,
      "grad_norm": 0.10164492577314377,
      "learning_rate": 6.535076020272072e-06,
      "loss": 0.0066,
      "step": 1470
    },
    {
      "epoch": 0.42106760553362493,
      "grad_norm": 0.08544593304395676,
      "learning_rate": 6.579532319729706e-06,
      "loss": 0.007,
      "step": 1480
    },
    {
      "epoch": 0.4239126569223657,
      "grad_norm": 0.11136358976364136,
      "learning_rate": 6.623988619187339e-06,
      "loss": 0.0066,
      "step": 1490
    },
    {
      "epoch": 0.4267577083111064,
      "grad_norm": 0.07907264679670334,
      "learning_rate": 6.6684449186449725e-06,
      "loss": 0.0066,
      "step": 1500
    },
    {
      "epoch": 0.4267577083111064,
      "eval_loss": 0.04738680273294449,
      "eval_runtime": 8.8516,
      "eval_samples_per_second": 169.462,
      "eval_steps_per_second": 10.62,
      "eval_sts_dev_pearson_cosine": 0.7790001769076627,
      "eval_sts_dev_pearson_dot": 0.6181753135926376,
      "eval_sts_dev_pearson_euclidean": 0.7499901311425706,
      "eval_sts_dev_pearson_manhattan": 0.7495768624913272,
      "eval_sts_dev_pearson_max": 0.7790001769076627,
      "eval_sts_dev_spearman_cosine": 0.7907671099319872,
      "eval_sts_dev_spearman_dot": 0.5999468232206078,
      "eval_sts_dev_spearman_euclidean": 0.7486926337135288,
      "eval_sts_dev_spearman_manhattan": 0.7484812166973952,
      "eval_sts_dev_spearman_max": 0.7907671099319872,
      "step": 1500
    },
    {
      "epoch": 0.42960275969984707,
      "grad_norm": 0.0868421345949173,
      "learning_rate": 6.712901218102606e-06,
      "loss": 0.0075,
      "step": 1510
    },
    {
      "epoch": 0.43244781108858776,
      "grad_norm": 0.08232498168945312,
      "learning_rate": 6.757357517560238e-06,
      "loss": 0.0072,
      "step": 1520
    },
    {
      "epoch": 0.4352928624773285,
      "grad_norm": 0.08831491321325302,
      "learning_rate": 6.801813817017871e-06,
      "loss": 0.0072,
      "step": 1530
    },
    {
      "epoch": 0.4381379138660692,
      "grad_norm": 0.09035244584083557,
      "learning_rate": 6.846270116475504e-06,
      "loss": 0.0067,
      "step": 1540
    },
    {
      "epoch": 0.4409829652548099,
      "grad_norm": 0.09386060386896133,
      "learning_rate": 6.890726415933138e-06,
      "loss": 0.0073,
      "step": 1550
    },
    {
      "epoch": 0.44382801664355065,
      "grad_norm": 0.07013905048370361,
      "learning_rate": 6.935182715390771e-06,
      "loss": 0.0066,
      "step": 1560
    },
    {
      "epoch": 0.44667306803229134,
      "grad_norm": 0.09007762372493744,
      "learning_rate": 6.979639014848405e-06,
      "loss": 0.0063,
      "step": 1570
    },
    {
      "epoch": 0.44951811942103204,
      "grad_norm": 0.08053620904684067,
      "learning_rate": 7.024095314306038e-06,
      "loss": 0.0074,
      "step": 1580
    },
    {
      "epoch": 0.45236317080977273,
      "grad_norm": 0.09655388444662094,
      "learning_rate": 7.06855161376367e-06,
      "loss": 0.0075,
      "step": 1590
    },
    {
      "epoch": 0.4552082221985135,
      "grad_norm": 0.07398466765880585,
      "learning_rate": 7.113007913221303e-06,
      "loss": 0.0069,
      "step": 1600
    },
    {
      "epoch": 0.4580532735872542,
      "grad_norm": 0.08047506213188171,
      "learning_rate": 7.1574642126789366e-06,
      "loss": 0.0065,
      "step": 1610
    },
    {
      "epoch": 0.46089832497599487,
      "grad_norm": 0.09212318807840347,
      "learning_rate": 7.20192051213657e-06,
      "loss": 0.007,
      "step": 1620
    },
    {
      "epoch": 0.4637433763647356,
      "grad_norm": 0.08342114090919495,
      "learning_rate": 7.246376811594203e-06,
      "loss": 0.0067,
      "step": 1630
    },
    {
      "epoch": 0.4665884277534763,
      "grad_norm": 0.06753776967525482,
      "learning_rate": 7.290833111051837e-06,
      "loss": 0.0067,
      "step": 1640
    },
    {
      "epoch": 0.469433479142217,
      "grad_norm": 0.06800476461648941,
      "learning_rate": 7.33528941050947e-06,
      "loss": 0.0072,
      "step": 1650
    },
    {
      "epoch": 0.4722785305309577,
      "grad_norm": 0.08325930684804916,
      "learning_rate": 7.379745709967102e-06,
      "loss": 0.007,
      "step": 1660
    },
    {
      "epoch": 0.47512358191969845,
      "grad_norm": 0.0933527797460556,
      "learning_rate": 7.424202009424735e-06,
      "loss": 0.0078,
      "step": 1670
    },
    {
      "epoch": 0.47796863330843914,
      "grad_norm": 0.08404399454593658,
      "learning_rate": 7.468658308882369e-06,
      "loss": 0.0069,
      "step": 1680
    },
    {
      "epoch": 0.48081368469717983,
      "grad_norm": 0.08951716870069504,
      "learning_rate": 7.513114608340002e-06,
      "loss": 0.0067,
      "step": 1690
    },
    {
      "epoch": 0.4836587360859205,
      "grad_norm": 0.0985400453209877,
      "learning_rate": 7.5575709077976356e-06,
      "loss": 0.0072,
      "step": 1700
    },
    {
      "epoch": 0.4865037874746613,
      "grad_norm": 0.08860517293214798,
      "learning_rate": 7.602027207255269e-06,
      "loss": 0.0071,
      "step": 1710
    },
    {
      "epoch": 0.48934883886340197,
      "grad_norm": 0.09596813470125198,
      "learning_rate": 7.6464835067129e-06,
      "loss": 0.0069,
      "step": 1720
    },
    {
      "epoch": 0.49219389025214266,
      "grad_norm": 0.08169304579496384,
      "learning_rate": 7.690939806170534e-06,
      "loss": 0.0074,
      "step": 1730
    },
    {
      "epoch": 0.4950389416408834,
      "grad_norm": 0.08802200853824615,
      "learning_rate": 7.735396105628168e-06,
      "loss": 0.0073,
      "step": 1740
    },
    {
      "epoch": 0.4978839930296241,
      "grad_norm": 0.10039868205785751,
      "learning_rate": 7.779852405085801e-06,
      "loss": 0.0064,
      "step": 1750
    },
    {
      "epoch": 0.4978839930296241,
      "eval_loss": 0.04991479963064194,
      "eval_runtime": 9.5702,
      "eval_samples_per_second": 156.737,
      "eval_steps_per_second": 9.822,
      "eval_sts_dev_pearson_cosine": 0.7809326345755484,
      "eval_sts_dev_pearson_dot": 0.6075613489678238,
      "eval_sts_dev_pearson_euclidean": 0.7492249179863018,
      "eval_sts_dev_pearson_manhattan": 0.748824124010157,
      "eval_sts_dev_pearson_max": 0.7809326345755484,
      "eval_sts_dev_spearman_cosine": 0.7937663255592566,
      "eval_sts_dev_spearman_dot": 0.5920294462594152,
      "eval_sts_dev_spearman_euclidean": 0.7488717447673626,
      "eval_sts_dev_spearman_manhattan": 0.7486136956534813,
      "eval_sts_dev_spearman_max": 0.7937663255592566,
      "step": 1750
    },
    {
      "epoch": 0.5007290444183649,
      "grad_norm": 0.08260150998830795,
      "learning_rate": 7.824308704543434e-06,
      "loss": 0.0064,
      "step": 1760
    },
    {
      "epoch": 0.5035740958071055,
      "grad_norm": 0.05973382294178009,
      "learning_rate": 7.868765004001068e-06,
      "loss": 0.0068,
      "step": 1770
    },
    {
      "epoch": 0.5064191471958462,
      "grad_norm": 0.08796348422765732,
      "learning_rate": 7.913221303458701e-06,
      "loss": 0.007,
      "step": 1780
    },
    {
      "epoch": 0.5092641985845869,
      "grad_norm": 0.10702888667583466,
      "learning_rate": 7.957677602916333e-06,
      "loss": 0.0065,
      "step": 1790
    },
    {
      "epoch": 0.5121092499733276,
      "grad_norm": 0.07452105730772018,
      "learning_rate": 8.002133902373966e-06,
      "loss": 0.0073,
      "step": 1800
    },
    {
      "epoch": 0.5149543013620683,
      "grad_norm": 0.07454142719507217,
      "learning_rate": 8.0465902018316e-06,
      "loss": 0.0061,
      "step": 1810
    },
    {
      "epoch": 0.517799352750809,
      "grad_norm": 0.08079402148723602,
      "learning_rate": 8.091046501289233e-06,
      "loss": 0.0071,
      "step": 1820
    },
    {
      "epoch": 0.5206444041395498,
      "grad_norm": 0.05563436076045036,
      "learning_rate": 8.135502800746867e-06,
      "loss": 0.0058,
      "step": 1830
    },
    {
      "epoch": 0.5234894555282905,
      "grad_norm": 0.08133077621459961,
      "learning_rate": 8.1799591002045e-06,
      "loss": 0.0065,
      "step": 1840
    },
    {
      "epoch": 0.5263345069170312,
      "grad_norm": 0.06776826083660126,
      "learning_rate": 8.224415399662133e-06,
      "loss": 0.0067,
      "step": 1850
    },
    {
      "epoch": 0.5291795583057719,
      "grad_norm": 0.07137738913297653,
      "learning_rate": 8.268871699119765e-06,
      "loss": 0.0063,
      "step": 1860
    },
    {
      "epoch": 0.5320246096945126,
      "grad_norm": 0.08924838155508041,
      "learning_rate": 8.313327998577398e-06,
      "loss": 0.007,
      "step": 1870
    },
    {
      "epoch": 0.5348696610832533,
      "grad_norm": 0.10980788618326187,
      "learning_rate": 8.357784298035032e-06,
      "loss": 0.0069,
      "step": 1880
    },
    {
      "epoch": 0.537714712471994,
      "grad_norm": 0.10096590220928192,
      "learning_rate": 8.402240597492665e-06,
      "loss": 0.0073,
      "step": 1890
    },
    {
      "epoch": 0.5405597638607348,
      "grad_norm": 0.06633611768484116,
      "learning_rate": 8.446696896950299e-06,
      "loss": 0.0067,
      "step": 1900
    },
    {
      "epoch": 0.5434048152494755,
      "grad_norm": 0.0696336105465889,
      "learning_rate": 8.491153196407932e-06,
      "loss": 0.0068,
      "step": 1910
    },
    {
      "epoch": 0.5462498666382162,
      "grad_norm": 0.0785793736577034,
      "learning_rate": 8.535609495865565e-06,
      "loss": 0.0066,
      "step": 1920
    },
    {
      "epoch": 0.5490949180269569,
      "grad_norm": 0.10194658488035202,
      "learning_rate": 8.580065795323197e-06,
      "loss": 0.007,
      "step": 1930
    },
    {
      "epoch": 0.5519399694156976,
      "grad_norm": 0.07388205081224442,
      "learning_rate": 8.62452209478083e-06,
      "loss": 0.006,
      "step": 1940
    },
    {
      "epoch": 0.5547850208044383,
      "grad_norm": 0.09130030125379562,
      "learning_rate": 8.668978394238464e-06,
      "loss": 0.0062,
      "step": 1950
    },
    {
      "epoch": 0.557630072193179,
      "grad_norm": 0.06957433372735977,
      "learning_rate": 8.713434693696097e-06,
      "loss": 0.0062,
      "step": 1960
    },
    {
      "epoch": 0.5604751235819196,
      "grad_norm": 0.07989591360092163,
      "learning_rate": 8.75789099315373e-06,
      "loss": 0.0067,
      "step": 1970
    },
    {
      "epoch": 0.5633201749706604,
      "grad_norm": 0.06442303210496902,
      "learning_rate": 8.802347292611364e-06,
      "loss": 0.0063,
      "step": 1980
    },
    {
      "epoch": 0.5661652263594011,
      "grad_norm": 0.0740804597735405,
      "learning_rate": 8.846803592068996e-06,
      "loss": 0.006,
      "step": 1990
    },
    {
      "epoch": 0.5690102777481418,
      "grad_norm": 0.09938943386077881,
      "learning_rate": 8.89125989152663e-06,
      "loss": 0.0067,
      "step": 2000
    },
    {
      "epoch": 0.5690102777481418,
      "eval_loss": 0.04781487584114075,
      "eval_runtime": 9.1274,
      "eval_samples_per_second": 164.341,
      "eval_steps_per_second": 10.299,
      "eval_sts_dev_pearson_cosine": 0.7835655100602283,
      "eval_sts_dev_pearson_dot": 0.6140666203682124,
      "eval_sts_dev_pearson_euclidean": 0.7549529684333918,
      "eval_sts_dev_pearson_manhattan": 0.7544779011570287,
      "eval_sts_dev_pearson_max": 0.7835655100602283,
      "eval_sts_dev_spearman_cosine": 0.7943462202168604,
      "eval_sts_dev_spearman_dot": 0.5976362907741744,
      "eval_sts_dev_spearman_euclidean": 0.7553189959320907,
      "eval_sts_dev_spearman_manhattan": 0.7548879863357982,
      "eval_sts_dev_spearman_max": 0.7943462202168604,
      "step": 2000
    },
    {
      "epoch": 0.5718553291368825,
      "grad_norm": 0.08203410357236862,
      "learning_rate": 8.935716190984263e-06,
      "loss": 0.0076,
      "step": 2010
    },
    {
      "epoch": 0.5747003805256232,
      "grad_norm": 0.06707337498664856,
      "learning_rate": 8.980172490441896e-06,
      "loss": 0.0069,
      "step": 2020
    },
    {
      "epoch": 0.5775454319143639,
      "grad_norm": 0.07895516604185104,
      "learning_rate": 9.02462878989953e-06,
      "loss": 0.0065,
      "step": 2030
    },
    {
      "epoch": 0.5803904833031046,
      "grad_norm": 0.08560437709093094,
      "learning_rate": 9.069085089357163e-06,
      "loss": 0.007,
      "step": 2040
    },
    {
      "epoch": 0.5832355346918454,
      "grad_norm": 0.07935027033090591,
      "learning_rate": 9.113541388814796e-06,
      "loss": 0.006,
      "step": 2050
    },
    {
      "epoch": 0.5860805860805861,
      "grad_norm": 0.07242047786712646,
      "learning_rate": 9.157997688272428e-06,
      "loss": 0.0064,
      "step": 2060
    },
    {
      "epoch": 0.5889256374693268,
      "grad_norm": 0.067719466984272,
      "learning_rate": 9.202453987730062e-06,
      "loss": 0.0063,
      "step": 2070
    },
    {
      "epoch": 0.5917706888580675,
      "grad_norm": 0.06705283373594284,
      "learning_rate": 9.246910287187695e-06,
      "loss": 0.0067,
      "step": 2080
    },
    {
      "epoch": 0.5946157402468082,
      "grad_norm": 0.07053534686565399,
      "learning_rate": 9.291366586645328e-06,
      "loss": 0.0064,
      "step": 2090
    },
    {
      "epoch": 0.5974607916355489,
      "grad_norm": 0.07750871032476425,
      "learning_rate": 9.335822886102962e-06,
      "loss": 0.0062,
      "step": 2100
    },
    {
      "epoch": 0.6003058430242896,
      "grad_norm": 0.0752708688378334,
      "learning_rate": 9.380279185560595e-06,
      "loss": 0.0063,
      "step": 2110
    },
    {
      "epoch": 0.6031508944130304,
      "grad_norm": 0.06802825629711151,
      "learning_rate": 9.424735485018229e-06,
      "loss": 0.0063,
      "step": 2120
    },
    {
      "epoch": 0.6059959458017711,
      "grad_norm": 0.08155028522014618,
      "learning_rate": 9.46919178447586e-06,
      "loss": 0.0074,
      "step": 2130
    },
    {
      "epoch": 0.6088409971905118,
      "grad_norm": 0.09067688137292862,
      "learning_rate": 9.513648083933494e-06,
      "loss": 0.0067,
      "step": 2140
    },
    {
      "epoch": 0.6116860485792525,
      "grad_norm": 0.06688214093446732,
      "learning_rate": 9.558104383391127e-06,
      "loss": 0.006,
      "step": 2150
    },
    {
      "epoch": 0.6145310999679932,
      "grad_norm": 0.07848970592021942,
      "learning_rate": 9.60256068284876e-06,
      "loss": 0.0062,
      "step": 2160
    },
    {
      "epoch": 0.6173761513567338,
      "grad_norm": 0.07614806294441223,
      "learning_rate": 9.647016982306394e-06,
      "loss": 0.007,
      "step": 2170
    },
    {
      "epoch": 0.6202212027454745,
      "grad_norm": 0.07507840543985367,
      "learning_rate": 9.691473281764027e-06,
      "loss": 0.0069,
      "step": 2180
    },
    {
      "epoch": 0.6230662541342153,
      "grad_norm": 0.08833298087120056,
      "learning_rate": 9.73592958122166e-06,
      "loss": 0.007,
      "step": 2190
    },
    {
      "epoch": 0.625911305522956,
      "grad_norm": 0.07082614302635193,
      "learning_rate": 9.780385880679292e-06,
      "loss": 0.0065,
      "step": 2200
    },
    {
      "epoch": 0.6287563569116967,
      "grad_norm": 0.0823511853814125,
      "learning_rate": 9.824842180136926e-06,
      "loss": 0.0071,
      "step": 2210
    },
    {
      "epoch": 0.6316014083004374,
      "grad_norm": 0.06206200271844864,
      "learning_rate": 9.86929847959456e-06,
      "loss": 0.007,
      "step": 2220
    },
    {
      "epoch": 0.6344464596891781,
      "grad_norm": 0.0808667540550232,
      "learning_rate": 9.913754779052193e-06,
      "loss": 0.0064,
      "step": 2230
    },
    {
      "epoch": 0.6372915110779188,
      "grad_norm": 0.06442642211914062,
      "learning_rate": 9.958211078509826e-06,
      "loss": 0.0061,
      "step": 2240
    },
    {
      "epoch": 0.6401365624666595,
      "grad_norm": 0.07416217774152756,
      "learning_rate": 1.000266737796746e-05,
      "loss": 0.0062,
      "step": 2250
    },
    {
      "epoch": 0.6401365624666595,
      "eval_loss": 0.04642986133694649,
      "eval_runtime": 9.625,
      "eval_samples_per_second": 155.844,
      "eval_steps_per_second": 9.766,
      "eval_sts_dev_pearson_cosine": 0.7855623274467606,
      "eval_sts_dev_pearson_dot": 0.5924008322873175,
      "eval_sts_dev_pearson_euclidean": 0.7567766160311133,
      "eval_sts_dev_pearson_manhattan": 0.7563225529205405,
      "eval_sts_dev_pearson_max": 0.7855623274467606,
      "eval_sts_dev_spearman_cosine": 0.7934736270810029,
      "eval_sts_dev_spearman_dot": 0.5798196487719647,
      "eval_sts_dev_spearman_euclidean": 0.7571185706646215,
      "eval_sts_dev_spearman_manhattan": 0.7567665724312764,
      "eval_sts_dev_spearman_max": 0.7934736270810029,
      "step": 2250
    },
    {
      "epoch": 0.6429816138554003,
      "grad_norm": 0.09135396778583527,
      "learning_rate": 1.0047123677425091e-05,
      "loss": 0.0069,
      "step": 2260
    },
    {
      "epoch": 0.645826665244141,
      "grad_norm": 0.08974805474281311,
      "learning_rate": 1.0091579976882725e-05,
      "loss": 0.0062,
      "step": 2270
    },
    {
      "epoch": 0.6486717166328817,
      "grad_norm": 0.07147160917520523,
      "learning_rate": 1.0136036276340358e-05,
      "loss": 0.0063,
      "step": 2280
    },
    {
      "epoch": 0.6515167680216224,
      "grad_norm": 0.0677226111292839,
      "learning_rate": 1.0180492575797991e-05,
      "loss": 0.0063,
      "step": 2290
    },
    {
      "epoch": 0.6543618194103631,
      "grad_norm": 0.06699724495410919,
      "learning_rate": 1.0224948875255625e-05,
      "loss": 0.006,
      "step": 2300
    },
    {
      "epoch": 0.6572068707991038,
      "grad_norm": 0.07739193737506866,
      "learning_rate": 1.0269405174713258e-05,
      "loss": 0.0064,
      "step": 2310
    },
    {
      "epoch": 0.6600519221878445,
      "grad_norm": 0.04876289144158363,
      "learning_rate": 1.0313861474170892e-05,
      "loss": 0.0061,
      "step": 2320
    },
    {
      "epoch": 0.6628969735765853,
      "grad_norm": 0.0824015662074089,
      "learning_rate": 1.0358317773628523e-05,
      "loss": 0.0065,
      "step": 2330
    },
    {
      "epoch": 0.665742024965326,
      "grad_norm": 0.06627832353115082,
      "learning_rate": 1.0402774073086157e-05,
      "loss": 0.0061,
      "step": 2340
    },
    {
      "epoch": 0.6685870763540667,
      "grad_norm": 0.0926406979560852,
      "learning_rate": 1.044723037254379e-05,
      "loss": 0.0067,
      "step": 2350
    },
    {
      "epoch": 0.6714321277428074,
      "grad_norm": 0.06944271177053452,
      "learning_rate": 1.0491686672001424e-05,
      "loss": 0.0066,
      "step": 2360
    },
    {
      "epoch": 0.674277179131548,
      "grad_norm": 0.07248842716217041,
      "learning_rate": 1.0536142971459057e-05,
      "loss": 0.0068,
      "step": 2370
    },
    {
      "epoch": 0.6771222305202887,
      "grad_norm": 0.07560228556394577,
      "learning_rate": 1.058059927091669e-05,
      "loss": 0.0071,
      "step": 2380
    },
    {
      "epoch": 0.6799672819090294,
      "grad_norm": 0.07752121984958649,
      "learning_rate": 1.0625055570374324e-05,
      "loss": 0.0064,
      "step": 2390
    },
    {
      "epoch": 0.6828123332977702,
      "grad_norm": 0.091914102435112,
      "learning_rate": 1.0669511869831956e-05,
      "loss": 0.0064,
      "step": 2400
    },
    {
      "epoch": 0.6856573846865109,
      "grad_norm": 0.08635730296373367,
      "learning_rate": 1.0713968169289589e-05,
      "loss": 0.0064,
      "step": 2410
    },
    {
      "epoch": 0.6885024360752516,
      "grad_norm": 0.06619139015674591,
      "learning_rate": 1.0758424468747222e-05,
      "loss": 0.0064,
      "step": 2420
    },
    {
      "epoch": 0.6913474874639923,
      "grad_norm": 0.08025142550468445,
      "learning_rate": 1.0802880768204856e-05,
      "loss": 0.0062,
      "step": 2430
    },
    {
      "epoch": 0.694192538852733,
      "grad_norm": 0.07627130299806595,
      "learning_rate": 1.084733706766249e-05,
      "loss": 0.0067,
      "step": 2440
    },
    {
      "epoch": 0.6970375902414737,
      "grad_norm": 0.0778108760714531,
      "learning_rate": 1.0891793367120123e-05,
      "loss": 0.0062,
      "step": 2450
    },
    {
      "epoch": 0.6998826416302144,
      "grad_norm": 0.06801705807447433,
      "learning_rate": 1.0936249666577754e-05,
      "loss": 0.0059,
      "step": 2460
    },
    {
      "epoch": 0.7027276930189551,
      "grad_norm": 0.06568338721990585,
      "learning_rate": 1.0980705966035388e-05,
      "loss": 0.0063,
      "step": 2470
    },
    {
      "epoch": 0.7055727444076959,
      "grad_norm": 0.07497888058423996,
      "learning_rate": 1.1025162265493021e-05,
      "loss": 0.0055,
      "step": 2480
    },
    {
      "epoch": 0.7084177957964366,
      "grad_norm": 0.0807732567191124,
      "learning_rate": 1.1069618564950655e-05,
      "loss": 0.0074,
      "step": 2490
    },
    {
      "epoch": 0.7112628471851773,
      "grad_norm": 0.070890873670578,
      "learning_rate": 1.1114074864408288e-05,
      "loss": 0.0064,
      "step": 2500
    },
    {
      "epoch": 0.7112628471851773,
      "eval_loss": 0.04881654307246208,
      "eval_runtime": 9.0782,
      "eval_samples_per_second": 165.231,
      "eval_steps_per_second": 10.354,
      "eval_sts_dev_pearson_cosine": 0.7833801585647451,
      "eval_sts_dev_pearson_dot": 0.6099273388035109,
      "eval_sts_dev_pearson_euclidean": 0.7555637908097597,
      "eval_sts_dev_pearson_manhattan": 0.755336974018684,
      "eval_sts_dev_pearson_max": 0.7833801585647451,
      "eval_sts_dev_spearman_cosine": 0.7938690229216494,
      "eval_sts_dev_spearman_dot": 0.5921480251455608,
      "eval_sts_dev_spearman_euclidean": 0.7551889084831053,
      "eval_sts_dev_spearman_manhattan": 0.7549767859265626,
      "eval_sts_dev_spearman_max": 0.7938690229216494,
      "step": 2500
    },
    {
      "epoch": 0.714107898573918,
      "grad_norm": 0.06863918155431747,
      "learning_rate": 1.1158531163865921e-05,
      "loss": 0.006,
      "step": 2510
    },
    {
      "epoch": 0.7169529499626587,
      "grad_norm": 0.07372977584600449,
      "learning_rate": 1.1202987463323555e-05,
      "loss": 0.0061,
      "step": 2520
    },
    {
      "epoch": 0.7197980013513994,
      "grad_norm": 0.06173211336135864,
      "learning_rate": 1.1247443762781187e-05,
      "loss": 0.0064,
      "step": 2530
    },
    {
      "epoch": 0.7226430527401401,
      "grad_norm": 0.0746549442410469,
      "learning_rate": 1.129190006223882e-05,
      "loss": 0.0059,
      "step": 2540
    },
    {
      "epoch": 0.7254881041288809,
      "grad_norm": 0.07160431891679764,
      "learning_rate": 1.1336356361696453e-05,
      "loss": 0.0064,
      "step": 2550
    },
    {
      "epoch": 0.7283331555176216,
      "grad_norm": 0.07895245403051376,
      "learning_rate": 1.1380812661154087e-05,
      "loss": 0.0061,
      "step": 2560
    },
    {
      "epoch": 0.7311782069063623,
      "grad_norm": 0.06770409643650055,
      "learning_rate": 1.142526896061172e-05,
      "loss": 0.0062,
      "step": 2570
    },
    {
      "epoch": 0.734023258295103,
      "grad_norm": 0.07894620299339294,
      "learning_rate": 1.1469725260069354e-05,
      "loss": 0.0068,
      "step": 2580
    },
    {
      "epoch": 0.7368683096838436,
      "grad_norm": 0.07674427330493927,
      "learning_rate": 1.1514181559526985e-05,
      "loss": 0.0061,
      "step": 2590
    },
    {
      "epoch": 0.7397133610725843,
      "grad_norm": 0.09392740577459335,
      "learning_rate": 1.1558637858984619e-05,
      "loss": 0.0065,
      "step": 2600
    },
    {
      "epoch": 0.742558412461325,
      "grad_norm": 0.06918327510356903,
      "learning_rate": 1.1603094158442252e-05,
      "loss": 0.0055,
      "step": 2610
    },
    {
      "epoch": 0.7454034638500658,
      "grad_norm": 0.0910848081111908,
      "learning_rate": 1.1647550457899886e-05,
      "loss": 0.0057,
      "step": 2620
    },
    {
      "epoch": 0.7482485152388065,
      "grad_norm": 0.06995029747486115,
      "learning_rate": 1.1692006757357519e-05,
      "loss": 0.0064,
      "step": 2630
    },
    {
      "epoch": 0.7510935666275472,
      "grad_norm": 0.06808440387248993,
      "learning_rate": 1.1736463056815152e-05,
      "loss": 0.0056,
      "step": 2640
    },
    {
      "epoch": 0.7539386180162879,
      "grad_norm": 0.06680841743946075,
      "learning_rate": 1.1780919356272784e-05,
      "loss": 0.0059,
      "step": 2650
    },
    {
      "epoch": 0.7567836694050286,
      "grad_norm": 0.07254023104906082,
      "learning_rate": 1.1825375655730417e-05,
      "loss": 0.0059,
      "step": 2660
    },
    {
      "epoch": 0.7596287207937693,
      "grad_norm": 0.06777305901050568,
      "learning_rate": 1.1869831955188051e-05,
      "loss": 0.0064,
      "step": 2670
    },
    {
      "epoch": 0.76247377218251,
      "grad_norm": 0.07384537160396576,
      "learning_rate": 1.1914288254645684e-05,
      "loss": 0.0067,
      "step": 2680
    },
    {
      "epoch": 0.7653188235712508,
      "grad_norm": 0.07408931851387024,
      "learning_rate": 1.1958744554103318e-05,
      "loss": 0.0062,
      "step": 2690
    },
    {
      "epoch": 0.7681638749599915,
      "grad_norm": 0.0712476596236229,
      "learning_rate": 1.200320085356095e-05,
      "loss": 0.0056,
      "step": 2700
    },
    {
      "epoch": 0.7710089263487322,
      "grad_norm": 0.07831274718046188,
      "learning_rate": 1.2047657153018583e-05,
      "loss": 0.0063,
      "step": 2710
    },
    {
      "epoch": 0.7738539777374729,
      "grad_norm": 0.08914618194103241,
      "learning_rate": 1.2092113452476216e-05,
      "loss": 0.0064,
      "step": 2720
    },
    {
      "epoch": 0.7766990291262136,
      "grad_norm": 0.05413926765322685,
      "learning_rate": 1.213656975193385e-05,
      "loss": 0.0063,
      "step": 2730
    },
    {
      "epoch": 0.7795440805149543,
      "grad_norm": 0.07458141446113586,
      "learning_rate": 1.2181026051391483e-05,
      "loss": 0.0062,
      "step": 2740
    },
    {
      "epoch": 0.782389131903695,
      "grad_norm": 0.06790082156658173,
      "learning_rate": 1.2225482350849116e-05,
      "loss": 0.0058,
      "step": 2750
    },
    {
      "epoch": 0.782389131903695,
      "eval_loss": 0.047921594232320786,
      "eval_runtime": 9.1598,
      "eval_samples_per_second": 163.759,
      "eval_steps_per_second": 10.262,
      "eval_sts_dev_pearson_cosine": 0.7890077878318924,
      "eval_sts_dev_pearson_dot": 0.6145569918459842,
      "eval_sts_dev_pearson_euclidean": 0.7622940567617165,
      "eval_sts_dev_pearson_manhattan": 0.7619872460429895,
      "eval_sts_dev_pearson_max": 0.7890077878318924,
      "eval_sts_dev_spearman_cosine": 0.7986960426836712,
      "eval_sts_dev_spearman_dot": 0.5995859874295417,
      "eval_sts_dev_spearman_euclidean": 0.7620718720832805,
      "eval_sts_dev_spearman_manhattan": 0.7619175969681303,
      "eval_sts_dev_spearman_max": 0.7986960426836712,
      "step": 2750
    },
    {
      "epoch": 0.7852341832924358,
      "grad_norm": 0.07279020547866821,
      "learning_rate": 1.2269938650306748e-05,
      "loss": 0.0063,
      "step": 2760
    },
    {
      "epoch": 0.7880792346811765,
      "grad_norm": 0.07638818025588989,
      "learning_rate": 1.2314394949764382e-05,
      "loss": 0.0061,
      "step": 2770
    },
    {
      "epoch": 0.7909242860699172,
      "grad_norm": 0.07741549611091614,
      "learning_rate": 1.2358851249222015e-05,
      "loss": 0.0059,
      "step": 2780
    },
    {
      "epoch": 0.7937693374586579,
      "grad_norm": 0.07450976967811584,
      "learning_rate": 1.2403307548679648e-05,
      "loss": 0.0061,
      "step": 2790
    },
    {
      "epoch": 0.7966143888473985,
      "grad_norm": 0.07069046795368195,
      "learning_rate": 1.2447763848137282e-05,
      "loss": 0.0059,
      "step": 2800
    },
    {
      "epoch": 0.7994594402361392,
      "grad_norm": 0.07043927907943726,
      "learning_rate": 1.2492220147594915e-05,
      "loss": 0.0058,
      "step": 2810
    },
    {
      "epoch": 0.8023044916248799,
      "grad_norm": 0.06363347172737122,
      "learning_rate": 1.2536676447052547e-05,
      "loss": 0.0057,
      "step": 2820
    },
    {
      "epoch": 0.8051495430136207,
      "grad_norm": 0.0633586049079895,
      "learning_rate": 1.2581132746510182e-05,
      "loss": 0.0059,
      "step": 2830
    },
    {
      "epoch": 0.8079945944023614,
      "grad_norm": 0.0656353160738945,
      "learning_rate": 1.2625589045967814e-05,
      "loss": 0.0058,
      "step": 2840
    },
    {
      "epoch": 0.8108396457911021,
      "grad_norm": 0.05306802690029144,
      "learning_rate": 1.2670045345425447e-05,
      "loss": 0.0068,
      "step": 2850
    },
    {
      "epoch": 0.8136846971798428,
      "grad_norm": 0.06369970738887787,
      "learning_rate": 1.271450164488308e-05,
      "loss": 0.006,
      "step": 2860
    },
    {
      "epoch": 0.8165297485685835,
      "grad_norm": 0.059002261608839035,
      "learning_rate": 1.2758957944340712e-05,
      "loss": 0.0058,
      "step": 2870
    },
    {
      "epoch": 0.8193747999573242,
      "grad_norm": 0.06396970897912979,
      "learning_rate": 1.2803414243798347e-05,
      "loss": 0.0061,
      "step": 2880
    },
    {
      "epoch": 0.8222198513460649,
      "grad_norm": 0.06522241234779358,
      "learning_rate": 1.2847870543255979e-05,
      "loss": 0.0058,
      "step": 2890
    },
    {
      "epoch": 0.8250649027348056,
      "grad_norm": 0.07420309633016586,
      "learning_rate": 1.2892326842713613e-05,
      "loss": 0.0055,
      "step": 2900
    },
    {
      "epoch": 0.8279099541235464,
      "grad_norm": 0.0638047531247139,
      "learning_rate": 1.2936783142171246e-05,
      "loss": 0.006,
      "step": 2910
    },
    {
      "epoch": 0.8307550055122871,
      "grad_norm": 0.06835135072469711,
      "learning_rate": 1.298123944162888e-05,
      "loss": 0.0063,
      "step": 2920
    },
    {
      "epoch": 0.8336000569010278,
      "grad_norm": 0.07669410854578018,
      "learning_rate": 1.3025695741086513e-05,
      "loss": 0.0066,
      "step": 2930
    },
    {
      "epoch": 0.8364451082897685,
      "grad_norm": 0.0799371749162674,
      "learning_rate": 1.3070152040544144e-05,
      "loss": 0.0059,
      "step": 2940
    },
    {
      "epoch": 0.8392901596785092,
      "grad_norm": 0.06452161073684692,
      "learning_rate": 1.311460834000178e-05,
      "loss": 0.0056,
      "step": 2950
    },
    {
      "epoch": 0.8421352110672499,
      "grad_norm": 0.08192815631628036,
      "learning_rate": 1.3159064639459411e-05,
      "loss": 0.006,
      "step": 2960
    },
    {
      "epoch": 0.8449802624559906,
      "grad_norm": 0.08512122929096222,
      "learning_rate": 1.3203520938917045e-05,
      "loss": 0.0058,
      "step": 2970
    },
    {
      "epoch": 0.8478253138447314,
      "grad_norm": 0.05736185237765312,
      "learning_rate": 1.3247977238374678e-05,
      "loss": 0.006,
      "step": 2980
    },
    {
      "epoch": 0.8506703652334721,
      "grad_norm": 0.056353483349084854,
      "learning_rate": 1.329243353783231e-05,
      "loss": 0.0056,
      "step": 2990
    },
    {
      "epoch": 0.8535154166222128,
      "grad_norm": 0.07176116853952408,
      "learning_rate": 1.3336889837289945e-05,
      "loss": 0.0062,
      "step": 3000
    },
    {
      "epoch": 0.8535154166222128,
      "eval_loss": 0.05112998187541962,
      "eval_runtime": 8.8822,
      "eval_samples_per_second": 168.877,
      "eval_steps_per_second": 10.583,
      "eval_sts_dev_pearson_cosine": 0.7899474758938174,
      "eval_sts_dev_pearson_dot": 0.601477379904751,
      "eval_sts_dev_pearson_euclidean": 0.7631393093085868,
      "eval_sts_dev_pearson_manhattan": 0.762660331965514,
      "eval_sts_dev_pearson_max": 0.7899474758938174,
      "eval_sts_dev_spearman_cosine": 0.7996334507041041,
      "eval_sts_dev_spearman_dot": 0.5888332407477099,
      "eval_sts_dev_spearman_euclidean": 0.7632786368314162,
      "eval_sts_dev_spearman_manhattan": 0.7624223584443209,
      "eval_sts_dev_spearman_max": 0.7996334507041041,
      "step": 3000
    },
|
{ |
|
"epoch": 0.8563604680109534, |
|
"grad_norm": 0.07095087319612503, |
|
"learning_rate": 1.3381346136747577e-05, |
|
"loss": 0.0059, |
|
"step": 3010 |
|
}, |
|
{ |
|
"epoch": 0.8592055193996941, |
|
"grad_norm": 0.06647340953350067, |
|
"learning_rate": 1.3425802436205212e-05, |
|
"loss": 0.0064, |
|
"step": 3020 |
|
}, |
|
{ |
|
"epoch": 0.8620505707884348, |
|
"grad_norm": 0.069040946662426, |
|
"learning_rate": 1.3470258735662843e-05, |
|
"loss": 0.0064, |
|
"step": 3030 |
|
}, |
|
{ |
|
"epoch": 0.8648956221771755, |
|
"grad_norm": 0.05208707973361015, |
|
"learning_rate": 1.3514715035120475e-05, |
|
"loss": 0.006, |
|
"step": 3040 |
|
}, |
|
{ |
|
"epoch": 0.8677406735659163, |
|
"grad_norm": 0.058777816593647, |
|
"learning_rate": 1.355917133457811e-05, |
|
"loss": 0.0059, |
|
"step": 3050 |
|
}, |
|
{ |
|
"epoch": 0.870585724954657, |
|
"grad_norm": 0.0731450691819191, |
|
"learning_rate": 1.3603627634035742e-05, |
|
"loss": 0.0055, |
|
"step": 3060 |
|
}, |
|
{ |
|
"epoch": 0.8734307763433977, |
|
"grad_norm": 0.08497700840234756, |
|
"learning_rate": 1.3648083933493377e-05, |
|
"loss": 0.0056, |
|
"step": 3070 |
|
}, |
|
{ |
|
"epoch": 0.8762758277321384, |
|
"grad_norm": 0.06076115369796753, |
|
"learning_rate": 1.3692540232951009e-05, |
|
"loss": 0.0058, |
|
"step": 3080 |
|
}, |
|
{ |
|
"epoch": 0.8791208791208791, |
|
"grad_norm": 0.05804910510778427, |
|
"learning_rate": 1.3736996532408644e-05, |
|
"loss": 0.0057, |
|
"step": 3090 |
|
}, |
|
{ |
|
"epoch": 0.8819659305096198, |
|
"grad_norm": 0.06687209010124207, |
|
"learning_rate": 1.3781452831866276e-05, |
|
"loss": 0.0058, |
|
"step": 3100 |
|
}, |
|
{ |
|
"epoch": 0.8848109818983605, |
|
"grad_norm": 0.06274023652076721, |
|
"learning_rate": 1.3825909131323907e-05, |
|
"loss": 0.0062, |
|
"step": 3110 |
|
}, |
|
{ |
|
"epoch": 0.8876560332871013, |
|
"grad_norm": 0.06041685864329338, |
|
"learning_rate": 1.3870365430781542e-05, |
|
"loss": 0.0058, |
|
"step": 3120 |
|
}, |
|
{ |
|
"epoch": 0.890501084675842, |
|
"grad_norm": 0.07634738087654114, |
|
"learning_rate": 1.3914821730239174e-05, |
|
"loss": 0.0058, |
|
"step": 3130 |
|
}, |
|
{ |
|
"epoch": 0.8933461360645827, |
|
"grad_norm": 0.05441463738679886, |
|
"learning_rate": 1.395927802969681e-05, |
|
"loss": 0.0055, |
|
"step": 3140 |
|
}, |
|
{ |
|
"epoch": 0.8961911874533234, |
|
"grad_norm": 0.08034619688987732, |
|
"learning_rate": 1.4003734329154441e-05, |
|
"loss": 0.0056, |
|
"step": 3150 |
|
}, |
|
{ |
|
"epoch": 0.8990362388420641, |
|
"grad_norm": 0.05668621510267258, |
|
"learning_rate": 1.4048190628612076e-05, |
|
"loss": 0.0055, |
|
"step": 3160 |
|
}, |
|
{ |
|
"epoch": 0.9018812902308048, |
|
"grad_norm": 0.0647374764084816, |
|
"learning_rate": 1.4092646928069708e-05, |
|
"loss": 0.0054, |
|
"step": 3170 |
|
}, |
|
{ |
|
"epoch": 0.9047263416195455, |
|
"grad_norm": 0.06342489272356033, |
|
"learning_rate": 1.413710322752734e-05, |
|
"loss": 0.0059, |
|
"step": 3180 |
|
}, |
|
{ |
|
"epoch": 0.9075713930082863, |
|
"grad_norm": 0.05244365334510803, |
|
"learning_rate": 1.4181559526984975e-05, |
|
"loss": 0.0056, |
|
"step": 3190 |
|
}, |
|
{ |
|
"epoch": 0.910416444397027, |
|
"grad_norm": 0.06412120163440704, |
|
"learning_rate": 1.4226015826442606e-05, |
|
"loss": 0.0057, |
|
"step": 3200 |
|
}, |
|
{ |
|
"epoch": 0.9132614957857677, |
|
"grad_norm": 0.06194351613521576, |
|
"learning_rate": 1.4270472125900241e-05, |
|
"loss": 0.0055, |
|
"step": 3210 |
|
}, |
|
{ |
|
"epoch": 0.9161065471745083, |
|
"grad_norm": 0.06416637450456619, |
|
"learning_rate": 1.4314928425357873e-05, |
|
"loss": 0.0061, |
|
"step": 3220 |
|
}, |
|
{ |
|
"epoch": 0.918951598563249, |
|
"grad_norm": 0.05956665053963661, |
|
"learning_rate": 1.4359384724815508e-05, |
|
"loss": 0.0055, |
|
"step": 3230 |
|
}, |
|
{ |
|
"epoch": 0.9217966499519897, |
|
"grad_norm": 0.055366478860378265, |
|
"learning_rate": 1.440384102427314e-05, |
|
"loss": 0.0062, |
|
"step": 3240 |
|
}, |
|
{ |
|
"epoch": 0.9246417013407304, |
|
"grad_norm": 0.065581776201725, |
|
"learning_rate": 1.4448297323730772e-05, |
|
"loss": 0.006, |
|
"step": 3250 |
|
}, |
|
{ |
|
"epoch": 0.9246417013407304, |
|
"eval_loss": 0.050821732729673386, |
|
"eval_runtime": 9.0965, |
|
"eval_samples_per_second": 164.898, |
|
"eval_steps_per_second": 10.334, |
|
"eval_sts_dev_pearson_cosine": 0.7909511151327651, |
|
"eval_sts_dev_pearson_dot": 0.6053394742010075, |
|
"eval_sts_dev_pearson_euclidean": 0.7648214403760886, |
|
"eval_sts_dev_pearson_manhattan": 0.7646742286120747, |
|
"eval_sts_dev_pearson_max": 0.7909511151327651, |
|
"eval_sts_dev_spearman_cosine": 0.7988801832352788, |
|
"eval_sts_dev_spearman_dot": 0.5877204065924448, |
|
"eval_sts_dev_spearman_euclidean": 0.7657836560350672, |
|
"eval_sts_dev_spearman_manhattan": 0.7656908568548414, |
|
"eval_sts_dev_spearman_max": 0.7988801832352788, |
|
"step": 3250 |
|
}, |
|
{ |
|
"epoch": 0.9274867527294712, |
|
"grad_norm": 0.07194357365369797, |
|
"learning_rate": 1.4492753623188407e-05, |
|
"loss": 0.0058, |
|
"step": 3260 |
|
}, |
|
{ |
|
"epoch": 0.9303318041182119, |
|
"grad_norm": 0.07017621397972107, |
|
"learning_rate": 1.4537209922646039e-05, |
|
"loss": 0.0053, |
|
"step": 3270 |
|
}, |
|
{ |
|
"epoch": 0.9331768555069526, |
|
"grad_norm": 0.0723465159535408, |
|
"learning_rate": 1.4581666222103674e-05, |
|
"loss": 0.0064, |
|
"step": 3280 |
|
}, |
|
{ |
|
"epoch": 0.9360219068956933, |
|
"grad_norm": 0.07010278105735779, |
|
"learning_rate": 1.4626122521561305e-05, |
|
"loss": 0.006, |
|
"step": 3290 |
|
}, |
|
{ |
|
"epoch": 0.938866958284434, |
|
"grad_norm": 0.06073617562651634, |
|
"learning_rate": 1.467057882101894e-05, |
|
"loss": 0.0057, |
|
"step": 3300 |
|
}, |
|
{ |
|
"epoch": 0.9417120096731747, |
|
"grad_norm": 0.05822043493390083, |
|
"learning_rate": 1.4715035120476572e-05, |
|
"loss": 0.0059, |
|
"step": 3310 |
|
}, |
|
{ |
|
"epoch": 0.9445570610619154, |
|
"grad_norm": 0.05081350356340408, |
|
"learning_rate": 1.4759491419934204e-05, |
|
"loss": 0.0057, |
|
"step": 3320 |
|
}, |
|
{ |
|
"epoch": 0.9474021124506561, |
|
"grad_norm": 0.0619865357875824, |
|
"learning_rate": 1.4803947719391839e-05, |
|
"loss": 0.0056, |
|
"step": 3330 |
|
}, |
|
{ |
|
"epoch": 0.9502471638393969, |
|
"grad_norm": 0.06353365629911423, |
|
"learning_rate": 1.484840401884947e-05, |
|
"loss": 0.0056, |
|
"step": 3340 |
|
}, |
|
{ |
|
"epoch": 0.9530922152281376, |
|
"grad_norm": 0.07539238780736923, |
|
"learning_rate": 1.4892860318307106e-05, |
|
"loss": 0.0061, |
|
"step": 3350 |
|
}, |
|
{ |
|
"epoch": 0.9559372666168783, |
|
"grad_norm": 0.06994681805372238, |
|
"learning_rate": 1.4937316617764737e-05, |
|
"loss": 0.0053, |
|
"step": 3360 |
|
}, |
|
{ |
|
"epoch": 0.958782318005619, |
|
"grad_norm": 0.05879216268658638, |
|
"learning_rate": 1.498177291722237e-05, |
|
"loss": 0.0056, |
|
"step": 3370 |
|
}, |
|
{ |
|
"epoch": 0.9616273693943597, |
|
"grad_norm": 0.0628664419054985, |
|
"learning_rate": 1.5026229216680004e-05, |
|
"loss": 0.006, |
|
"step": 3380 |
|
}, |
|
{ |
|
"epoch": 0.9644724207831004, |
|
"grad_norm": 0.07300525158643723, |
|
"learning_rate": 1.5070685516137636e-05, |
|
"loss": 0.0066, |
|
"step": 3390 |
|
}, |
|
{ |
|
"epoch": 0.967317472171841, |
|
"grad_norm": 0.09258022159337997, |
|
"learning_rate": 1.5115141815595271e-05, |
|
"loss": 0.0062, |
|
"step": 3400 |
|
}, |
|
{ |
|
"epoch": 0.9701625235605819, |
|
"grad_norm": 0.05411943048238754, |
|
"learning_rate": 1.5159598115052903e-05, |
|
"loss": 0.0053, |
|
"step": 3410 |
|
}, |
|
{ |
|
"epoch": 0.9730075749493226, |
|
"grad_norm": 0.06785852462053299, |
|
"learning_rate": 1.5204054414510538e-05, |
|
"loss": 0.0062, |
|
"step": 3420 |
|
}, |
|
{ |
|
"epoch": 0.9758526263380632, |
|
"grad_norm": 0.04845158010721207, |
|
"learning_rate": 1.524851071396817e-05, |
|
"loss": 0.0057, |
|
"step": 3430 |
|
}, |
|
{ |
|
"epoch": 0.9786976777268039, |
|
"grad_norm": 0.07520108669996262, |
|
"learning_rate": 1.52929670134258e-05, |
|
"loss": 0.0059, |
|
"step": 3440 |
|
}, |
|
{ |
|
"epoch": 0.9815427291155446, |
|
"grad_norm": 0.0539853572845459, |
|
"learning_rate": 1.5337423312883436e-05, |
|
"loss": 0.0061, |
|
"step": 3450 |
|
}, |
|
{ |
|
"epoch": 0.9843877805042853, |
|
"grad_norm": 0.0653446614742279, |
|
"learning_rate": 1.5381879612341068e-05, |
|
"loss": 0.0057, |
|
"step": 3460 |
|
}, |
|
{ |
|
"epoch": 0.987232831893026, |
|
"grad_norm": 0.0627661645412445, |
|
"learning_rate": 1.5426335911798703e-05, |
|
"loss": 0.0054, |
|
"step": 3470 |
|
}, |
|
{ |
|
"epoch": 0.9900778832817668, |
|
"grad_norm": 0.05211208760738373, |
|
"learning_rate": 1.5470792211256335e-05, |
|
"loss": 0.0054, |
|
"step": 3480 |
|
}, |
|
{ |
|
"epoch": 0.9929229346705075, |
|
"grad_norm": 0.06061340123414993, |
|
"learning_rate": 1.551524851071397e-05, |
|
"loss": 0.0057, |
|
"step": 3490 |
|
}, |
|
{ |
|
"epoch": 0.9957679860592482, |
|
"grad_norm": 0.06133125722408295, |
|
"learning_rate": 1.5559704810171602e-05, |
|
"loss": 0.0056, |
|
"step": 3500 |
|
}, |
|
{ |
|
"epoch": 0.9957679860592482, |
|
"eval_loss": 0.04846322536468506, |
|
"eval_runtime": 9.4808, |
|
"eval_samples_per_second": 158.214, |
|
"eval_steps_per_second": 9.915, |
|
"eval_sts_dev_pearson_cosine": 0.7873863893875668, |
|
"eval_sts_dev_pearson_dot": 0.590451098760234, |
|
"eval_sts_dev_pearson_euclidean": 0.7578432355248655, |
|
"eval_sts_dev_pearson_manhattan": 0.7576106133017201, |
|
"eval_sts_dev_pearson_max": 0.7873863893875668, |
|
"eval_sts_dev_spearman_cosine": 0.7957546813665759, |
|
"eval_sts_dev_spearman_dot": 0.5781029293331612, |
|
"eval_sts_dev_spearman_euclidean": 0.7559256690552184, |
|
"eval_sts_dev_spearman_manhattan": 0.7557621348675576, |
|
"eval_sts_dev_spearman_max": 0.7957546813665759, |
|
"step": 3500 |
|
} |
|
], |
|
"logging_steps": 10, |
|
"max_steps": 7028, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 2, |
|
"save_steps": 3500, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": false |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 0.0, |
|
"train_batch_size": 16, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|