Muennighoff committed
Commit: 7ce2897
1 parent: 3eabd75

Better model with bs=1024

README.md CHANGED
@@ -14,7 +14,7 @@ For usage instructions, refer to our codebase: https://github.com/Muennighoff/sg

  ## Evaluation Results

- For eval results, refer to our paper: https://arxiv.org/abs/2202.08904
+ For eval results, refer to the eval folder or our paper: https://arxiv.org/abs/2202.08904

  ## Training
  The model was trained with the parameters:
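The updated README points readers to the eval folder and the SGPT codebase for usage. As a minimal illustration only (the repo id below is inferred from the eval file names in this commit, not stated in the diff), the checkpoint can be loaded through sentence-transformers:

```python
# Sketch: load the sentence-transformers checkpoint and embed a couple of sentences.
# The repo id is an assumption based on the eval file names in this commit.
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("Muennighoff/SGPT-1.3B-weightedmean-nli-bitfit")
embeddings = model.encode(
    ["How do I mount an NTFS drive?", "Mounting NTFS partitions on Ubuntu"],
    convert_to_numpy=True,
)
print(embeddings.shape)  # (2, hidden_size), e.g. (2, 2048) for GPT-Neo 1.3B
```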
config.json CHANGED
@@ -67,7 +67,7 @@
    },
    "tokenizer_class": "GPT2Tokenizer",
    "torch_dtype": "float32",
-   "transformers_version": "4.11.3",
+   "transformers_version": "4.20.0.dev0",
    "use_cache": true,
    "vocab_size": 50257,
    "window_size": 256
config_sentence_transformers.json CHANGED
@@ -1,7 +1,7 @@
  {
    "__version__": {
      "sentence_transformers": "2.1.0",
-     "transformers": "4.11.3",
-     "pytorch": "1.10.1"
+     "transformers": "4.20.0.dev0",
+     "pytorch": "1.10.2"
    }
  }
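The version pins recorded at training time move from transformers 4.11.3 / pytorch 1.10.1 to 4.20.0.dev0 / 1.10.2. A small sketch (not part of the repo, assuming config_sentence_transformers.json is in the working directory) that warns when the locally installed libraries differ from the recorded ones:

```python
# Sketch: compare recorded training-time versions against the installed libraries.
import json

import sentence_transformers
import torch
import transformers

with open("config_sentence_transformers.json") as f:
    recorded = json.load(f)["__version__"]

installed = {
    "sentence_transformers": sentence_transformers.__version__,
    "transformers": transformers.__version__,
    "pytorch": torch.__version__,
}

for name, want in recorded.items():
    have = installed[name]
    if have != want:
        print(f"{name}: trained with {want}, installed {have}")
```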
eval/SGPT-1.3B-weightedmean-nli-bitfit_weightedmean_layer-1_results_average_precision.json ADDED
@@ -0,0 +1,7 @@
+ {
+     "askubuntu": 55.97,
+     "cqadupstack": 13.47,
+     "twitterpara": 73.06,
+     "scidocs": 72.77,
+     "avg": 53.817499999999995
+ }
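The "avg" field is simply the unweighted mean of the four task-level scores, which can be checked in one line:

```python
# Quick check: "avg" is the unweighted mean of the four task-level scores.
scores = {"askubuntu": 55.97, "cqadupstack": 13.47, "twitterpara": 73.06, "scidocs": 72.77}
print(sum(scores.values()) / len(scores))  # 53.817499999999995, matching the stored "avg"
```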
eval/SGPT-1.3B-weightedmean-nli-bitfit_weightedmean_layer-1_results_detailed.json ADDED
@@ -0,0 +1,66 @@
+ {
+     "askubuntu": {
+         "map_askubuntu_title": 55.97,
+         "p@1_askubuntu_title": 52.69,
+         "p@5_askubuntu_title": 41.94,
+         "mrr_askubuntu_title": 68.08
+     },
+     "cqadupstack": {
+         "map@100_cqadupstack_unix": 11.88,
+         "ndcg@10_cqadupstack_unix": 13.74,
+         "map@100_cqadupstack_gaming": 29.28,
+         "ndcg@10_cqadupstack_gaming": 31.75,
+         "map@100_cqadupstack_wordpress": 4.34,
+         "ndcg@10_cqadupstack_wordpress": 5.12,
+         "map@100_cqadupstack_stats": 14.68,
+         "ndcg@10_cqadupstack_stats": 16.12,
+         "map@100_cqadupstack_tex": 8.04,
+         "ndcg@10_cqadupstack_tex": 8.43,
+         "map@100_cqadupstack_english": 14.07,
+         "ndcg@10_cqadupstack_english": 15.59,
+         "map@100_cqadupstack_programmers": 10.78,
+         "ndcg@10_cqadupstack_programmers": 11.23,
+         "map@100_cqadupstack_mathematica": 10.74,
+         "ndcg@10_cqadupstack_mathematica": 12.57,
+         "map@100_cqadupstack_physics": 16.04,
+         "ndcg@10_cqadupstack_physics": 17.58,
+         "map@100_cqadupstack_gis": 14.81,
+         "ndcg@10_cqadupstack_gis": 16.19,
+         "map@100_cqadupstack_webmasters": 9.84,
+         "ndcg@10_cqadupstack_webmasters": 10.36,
+         "map@100_cqadupstack_android": 17.19,
+         "ndcg@10_cqadupstack_android": 19.08,
+         "map@100_cqadupstack_avg": 13.47,
+         "ndcg@10_cqadupstack_avg": 14.81
+     },
+     "twitterpara": {
+         "ap_twitter_twitterurl": 75.43,
+         "spearman_twitter_twitterurl": 70.6,
+         "ap_twitter_pit": 70.69,
+         "spearman_twitter_pit": 55.71,
+         "ap_twitter_avg": 73.06,
+         "spearman_twitter_avg": 63.15
+     },
+     "scidocs": {
+         "map_scidocs_cite_euclidean": 70.1,
+         "ndcg_scidocs_cite_euclidean": 85.17,
+         "map_scidocs_cite_cosine": 70.1,
+         "ndcg_scidocs_cite_cosine": 85.17,
+         "map_scidocs_cocite_euclidean": 72.87,
+         "ndcg_scidocs_cocite_euclidean": 86.72,
+         "map_scidocs_cocite_cosine": 72.87,
+         "ndcg_scidocs_cocite_cosine": 86.72,
+         "map_scidocs_coview_euclidean": 74.95,
+         "ndcg_scidocs_coview_euclidean": 87.03,
+         "map_scidocs_coview_cosine": 74.95,
+         "ndcg_scidocs_coview_cosine": 87.03,
+         "map_scidocs_coread_euclidean": 73.15,
+         "ndcg_scidocs_coread_euclidean": 86.15,
+         "map_scidocs_coread_cosine": 73.15,
+         "ndcg_scidocs_coread_cosine": 86.15,
+         "map_scidocs_euclidean_avg": 72.77,
+         "ndcg_scidocs_euclidean_avg": 86.27,
+         "map_scidocs_cosine_avg": 72.77,
+         "ndcg_scidocs_cosine_avg": 86.27
+     }
+ }
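The task-level scores in the average_precision file are the per-dataset averages from this detailed file; for example, the cqadupstack value 13.47 is the mean of the twelve per-forum MAP@100 entries. A sketch recomputing it (the path is the file name added in this commit):

```python
# Sketch: recompute the cqadupstack MAP@100 average from the detailed results file.
import json

path = "eval/SGPT-1.3B-weightedmean-nli-bitfit_weightedmean_layer-1_results_detailed.json"
with open(path) as f:
    detailed = json.load(f)

maps = [
    v for k, v in detailed["cqadupstack"].items()
    if k.startswith("map@100_") and not k.endswith("_avg")
]
print(round(sum(maps) / len(maps), 2))  # 13.47, matching map@100_cqadupstack_avg
```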
eval/quora.json ADDED
@@ -0,0 +1 @@
+ {"SGPT-1.3B-weightedmean-nli-bitfit": {"quora": {"NDCG@1": 0.7423, "NDCG@3": 0.78936, "NDCG@5": 0.80689, "NDCG@10": 0.8233, "NDCG@100": 0.84217, "NDCG@1000": 0.84504}}}
eval/similarity_evaluation_sts-dev_results.csv CHANGED
@@ -1,12 +1,12 @@
  epoch,steps,cosine_pearson,cosine_spearman,euclidean_pearson,euclidean_spearman,manhattan_pearson,manhattan_spearman,dot_pearson,dot_spearman
- 0,9394,0.8500051645470013,0.8600409992174228,0.8579350434999757,0.8600080490167477,0.8586118552167996,0.8617469203235228,0.7608058149670837,0.7604381511210038
- 0,18788,0.8562983277057296,0.864162204908486,0.8601312730843966,0.8625133301050996,0.8607488280811342,0.8639575286106531,0.7681104787314248,0.7692631002716936
- 0,28182,0.8596813841030053,0.8687182133907144,0.8650055343107589,0.8670291606236576,0.8650238007146754,0.8675525969279471,0.7675989378226494,0.7676807617837957
- 0,37576,0.8595723621060155,0.8675970222255094,0.861715020113807,0.8640127475099701,0.861838780152411,0.8644000217985848,0.7728457765100621,0.773343754333348
- 0,46970,0.856244430423873,0.8645709470622736,0.8597323280358731,0.8618667620432376,0.8600822588592132,0.8623420951114836,0.7626314124714687,0.7623843446349111
- 0,56364,0.8579760976747293,0.8660924375572862,0.8599734576636705,0.8622999064702692,0.859912580570416,0.8627634405979882,0.766185614913061,0.7654656190033244
- 0,65758,0.8563700376553489,0.8640453133687431,0.8560378589043434,0.8584665194831709,0.8563104181535777,0.8593896873800454,0.7665079970107405,0.7670426983834987
- 0,75152,0.8539522613570749,0.861797559148505,0.8540374976709845,0.8565108633292812,0.8544216492584971,0.8574800081262123,0.7586828062237492,0.7589404988419063
- 0,84546,0.8549441896255541,0.8623555737684722,0.855739468423625,0.8579737664740775,0.856250066298839,0.8588377457180055,0.7605760142582496,0.7611254932323566
- 0,93940,0.8558261413510759,0.8631677398093837,0.8554252531228967,0.8579164265768554,0.8556445631559064,0.8586445373996172,0.7619674147821671,0.7625495920799111
- 0,-1,0.8558252500080686,0.8631688981731724,0.8554246079578192,0.8578830653438679,0.8556419096769361,0.8586410249416773,0.7619578890991296,0.7625334230296067
+ 0,440,0.8486729726641167,0.8514738189278961,0.8515245319252214,0.8509027260070884,0.8540666757323956,0.8537290162137693,0.7630773568229123,0.7600358216618573
+ 0,880,0.860418177580361,0.8656263802961531,0.859274657391518,0.8610670106016408,0.8614574446998846,0.8635698806563004,0.775971737698288,0.7738330802938131
+ 0,1320,0.8646586808912774,0.8708047895637789,0.8600953037639533,0.8630166525165047,0.8623501857502746,0.8655507432332792,0.774750540444686,0.7725645527279972
+ 0,1760,0.8628892948335536,0.8689022188557769,0.8611001993825963,0.8627626879284295,0.8634677606209161,0.8653777840391851,0.7684411984607661,0.7660388115697093
+ 0,2200,0.8623264709023419,0.8684038857716583,0.8599140937391133,0.8623163529776595,0.8621011652259446,0.8648639001116789,0.7658429961100489,0.7630158219390278
+ 0,2640,0.8633934631804789,0.8700248299507874,0.8583066084846345,0.8610312177479946,0.8604516944303623,0.8632651971720137,0.7708747899267866,0.7695161449303083
+ 0,3080,0.864875512993908,0.8708072415227665,0.8570051556310841,0.8598815222390387,0.8592210627649711,0.8622960844035745,0.7675942152106912,0.765566539001796
+ 0,3520,0.8664358867199037,0.8717878560785026,0.8577414799031283,0.8608674339514554,0.8600366294063135,0.8633069107239323,0.7678182184536243,0.7655516315181986
+ 0,3960,0.8660615852729263,0.8715945872618516,0.8580011946328364,0.8608961297007961,0.8603219261222281,0.8635694785207915,0.7658642808961628,0.7634980873963996
+ 0,4400,0.8660032674381255,0.8715157046451364,0.8576564771305891,0.8606430352200829,0.8599938864592154,0.8633518022139872,0.7663838558727445,0.7645750276413869
+ 0,-1,0.865995009654422,0.8715109608696208,0.857644450885013,0.8606063092160902,0.8599858692389015,0.8633254320890273,0.7663788803033962,0.7645777465044731
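The dev CSV logs Pearson/Spearman for cosine, Euclidean, Manhattan, and dot-product similarity at each evaluation step. The new run evaluates every 440 steps up to roughly 4400 steps (versus every 9394 steps up to 93940 before), consistent with the much larger batch size named in the commit message. A sketch (assuming pandas and the file path from this commit) for picking the step with the best cosine Spearman:

```python
# Sketch: find the evaluation step with the highest cosine Spearman on the STS dev set.
import pandas as pd

df = pd.read_csv("eval/similarity_evaluation_sts-dev_results.csv")
best = df.loc[df["cosine_spearman"].idxmax()]
print(int(best["steps"]), best["cosine_spearman"])  # 3520, ~0.8718 for the new run
```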
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:680ca23f56ceb56df5a6f87bca667020fbc9d6b12bdfd456b4f49a1687833e67
- size 5363096833
+ oid sha256:9e05f11a38a0abb8e97717b109e2d346a74cf1244e419ac5659416d9874487c8
+ size 5363081601
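What is versioned here is a Git LFS pointer, not the weights themselves; a downloaded pytorch_model.bin should match the pointer's SHA-256 oid and byte size. A sketch (assuming the file has been fetched to the working directory) for verifying a local copy against this commit's pointer:

```python
# Sketch: verify a downloaded pytorch_model.bin against the Git LFS pointer values
# recorded in this commit (sha256 oid and size).
import hashlib
import os

expected_sha = "9e05f11a38a0abb8e97717b109e2d346a74cf1244e419ac5659416d9874487c8"
expected_size = 5363081601
path = "pytorch_model.bin"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_sha, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")
```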
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1 +1 @@
- {"unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "model_max_length": 2048, "special_tokens_map_file": null, "name_or_path": "EleutherAI/gpt-neo-1.3B", "errors": "replace", "tokenizer_class": "GPT2Tokenizer"}
+ {"unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "model_max_length": 2048, "special_tokens_map_file": null, "name_or_path": "EleutherAI/gpt-neo-1.3B", "errors": "replace", "pad_token": null, "add_bos_token": false, "tokenizer_class": "GPT2Tokenizer"}