shuttie commited on
Commit
50af302
0 Parent(s):

initial commit

Browse files
Files changed (8) hide show
  1. .gitattributes +3 -0
  2. README.md +39 -0
  3. config.json +25 -0
  4. model.onnx +3 -0
  5. special_tokens_map.json +7 -0
  6. tokenizer.json +3 -0
  7. tokenizer_config.json +13 -0
  8. vocab.txt +3 -0
.gitattributes ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ *.onnx filter=lfs diff=lfs merge=lfs -text
2
+ vocab.txt filter=lfs diff=lfs merge=lfs -text
3
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ tags:
3
+ - sentence-transformers
4
+ - feature-extraction
5
+ - sentence-similarity
6
+ language: en
7
+ license: apache-2.0
8
+ datasets:
9
+ - s2orc
10
+ - flax-sentence-embeddings/stackexchange_xml
11
+ - ms_marco
12
+ - gooaq
13
+ - yahoo_answers_topics
14
+ - code_search_net
15
+ - search_qa
16
+ - eli5
17
+ - snli
18
+ - multi_nli
19
+ - wikihow
20
+ - natural_questions
21
+ - trivia_qa
22
+ - embedding-data/sentence-compression
23
+ - embedding-data/flickr30k-captions
24
+ - embedding-data/altlex
25
+ - embedding-data/simple-wiki
26
+ - embedding-data/QQP
27
+ - embedding-data/SPECTER
28
+ - embedding-data/PAQ_pairs
29
+ - embedding-data/WikiAnswers
30
+
31
+ ---
32
+
33
+ # ONNX version of intfloat/e5-base-v2
34
+
35
+ This is a sentence-transformers model: It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for tasks like clustering or semantic search.
36
+
37
+ ## License
38
+
39
+ Apache 2.0
config.json ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "intfloat/e5-base-v2",
3
+ "architectures": [
4
+ "BertModel"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "classifier_dropout": null,
8
+ "gradient_checkpointing": false,
9
+ "hidden_act": "gelu",
10
+ "hidden_dropout_prob": 0.1,
11
+ "hidden_size": 768,
12
+ "initializer_range": 0.02,
13
+ "intermediate_size": 3072,
14
+ "layer_norm_eps": 1e-12,
15
+ "max_position_embeddings": 512,
16
+ "model_type": "bert",
17
+ "num_attention_heads": 12,
18
+ "num_hidden_layers": 12,
19
+ "pad_token_id": 0,
20
+ "position_embedding_type": "absolute",
21
+ "transformers_version": "4.31.0",
22
+ "type_vocab_size": 2,
23
+ "use_cache": true,
24
+ "vocab_size": 30522
25
+ }
model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7c65bec2e3ae59c9f3ab86d4a9762c1a73677b5d7edbb41263cddb10b75a5dd5
3
+ size 435811539
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "cls_token": "[CLS]",
3
+ "mask_token": "[MASK]",
4
+ "pad_token": "[PAD]",
5
+ "sep_token": "[SEP]",
6
+ "unk_token": "[UNK]"
7
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d241a60d5e8f04cc1b2b3e9ef7a4921b27bf526d9f6050ab90f9267a1f9e5c66
3
+ size 711396
tokenizer_config.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "clean_up_tokenization_spaces": true,
3
+ "cls_token": "[CLS]",
4
+ "do_lower_case": true,
5
+ "mask_token": "[MASK]",
6
+ "model_max_length": 512,
7
+ "pad_token": "[PAD]",
8
+ "sep_token": "[SEP]",
9
+ "strip_accents": null,
10
+ "tokenize_chinese_chars": true,
11
+ "tokenizer_class": "BertTokenizer",
12
+ "unk_token": "[UNK]"
13
+ }
vocab.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:07eced375cec144d27c900241f3e339478dec958f92fddbc551f295c992038a3
3
+ size 231508