Update README.md
Browse files
README.md
CHANGED
@@ -17,7 +17,7 @@ These models don't train on the MIRACL training data.
|
|
17 |
| splade-japanese-efficient| 0.408 | 0.954 | 0.419 | 0.718 |
|
18 |
| splade-japanese-v2 | 0.580 | 0.967 | 0.629 | 0.844 |
|
19 |
| splade-japanese-v2-doc | 0.478 | 0.930 | 0.514 | 0.759 |
|
20 |
-
| splade-japanese-v3 | 0.604 | 0.979 | 0.647 | 0.877 |
|
21 |
|
22 |
|
23 |
*The 'splade-japanese-v2-doc' model does not require a query encoder during inference.
|
@@ -37,8 +37,8 @@ from transformers import AutoModelForMaskedLM,AutoTokenizer
|
|
37 |
import torch
|
38 |
import numpy as np
|
39 |
|
40 |
-
model = AutoModelForMaskedLM.from_pretrained("aken12/splade-japanese-v2")
|
41 |
-
tokenizer = AutoTokenizer.from_pretrained("aken12/splade-japanese-v2")
|
42 |
vocab_dict = {v: k for k, v in tokenizer.get_vocab().items()}
|
43 |
|
44 |
def encode_query(query):
|
|
|
17 |
| splade-japanese-efficient| 0.408 | 0.954 | 0.419 | 0.718 |
|
18 |
| splade-japanese-v2 | 0.580 | 0.967 | 0.629 | 0.844 |
|
19 |
| splade-japanese-v2-doc | 0.478 | 0.930 | 0.514 | 0.759 |
|
20 |
+
| splade-japanese-v3 | **0.604** | **0.979** | **0.647** | **0.877** |
|
21 |
|
22 |
|
23 |
*The 'splade-japanese-v2-doc' model does not require a query encoder during inference.
|
|
|
37 |
import torch
|
38 |
import numpy as np
|
39 |
|
40 |
+
model = AutoModelForMaskedLM.from_pretrained("aken12/splade-japanese-v3")
|
41 |
+
tokenizer = AutoTokenizer.from_pretrained("aken12/splade-japanese-v3")
|
42 |
vocab_dict = {v: k for k, v in tokenizer.get_vocab().items()}
|
43 |
|
44 |
def encode_query(query):
|