update
Browse files
README.md
CHANGED
@@ -19,6 +19,7 @@ It can be used for:
|
|
19 |
## Usage
|
20 |
```python
|
21 |
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
|
|
|
22 |
|
23 |
model_name = 'doc2query/msmarco-german-mt5-base-v1'
|
24 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
@@ -37,7 +38,7 @@ def create_queries(para):
|
|
37 |
do_sample=True,
|
38 |
top_p=0.95,
|
39 |
top_k=10,
|
40 |
- num_return_sequences=
|
41 |
)
|
42 |
|
43 |
# Here we use Beam-search. It generates better quality queries, but with less diversity
|
@@ -54,12 +55,12 @@ def create_queries(para):
|
|
54 |
print("Paragraph:")
|
55 |
print(para)
|
56 |
|
57 |
- print("\
|
58 |
for i in range(len(beam_outputs)):
|
59 |
query = tokenizer.decode(beam_outputs[i], skip_special_tokens=True)
|
60 |
print(f'{i + 1}: {query}')
|
61 |
|
62 |
- print("\
|
63 |
for i in range(len(sampling_outputs)):
|
64 |
query = tokenizer.decode(sampling_outputs[i], skip_special_tokens=True)
|
65 |
print(f'{i + 1}: {query}')
|
|
|
19 |
## Usage
|
20 |
```python
|
21 |
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
|
22 |
+ import torch
|
23 |
|
24 |
model_name = 'doc2query/msmarco-german-mt5-base-v1'
|
25 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
|
|
38 |
do_sample=True,
|
39 |
top_p=0.95,
|
40 |
top_k=10,
|
41 |
+ num_return_sequences=5
|
42 |
)
|
43 |
|
44 |
# Here we use Beam-search. It generates better quality queries, but with less diversity
|
|
|
55 |
print("Paragraph:")
|
56 |
print(para)
|
57 |
|
58 |
+ print("\nBeam Outputs:")
|
59 |
for i in range(len(beam_outputs)):
|
60 |
query = tokenizer.decode(beam_outputs[i], skip_special_tokens=True)
|
61 |
print(f'{i + 1}: {query}')
|
62 |
|
63 |
+ print("\nSampling Outputs:")
|
64 |
for i in range(len(sampling_outputs)):
|
65 |
query = tokenizer.decode(sampling_outputs[i], skip_special_tokens=True)
|
66 |
print(f'{i + 1}: {query}')
|