Update README.md
README.md CHANGED
@@ -20,8 +20,8 @@ tokenizer = PegasusTokenizer.from_pretrained(model_name)
 model = PegasusForConditionalGeneration.from_pretrained(model_name).to(torch_device)
 
 def get_response(input_text,num_return_sequences,num_beams):
-  batch = tokenizer([input_text],truncation=True,padding='longest',max_length=
-  translated = model.generate(**batch,max_length=
+  batch = tokenizer([input_text],truncation=True,padding='longest',max_length=2500, return_tensors="pt").to(torch_device)
+  translated = model.generate(**batch,max_length=2500,num_beams=num_beams, num_return_sequences=num_return_sequences, temperature=1.5)
   tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True)
   return tgt_text
 ```
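For context, here is a minimal, self-contained sketch of how the updated `get_response` (with the new `max_length=2500` and full `generate` arguments) would be called end to end. The checkpoint name and the example sentence below are placeholder assumptions, not taken from this diff; substitute whatever `model_name` the README actually defines.

```python
import torch
from transformers import PegasusForConditionalGeneration, PegasusTokenizer

# Placeholder checkpoint; the README defines its own model_name.
model_name = "google/pegasus-xsum"
torch_device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = PegasusTokenizer.from_pretrained(model_name)
model = PegasusForConditionalGeneration.from_pretrained(model_name).to(torch_device)

def get_response(input_text, num_return_sequences, num_beams):
    # Tokenize, generate with beam search, and decode, mirroring the updated README lines.
    batch = tokenizer([input_text], truncation=True, padding="longest",
                      max_length=2500, return_tensors="pt").to(torch_device)
    translated = model.generate(**batch, max_length=2500, num_beams=num_beams,
                                num_return_sequences=num_return_sequences, temperature=1.5)
    tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True)
    return tgt_text

# Example invocation (num_return_sequences must not exceed num_beams).
text = "The quick brown fox jumps over the lazy dog."
print(get_response(text, num_return_sequences=3, num_beams=10))
```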