UNIST-Eunchan committed
Commit f7c77c9
Parent(s): 8622207
Update app.py

app.py CHANGED
@@ -108,7 +108,7 @@ chunked_segments = chunking(_book)
 def generate_output(test_samples):
     inputs = tokenizer(
         test_samples,
-        padding=max_length,
+        padding='max_length',
         truncation=True,
         max_length=1024,
         return_tensors="pt",
@@ -119,10 +119,12 @@ def generate_output(test_samples):
     attention_mask = inputs.attention_mask
 
     outputs = model.generate(input_ids,
-                             max_length =
+                             max_length = max_length,
                              min_length=32,
-                             top_p =
-
+                             top_p = top_p,
+                             top_k = top_k,
+                             temperature= temperature,
+                             num_beams=2,
                              no_repeat_ngram_size=2,
                              attention_mask=attention_mask)
     output_str = tokenizer.batch_decode(outputs, skip_special_tokens=True)
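For context, a minimal sketch of how the updated generate_output function fits together. The checkpoint name, the default values, and the assumption that max_length, top_p, top_k and temperature are user-supplied generation settings (e.g. passed in from UI sliders elsewhere in app.py) are placeholders, not taken from this commit; only the tokenizer and generate arguments mirror the new version of the code.

# Sketch only: the checkpoint and default values below are assumptions, not from this commit.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Placeholder seq2seq checkpoint; app.py loads its own model and tokenizer.
tokenizer = AutoTokenizer.from_pretrained("google/pegasus-x-base")
model = AutoModelForSeq2SeqLM.from_pretrained("google/pegasus-x-base")

def generate_output(test_samples, max_length=256, top_p=0.95, top_k=50, temperature=1.0):
    # Pad every input to a fixed 1024 tokens; note padding takes the string
    # 'max_length', not the integer variable (the bug fixed in this commit).
    inputs = tokenizer(
        test_samples,
        padding="max_length",
        truncation=True,
        max_length=1024,
        return_tensors="pt",
    )
    input_ids = inputs.input_ids
    attention_mask = inputs.attention_mask

    # Beam search (num_beams=2) with the generation arguments added in the new version.
    outputs = model.generate(
        input_ids,
        max_length=max_length,
        min_length=32,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        num_beams=2,
        no_repeat_ngram_size=2,
        attention_mask=attention_mask,
    )
    output_str = tokenizer.batch_decode(outputs, skip_special_tokens=True)
    return outputs, output_str

One design note: top_p, top_k and temperature only affect generation when do_sample=True; with plain beam search as configured here (num_beams=2, do_sample left at its default of False), recent transformers releases warn that these sampling flags are ignored.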