Spaces:
Running
Running
acecalisto3
committed on
Commit
•
cee75e9
1
Parent(s):
268bf25
Update app.py
Browse files
app.py
CHANGED
@@ -37,7 +37,7 @@ def run_gpt(
|
|
37 |
purpose,
|
38 |
**prompt_kwargs,
|
39 |
):
|
40 |
-
seed = random.randint(1,1111111111111111)
|
41 |
print(seed)
|
42 |
generate_kwargs = dict(
|
43 |
temperature=1.0,
|
@@ -53,11 +53,12 @@ def run_gpt(
|
|
53 |
purpose=purpose,
|
54 |
safe_search=safe_search,
|
55 |
) + prompt_template.format(**prompt_kwargs)
|
|
|
56 |
if True:
|
57 |
print(LOG_PROMPT.format(content))
|
58 |
|
59 |
-
model = pipeline('text-generation', model='microsoft/DialoGPT-small')
|
60 |
-
response = model(content, max_length=max_tokens, temperature=1.0)
|
61 |
resp = response[0]['generated_text']
|
62 |
|
63 |
if True:
|
|
|
37 |
purpose,
|
38 |
**prompt_kwargs,
|
39 |
):
|
40 |
+
seed = random.randint(1, 1111111111111111)
|
41 |
print(seed)
|
42 |
generate_kwargs = dict(
|
43 |
temperature=1.0,
|
|
|
53 |
purpose=purpose,
|
54 |
safe_search=safe_search,
|
55 |
) + prompt_template.format(**prompt_kwargs)
|
56 |
+
|
57 |
if True:
|
58 |
print(LOG_PROMPT.format(content))
|
59 |
|
60 |
+
model = pipeline('text-generation', model='microsoft/DialoGPT-small', pad_token_id=model.config.eos_token_id) # Set pad_token_id
|
61 |
+
response = model(content, max_length=max_tokens, temperature=1.0, truncation=True) # Explicitly set truncation
|
62 |
resp = response[0]['generated_text']
|
63 |
|
64 |
if True:
|