Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -29,7 +29,7 @@ def generate_story(prompt, style):
|
|
29 |
{"role": "user", "content": prompt}
|
30 |
]
|
31 |
|
32 |
-
completion = client.chat.completions.create(model="Qwen/Qwen2.5-Coder-32B-Instruct", messages=messages, temperature=0.7, max_tokens=
|
33 |
story = ""
|
34 |
for chunk in completion:
|
35 |
if chunk.choices:
|
@@ -53,7 +53,7 @@ def edit_story(original_story="", edited_prompt=""):
|
|
53 |
{"role": "user", "content": edited_prompt},
|
54 |
{"role": "assistant", "content": original_story}
|
55 |
]
|
56 |
-
completion = client.chat.completions.create(model="Qwen/Qwen2.5-Coder-32B-Instruct", messages=messages, temperature=0.7, max_tokens=
|
57 |
edited_story = ""
|
58 |
for chunk in completion:
|
59 |
if chunk.choices:
|
@@ -81,7 +81,7 @@ def next_story_func(original_story="", next_prompt="", continuation_type="Про
|
|
81 |
{"role": "user", "content": continuation_prompt},
|
82 |
{"role": "assistant", "content": original_story}
|
83 |
]
|
84 |
-
completion = client.chat.completions.create(model="Qwen/Qwen2.5-Coder-32B-Instruct", messages=messages, temperature=0.7, max_tokens=
|
85 |
next_story = ""
|
86 |
for chunk in completion:
|
87 |
if chunk.choices:
|
|
|
29 |
{"role": "user", "content": prompt}
|
30 |
]
|
31 |
|
32 |
+
completion = client.chat.completions.create(model="Qwen/Qwen2.5-Coder-32B-Instruct", messages=messages, temperature=0.7, max_tokens=1200, stream=True)
|
33 |
story = ""
|
34 |
for chunk in completion:
|
35 |
if chunk.choices:
|
|
|
53 |
{"role": "user", "content": edited_prompt},
|
54 |
{"role": "assistant", "content": original_story}
|
55 |
]
|
56 |
+
completion = client.chat.completions.create(model="Qwen/Qwen2.5-Coder-32B-Instruct", messages=messages, temperature=0.7, max_tokens=1200, stream=True)
|
57 |
edited_story = ""
|
58 |
for chunk in completion:
|
59 |
if chunk.choices:
|
|
|
81 |
{"role": "user", "content": continuation_prompt},
|
82 |
{"role": "assistant", "content": original_story}
|
83 |
]
|
84 |
+
completion = client.chat.completions.create(model="Qwen/Qwen2.5-Coder-32B-Instruct", messages=messages, temperature=0.7, max_tokens=1200, stream=True)
|
85 |
next_story = ""
|
86 |
for chunk in completion:
|
87 |
if chunk.choices:
|