Spaces:
Runtime error
Runtime error
vincentmin
committed on
Commit
•
437467e
1
Parent(s):
d100767
Update app.py
Browse files
app.py
CHANGED
@@ -14,6 +14,8 @@ INTRO = """**Chat with Yoda, Albert Einstein, Elon Musk or Kanye West!**
|
|
14 |
|
15 |
👀 **Learn more about Starchat LLM:** [starchat-alpha](https://huggingface.co/blog/starchat-alpha)
|
16 |
|
|
|
|
|
17 |
➡️️ **Intended Use**: this demo is intended to be a fun showcase of what one can do with HuggingFace Inference API and recent chat models.
|
18 |
|
19 |
⚠️ **Limitations**: the model can and will produce factually incorrect information, hallucinating facts and actions. As it has not undergone any advanced tuning/alignment, it can produce problematic outputs, especially if prompted to do so. Finally, this demo is limited to a session length of about 1,000 words.
|
@@ -32,7 +34,7 @@ INSTRUCTIONS_MAPPING = {
|
|
32 |
"Kanye West": "The following is a conversation between rapper Kanye West, and a human user, called User. In the following interactions, User and Kanye West will converse in natural language, and Kanye West will answer User's questions. Kanye West is self-centered, arrogant, a self-proclaimed genius and a great musician. Kanye West interrupted an award ceremony for Taylor Swift and ran for president of the united states. The conversation begins.\n",
|
33 |
}
|
34 |
RETRY_COMMAND = "/retry"
|
35 |
-
|
36 |
|
37 |
def run_model(prompt, model, temperature, top_p):
|
38 |
api_url = f"https://api-inference.huggingface.co/models/{model}"
|
@@ -131,10 +133,10 @@ def chat():
|
|
131 |
|
132 |
gr.Examples(
|
133 |
[
|
|
|
134 |
["Hi Yoda! How do I learn the force?"],
|
135 |
["Hi Elon! Give me an idea for a new startup."],
|
136 |
["Hi Kanye! What will be the theme of your next album?"],
|
137 |
-
["Hi Albert! Why did the apple fall from the tree?"],
|
138 |
],
|
139 |
inputs=inputs,
|
140 |
label="Click on any example and press Enter in the input textbox!",
|
@@ -157,8 +159,11 @@ def chat():
|
|
157 |
temperature=temperature,
|
158 |
top_p=top_p,
|
159 |
)
|
160 |
-
|
|
|
161 |
chat_history = chat_history + [[message, model_output]]
|
|
|
|
|
162 |
yield chat_history
|
163 |
return
|
164 |
|
|
|
14 |
|
15 |
👀 **Learn more about Starchat LLM:** [starchat-alpha](https://huggingface.co/blog/starchat-alpha)
|
16 |
|
17 |
+
👀 **Banner images were created with [stable diffusion web](https://stablediffusionweb.com/).**
|
18 |
+
|
19 |
➡️️ **Intended Use**: this demo is intended to be a fun showcase of what one can do with HuggingFace Inference API and recent chat models.
|
20 |
|
21 |
⚠️ **Limitations**: the model can and will produce factually incorrect information, hallucinating facts and actions. As it has not undergone any advanced tuning/alignment, it can produce problematic outputs, especially if prompted to do so. Finally, this demo is limited to a session length of about 1,000 words.
|
|
|
34 |
"Kanye West": "The following is a conversation between rapper Kanye West, and a human user, called User. In the following interactions, User and Kanye West will converse in natural language, and Kanye West will answer User's questions. Kanye West is self-centered, arrogant, a self-proclaimed genius and a great musician. Kanye West interrupted an award ceremony for Taylor Swift and ran for president of the united states. The conversation begins.\n",
|
35 |
}
|
36 |
RETRY_COMMAND = "/retry"
|
37 |
+
STOP_SEQ = [f"\n{USER_NAME}", "<|end|>"]
|
38 |
|
39 |
def run_model(prompt, model, temperature, top_p):
|
40 |
api_url = f"https://api-inference.huggingface.co/models/{model}"
|
|
|
133 |
|
134 |
gr.Examples(
|
135 |
[
|
136 |
+
["Hi Albert! Why did the apple fall from the tree?"],
|
137 |
["Hi Yoda! How do I learn the force?"],
|
138 |
["Hi Elon! Give me an idea for a new startup."],
|
139 |
["Hi Kanye! What will be the theme of your next album?"],
|
|
|
140 |
],
|
141 |
inputs=inputs,
|
142 |
label="Click on any example and press Enter in the input textbox!",
|
|
|
159 |
temperature=temperature,
|
160 |
top_p=top_p,
|
161 |
)
|
162 |
+
for stop in STOP_SEQ:
|
163 |
+
model_output = model_output[len(prompt):].split(stop)[0]
|
164 |
chat_history = chat_history + [[message, model_output]]
|
165 |
+
print(f"User: {message}")
|
166 |
+
print(f"{bot_name}: {model_output}")
|
167 |
yield chat_history
|
168 |
return
|
169 |
|