Update app.py

app.py
CHANGED
@@ -1,25 +1,31 @@
 from huggingface_hub import InferenceClient
 import gradio as gr
+
+
+# Import required libraries
 from pathlib import Path
 
 # Initialize the InferenceClient
-client = InferenceClient(
-    "mistralai/Mixtral-8x7B-Instruct-v0.1"
-)
+client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 
-def format_prompt(message, history
+def format_prompt(message, history):
+    """
+    Format the prompt by appending the user's message and the system prompt to the conversation history.
+    """
     prompt = "<s>"
     for user_prompt, bot_response in history:
         prompt += f"[INST] {user_prompt} [/INST]"
         prompt += f" {bot_response}</s> "
     prompt += f"[INST] {message} [/INST]"
-    # Add the system prompt only once at the beginning
     prompt = system_prompt + prompt
     return prompt
 
 def generate(
     prompt, history, user_system_prompt, temperature=0.9, max_new_tokens=9048, top_p=0.95, repetition_penalty=1.0
 ):
+    """
+    Generate a response using the InferenceClient.
+    """
     system_prompt = "Instructions: Everything else is from the user. You are Milo, an AI assistant created by ConvoLite in 2024 (he/him). Be friendly and empathetic, matching the user's tone and vibe. Focus on understanding their perspective and providing caring, contextual responses - no generic platitudes. Keep it conversational, not overly formal. Be friendly and funny, use ASCII emojis or normal ones. Be lovely, kind, and have a great sense of humour. Have Bible based values. Do not talk about yourself unless asked. Never respond to a message with 'as an AI' or something similar. Don't talk a lot!" + str(user_system_prompt)
 
     temperature = float(temperature)
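Note: even after this hunk, format_prompt still reads system_prompt, which is never defined in its own scope, and generate goes on to call it with three arguments while the new signature accepts two. A minimal sketch of a consistent signature, assuming the intent is to prepend the system prompt once (hypothetical, not part of this commit):

# Hypothetical fix, not in this commit: take the system prompt as a
# parameter so format_prompt no longer relies on an undefined name.
def format_prompt(message, history, system_prompt=""):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    # Prepend the system prompt once, matching the three-argument
    # call site in generate()
    return system_prompt + prompt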
@@ -27,6 +33,8 @@ def generate(
         temperature = 1e-2
 
     top_p = float(top_p)
+
+    # Set generate_kwargs for text generation
    generate_kwargs = dict(
         temperature=temperature,
         max_new_tokens=max_new_tokens,
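Note: the if condition guarding temperature = 1e-2 falls outside this hunk's context window. A standalone sketch of the clamping it implies (the exact condition is an assumption based on the floor value shown):

# Assumed reconstruction of the clamp around old line 27 / new line 33;
# the guarding condition itself is outside the diff context.
def clamp_temperature(temperature):
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2  # keep temperature strictly positive for sampling
    return temperature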
@@ -38,14 +46,15 @@ def generate(
 
     formatted_prompt = format_prompt(f"{prompt}", history, system_prompt)
 
+    # Generate text stream
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
 
-
+    output = ""
     for response in stream:
-
-        yield
-        yield output
+        output += response.token.text
+        yield output
 
+# Define additional input components
 additional_inputs = [
     gr.Textbox(label="System Prompt", max_lines=1, interactive=True),
     gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs"),
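Note: this hunk is the substantive fix. The old loop yielded an undefined output (and a bare yield), so streaming was broken; the new code accumulates each token's text and yields the growing string, which is the shape gr.ChatInterface expects from a streaming generator. A self-contained sketch of the same pattern (the prompt and token limit are placeholder values):

from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

def stream_reply(prompt):
    # stream=True with details=True yields one response per generated
    # token; each response carries the token text.
    stream = client.text_generation(
        prompt, max_new_tokens=64, stream=True, details=True, return_full_text=False
    )
    output = ""
    for response in stream:
        output += response.token.text
        yield output  # yield the cumulative text so the UI redraws the full message

for partial in stream_reply("[INST] Hello! [/INST]"):
    print(partial)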
@@ -54,8 +63,10 @@ additional_inputs = [
     gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
 ]
 
+# Define avatar images
 avatar_images = ("https://i.postimg.cc/pXjKKVXG/user-circle.png", "https://i.postimg.cc/qq04Yz93/CL3.png")
 
+# Create Gradio interface
 gr.ChatInterface(
     fn=generate,
     chatbot=gr.Chatbot(show_label=True, show_share_button=False, show_copy_button=True, likeable=True, layout="panel", height="auto", avatar_images=avatar_images),
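Note: the diff cuts off inside the gr.ChatInterface(...) call, so the remaining arguments are not shown. A sketch of how the wiring typically completes, given that additional_inputs is defined above (the trailing arguments are an assumption, not the commit's actual code):

# Hypothetical completion of the truncated call; the real trailing
# arguments are not visible in this diff.
gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(show_label=True, show_share_button=False, show_copy_button=True,
                       likeable=True, layout="panel", height="auto", avatar_images=avatar_images),
    additional_inputs=additional_inputs,
).launch()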