Update app.py
Browse files
app.py
CHANGED
@@ -3,12 +3,7 @@ import gradio as gr
|
|
3 |
import os
|
4 |
import json
|
5 |
|
6 |
-
|
7 |
-
|
8 |
-
client = OpenAI(
|
9 |
-
base_url='https://api.opentyphoon.ai/v1',
|
10 |
-
api_key=api_key,
|
11 |
-
)
|
12 |
|
13 |
default_system_prompt = """\
|
14 |
You are an empathetic Thai woman assistant named แม่หมอแพตตี้. (Thai woman will say 'ค่ะ').
|
@@ -20,7 +15,12 @@ First, you need to know these insight ask each one separately.
|
|
20 |
If the statement is not clear and concise, you can ask multiple times.
|
21 |
And then, you will open one Tarot cards and explain the future of how to fix the problem."""
|
22 |
|
23 |
-
def predict(message, history, system_prompt, model_id, temperature):
|
|
|
|
|
|
|
|
|
|
|
24 |
history_openai_format = [{"role": "system", "content": system_prompt}]
|
25 |
for human, assistant in history[-3:]:
|
26 |
if isinstance(human, str) and human.strip():
|
@@ -43,8 +43,8 @@ def predict(message, history, system_prompt, model_id, temperature):
|
|
43 |
partial_message += chunk.choices[0].delta.content
|
44 |
yield partial_message
|
45 |
|
46 |
-
def chat_bot(user_input, history, system_prompt, model_id, temperature):
|
47 |
-
bot_response_generator = predict(user_input, history, system_prompt, model_id, temperature)
|
48 |
history.append((user_input, ""))
|
49 |
|
50 |
for bot_response in bot_response_generator:
|
@@ -71,7 +71,7 @@ CSS ="""
|
|
71 |
|
72 |
with gr.Blocks(css=CSS) as demo:
|
73 |
gr.HTML("""<h1><center>HoraCare 🫶</center></h1>
|
74 |
-
<center> Version
|
75 |
""")
|
76 |
|
77 |
with gr.Tab("Chat"):
|
@@ -99,22 +99,35 @@ with gr.Blocks(css=CSS) as demo:
|
|
99 |
show_label=True,
|
100 |
label="System Prompt",
|
101 |
lines=2
|
102 |
-
)
|
103 |
|
104 |
all_model_id = [
|
|
|
|
|
|
|
|
|
105 |
'typhoon-v1.5-instruct',
|
106 |
'typhoon-v1.5-instruct-fc',
|
107 |
'typhoon-v1.5x-70b-instruct',
|
108 |
]
|
109 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
110 |
temperature = gr.Slider(0, 1, value=0.5, label='temperature')
|
111 |
|
112 |
gr.Markdown("### Message Log")
|
113 |
msg_log = gr.Code(language='json', label='msg_log')
|
114 |
|
115 |
clear.click(lambda: [], [], chatbot)
|
116 |
-
msg.submit(chat_bot, [msg, chatbot, system_prompt, model_id, temperature], [msg, chatbot])
|
117 |
-
send.click(chat_bot, [msg, chatbot, system_prompt, model_id, temperature], [msg, chatbot])
|
118 |
setting_tab.select(get_log, [chatbot, system_prompt,], [msg_log])
|
119 |
|
120 |
demo.launch()
|
|
|
3 |
import os
|
4 |
import json
|
5 |
|
6 |
+
DEFAULT_API_KEY = os.getenv("DEFAULT_API_KEY")  # read from the environment; None if the variable is unset
|
|
|
|
|
|
|
|
|
|
|
7 |
|
8 |
default_system_prompt = """\
|
9 |
You are an empathetic Thai woman assistant named แม่หมอแพตตี้. (Thai woman will say 'ค่ะ').
|
|
|
15 |
If the statement is not clear and concise, you can ask multiple times.
|
16 |
And then, you will open one Tarot cards and explain the future of how to fix the problem."""
|
17 |
|
18 |
+
def predict(message, history, system_prompt, model_id, api_key, base_url, temperature):
|
19 |
+
client = OpenAI(
|
20 |
+
base_url=base_url,
|
21 |
+
api_key=api_key,
|
22 |
+
)
|
23 |
+
|
24 |
history_openai_format = [{"role": "system", "content": system_prompt}]
|
25 |
for human, assistant in history[-3:]:
|
26 |
if isinstance(human, str) and human.strip():
|
|
|
43 |
partial_message += chunk.choices[0].delta.content
|
44 |
yield partial_message
|
45 |
|
46 |
+
def chat_bot(user_input, history, system_prompt, model_id, api_key, base_url, temperature):
|
47 |
+
bot_response_generator = predict(user_input, history, system_prompt, model_id, api_key, base_url, temperature)
|
48 |
history.append((user_input, ""))
|
49 |
|
50 |
for bot_response in bot_response_generator:
|
|
|
71 |
|
72 |
with gr.Blocks(css=CSS) as demo:
|
73 |
gr.HTML("""<h1><center>HoraCare 🫶</center></h1>
|
74 |
+
<center> Version 3 </center>
|
75 |
""")
|
76 |
|
77 |
with gr.Tab("Chat"):
|
|
|
99 |
show_label=True,
|
100 |
label="System Prompt",
|
101 |
lines=2
|
102 |
+
)
|
103 |
|
104 |
all_model_id = [
|
105 |
+
'llama-3.1-8b-instant',
|
106 |
+
'llama-3.2-1b-preview',
|
107 |
+
'grok-2-1212',
|
108 |
+
'grok-beta',
|
109 |
'typhoon-v1.5-instruct',
|
110 |
'typhoon-v1.5-instruct-fc',
|
111 |
'typhoon-v1.5x-70b-instruct',
|
112 |
]
|
113 |
+
|
114 |
+
all_base_url = [
|
115 |
+
'https://api.groq.com/openai/v1',
|
116 |
+
'https://api.x.ai/v1',
|
117 |
+
'https://api.opentyphoon.ai/v1'
|
118 |
+
]
|
119 |
+
|
120 |
+
# Dropdown of selectable model IDs; label fixed — it previously said 'base_url'
# (the labels of the model_id and base_url dropdowns were swapped).
model_id = gr.Dropdown(all_model_id, value=all_model_id[0], allow_custom_value=True, label='model_id')
|
121 |
+
# Dropdown of selectable API base URLs; label fixed — it previously said 'model_id'
# (the labels of the base_url and model_id dropdowns were swapped).
base_url = gr.Dropdown(all_base_url, value=all_base_url[0], allow_custom_value=True, label='base_url')
|
122 |
+
api_key = gr.Textbox(DEFAULT_API_KEY, type='password', label='API_KEY')
|
123 |
temperature = gr.Slider(0, 1, value=0.5, label='temperature')
|
124 |
|
125 |
gr.Markdown("### Message Log")
|
126 |
msg_log = gr.Code(language='json', label='msg_log')
|
127 |
|
128 |
clear.click(lambda: [], [], chatbot)
|
129 |
+
msg.submit(chat_bot, [msg, chatbot, system_prompt, model_id, api_key, base_url, temperature], [msg, chatbot])
|
130 |
+
send.click(chat_bot, [msg, chatbot, system_prompt, model_id, api_key, base_url, temperature], [msg, chatbot])
|
131 |
setting_tab.select(get_log, [chatbot, system_prompt,], [msg_log])
|
132 |
|
133 |
demo.launch()
|