Update app.py
app.py CHANGED
@@ -67,6 +67,26 @@ def enable_components(recognized_text):
     loading_animation_update = gr.update(visible=False)
     return recognized_text, process_button_update, loading_animation_update
 
+# Define a function to disable the button and display a loading indicator
+def disable_chatbot_components():
+    textbox = gr.update(interactive=False)
+    submit_btn = gr.update(interactive=False)
+    btn1 = gr.update(interactive=False)
+    btn2 = gr.update(interactive=False)
+    btn3 = gr.update(interactive=False)
+    btn4 = gr.update(interactive=False)
+    return textbox, submit_btn, btn1, btn2, btn3, btn4
+
+# Define a function to enable the button and hide the loading indicator
+def enable_chatbot_components():
+    textbox = gr.update(interactive=True)
+    submit_btn = gr.update(interactive=True)
+    btn1 = gr.update(interactive=True)
+    btn2 = gr.update(interactive=True)
+    btn3 = gr.update(interactive=True)
+    btn4 = gr.update(interactive=True)
+    return textbox, submit_btn, btn1, btn2, btn3, btn4
+
 llama_responded = 0
 responded_answer = ""
 
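The two new helpers return one gr.update(...) per output component, matched positionally to the outputs list of whichever event calls them. For reference only, a minimal sketch of an equivalent, shorter formulation (not part of this commit; the helper name is illustrative):

import gradio as gr

def set_chatbot_components(interactive: bool):
    # One update per wired output component, returned in the same order as `outputs`.
    return tuple(gr.update(interactive=interactive) for _ in range(6))

def disable_chatbot_components():
    return set_chatbot_components(False)

def enable_chatbot_components():
    return set_chatbot_components(True)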
@@ -76,30 +96,62 @@ def respond(
 ):
     global llama_responded
     global responded_answer
-
-
-
-
-
-
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
+    # Main Decision Module
+    decision_response = ""
+    judge_main_message = f"Here is a query: '{message}', Determine if this query is asking about one of the topics included in the list below. If it is, please directly provide only one name of the topic; otherwise, you reply 'no'. The list of topics is: [movie, music]"
+    m_message = [{"role": "user", "content": judge_main_message}]
+    for m in client.chat_completion(
+        m_message,
         stream=True,
     ):
-        token =
-
+        token = m.choices[0].delta.content
+        decision_response += token
+    print(decision_response)
+
+    if "movie" in decision_response:
+        movie_client = Client("ironserengety/movies-recommender")
+        result = movie_client.predict(
+            message=message,
+            system_message="You are a movie recommender named 'Exodia'. You are extremely reliable. You always mention your name in the beginning of conversation. You will provide me with answers from the given info. Give not more than 3 choices and make sure that answers are complete sentences.",
+            max_tokens=512,
+            temperature=0.7,
+            top_p=0.95,
+            api_name="/chat"
+        )
+        print(result)
+        llama_responded = 1
+        responded_answer = result
+        return result
+
+    #elif "music" in decision_response:
+
+    else:
+        #others
+        system_message = "You are a helpful chatbot that answers questions. Give any answer within 50 words."
+        messages = [{"role": "system", "content": system_message}]
+
+        for val in history:
+            print(val[0])
+            if val[0] != None:
+                if val[0]:
+                    messages.append({"role": "user", "content": val[0]})
+                if val[1]:
+                    messages.append({"role": "assistant", "content": val[1]})
+        messages.append({"role": "user", "content": message})
+
+        response = ""
+        print(messages)
 
-
-
-
+        for message in client.chat_completion(
+            messages,
+            stream=True,
+        ):
+            token = message.choices[0].delta.content
+            response += token
+
+        llama_responded = 1
+        responded_answer = response
+        return response
 
 def update_response_display():
     while not llama_responded:
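The new decision module streams a short classification answer from the chat model, then either delegates movie queries to the ironserengety/movies-recommender Space or falls back to a local chat completion. A minimal, self-contained sketch of that routing pattern (the model id is an assumption, not taken from this commit; the predict kwargs mirror the ones above, with the system message abbreviated):

from huggingface_hub import InferenceClient
from gradio_client import Client

# Assumption: any chat-capable Inference API model id works here.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

def route(message, history=()):
    judge = (
        f"Here is a query: '{message}'. If it is about one of [movie, music], "
        "reply with only that topic name; otherwise reply 'no'."
    )
    decision = ""
    for chunk in client.chat_completion([{"role": "user", "content": judge}], stream=True):
        decision += chunk.choices[0].delta.content or ""  # delta.content may be None on some chunks

    if "movie" in decision:
        # Delegate to the recommender Space, as in the diff.
        movie_client = Client("ironserengety/movies-recommender")
        return movie_client.predict(
            message=message,
            system_message="You are a movie recommender named 'Exodia'.",  # abbreviated
            max_tokens=512,
            temperature=0.7,
            top_p=0.95,
            api_name="/chat",
        )

    # Fallback: answer locally, replaying prior turns as context.
    messages = [{"role": "system", "content": "You are a helpful chatbot that answers questions. Give any answer within 50 words."}]
    for user_turn, bot_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if bot_turn:
            messages.append({"role": "assistant", "content": bot_turn})
    messages.append({"role": "user", "content": message})

    answer = ""
    for chunk in client.chat_completion(messages, stream=True):
        answer += chunk.choices[0].delta.content or ""
    return answer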
@@ -181,21 +233,36 @@ def create_interface():
     )
 
     user_start.then(
+        fn=disable_chatbot_components,
+        inputs=[],
+        outputs=[chatbot.submit_btn, chatbot.textbox, process_button, chatbot.retry_btn, chatbot.undo_btn, chatbot.clear_btn]
+    ).then(
         fn=tts_part,
         inputs=[],
         outputs=text_speaker
+    ).then(
+        fn=enable_chatbot_components,
+        inputs=[],
+        outputs=[chatbot.submit_btn, chatbot.textbox, process_button, chatbot.retry_btn, chatbot.undo_btn, chatbot.clear_btn]
     )
 
     user_click.then(
+        fn=disable_chatbot_components,
+        inputs=[],
+        outputs=[chatbot.submit_btn, chatbot.textbox, process_button, chatbot.retry_btn, chatbot.undo_btn, chatbot.clear_btn]
+    ).then(
         fn=tts_part,
         inputs=[],
         outputs=text_speaker
+    ).then(
+        fn=enable_chatbot_components,
+        inputs=[],
+        outputs=[chatbot.submit_btn, chatbot.textbox, process_button, chatbot.retry_btn, chatbot.undo_btn, chatbot.clear_btn]
     )
 
     return demo
 
 
-
 if __name__ == "__main__":
     demo = create_interface()
     demo.launch()
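In the last hunk the existing user_start and user_click events are extended into three-step chains: lock the chat components, run tts_part, then unlock them. A self-contained sketch of that chaining pattern (component names and the slow step are illustrative, not the app's real ones):

import time
import gradio as gr

def lock():
    return gr.update(interactive=False), gr.update(interactive=False)

def slow_step():
    time.sleep(2)  # stand-in for the tts_part call in the commit
    return "speaking..."

def unlock():
    return gr.update(interactive=True), gr.update(interactive=True)

with gr.Blocks() as demo:
    status = gr.Textbox(label="Status")
    box = gr.Textbox(label="Message")
    send = gr.Button("Send")

    # .then() steps run sequentially, so the inputs stay disabled while slow_step runs.
    send.click(fn=lock, inputs=[], outputs=[box, send]) \
        .then(fn=slow_step, inputs=[], outputs=status) \
        .then(fn=unlock, inputs=[], outputs=[box, send])

if __name__ == "__main__":
    demo.launch()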