Update app.py
app.py CHANGED
@@ -52,7 +52,7 @@ def process_audio(audio_data):
 # Define a function to disable the button and show a loading indicator
 def disable_components():
     # Update the contents of recognized_text to tell the user that processing is in progress
-    recognized_text_update = gr.update(value='
+    recognized_text_update = gr.update(value='Voice Recognization Running...')
     # Disable process_button
     process_button_update = gr.update(interactive=False)
     # Show the loading animation
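Note: the disable/enable pair follows the usual Gradio pattern of returning one gr.update(...) per output component from an event handler, presumably wired to process_button.click as the later hunks suggest. A minimal self-contained sketch of the same idea, with illustrative names (start_processing, run_button, working) rather than the ones in app.py:

import gradio as gr

def start_processing():
    # One gr.update per output component, in the same order as outputs= below:
    # grey out the button and reveal the "working" indicator.
    return gr.update(interactive=False), gr.update(visible=True)

with gr.Blocks() as demo:
    run_button = gr.Button("Process")
    working = gr.HTML("<em>Working...</em>", visible=False)

    run_button.click(start_processing, inputs=[], outputs=[run_button, working])

demo.launch()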
@@ -67,12 +67,14 @@ def enable_components(recognized_text):
     return recognized_text, process_button_update, loading_animation_update

 llama_responded = 0
+responded_answer = ""

 def respond(
     message,
     history: list[tuple[str, str]]
 ):
     global llama_responded
+    global responded_answer
     system_message = "You are a helpful chatbot that answers questions. Give any answer within 50 words."
     messages = [{"role": "system", "content": system_message}]

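Note: the two new module-level globals form a small handshake between the chat handler and the listeners wired up later in create_interface(): respond() publishes the finished reply, and the polling helpers consume and reset it. Stripped of the Gradio wiring, the pattern amounts to the following restatement (a sketch, not code from app.py). Because these are plain globals, the state is shared across all sessions of the Space, which is fine for a single-user demo but would cross-talk between concurrent users.

import time

llama_responded = 0    # flipped to 1 once the LLM reply has finished streaming
responded_answer = ""  # the finished reply text, consumed by the TTS step

def publish(reply: str):
    # What respond() does once streaming is complete.
    global llama_responded, responded_answer
    responded_answer = reply
    llama_responded = 1

def wait_and_consume() -> str:
    # What update_response_display() and tts_part() do between them:
    # busy-wait on the flag, then take the answer and reset both globals.
    global llama_responded, responded_answer
    while not llama_responded:
        time.sleep(1)
    reply = responded_answer
    responded_answer = ""
    llama_responded = 0
    return reply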
@@ -95,42 +97,52 @@ def respond(
         response += token

     llama_responded = 1
+    responded_answer = response
     return response

 def update_response_display():
     while not llama_responded:
         time.sleep(1)

-def
+def tts_part():
     global llama_responded
-
-
-
-
-
+    global responded_answer
+    result = ""
+    if responded_answer != "":
+        text = responded_answer
+
+        client = Client("tonyassi/voice-clone")
+        result = client.predict(
+            text,
+            audio=file('siri.wav'),
+            api_name="/predict"
+        )
+        llama_responded = 0
+        responded_answer = ""
+    return result

 def create_interface():
     with gr.Blocks() as demo:
-
+
         # Chat interface using the custom chatbot instance
         chatbot = gr.ChatInterface(
             title="Exodia AI Assistant",
+            fill_height=True,
             fn=respond,
             submit_btn="Start Chatting"
         )
-
         user_start = chatbot.textbox.submit(
             fn=update_response_display,
             inputs=[],
             outputs=[],
         )
-
-
-
-
-            outputs=chatbot.chatbot
+        user_click = chatbot.submit_btn.click(
+            fn=update_response_display,
+            inputs=[],
+            outputs=[],
         )

+
         # Audio input section
         with gr.Row():
             audio_input = gr.Audio(
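Note: the new tts_part() hands the finished chat reply to a second Space, tonyassi/voice-clone, through gradio_client; Client and file are the gradio_client helpers, whose import is assumed to sit at the top of app.py outside this hunk. A standalone sketch of the same call, assuming the Space is reachable and a local reference clip siri.wav exists:

# Standalone sketch of the call tts_part() makes (pip install gradio_client).
from gradio_client import Client, file

client = Client("tonyassi/voice-clone")      # connect to the hosted Space
result = client.predict(
    "Text to speak in the cloned voice",     # positional text argument
    audio=file("siri.wav"),                  # reference voice sample to clone
    api_name="/predict",
)
print(result)  # typically a path to the generated audio file

Whatever predict() returns is what tts_part() forwards to the gr.Audio output added in the next hunk.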
@@ -148,6 +160,10 @@ def create_interface():
                 value='<div style="text-align: center;"><span style="font-size: 18px;">ASR Model is running...</span></div>',
                 visible=False
             )
+
+            text_speaker = gr.Audio(
+                label="Generated Audio"
+            )

         # Associate audio processing function and update component states on click
         process_button.click(
@@ -163,12 +179,23 @@ def create_interface():
             inputs=[recognized_text],
             outputs=[recognized_text, process_button, loading_animation]
         )
+
+        user_start.then(
+            fn=tts_part,
+            inputs=[],
+            outputs=text_speaker
+        )
+
+        user_click.then(
+            fn=tts_part,
+            inputs=[],
+            outputs=text_speaker
+        )

     return demo



-
 if __name__ == "__main__":
     demo = create_interface()
     demo.launch()
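Note: the wiring at the end mirrors a common Blocks pattern: each listener (user_start from the textbox submit, user_click from the new submit-button click) returns an event object, and .then() queues tts_part to run once update_response_display has returned, writing its result into the text_speaker audio component. Registering both listeners covers either way the user sends a message. A minimal sketch of the chaining idea, with illustrative names:

import gradio as gr

def step_one():
    return "first step finished"

def step_two(prev):
    # Runs only after step_one has returned, because it is attached via .then().
    return f"second step saw: {prev}"

with gr.Blocks() as demo:
    go = gr.Button("Go")
    first = gr.Textbox(label="Step 1")
    second = gr.Textbox(label="Step 2")

    evt = go.click(step_one, inputs=[], outputs=first)
    evt.then(step_two, inputs=first, outputs=second)

demo.launch()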