Update app.py
app.py CHANGED
@@ -111,26 +111,9 @@ def bot(history):
 
 def create_interface():
     with gr.Blocks() as demo:
+
         # Title
         gr.Markdown("# Exodia AI Assistant")
-
-        # Audio input section
-        with gr.Row():
-            audio_input = gr.Audio(
-                sources="microphone",
-                type="numpy", # Get audio data and sample rate
-                label="Say Something..."
-            )
-            recognized_text = gr.Textbox(label="Recognized Text",interactive=False)
-
-        # Process audio button
-        process_button = gr.Button("Process Audio")
-
-        # Loading animation
-        loading_animation = gr.HTML(
-            value='<div style="text-align: center;"><span style="font-size: 18px;">ASR Model is running...</span></div>',
-            visible=False
-        )
 
         # Chat interface using the custom chatbot instance
         chatbot = gr.ChatInterface(
@@ -149,6 +132,24 @@ def create_interface():
             outputs=chatbot.chatbot
         )
 
+        # Audio input section
+        with gr.Row():
+            audio_input = gr.Audio(
+                sources="microphone",
+                type="numpy", # Get audio data and sample rate
+                label="Say Something..."
+            )
+            recognized_text = gr.Textbox(label="Recognized Text",interactive=False)
+
+        # Process audio button
+        process_button = gr.Button("Process Audio")
+
+        # Loading animation
+        loading_animation = gr.HTML(
+            value='<div style="text-align: center;"><span style="font-size: 18px;">ASR Model is running...</span></div>',
+            visible=False
+        )
+
         # Associate audio processing function and update component states on click
         process_button.click(
             fn=disable_components,
@@ -163,10 +164,6 @@ def create_interface():
             inputs=[recognized_text],
             outputs=[recognized_text, process_button, loading_animation]
         )
-
-        # Layout includes Chatbot
-        with gr.Row():
-            chatbot_output = chatbot
 
     return demo
 
@@ -176,8 +173,3 @@ def create_interface():
 if __name__ == "__main__":
     demo = create_interface()
     demo.launch()
-
-
-if __name__ == "__main__":
-    demo = create_interface()
-    demo.launch()
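For context, the reordered code follows the usual gr.Blocks pattern: the components (audio_input, recognized_text, process_button, loading_animation) are created inside the with gr.Blocks() context and only afterwards wired together through process_button.click(...); the commit also drops the leftover chatbot_output row and the duplicated __main__ block at the end of the file. Below is a minimal, self-contained sketch of that definition-then-wiring pattern; fake_transcribe is a hypothetical stand-in for the ASR handling and disable_components logic defined earlier in app.py, which this diff does not show.

import gradio as gr

def fake_transcribe(audio):
    # Hypothetical placeholder for the real ASR pipeline in app.py (not part of this diff).
    # With type="numpy", Gradio passes (sample_rate, numpy_array), or None if nothing was recorded.
    if audio is None:
        return "No audio captured."
    sample_rate, data = audio
    return f"Received {data.shape[0] / sample_rate:.1f}s of audio at {sample_rate} Hz."

with gr.Blocks() as demo:
    gr.Markdown("# Exodia AI Assistant")

    # Components are created first...
    with gr.Row():
        audio_input = gr.Audio(sources="microphone", type="numpy", label="Say Something...")
        recognized_text = gr.Textbox(label="Recognized Text", interactive=False)
    process_button = gr.Button("Process Audio")

    # ...and wired up afterwards, so every object the event handler references already exists.
    process_button.click(fn=fake_transcribe, inputs=[audio_input], outputs=[recognized_text])

if __name__ == "__main__":
    demo.launch()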