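"""Exodia AI Assistant: a voice-enabled chatbot Space.

Pipeline (as implemented below): microphone audio is transcribed with Whisper
via the HF Inference API, the text is answered by Meta-Llama-3-8B-Instruct
(movie questions are routed to the 'ironserengety/movies-recommender' Space),
and the reply is synthesized to speech with the 'tonyassi/voice-clone' Space.
"""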
import gradio as gr
import numpy as np
from huggingface_hub import InferenceClient
import os
import requests
import scipy.io.wavfile
import io
import time
from gradio_client import Client, file

client = InferenceClient(
    "meta-llama/Meta-Llama-3-8B-Instruct",
    token=os.getenv('hf_token')
)
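# NOTE: 'hf_token' must be provided as a Space secret / environment variable;
# it authenticates both the Llama chat calls above and the Whisper ASR request below.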


def process_audio(audio_data):
    if audio_data is None:
        return "No audio provided.", ""

    # Check if audio_data is a tuple and extract data
    if isinstance(audio_data, tuple):
        sample_rate, data = audio_data
    else:
        return "Invalid audio data format.", ""

    # Convert the audio data to WAV format in memory
    buf = io.BytesIO()
    scipy.io.wavfile.write(buf, sample_rate, data)
    wav_bytes = buf.getvalue()
    buf.close()

    API_URL = "https://api-inference.huggingface.co/models/openai/whisper-large-v2"
    headers = {"Authorization": f"Bearer {os.getenv('hf_token')}"}

    def query(wav_data):
        response = requests.post(API_URL, headers=headers, data=wav_data)
        return response.json()

    # Call the API to process the audio
    output = query(wav_bytes)
    print(output)  # Check output in console (logs in HF Space)

    # Check the API response
    if 'text' in output:
        recognized_text = output['text']
        return recognized_text, recognized_text
    else:
        recognized_text = "The ASR model is still loading, please press the button again!"
        return recognized_text, ""


# Disable the process button and show the loading indicator while ASR runs
def disable_components():
    # Update recognized_text content, indicating that processing is ongoing
    recognized_text_update = gr.update(value='Voice Recognition Running...')
    # Disable process_button
    process_button_update = gr.update(interactive=False)
    # Display loading animation
    loading_animation_update = gr.update(visible=True)
    return recognized_text_update, process_button_update, loading_animation_update


# Re-enable the process button and hide the loading indicator
def enable_components(recognized_text):
    process_button_update = gr.update(interactive=True)
    # Hide loading animation
    loading_animation_update = gr.update(visible=False)
    return recognized_text, process_button_update, loading_animation_update


# Disable the chatbot input controls while a response is being generated
def disable_chatbot_components():
    textbox = gr.update(interactive=False)
    submit_btn = gr.update(interactive=False)
    btn1 = gr.update(interactive=False)
    btn2 = gr.update(interactive=False)
    btn3 = gr.update(interactive=False)
    btn4 = gr.update(interactive=False)
    return textbox, submit_btn, btn1, btn2, btn3, btn4


# Re-enable the chatbot input controls once the spoken reply is ready
def enable_chatbot_components():
    textbox = gr.update(interactive=True)
    submit_btn = gr.update(interactive=True)
    btn1 = gr.update(interactive=True)
    btn2 = gr.update(interactive=True)
    btn3 = gr.update(interactive=True)
    btn4 = gr.update(interactive=True)
    return textbox, submit_btn, btn1, btn2, btn3, btn4


# Shared state between respond() and the TTS step (see tts_part below)
llama_responded = 0
responded_answer = ""


def respond(
    message,
    history: list[tuple[str, str]],
):
    global llama_responded
    global responded_answer

    # Main decision module: ask Llama whether the query matches a known topic
    decision_response = ""
    judge_main_message = f"Here is a query: '{message}'. Determine whether this query is asking about one of the topics in the list below. If it is, reply with only the name of that topic; otherwise, reply 'no'. The list of topics is: [movie, music]"
    m_message = [{"role": "user", "content": judge_main_message}]
    for m in client.chat_completion(
        m_message,
        stream=True,
    ):
        token = m.choices[0].delta.content
        decision_response += token
    print(decision_response)

    if "movie" in decision_response:
        # Route movie questions to the dedicated recommender Space
        movie_client = Client("ironserengety/movies-recommender")
        result = movie_client.predict(
            message=message,
            system_message="You are a movie recommender named 'Exodia'. You are extremely reliable. You always mention your name at the beginning of the conversation. You will provide me with answers from the given info. Give no more than 3 choices and make sure that the answers are complete sentences.",
            max_tokens=512,
            temperature=0.7,
            top_p=0.95,
            api_name="/chat"
        )
        print(result)
        llama_responded = 1
        responded_answer = result
        return result
    # elif "music" in decision_response:
    else:
        # Other topics: fall back to the general-purpose Llama chatbot
        system_message = "You are a helpful chatbot that answers questions. Give any answer within 50 words."
        messages = [{"role": "system", "content": system_message}]
        for val in history:
            print(val[0])
            if val[0] is not None:
                if val[0]:
                    messages.append({"role": "user", "content": val[0]})
                if val[1]:
                    messages.append({"role": "assistant", "content": val[1]})
        messages.append({"role": "user", "content": message})

        response = ""
        print(messages)
        # Use a distinct loop variable so the user's `message` is not shadowed
        for chunk in client.chat_completion(
            messages,
            stream=True,
        ):
            token = chunk.choices[0].delta.content
            response += token
        llama_responded = 1
        responded_answer = response
        return response


def update_response_display():
    # Block until respond() has finished, so the TTS step runs on the final answer
    while not llama_responded:
        time.sleep(1)
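

# The handshake between the chat and TTS pipelines works through the two
# globals defined above: respond() sets llama_responded/responded_answer when
# a reply is ready, update_response_display() polls until that happens, and
# tts_part() consumes the answer and resets the flags. This is a simple
# busy-wait, not a thread-safe queue, so it assumes one interaction at a time.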
def tts_part():
    global llama_responded
    global responded_answer
    result = ""
    if responded_answer != "":
        text = responded_answer
        # Use a distinct name so the module-level InferenceClient is not shadowed
        tts_client = Client("tonyassi/voice-clone")
        result = tts_client.predict(
            text,
            audio=file('siri.wav'),
            api_name="/predict"
        )
    llama_responded = 0
    responded_answer = ""
    return result
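

# UI layout: a ChatInterface for text chat plus a microphone/ASR row. The
# .then() chains below disable the relevant controls while each stage runs
# and re-enable them when it finishes.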
def create_interface():
    with gr.Blocks() as demo:
        # Chat interface using the custom chatbot instance
        chatbot = gr.ChatInterface(
            title="Exodia AI Assistant",
            fill_height=True,
            fn=respond,
            submit_btn="Start Chatting"
        )
        user_start = chatbot.textbox.submit(
            fn=update_response_display,
            inputs=[],
            outputs=[],
        )
        user_click = chatbot.submit_btn.click(
            fn=update_response_display,
            inputs=[],
            outputs=[],
        )

        # Audio input section
        with gr.Row():
            audio_input = gr.Audio(
                sources="microphone",
                type="numpy",  # Get audio data and sample rate
                label="Say Something..."
            )
            recognized_text = gr.Textbox(label="Recognized Text", interactive=False)

        # Process audio button
        process_button = gr.Button("Process Audio")

        # Loading animation
        loading_animation = gr.HTML(
            value='<div style="text-align: center;"><span style="font-size: 18px;">ASR Model is running...</span></div>',
            visible=False
        )

        text_speaker = gr.Audio(
            label="Generated Audio"
        )

        # Associate the audio processing function and update component states on click
        process_button.click(
            fn=disable_components,
            inputs=[],
            outputs=[recognized_text, process_button, loading_animation]
        ).then(
            fn=process_audio,
            inputs=[audio_input],
            outputs=[recognized_text, chatbot.textbox]
        ).then(
            fn=enable_components,
            inputs=[recognized_text],
            outputs=[recognized_text, process_button, loading_animation]
        )

        user_start.then(
            fn=disable_chatbot_components,
            inputs=[],
            outputs=[chatbot.submit_btn, chatbot.textbox, process_button, chatbot.retry_btn, chatbot.undo_btn, chatbot.clear_btn]
        ).then(
            fn=tts_part,
            inputs=[],
            outputs=text_speaker
        ).then(
            fn=enable_chatbot_components,
            inputs=[],
            outputs=[chatbot.submit_btn, chatbot.textbox, process_button, chatbot.retry_btn, chatbot.undo_btn, chatbot.clear_btn]
        )

        user_click.then(
            fn=disable_chatbot_components,
            inputs=[],
            outputs=[chatbot.submit_btn, chatbot.textbox, process_button, chatbot.retry_btn, chatbot.undo_btn, chatbot.clear_btn]
        ).then(
            fn=tts_part,
            inputs=[],
            outputs=text_speaker
        ).then(
            fn=enable_chatbot_components,
            inputs=[],
            outputs=[chatbot.submit_btn, chatbot.textbox, process_button, chatbot.retry_btn, chatbot.undo_btn, chatbot.clear_btn]
        )

    return demo


if __name__ == "__main__":
    demo = create_interface()
    demo.launch()