# Streaming voice-chat demo: records microphone audio in Gradio, detects a
# pause, and streams text + TTS audio back from Lepton AI's llama3.1-8b.
import gradio as gr
import numpy as np
import io
from pydub import AudioSegment
import tempfile
import openai
import time
from dataclasses import dataclass, field
from threading import Lock
import base64
@dataclass
class AppState:
    """Per-session state threaded through every Gradio callback."""

    # Accumulated microphone samples for the current utterance
    # (None until the first streamed chunk arrives).
    stream: np.ndarray | None = None
    # Sample rate of `stream`, taken from the first audio chunk.
    sampling_rate: int = 0
    # True once determine_pause() judges the user has stopped speaking.
    pause_detected: bool = False
    # Chat history as {"role": ..., "content": ...} dicts for the Chatbot.
    conversation: list = field(default_factory=list)
    # API client; set by set_api_key(), None until then.
    client: openai.OpenAI | None = None
    # TTS output format requested from the backend ("mp3" or "opus").
    output_format: str = "mp3"
    # Set True by the "Stop Conversation" button to stop auto re-recording.
    stopped: bool = False
# Global lock for thread safety
# NOTE(review): state_lock is never acquired anywhere in this file — either
# guard the shared-state mutations with it or remove it; confirm intent.
state_lock = Lock()
def create_client(api_key):
    """Return an OpenAI-compatible client bound to the Lepton llama3.1-8b endpoint."""
    endpoint = "https://llama3-1-8b.lepton.run/api/v1/"
    return openai.OpenAI(base_url=endpoint, api_key=api_key)
def determine_pause(audio, sampling_rate, state):
    """Return True when the trailing 1 second of *audio* is near-silent.

    Args:
        audio: 1-D numpy array of samples (assumes float amplitudes roughly
            in [-1, 1] — the 0.01 threshold would not fit int16 data;
            TODO confirm against the gr.Audio dtype actually delivered).
        sampling_rate: samples per second of *audio*.
        state: AppState; currently unused, kept for interface stability.

    Returns:
        True if the mean absolute amplitude of the last second falls below
        the silence threshold; False otherwise, including when less than
        one second of audio has been captured so far.
    """
    pause_length = int(sampling_rate * 1)  # number of samples in 1 second
    if len(audio) < pause_length:
        # Not enough audio yet to judge whether the speaker paused.
        return False
    last_audio = audio[-pause_length:]
    # Average absolute amplitude over the trailing window.
    avg_amplitude = np.mean(np.abs(last_audio))
    silence_threshold = 0.01  # Adjust this threshold as needed
    # bool() keeps the return type a plain Python bool, as before.
    return bool(avg_amplitude < silence_threshold)
def process_audio(audio: tuple, state: AppState):
    """Accumulate one streamed microphone chunk and stop recording on a pause.

    Args:
        audio: (sampling_rate, samples) tuple from the streaming gr.Audio.
        state: session AppState holding the growing sample buffer.

    Returns:
        (audio_component_update_or_None, state): the update flips recording
        off once a pause has been detected; otherwise no component change.
    """
    rate, chunk = audio
    if state.stream is None:
        # First chunk of this utterance: start a fresh buffer.
        state.stream = chunk
        state.sampling_rate = rate
    else:
        state.stream = np.concatenate((state.stream, chunk))

    state.pause_detected = determine_pause(state.stream, state.sampling_rate, state)
    if state.pause_detected:
        return gr.update(recording=False), state
    return None, state
def generate_response_and_audio(audio_bytes: bytes, state: AppState):
    """Stream (text_delta, audio_bytes_or_None, state) triples from the API.

    Sends the user's WAV bytes to the chat-completions endpoint with TTS
    enabled and yields each streamed chunk as it arrives.

    Raises:
        gr.Error: if no API key has been set, or if streaming fails.
    """
    if state.client is None:
        raise gr.Error("Please enter a valid API key first.")

    fmt = state.output_format
    # Higher bitrate for MP3, lower for OPUS
    bitrate = 128 if fmt == "mp3" else 32
    encoded_input = base64.b64encode(audio_bytes).decode()

    try:
        completion_stream = state.client.chat.completions.create(
            extra_body={
                "require_audio": True,
                "tts_preset_id": "jessica",
                "tts_audio_format": fmt,
                "tts_audio_bitrate": bitrate,
            },
            model="llama3.1-8b",
            messages=[{"role": "user", "content": [{"type": "audio", "data": encoded_input}]}],
            temperature=0.7,
            max_tokens=256,
            stream=True,
        )
        for chunk in completion_stream:
            if not chunk.choices:
                continue
            choice = chunk.choices[0]
            text_delta = choice.delta.content
            audio_parts = getattr(choice, 'audio', [])
            if not (text_delta or audio_parts):
                continue
            # Decode and concatenate this chunk's base64 audio pieces, if any.
            decoded_audio = b''.join(base64.b64decode(part) for part in audio_parts) if audio_parts else None
            yield text_delta, decoded_audio, state
    except Exception as e:
        raise gr.Error(f"Error during audio streaming: {e}")
def response(state: AppState):
    """Generator callback fired when recording stops: send the buffered
    utterance to the model and stream back chat + audio updates.

    Yields:
        (chatbot_messages, audio_bytes_or_None, state) triples for Gradio;
        yields (None, None, state) once and returns if nothing was recorded.
    """
    if state.stream is None or len(state.stream) == 0:
        yield None, None, state
        return

    # Re-encode the raw numpy samples as an in-memory WAV file so the API
    # receives a self-describing audio container.
    audio_buffer = io.BytesIO()
    segment = AudioSegment(
        state.stream.tobytes(),
        frame_rate=state.sampling_rate,
        sample_width=state.stream.dtype.itemsize,
        channels=(1 if len(state.stream.shape) == 1 else state.stream.shape[1]),
    )
    segment.export(audio_buffer, format="wav")

    generator = generate_response_and_audio(audio_buffer.getvalue(), state)

    # Add the user's audio input to the conversation
    state.conversation.append({"role": "user", "content": "Audio input"})
    # Prepare assistant's message
    assistant_message = {"role": "assistant", "content": ""}
    state.conversation.append(assistant_message)

    for text, audio, updated_state in generator:
        if text:
            # Mutating the dict in place also updates state.conversation,
            # which already holds a reference to it.
            assistant_message["content"] += text
        state = updated_state
        chatbot_output = state.conversation[-2:]  # Get the last two messages
        yield chatbot_output, audio, state

    # Reset the audio stream for the next interaction
    state.stream = None
    state.pause_detected = False
def start_recording_user(state: AppState):
    """Re-arm the microphone for the next turn unless the user hit Stop."""
    return gr.update(recording=not state.stopped)
def set_api_key(api_key, state):
    """Create and store an API client for this session.

    Returns:
        (status_message, state) for the status textbox and session state.

    Raises:
        gr.Error: if the supplied key is empty.
    """
    if api_key:
        state.client = create_client(api_key)
        return "API key set successfully!", state
    raise gr.Error("Please enter a valid API key.")
def update_format(format, state):
    """Record the user's chosen TTS output format ("mp3" or "opus") on the state.

    NOTE(review): the parameter shadows the builtin `format`; the name is kept
    for interface stability.
    """
    state.output_format = format
    return state
# --- Gradio UI wiring (runs at import time) ---
with gr.Blocks() as demo:
    with gr.Row():
        api_key_input = gr.Textbox(type="password", label="Enter your Lepton API Key")
        set_key_button = gr.Button("Set API Key")
        api_key_status = gr.Textbox(label="API Key Status", interactive=False)
    with gr.Row():
        format_dropdown = gr.Dropdown(choices=["mp3", "opus"], value="mp3", label="Output Audio Format")
    with gr.Row():
        with gr.Column():
            # NOTE(review): `source=` is the Gradio 3.x keyword; Gradio 4
            # renamed it to `sources=[...]` — confirm the pinned version.
            input_audio = gr.Audio(label="Input Audio", source="microphone", type="numpy")
        with gr.Column():
            chatbot = gr.Chatbot(label="Conversation", type="messages")
            output_audio = gr.Audio(label="Output Audio", autoplay=True)

    # One AppState instance per browser session.
    state = gr.State(AppState())

    set_key_button.click(set_api_key, inputs=[api_key_input, state], outputs=[api_key_status, state])
    format_dropdown.change(update_format, inputs=[format_dropdown, state], outputs=[state])

    # Stream microphone chunks into process_audio, which flips recording
    # off once a pause is detected.
    stream = input_audio.stream(
        process_audio,
        [input_audio, state],
        [input_audio, state],
        stream_every=0.25,  # Reduced to make it more responsive
        time_limit=60,  # Increased to allow for longer messages
    )
    respond = input_audio.stop_recording(
        response,
        [state],
        [chatbot, output_audio, state],
    )
    # Automatically restart recording after the assistant's response
    restart = output_audio.change(
        start_recording_user,
        [state],
        [input_audio]
    )
    # Add a "Stop Conversation" button
    cancel = gr.Button("Stop Conversation", variant="stop")
    # Replaces the session state with a stopped AppState and cancels any
    # in-flight response/restart events.
    cancel.click(lambda: (AppState(stopped=True), gr.update(recording=False)), None,
                 [state, input_audio], cancels=[respond, restart])

# NOTE(review): gr.Blocks.launch() has no `stream` parameter in current
# Gradio releases — confirm this keyword is accepted by the pinned version.
demo.launch(queue=True, stream=True)