Commit bdf2bf2
Parent(s): af4cf59

try bytes

Files changed:
- app.py +34 -3
- librispeech.mp3 +0 -0
app.py
CHANGED
@@ -1,6 +1,35 @@
 import gradio as gr
 import math
 import time
+import numpy as np
+from pydub import AudioSegment
+import io
+
+
+def numpy_to_mp3(audio_array, sampling_rate):
+    # Normalize audio_array if it's floating-point
+    if np.issubdtype(audio_array.dtype, np.floating):
+        max_val = np.max(np.abs(audio_array))
+        audio_array = (audio_array / max_val) * 32767  # Normalize to 16-bit range
+        audio_array = audio_array.astype(np.int16)
+
+    # Create an audio segment from the numpy array
+    audio_segment = AudioSegment(
+        audio_array.tobytes(),
+        frame_rate=sampling_rate,
+        sample_width=audio_array.dtype.itemsize,
+        channels=1
+    )
+
+    # Export the audio segment to MP3 bytes
+    mp3_io = io.BytesIO()
+    audio_segment.export(mp3_io, format="mp3")
+
+    # Get the MP3 bytes
+    mp3_bytes = mp3_io.getvalue()
+    mp3_io.close()
+
+    return mp3_bytes
 
 def stream(audio, chunk_length_s):
     start_time = time.time()
@@ -15,10 +44,11 @@ def stream(audio, chunk_length_s):
         start_pos = idx * chunk_length
         end_pos = min((idx + 1) * chunk_length, audio_length)
         chunk = array[start_pos : end_pos]
+        chunk_mp3 = numpy_to_mp3(chunk, sampling_rate=sampling_rate)
         if idx == 0:
             first_time = round(time.time() - start_time, 2)
         run_time = round(time.time() - start_time, 2)
-        yield (sampling_rate, chunk), first_time, run_time
+        yield (sampling_rate, chunk), chunk_mp3, first_time, run_time
 
 with gr.Blocks() as demo:
     with gr.Row():
@@ -27,10 +57,11 @@ with gr.Blocks() as demo:
             chunk_length = gr.Slider(minimum=2, maximum=10, value=2, step=2, label="Chunk length (s)")
             run_button = gr.Button("Stream audio")
         with gr.Column():
-            audio_out = gr.Audio(streaming=True, autoplay=True)
+            audio_out = gr.Audio(streaming=True, autoplay=True, label="wav")
+            audio_out_mp3 = gr.Audio(streaming=True, autoplay=True, format="mp3", label="mp3")
             first_time = gr.Textbox(label="Time to first chunk (s)")
             run_time = gr.Textbox(label="Time to current chunk (s)")
 
-    run_button.click(fn=stream, inputs=[audio_in, chunk_length], outputs=[audio_out, first_time, run_time])
+    run_button.click(fn=stream, inputs=[audio_in, chunk_length], outputs=[audio_out, audio_out_mp3, first_time, run_time])
 
 demo.launch()
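
Note: a minimal standalone sketch (not part of this commit) of how the numpy_to_mp3 helper added above could be exercised outside the Gradio app. The synthetic sine-wave input, the 16 kHz rate, and the output path are illustrative assumptions; pydub also needs an ffmpeg backend available to export MP3.

import numpy as np

# numpy_to_mp3 is the helper defined in app.py above; importing app.py directly
# would also launch the Gradio demo, so here it is assumed to already be in scope.
sampling_rate = 16000  # assumed rate for this synthetic example
t = np.linspace(0, 2.0, 2 * sampling_rate, endpoint=False)
tone = 0.5 * np.sin(2 * np.pi * 440 * t)  # 2 s float64 sine; the helper rescales it to int16

mp3_bytes = numpy_to_mp3(tone, sampling_rate=sampling_rate)  # raw MP3 bytes

with open("tone.mp3", "wb") as f:  # hypothetical output file
    f.write(mp3_bytes)

These are the same kind of bytes that each loop iteration now hands to the mp3-labelled gr.Audio output via the updated yield.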
librispeech.mp3
ADDED
Binary file (627 kB)