Update app.py
app.py CHANGED
@@ -6,12 +6,7 @@ from voice_processing import tts, get_model_names, voice_mapping
 from io import BytesIO
 import asyncio
 
-
-    info, edge_tts_output_path, tts_output_data, edge_output_file = await tts(
-        model_name, tts_text, edge_tts_voice, slang_rate, use_uploaded_voice, voice_upload_file
-    )
-    return info, tts_output_data
-
+# Define an asynchronous function for the Gradio interface
 async def convert_tts(model_name, tts_text, selected_voice, slang_rate, use_uploaded_voice, voice_upload):
     edge_tts_voice = voice_mapping.get(selected_voice)
     if not edge_tts_voice:
@@ -19,15 +14,17 @@ async def convert_tts(model_name, tts_text, selected_voice, slang_rate, use_uplo
 
     voice_upload_file = None
     if use_uploaded_voice and voice_upload is not None:
-
+        with open(voice_upload.name, 'rb') as f:
+            voice_upload_file = f.read()
 
-
+    # Asynchronous call to your tts processing function
+    info, edge_tts_output_path, tts_output_data, edge_output_file = await tts(
         model_name, tts_text, edge_tts_voice, slang_rate, use_uploaded_voice, voice_upload_file
     )
 
     _, audio_output = tts_output_data
 
-    #
+    # Process audio output to bytes
     audio_bytes = None
     if isinstance(audio_output, np.ndarray):
         byte_io = BytesIO()
@@ -46,6 +43,7 @@ def get_models():
 def get_voices():
     return list(voice_mapping.keys())
 
+# Initialize the Gradio interface
 iface = gr.Interface(
     fn=convert_tts,
     inputs=[
@@ -63,5 +61,5 @@ iface = gr.Interface(
     title="Text-to-Speech Conversion"
 )
 
-
-
+# Launch the interface
+iface.launch()
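
The second hunk ends just after byte_io = BytesIO(), so the rest of the ndarray branch is not visible in this diff. Below is a minimal sketch of how such a branch is commonly finished, assuming WAV encoding via scipy.io.wavfile and a hypothetical default sample rate; the actual continuation in app.py may differ.

import numpy as np
from io import BytesIO
from scipy.io import wavfile  # assumption: scipy is available for WAV encoding


def audio_array_to_bytes(audio_output: np.ndarray, sample_rate: int = 24000) -> bytes:
    # Hypothetical helper mirroring the truncated branch: write the waveform into an
    # in-memory WAV container and return its raw bytes (sample_rate is an assumed default).
    byte_io = BytesIO()
    wavfile.write(byte_io, sample_rate, audio_output)
    byte_io.seek(0)
    return byte_io.read()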
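The inputs list and the output components of gr.Interface are also cut off in this view. For orientation only, here is a hedged sketch of how an interface matching the convert_tts signature could be wired; the component types, labels, and slider range are assumptions rather than code from the repository, and the snippet presumes convert_tts, get_models, and get_voices from app.py are in scope. gr.Interface accepts an async function as fn, which is why convert_tts can remain a coroutine.

import gradio as gr

iface = gr.Interface(
    fn=convert_tts,  # async callbacks are supported by gr.Interface
    inputs=[
        gr.Dropdown(choices=get_models(), label="Model"),                 # model_name
        gr.Textbox(label="Text"),                                         # tts_text
        gr.Dropdown(choices=get_voices(), label="Voice"),                 # selected_voice
        gr.Slider(minimum=0, maximum=1, value=0.5, label="Slang rate"),   # slang_rate (range assumed)
        gr.Checkbox(label="Use uploaded voice"),                          # use_uploaded_voice
        gr.File(label="Voice file"),                                      # voice_upload
    ],
    outputs=[gr.Textbox(label="Info"), gr.Audio(label="Audio")],          # assumed output components
    title="Text-to-Speech Conversion",
)

iface.launch()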