Update app.py
app.py CHANGED
@@ -159,12 +159,7 @@ async def load_model_data():
     sys.path.append('./content/piper/src/python')
     models_path = "./content/piper/src/python"
     logging.basicConfig(level=logging.DEBUG)
-    providers = [
-        "CPUExecutionProvider"
-        if use_gpu is False
-        else ("CUDAExecutionProvider", {"cudnn_conv_algo_search": "DEFAULT"})
-    ]
-    sess_options = onnxruntime.SessionOptions()
+

     # Collect data for all models in the directory and populate model_configurations
     model_names, config_names = detect_onnx_models(models_path)
@@ -229,34 +224,39 @@ async def main(
     # ... (previous code)

     if selected_model in onnx_models:
-    [28 removed lines of the previous synthesis block; their content is not shown in the rendered diff]
+        # model_name = selected_model
+        # onnx_model = selected_model  # Replace with the actual key for your ONNX model file
+
+        providers = ["CPUExecutionProvider"
+                     if use_gpu is False
+                     else ("CUDAExecutionProvider", {"cudnn_conv_algo_search": "DEFAULT"})
+                     ]
+        sess_options = onnxruntime.SessionOptions()
+        model, config = load_onnx(selected_model, sess_options, providers)
+        speaker_id_map = config.get("speaker_id_map", {})
+
+        auto_play = play
+        audio = inferencing(model, config, selected_speaker_id, text_input, speed_slider, noise_scale_slider, noise_scale_w_slider, auto_play)
+        temp_dir = tempfile.mkdtemp()
+        renamed_audio_file = os.path.join(temp_dir, "download.mp3")
+        audio.export(renamed_audio_file, format="mp3")
+
+        # Generate a unique file ID
+        file_id = str(uuid.uuid4())
+
+        # Store the file path with the generated file ID
+        files[file_id] = renamed_audio_file
+
+        # Create a URL to download the file
+        file_url = f'/download?fileId={file_id}'
+
+        # Restore the form and return the response
+        response_html = """
+        <script>
+            document.getElementById("loading-message").innerText = "Audio generated successfully!";
+            document.getElementById("synthesize_button").disabled = false;
+        </script>
+        """

     else:
         # The selected_model is not found in the list; handle this case as needed