Spaces:
Runtime error
Runtime error
cupcakes323
committed on
Create create and train voice model
Browse files- create and train voice model +62 -0
create and train voice model
ADDED
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Third-party dependencies for the Gradio voice-conversion demo.
import gradio as gr
import requests
import torch
import librosa

# URL to the external app.py file
# NOTE(review): this is the Hub's "create new file" editor URL, not a raw-file
# endpoint, so an HTTP GET returns an HTML page rather than app.py — confirm
# the intended link (normally a .../resolve/main/app.py URL) before relying
# on the downloaded content.
FILE_URL = "https://huggingface.co/data-science-123/abcd/new/main?filename=app.py"
8 |
+
|
# Fetch the external app.py (or additional files) if needed
def fetch_external_file(url):
    """Download *url* and save its body to ``external_app.py``.

    Args:
        url: HTTP(S) address of the file to download.

    Raises:
        Exception: if the server responds with a non-200 status.
        requests.RequestException: on network failure or timeout.
    """
    # Time-bound the request: requests has NO default timeout, so a stalled
    # server would otherwise hang app start-up forever.
    response = requests.get(url, timeout=30)
    if response.status_code == 200:
        # Save to a local file; binary mode preserves the payload byte-for-byte.
        with open('external_app.py', 'wb') as file:
            file.write(response.content)
    else:
        # Include the status code so failures are diagnosable from the log.
        raise Exception(f"Failed to fetch the file ({response.status_code}): {url}")
18 |
+
|
# Fetch the file if you need to load any logic from it
# NOTE(review): this runs unconditionally at import time and performs a
# network call; any fetch failure aborts app start-up. Consider deferring or
# guarding it — confirm the downloaded file is actually used at runtime
# (nothing below reads external_app.py).
fetch_external_file(FILE_URL)
21 |
+
|
# Load the pre-trained model (replace with your RVC model path or logic)
# NOTE(review): `model` must already be importable here — presumably a local
# model.py providing load_model/convert_voice; it is NOT the file fetched
# above (that was saved as external_app.py). Verify it exists in the Space.
from model import load_model, convert_voice

# "path_to_pretrained_model" is a placeholder — point this at real weights.
model = load_model("path_to_pretrained_model")
26 |
+
|
# Define the voice conversion logic
def voice_conversion(source_audio, target_voice):
    """Convert *source_audio* into *target_voice* and return the output path.

    Args:
        source_audio: Path to the input audio file (as supplied by gr.Audio
            with type="filepath").
        target_voice: Identifier of the target voice style.

    Returns:
        Path of the written WAV file ("output_converted.wav").
    """
    # scipy ships alongside librosa; imported locally so the heavy dependency
    # is only touched when a conversion actually runs.
    from scipy.io import wavfile

    # Load and preprocess audio (librosa.load returns float32 samples).
    y, sr = librosa.load(source_audio)
    input_audio = torch.tensor(y).unsqueeze(0)

    # Use the model for voice conversion.
    converted_audio = convert_voice(model, input_audio, target_voice)

    # Convert the output tensor to a numpy array. squeeze() drops the batch
    # dimension added above — assumes convert_voice returns a (1, n) tensor;
    # TODO confirm against the model implementation.
    converted_audio_np = converted_audio.detach().cpu().numpy().squeeze()

    # BUG FIX: librosa.output.write_wav was removed in librosa 0.8, so the
    # original call crashes at runtime (the Space shows "Runtime error").
    # scipy.io.wavfile.write is the supported replacement for float32 WAVs.
    output_file = "output_converted.wav"
    wavfile.write(output_file, sr, converted_audio_np)

    return output_file
42 |
+
|
# Gradio interface
def infer(source_audio, target_voice):
    """Gradio callback: delegate straight to voice_conversion.

    Returns the path of the converted WAV produced by voice_conversion.
    """
    return voice_conversion(source_audio, target_voice)
48 |
+
|
# Create Gradio interface
# BUG FIX (likely cause of the Space's "Runtime error"): Gradio 4.x removed
# gr.Audio's `source=` keyword (now `sources=[...]`) and the "file" output
# type (now "filepath"), so the original constructor raises at import time.
# NOTE(review): if the Space pins gradio<4, revert to the legacy keywords —
# confirm the pinned version in requirements.txt.
iface = gr.Interface(
    fn=infer,
    inputs=[
        gr.Audio(sources=["microphone"], type="filepath", label="Source Audio"),
        gr.Dropdown(["Voice1", "Voice2", "Voice3"], label="Target Voice"),
    ],
    outputs=gr.Audio(type="filepath", label="Converted Audio"),
    title="Retrieval-based Voice Conversion",
    description="Convert voice from a source audio to a target voice style.",
)
60 |
+
|
# Launch the web UI only when run as a script (not when imported).
if __name__ == "__main__":
    iface.launch()