Spaces: Runtime error
SayaSS committed
Commit 26277ae • 1 Parent(s): 814e97c

update

Browse files:
- app-slice.py +111 -0
- app.py +13 -11
- inference/__pycache__/infer_tool.cpython-38.pyc +0 -0
- inference/infer_tool.py +30 -27
app-slice.py
ADDED
@@ -0,0 +1,111 @@
+import os
+import gradio as gr
+import librosa
+import numpy as np
+from pathlib import Path
+import inference.infer_tool as infer_tool
+import utils
+from inference.infer_tool import Svc
+import logging
+import webbrowser
+import argparse
+import soundfile
+import gradio.processing_utils as gr_processing_utils
+logging.getLogger('numba').setLevel(logging.WARNING)
+logging.getLogger('markdown_it').setLevel(logging.WARNING)
+logging.getLogger('urllib3').setLevel(logging.WARNING)
+logging.getLogger('matplotlib').setLevel(logging.WARNING)
+
+limitation = os.getenv("SYSTEM") == "spaces"  # limit audio length in huggingface spaces
+
+audio_postprocess_ori = gr.Audio.postprocess
+
+def audio_postprocess(self, y):
+    data = audio_postprocess_ori(self, y)
+    if data is None:
+        return None
+    return gr_processing_utils.encode_url_or_file_to_base64(data["name"])
+
+
+gr.Audio.postprocess = audio_postprocess
+def create_vc_fn(model, sid):
+    def vc_fn(input_audio, vc_transform, auto_f0, slice_db, noise_scale, pad_seconds):
+        if input_audio is None:
+            return "You need to select an audio", None
+        raw_audio_path = f"raw/{input_audio}"
+        if "." not in raw_audio_path:
+            raw_audio_path += ".wav"
+        infer_tool.format_wav(raw_audio_path)
+        wav_path = Path(raw_audio_path).with_suffix('.wav')
+        _audio = model.slice_inference(
+            wav_path, sid, vc_transform, slice_db,
+            cluster_infer_ratio=0,
+            auto_predict_f0=auto_f0,
+            noice_scale=noise_scale,
+            pad_seconds=pad_seconds)
+        model.clear_empty()
+        return "Success", (44100, _audio)
+    return vc_fn
+
+def refresh_raw_wav():
+    return gr.Dropdown.update(choices=os.listdir("raw"))
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--device', type=str, default='cpu')
+    parser.add_argument('--api', action="store_true", default=False)
+    parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
+    parser.add_argument("--colab", action="store_true", default=False, help="share gradio app")
+    args = parser.parse_args()
+    hubert_model = utils.get_hubert_model().to(args.device)
+    models = []
+    raw = os.listdir("raw")
+    for f in os.listdir("models"):
+        name = f
+        model = Svc(fr"models/{f}/{f}.pth", f"models/{f}/config.json", device=args.device, hubert_model=hubert_model)
+        cover = f"models/{f}/cover.png" if os.path.exists(f"models/{f}/cover.png") else None
+        models.append((name, cover, create_vc_fn(model, name)))
+    with gr.Blocks() as app:
+        gr.Markdown(
+            "# <center> Sovits Models\n"
+            "## <center> The input audio should be clean and pure voice without background music.\n"
+            "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=sayashi.Sovits-Umamusume)\n\n"
+            "[Open In Colab](https://colab.research.google.com/drive/1wfsBbMzmtLflOJeqc5ZnJiLY7L239hJW?usp=share_link)"
+            " without queue and length limitation.\n\n"
+            "[Original Repo](https://github.com/svc-develop-team/so-vits-svc)\n\n"
+            "Other models:\n"
+            "[rudolf](https://huggingface.co/spaces/sayashi/sovits-rudolf)\n"
+            "[teio](https://huggingface.co/spaces/sayashi/sovits-teio)\n"
+            "[goldship](https://huggingface.co/spaces/sayashi/sovits-goldship)\n"
+            "[tannhauser](https://huggingface.co/spaces/sayashi/sovits-tannhauser)\n"
+
+        )
+        with gr.Tabs():
+            for (name, cover, vc_fn) in models:
+                with gr.TabItem(name):
+                    with gr.Row():
+                        gr.Markdown(
+                            '<div align="center">'
+                            f'<img style="width:auto;height:300px;" src="file/{cover}">' if cover else ""
+                            '</div>'
+                        )
+                    with gr.Row():
+                        with gr.Column():
+                            with gr.Row():
+                                vc_input = gr.Dropdown(label="Input audio", choices=raw)
+                                vc_refresh = gr.Button("🔁", variant="primary")
+                            vc_transform = gr.Number(label="vc_transform", value=0)
+                            slice_db = gr.Number(label="slice_db", value=-40)
+                            noise_scale = gr.Number(label="noise_scale", value=0.4)
+                            pad_seconds = gr.Number(label="pad_seconds", value=0.5)
+                            auto_f0 = gr.Checkbox(label="auto_f0", value=False)
+                            vc_submit = gr.Button("Generate", variant="primary")
+                        with gr.Column():
+                            vc_output1 = gr.Textbox(label="Output Message")
+                            vc_output2 = gr.Audio(label="Output Audio")
+                    vc_submit.click(vc_fn, [vc_input, vc_transform, auto_f0, slice_db, noise_scale, pad_seconds], [vc_output1, vc_output2])
+                    vc_refresh.click(refresh_raw_wav, [], [vc_input])
+    if args.colab:
+        webbrowser.open("http://127.0.0.1:7860")
+    app.queue(concurrency_count=1, api_open=args.api).launch(share=args.share)
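For context, here is a minimal sketch of the sliced-inference path that app-slice.py wires into Gradio, run directly from the repo root without the UI. It assumes this Space's layout (models/<name>/<name>.pth plus config.json, a raw/ input folder) and the bundled utils and inference/infer_tool modules; the model folder name "example-model" and the file raw/input.wav are hypothetical placeholders, not part of this commit.

# Sketch only: mirrors vc_fn in app-slice.py, assuming the Space's repo layout.
import soundfile
import utils
import inference.infer_tool as infer_tool
from inference.infer_tool import Svc

name = "example-model"                       # hypothetical folder under models/
hubert_model = utils.get_hubert_model().to("cpu")
model = Svc(f"models/{name}/{name}.pth", f"models/{name}/config.json",
            device="cpu", hubert_model=hubert_model)

raw_audio_path = "raw/input.wav"             # hypothetical file dropped into raw/
infer_tool.format_wav(raw_audio_path)        # normalize the input, as vc_fn does
audio = model.slice_inference(raw_audio_path, name, 0, -40,   # path, sid, transpose, slice_db
                              cluster_infer_ratio=0,
                              auto_predict_f0=False,
                              noice_scale=0.4,
                              pad_seconds=0.5)
model.clear_empty()
soundfile.write("output.wav", audio, 44100)  # the app returns audio at 44100 Hz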
app.py
CHANGED
@@ -1,11 +1,12 @@
 import os
+import io
 import gradio as gr
 import librosa
 import numpy as np
 import utils
 from inference.infer_tool import Svc
 import logging
-import webbrowser
+import soundfile
 import argparse
 import gradio.processing_utils as gr_processing_utils
 logging.getLogger('numba').setLevel(logging.WARNING)
@@ -31,15 +32,19 @@ def create_vc_fn(model, sid):
             return "You need to upload an audio", None
         sampling_rate, audio = input_audio
         duration = audio.shape[0] / sampling_rate
-        if duration >
-            return "Please upload an audio file that is less than
+        if duration > 45 and limitation:
+            return "Please upload an audio file that is less than 45 seconds. If you need to generate a longer audio file, please use Colab.", None
         audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
         if len(audio.shape) > 1:
             audio = librosa.to_mono(audio.transpose(1, 0))
-        if sampling_rate !=
-            audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=
-
-
+        if sampling_rate != 16000:
+            audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
+        raw_path = io.BytesIO()
+        soundfile.write(raw_path, audio, 16000, format="wav")
+        raw_path.seek(0)
+        out_audio, out_sr = model.infer(sid, vc_transform, raw_path,
+                                        auto_predict_f0=auto_f0,
+                                        )
         return "Success", (44100, out_audio.cpu().numpy())
     return vc_fn

@@ -48,7 +53,6 @@ if __name__ == '__main__':
     parser.add_argument('--device', type=str, default='cpu')
     parser.add_argument('--api', action="store_true", default=False)
     parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
-    parser.add_argument("--colab", action="store_true", default=False, help="share gradio app")
     args = parser.parse_args()
     hubert_model = utils.get_hubert_model().to(args.device)
     models = []
@@ -83,7 +87,7 @@ if __name__ == '__main__':
                         )
                     with gr.Row():
                         with gr.Column():
-                            vc_input = gr.Audio(label="Input audio"+' (less than
+                            vc_input = gr.Audio(label="Input audio"+' (less than 10 seconds)' if limitation else '')
                             vc_transform = gr.Number(label="vc_transform", value=0)
                             auto_f0 = gr.Checkbox(label="auto_f0", value=False)
                             vc_submit = gr.Button("Generate", variant="primary")
@@ -91,6 +95,4 @@ if __name__ == '__main__':
                            vc_output1 = gr.Textbox(label="Output Message")
                            vc_output2 = gr.Audio(label="Output Audio")
                    vc_submit.click(vc_fn, [vc_input, vc_transform, auto_f0], [vc_output1, vc_output2])
-    if args.colab:
-        webbrowser.open("http://127.0.0.1:7860")
     app.queue(concurrency_count=1, api_open=args.api).launch(share=args.share)
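The app.py change replaces the old direct handoff with an in-memory WAV: the uploaded clip is converted to float32 mono, resampled to 16 kHz, written into an io.BytesIO buffer with soundfile, rewound, and passed to model.infer as a file-like object. The snippet below is a minimal self-contained sketch of that round trip; the synthetic sine tone stands in for the (sampling_rate, audio) tuple Gradio provides, and instead of calling model.infer it simply reads the buffer back to verify it.

# Sketch of the BytesIO handoff introduced in app.py (synthetic input, no model needed).
import io
import numpy as np
import librosa
import soundfile

sampling_rate = 44100
audio = (np.sin(2 * np.pi * 440 * np.arange(sampling_rate) / sampling_rate)
         * 32767).astype(np.int16)                      # stand-in for input_audio

audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
if len(audio.shape) > 1:                                # stereo -> mono, as in app.py
    audio = librosa.to_mono(audio.transpose(1, 0))
if sampling_rate != 16000:
    audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)

raw_path = io.BytesIO()                                 # no temp file on disk
soundfile.write(raw_path, audio, 16000, format="wav")
raw_path.seek(0)                                        # rewind before handing off
# app.py passes raw_path to model.infer(...); here we just verify the buffer.
check, sr = soundfile.read(raw_path)
print(check.shape, sr)                                  # (16000,) 16000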
inference/__pycache__/infer_tool.cpython-38.pyc
CHANGED
Binary files a/inference/__pycache__/infer_tool.cpython-38.pyc and b/inference/__pycache__/infer_tool.cpython-38.pyc differ
inference/infer_tool.py
CHANGED
@@ -142,7 +142,8 @@ class Svc(object):



-    def get_unit_f0(self,
+    def get_unit_f0(self, in_path, tran, cluster_infer_ratio, speaker):
+        wav, sr = librosa.load(in_path, sr=self.target_sample)
         f0 = utils.compute_f0_parselmouth(wav, sampling_rate=self.target_sample, hop_length=self.hop_size)
         f0, uv = utils.interpolate_f0(f0)
         f0 = torch.FloatTensor(f0)
@@ -151,12 +152,12 @@ class Svc(object):
         f0 = f0.unsqueeze(0).to(self.dev)
         uv = uv.unsqueeze(0).to(self.dev)

-        wav16k = librosa.resample(wav, orig_sr=
+        wav16k = librosa.resample(wav, orig_sr=self.target_sample, target_sr=16000)
         wav16k = torch.from_numpy(wav16k).to(self.dev)
         c = utils.get_hubert_content(self.hubert_model, wav_16k_tensor=wav16k)
         c = utils.repeat_expand_2d(c.squeeze(0), f0.shape[1])

-        if cluster_infer_ratio !=0:
+        if cluster_infer_ratio != 0:
             cluster_c = cluster.get_cluster_center_result(self.cluster_model, c.cpu().numpy().T, speaker).T
             cluster_c = torch.FloatTensor(cluster_c).to(self.dev)
             c = cluster_infer_ratio * cluster_c + (1 - cluster_infer_ratio) * c
@@ -164,7 +165,7 @@ class Svc(object):
         c = c.unsqueeze(0)
         return c, f0, uv

-    def infer(self, speaker, tran,
+    def infer(self, speaker, tran, raw_path,
               cluster_infer_ratio=0,
               auto_predict_f0=False,
               noice_scale=0.4):
@@ -173,7 +174,7 @@ class Svc(object):
         if len(self.spk2id.__dict__) >= speaker:
             speaker_id = speaker
         sid = torch.LongTensor([int(speaker_id)]).to(self.dev).unsqueeze(0)
-        c, f0, uv = self.get_unit_f0(
+        c, f0, uv = self.get_unit_f0(raw_path, tran, cluster_infer_ratio, speaker)
         if "half" in self.net_g_path and torch.cuda.is_available():
             c = c.half()
         with torch.no_grad():
@@ -187,17 +188,18 @@ class Svc(object):
         # 清理显存
         torch.cuda.empty_cache()

-    def slice_inference(self,raw_audio_path, spk, tran, slice_db,cluster_infer_ratio, auto_predict_f0,noice_scale,
+    def slice_inference(self, raw_audio_path, spk, tran, slice_db, cluster_infer_ratio, auto_predict_f0, noice_scale,
+                        pad_seconds=0.5, clip_seconds=0, lg_num=0, lgr_num=0.75):
         wav_path = raw_audio_path
         chunks = slicer.cut(wav_path, db_thresh=slice_db)
         audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks)
-        per_size = int(clip_seconds*audio_sr)
-        lg_size = int(lg_num*audio_sr)
-        lg_size_r = int(lg_size*lgr_num)
-        lg_size_c_l = (lg_size-lg_size_r)//2
-        lg_size_c_r = lg_size-lg_size_r-lg_size_c_l
-        lg = np.linspace(0,1,lg_size_r) if lg_size!=0 else 0
-
+        per_size = int(clip_seconds * audio_sr)
+        lg_size = int(lg_num * audio_sr)
+        lg_size_r = int(lg_size * lgr_num)
+        lg_size_c_l = (lg_size - lg_size_r) // 2
+        lg_size_c_r = lg_size - lg_size_r - lg_size_c_l
+        lg = np.linspace(0, 1, lg_size_r) if lg_size != 0 else 0
+
         audio = []
         for (slice_tag, data) in audio_data:
             print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======')
@@ -209,12 +211,12 @@ class Svc(object):
                 audio.extend(list(pad_array(_audio, length)))
                 continue
             if per_size != 0:
-                datas = split_list_by_n(data, per_size,lg_size)
+                datas = split_list_by_n(data, per_size, lg_size)
             else:
                 datas = [data]
-            for k,dat in enumerate(datas):
-                per_length = int(np.ceil(len(dat) / audio_sr * self.target_sample)) if clip_seconds!=0 else length
-                if clip_seconds!=0: print(f'###=====segment clip start, {round(len(dat) / audio_sr, 3)}s======')
+            for k, dat in enumerate(datas):
+                per_length = int(np.ceil(len(dat) / audio_sr * self.target_sample)) if clip_seconds != 0 else length
+                if clip_seconds != 0: print(f'###=====segment clip start, {round(len(dat) / audio_sr, 3)}s======')
                 # padd
                 pad_len = int(audio_sr * pad_seconds)
                 dat = np.concatenate([np.zeros([pad_len]), dat, np.zeros([pad_len])])
@@ -222,24 +224,25 @@ class Svc(object):
                 soundfile.write(raw_path, dat, audio_sr, format="wav")
                 raw_path.seek(0)
                 out_audio, out_sr = self.infer(spk, tran, raw_path,
-
-
-
-
+                                               cluster_infer_ratio=cluster_infer_ratio,
+                                               auto_predict_f0=auto_predict_f0,
+                                               noice_scale=noice_scale
+                                               )
                 _audio = out_audio.cpu().numpy()
                 pad_len = int(self.target_sample * pad_seconds)
                 _audio = _audio[pad_len:-pad_len]
                 _audio = pad_array(_audio, per_length)
-                if lg_size!=0 and k!=0:
-                    lg1 = audio[-(lg_size_r+lg_size_c_r):-lg_size_c_r] if lgr_num != 1 else audio[-lg_size:]
-                    lg2 = _audio[lg_size_c_l:lg_size_c_l+lg_size_r]
-                    lg_pre = lg1*(1-lg)+lg2*lg
-                    audio = audio[0:-(lg_size_r+lg_size_c_r)] if lgr_num != 1 else audio[0:-lg_size]
+                if lg_size != 0 and k != 0:
+                    lg1 = audio[-(lg_size_r + lg_size_c_r):-lg_size_c_r] if lgr_num != 1 else audio[-lg_size:]
+                    lg2 = _audio[lg_size_c_l:lg_size_c_l + lg_size_r] if lgr_num != 1 else _audio[0:lg_size]
+                    lg_pre = lg1 * (1 - lg) + lg2 * lg
+                    audio = audio[0:-(lg_size_r + lg_size_c_r)] if lgr_num != 1 else audio[0:-lg_size]
                     audio.extend(lg_pre)
-                _audio = _audio[lg_size_c_l+lg_size_r:] if lgr_num != 1 else _audio[lg_size:]
+                _audio = _audio[lg_size_c_l + lg_size_r:] if lgr_num != 1 else _audio[lg_size:]
                 audio.extend(list(_audio))
         return np.array(audio)

+
 class RealTimeVC:
     def __init__(self):
         self.last_chunk = None
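The reworked slice_inference stitches consecutive chunks with a linear crossfade: the tail of the audio accumulated so far and the head of the freshly inferred chunk are blended with np.linspace weights over lg_size_r samples. The snippet below is a minimal self-contained sketch of just that blend; the toy sample rate, lg_num value, and the constant "chunks" are placeholders (the new signature actually defaults lg_num to 0, i.e. no overlap), while lgr_num matches the new default of 0.75.

# Sketch of the overlap blend used between chunks in slice_inference (toy data).
import numpy as np

audio_sr = 100                      # toy sample rate
lg_num, lgr_num = 0.1, 0.75         # toy overlap seconds; lgr_num matches the new default
lg_size = int(lg_num * audio_sr)    # total overlap, in samples
lg_size_r = int(lg_size * lgr_num)  # portion that is actually crossfaded
lg = np.linspace(0, 1, lg_size_r) if lg_size != 0 else 0

prev_tail = np.ones(lg_size_r)      # stand-in: end of the audio accumulated so far
next_head = np.zeros(lg_size_r)     # stand-in: start of the freshly inferred chunk

# weight ramps from the previous chunk (1 -> 0) to the next chunk (0 -> 1)
blended = prev_tail * (1 - lg) + next_head * lg
print(blended)                      # smooth ramp from 1 toward 0 across the overlap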