kevinwang676 committed on
Commit fb33941
1 Parent(s): 2239797

Update app.py

Files changed (1)
  1. app.py +1 -651
app.py CHANGED
@@ -1,653 +1,3 @@
- import spaces
  import os
- import glob
- import json
- import traceback
- import logging
- import gradio as gr
- import numpy as np
- import librosa
- import torch
- import asyncio
- import ffmpeg
- import subprocess
- import sys
- import io
- import wave
- from datetime import datetime
- import urllib.request
- import zipfile
- import shutil
- import gradio as gr
- from textwrap import dedent
- import pprint
- import time

- import re
- import requests
- import subprocess
- from pathlib import Path
- from scipy.io.wavfile import write
- from scipy.io import wavfile
- import soundfile as sf
-
- from lib.infer_pack.models import (
-     SynthesizerTrnMs256NSFsid,
-     SynthesizerTrnMs256NSFsid_nono,
-     SynthesizerTrnMs768NSFsid,
-     SynthesizerTrnMs768NSFsid_nono,
- )
- from vc_infer_pipeline import VC
- from config import Config
- config = Config()
- logging.getLogger("numba").setLevel(logging.WARNING)
- spaces_hf = True #os.getenv("SYSTEM") == "spaces"
- force_support = True
-
- audio_mode = []
- f0method_mode = []
- f0method_info = ""
-
- headers = {
-     "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36"
- }
- pattern = r'//www\.bilibili\.com/video[^"]*'
-
- # Download models
-
- #urllib.request.urlretrieve("https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/hubert_base", "hubert_base.pt")
- #urllib.request.urlretrieve("https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/rmvpe", "rmvpe.pt")
-
- # Get zip name
-
- pattern_zip = r"/([^/]+)\.zip$"
-
- def get_file_name(url):
-     match = re.search(pattern_zip, url)
-     if match:
-         extracted_string = match.group(1)
-         return extracted_string
-     else:
-         raise Exception("没有找到AI歌手模型的zip压缩包。")
-
- # Get RVC models
-
- def extract_zip(extraction_folder, zip_name):
-     os.makedirs(extraction_folder)
-     with zipfile.ZipFile(zip_name, 'r') as zip_ref:
-         zip_ref.extractall(extraction_folder)
-     os.remove(zip_name)
-
-     index_filepath, model_filepath = None, None
-     for root, dirs, files in os.walk(extraction_folder):
-         for name in files:
-             if name.endswith('.index') and os.stat(os.path.join(root, name)).st_size > 1024 * 100:
-                 index_filepath = os.path.join(root, name)
-
-             if name.endswith('.pth') and os.stat(os.path.join(root, name)).st_size > 1024 * 1024 * 40:
-                 model_filepath = os.path.join(root, name)
-
-     if not model_filepath:
-         raise Exception(f'No .pth model file was found in the extracted zip. Please check {extraction_folder}.')
-
-     # move model and index file to extraction folder
-     os.rename(model_filepath, os.path.join(extraction_folder, os.path.basename(model_filepath)))
-     if index_filepath:
-         os.rename(index_filepath, os.path.join(extraction_folder, os.path.basename(index_filepath)))
-
-     # remove any unnecessary nested folders
-     for filepath in os.listdir(extraction_folder):
-         if os.path.isdir(os.path.join(extraction_folder, filepath)):
-             shutil.rmtree(os.path.join(extraction_folder, filepath))
-
- # Get username in OpenXLab
-
- def get_username(url):
-     match_username = re.search(r'models/(.*?)/', url)
-     if match_username:
-         result = match_username.group(1)
-         return result
-
- # Get username in Hugging Face
-
- def get_username_hf(url):
-     match_username = re.search(r'huggingface.co/(.*?)/', url)
-     if match_username:
-         result = match_username.group(1)
-         return result
-
- def download_online_model(url, dir_name):
-     if url.startswith('https://download.openxlab.org.cn/models/'):
-         zip_path = get_username(url) + "-" + get_file_name(url)
-     elif url.startswith('https://huggingface.co/'):
-         zip_path = get_username_hf(url) + "-" + get_file_name(url)
-     else:
-         zip_path = get_file_name(url)
-     if not os.path.exists(zip_path):
-         print("P.S. AI歌手模型还未下载")
-         try:
-             zip_name = url.split('/')[-1]
-             extraction_folder = os.path.join(zip_path, dir_name)
-             if os.path.exists(extraction_folder):
-                 raise Exception(f'Voice model directory {dir_name} already exists! Choose a different name for your voice model.')
-
-             if 'pixeldrain.com' in url:
-                 url = f'https://pixeldrain.com/api/file/{zip_name}'
-
-             urllib.request.urlretrieve(url, zip_name)
-
-             extract_zip(extraction_folder, zip_name)
-             #return f'[√] {dir_name} Model successfully downloaded!'
-
-         except Exception as e:
-             raise Exception(str(e))
-     else:
-         print("P.S. AI歌手模型之前已经下载")
-
- #Get bilibili BV id
-
- def get_bilibili_video_id(url):
-     match = re.search(r'/video/([a-zA-Z0-9]+)/', url)
-     extracted_value = match.group(1)
-     return extracted_value
-
- # Get bilibili audio
- def find_first_appearance_with_neighborhood(text, pattern):
-     match = re.search(pattern, text)
-
-     if match:
-         return match.group()
-     else:
-         return None
-
- def search_bilibili(keyword):
-     if keyword.startswith("BV"):
-         req = requests.get("https://search.bilibili.com/all?keyword={}&duration=1".format(keyword), headers=headers).text
-     else:
-         req = requests.get("https://search.bilibili.com/all?keyword={}&duration=1&tids=3&page=1".format(keyword), headers=headers).text
-
-     video_link = "https:" + find_first_appearance_with_neighborhood(req, pattern)
-
-     return video_link
-
- # Save bilibili audio
-
- def get_response(html_url):
-     headers = {
-         "referer": "https://www.bilibili.com/",
-         "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36"
-     }
-     response = requests.get(html_url, headers=headers)
-     return response
-
- def get_video_info(html_url):
-     response = get_response(html_url)
-     html_data = re.findall('<script>window.__playinfo__=(.*?)</script>', response.text)[0]
-     json_data = json.loads(html_data)
-     if json_data['data']['dash']['audio'][0]['backupUrl']!=None:
-         audio_url = json_data['data']['dash']['audio'][0]['backupUrl'][0]
-     else:
-         audio_url = json_data['data']['dash']['audio'][0]['baseUrl']
-     return audio_url
-
- def save_audio(title, audio_url):
-     audio_content = get_response(audio_url).content
-     with open(title + '.wav', mode='wb') as f:
-         f.write(audio_content)
-         print("音乐内容保存完成")
-
-
- # Use UVR-HP5/2
-
- urllib.request.urlretrieve("https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/UVR-HP2.pth", "uvr5/uvr_model/UVR-HP2.pth")
- urllib.request.urlretrieve("https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/UVR-HP5.pth", "uvr5/uvr_model/UVR-HP5.pth")
- #urllib.request.urlretrieve("https://huggingface.co/fastrolling/uvr/resolve/main/Main_Models/5_HP-Karaoke-UVR.pth", "uvr5/uvr_model/UVR-HP5.pth")
-
- from uvr5.vr import AudioPre
- weight_uvr5_root = "uvr5/uvr_model"
- uvr5_names = []
- for name in os.listdir(weight_uvr5_root):
-     if name.endswith(".pth") or "onnx" in name:
-         uvr5_names.append(name.replace(".pth", ""))
-
- func = AudioPre
- pre_fun_hp2 = func(
-     agg=int(10),
-     model_path=os.path.join(weight_uvr5_root, "UVR-HP2.pth"),
-     device="cuda",
-     is_half=True,
- )
-
- pre_fun_hp5 = func(
-     agg=int(10),
-     model_path=os.path.join(weight_uvr5_root, "UVR-HP5.pth"),
-     device="cuda",
-     is_half=True,
- )
-
- # Separate vocals
-
- def youtube_downloader(
-     filename,
-     split_model,
- ):
-
-     audio_path = filename.strip() + ".wav"
-
-     # make dir output
-     os.makedirs("output", exist_ok=True)
-
-     if split_model=="UVR-HP2":
-         pre_fun = pre_fun_hp2
-     else:
-         pre_fun = pre_fun_hp5
-
-     pre_fun._path_audio_(audio_path, f"./output/{split_model}/{filename}/", f"./output/{split_model}/{filename}/", "wav")
-     os.remove(filename.strip()+".wav")
-
-     return f"./output/{split_model}/{filename}/vocal_{filename}.wav_10.wav", f"./output/{split_model}/{filename}/instrument_{filename}.wav_10.wav"
-
- # get duration
-
- import wave
- def get_duration_wave(file_path):
-     with wave.open(file_path, 'r') as audio_file:
-         frame_rate = audio_file.getframerate()
-         n_frames = audio_file.getnframes()
-         duration = n_frames / float(frame_rate)
-         return duration
-
- # Original code
-
- if force_support is False or spaces_hf is True:
-     if spaces_hf is True:
-         audio_mode = ["Upload audio", "TTS Audio"]
-     else:
-         audio_mode = ["Input path", "Upload audio", "TTS Audio"]
-     f0method_mode = ["pm", "harvest"]
-     f0method_info = "PM is fast, Harvest is good but extremely slow, Rvmpe is alternative to harvest (might be better). (Default: PM)"
- else:
-     audio_mode = ["Input path", "Upload audio", "Youtube", "TTS Audio"]
-     f0method_mode = ["pm", "harvest", "crepe"]
-     f0method_info = "PM is fast, Harvest is good but extremely slow, Rvmpe is alternative to harvest (might be better), and Crepe effect is good but requires GPU (Default: PM)"
-
- if os.path.isfile("rmvpe.pt"):
-     f0method_mode.insert(2, "rmvpe")
-
- def create_vc_fn(model_name, tgt_sr, net_g, vc, if_f0, version, file_index):
-     def vc_fn(
-         vc_audio_mode,
-         vc_input,
-         vc_upload,
-         tts_text,
-         tts_voice,
-         f0_up_key,
-         f0_method,
-         index_rate,
-         filter_radius,
-         resample_sr,
-         rms_mix_rate,
-         protect,
-     ):
-         try:
-             logs = []
-             print(f"Converting using {model_name}...")
-             logs.append(f"Converting using {model_name}...")
-             yield "\n".join(logs), None
-             if vc_audio_mode == "Input path" or "Youtube" and vc_input != "":
-                 audio, sr = librosa.load(vc_input, sr=16000, mono=True)
-             elif vc_audio_mode == "Upload audio":
-                 if vc_upload is None:
-                     return "You need to upload an audio", None
-                 sampling_rate, audio = vc_upload
-                 duration = audio.shape[0] / sampling_rate
-                 if duration > 20 and spaces_hf:
-                     return "Please upload an audio file that is less than 20 seconds. If you need to generate a longer audio file, please use Colab.", None
-                 audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
-                 if len(audio.shape) > 1:
-                     audio = librosa.to_mono(audio.transpose(1, 0))
-                 if sampling_rate != 16000:
-                     audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
-             times = [0, 0, 0]
-             f0_up_key = int(f0_up_key)
-             audio_opt = vc.pipeline(
-                 hubert_model,
-                 net_g,
-                 0,
-                 audio,
-                 vc_input,
-                 times,
-                 f0_up_key,
-                 f0_method,
-                 file_index,
-                 # file_big_npy,
-                 index_rate,
-                 if_f0,
-                 filter_radius,
-                 tgt_sr,
-                 resample_sr,
-                 rms_mix_rate,
-                 version,
-                 protect,
-                 f0_file=None,
-             )
-             info = f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}, f0: {times[1]}s, infer: {times[2]}s"
-             print(f"{model_name} | {info}")
-             logs.append(f"Successfully Convert {model_name}\n{info}")
-             yield "\n".join(logs), (tgt_sr, audio_opt)
-         except Exception as err:
-             info = traceback.format_exc()
-             print(info)
-             print(f"Error when using {model_name}.\n{str(err)}")
-             yield info, None
-     return vc_fn
-
- def combine_vocal_and_inst(model_name, song_name, song_id, split_model, cover_song, vocal_volume, inst_volume):
-     #samplerate, data = wavfile.read(cover_song)
-     vocal_path = cover_song #f"output/{split_model}/{song_id}/vocal_{song_id}.wav_10.wav"
-     output_path = song_name.strip() + "-AI-" + ''.join(os.listdir(f"{model_name}")).strip() + "翻唱版.mp3"
-     inst_path = f"output/{split_model}/{song_id}/instrument_{song_id}.wav_10.wav"
-     #with wave.open(vocal_path, "w") as wave_file:
-         #wave_file.setnchannels(1)
-         #wave_file.setsampwidth(2)
-         #wave_file.setframerate(samplerate)
-         #wave_file.writeframes(data.tobytes())
-     command = f'ffmpeg -y -i {inst_path} -i {vocal_path} -filter_complex [0:a]volume={inst_volume}[i];[1:a]volume={vocal_volume}[v];[i][v]amix=inputs=2:duration=longest[a] -map [a] -b:a 320k -c:a libmp3lame {output_path}'
-     result = subprocess.run(command.split(), stdout=subprocess.PIPE)
-     print(result.stdout.decode())
-     return output_path
-
- def rvc_models(model_name):
-     global vc, net_g, index_files, tgt_sr, version
-     categories = []
-     models = []
-     for w_root, w_dirs, _ in os.walk(f"{model_name}"):
-         model_count = 1
-         for sub_dir in w_dirs:
-             pth_files = glob.glob(f"{model_name}/{sub_dir}/*.pth")
-             index_files = glob.glob(f"{model_name}/{sub_dir}/*.index")
-             if pth_files == []:
-                 print(f"Model [{model_count}/{len(w_dirs)}]: No Model file detected, skipping...")
-                 continue
-             cpt = torch.load(pth_files[0])
-             tgt_sr = cpt["config"][-1]
-             cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
-             if_f0 = cpt.get("f0", 1)
-             version = cpt.get("version", "v1")
-             if version == "v1":
-                 if if_f0 == 1:
-                     net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
-                 else:
-                     net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
-                 model_version = "V1"
-             elif version == "v2":
-                 if if_f0 == 1:
-                     net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
-                 else:
-                     net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
-                 model_version = "V2"
-             del net_g.enc_q
-             print(net_g.load_state_dict(cpt["weight"], strict=False))
-             net_g.eval().to(config.device)
-             if config.is_half:
-                 net_g = net_g.half()
-             else:
-                 net_g = net_g.float()
-             vc = VC(tgt_sr, config)
-             if index_files == []:
-                 print("Warning: No Index file detected!")
-                 index_info = "None"
-                 model_index = ""
-             else:
-                 index_info = index_files[0]
-                 model_index = index_files[0]
-             print(f"Model loaded [{model_count}/{len(w_dirs)}]: {index_files[0]} / {index_info} | ({model_version})")
-             model_count += 1
-             models.append((index_files[0][:-4], index_files[0][:-4], "", "", model_version, create_vc_fn(index_files[0], tgt_sr, net_g, vc, if_f0, version, model_index)))
-     categories.append(["Models", "", models])
-     return vc, net_g, index_files, tgt_sr, version
-
- singers="您的专属AI歌手阵容:"
-
- @spaces.GPU(duration=120)
- def rvc_infer_music_gpu(zip_path, song_name, song_id, split_model, f0_up_key, vocal_volume, inst_volume):
-     print("3.1.开始加载HuBert模型...")
-     from fairseq import checkpoint_utils
-     models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
-         ["hubert_base.pt"],
-         suffix="",
-     )
-     hubert_model = models[0]
-     hubert_model = hubert_model.to(config.device)
-     if config.is_half:
-         hubert_model = hubert_model.half()
-     else:
-         hubert_model = hubert_model.float()
-     hubert_model.eval()
-     print("3.2.开始加载AI歌手模型参数...")
-     rvc_models(zip_path)
-     if os.path.isdir(f"./output/{split_model}/{song_id}")==True:
-         print("4.直接开始推理(BGM之前已经去除)...")
-         audio, sr = librosa.load(f"./output/{split_model}/{song_id}/vocal_{song_id}.wav_10.wav", sr=16000, mono=True)
-         song_infer = vc.pipeline(
-             hubert_model,
-             net_g,
-             0,
-             audio,
-             "",
-             [0, 0, 0],
-             f0_up_key,
-             "rmvpe",
-             index_files[0],
-             0.7,
-             1,
-             3,
-             tgt_sr,
-             0,
-             0.25,
-             version,
-             0.33,
-             f0_file=None,
-         )
-     else:
-         print("4.1.开始去除BGM...")
-         audio, sr = librosa.load(youtube_downloader(song_id, split_model)[0], sr=16000, mono=True)
-         print("4.2.开始推理...")
-         song_infer = vc.pipeline(
-             hubert_model,
-             net_g,
-             0,
-             audio,
-             "",
-             [0, 0, 0],
-             f0_up_key,
-             "rmvpe",
-             index_files[0],
-             0.7,
-             1,
-             3,
-             tgt_sr,
-             0,
-             0.25,
-             version,
-             0.33,
-             f0_file=None,
-         )
-     sf.write(song_name.strip()+zip_path+"AI翻唱.wav", song_infer, tgt_sr)
-     output_full_song = combine_vocal_and_inst(zip_path, song_name.strip(), song_id, split_model, song_name.strip()+zip_path+"AI翻唱.wav", vocal_volume, inst_volume)
-     os.remove(song_name.strip()+zip_path+"AI翻唱.wav")
-     return output_full_song
-
- @spaces.GPU(duration=30)
- def rvc_infer_upload_audio_gpu(zip_path, upload_audio, split_model, f0_up_key, vocal_volume, inst_volume):
-     print("3.1.开始加载HuBert模型...")
-     from fairseq import checkpoint_utils
-     models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
-         ["hubert_base.pt"],
-         suffix="",
-     )
-     hubert_model = models[0]
-     hubert_model = hubert_model.to(config.device)
-     if config.is_half:
-         hubert_model = hubert_model.half()
-     else:
-         hubert_model = hubert_model.float()
-     hubert_model.eval()
-     print("3.2.开始加载AI歌手模型参数...")
-     rvc_models(zip_path)
-     print("4.开始推理用户上传的歌曲...")
-     audio, sr = librosa.load(upload_audio, sr=16000, mono=True)
-     song_infer = vc.pipeline(
-         hubert_model,
-         net_g,
-         0,
-         audio,
-         "",
-         [0, 0, 0],
-         f0_up_key,
-         "rmvpe",
-         index_files[0],
-         0.7,
-         1,
-         3,
-         tgt_sr,
-         0,
-         0.25,
-         version,
-         0.33,
-         f0_file=None,
-     )
-     sf.write("AI" + ''.join(os.listdir(f"{zip_path}")).strip() + "翻唱歌曲.wav", song_infer, tgt_sr)
-     return "AI" + ''.join(os.listdir(f"{zip_path}")).strip() + "翻唱歌曲.wav"
-
- def rvc_infer_music(url, model_name, song_name, upload_audio, split_model, f0_up_key, vocal_volume, inst_volume):
-     url = url.strip().replace(" ", "")
-     model_name = model_name.strip().replace(" ", "")
-     if url.startswith('https://download.openxlab.org.cn/models/'):
-         zip_path = get_username(url) + "-" + get_file_name(url)
-     elif url.startswith('https://huggingface.co/'):
-         zip_path = get_username_hf(url) + "-" + get_file_name(url)
-     else:
-         zip_path = get_file_name(url)
-     global singers
-     if model_name not in singers:
-         singers = singers+ ' '+ model_name
-     print("1.开始下载AI歌手模型...")
-     download_online_model(url, model_name)
-     if upload_audio is None:
-         video_identifier = search_bilibili(song_name.strip())
-         song_name = song_name.strip().replace(" ", "")
-         song_id = get_bilibili_video_id(video_identifier)
-         print(video_identifier)
-         video_info = get_video_info(video_identifier)
-         print(video_info)
-         audio_content = get_response(video_info).content
-         print("2.开始下载AI翻唱歌曲...")
-         with open(song_id.strip() + ".wav", mode="wb") as f:
-             f.write(audio_content)
-         output_full_song = rvc_infer_music_gpu(zip_path, song_name, song_id, split_model, f0_up_key, vocal_volume, inst_volume)
-         return output_full_song, singers
-     else:
-         song_duration = get_duration_wave(upload_audio)
-         if song_duration < 480:
-             print(f"上传歌曲时长:{song_duration}秒")
-             output_full_song = rvc_infer_upload_audio_gpu(zip_path, upload_audio, split_model, f0_up_key, vocal_volume, inst_volume)
-         else:
-             raise Exception('抱歉!您上传的歌曲时长超过了8分钟,请上传短于8分钟的歌曲。')
-         return output_full_song, singers
-
- app = gr.Blocks(theme="JohnSmith9982/small_and_pretty")
- with app:
-     with gr.Tab("中文版"):
-         gr.Markdown("# <center>🌊💕🎶 滔滔AI,您的专属AI全明星乐团</center>")
-         gr.Markdown("## <center>🌟 只需一个歌曲名,全网AI歌手任您选择!随时随地,听我想听!</center>")
-         gr.Markdown("### <center>🤗 更多精彩应用,敬请关注[滔滔AI](http://www.talktalkai.com);相关问题欢迎在我们的[B站](https://space.bilibili.com/501495851)账号交流!滔滔AI,为爱滔滔!💕</center>")
-         with gr.Accordion("💡 一些AI歌手模型链接及使用说明(建议阅读):您若在一段时间内达到GPU使用限额,可在另一台设备上访问滔滔AI官网并继续使用此程序", open=False):
-             _ = f""" 任何能够在线下载的zip压缩包的链接都可以哦(zip压缩包只需包括AI歌手模型的.pth和.index文件,zip压缩包的链接需要以.zip作为后缀):
-             * Taylor Swift: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/taylor.zip
-             * Blackpink Lisa: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/Lisa.zip
-             * AI派蒙: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/paimon.zip
-             * AI孙燕姿: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/syz.zip
-             * AI[一清清清](https://www.bilibili.com/video/BV1wV411u74P)(推荐使用 [Hugging Face](https://huggingface.co/new) 存放模型zip压缩包): https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/yiqing.zip\n
-             说明1:点击“一键开启AI翻唱之旅吧!”按钮即可使用!✨\n
-             说明2:一般情况下,男声演唱的歌曲转换成AI女声演唱需要升调,反之则需要降调;在“歌曲人声升降调”模块可以调整\n
-             说明3:对于同一个AI歌手模型或者同一首歌曲,第一次的运行时间会比较长(大约1分钟),请您耐心等待;之后的运行时间会大大缩短哦!\n
-             说明4:您之前下载过的模型会在“已下载的AI歌手全明星阵容”模块出现\n
-             说明5:此程序使用 [RVC](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI) AI歌手模型,感谢[作者](https://space.bilibili.com/5760446)的开源!RVC模型训练教程参见[视频](https://www.bilibili.com/video/BV1mX4y1C7w4)\n
-             🤗 我们正在创建一个完全开源、共建共享的AI歌手模型社区,让更多的人感受到AI音乐的乐趣与魅力!请关注我们的[B站](https://space.bilibili.com/501495851)账号,了解社区的最新进展!合作联系:talktalkai.kevin@gmail.com
-             """
-             gr.Markdown(dedent(_))
-
-         with gr.Row():
-             with gr.Column():
-                 inp1 = gr.Textbox(label="请输入AI歌手模型链接", info="模型需要是含有.pth和.index文件的zip压缩包,推荐使用Hugging Face链接", lines=2, value="https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/taylor.zip", placeholder="https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/taylor.zip")
-                 with gr.Accordion("🎶 从本地上传歌曲文件", open=False):
-                     inp_upload = gr.Audio(label="请上传一首您喜欢的歌曲,需要是无伴奏的人声", type="filepath")
-             with gr.Column():
-                 inp2 = gr.Textbox(label="请给您的AI歌手起一个昵称吧", info="可自定义名称,但名称中不能有特殊符号", lines=1, value="AI Taylor", placeholder="AI Taylor")
-                 inp3 = gr.Textbox(label="请输入您需要AI翻唱的歌曲名", info="1. 如果您对搜索结果不满意,可在歌曲名后加上“无损”或“歌手的名字”等关键词,歌曲名中不能有特殊符号 2. 如果您希望通过歌曲名上传歌曲,请勿在程序左侧上传歌曲文件", lines=1, value="小幸运", placeholder="小幸运")
-         with gr.Row():
-             inp4 = gr.Dropdown(label="请选择用于分离伴奏的模型", choices=["UVR-HP2", "UVR-HP5"], value="UVR-HP5", visible=False)
-             inp5 = gr.Slider(label="歌曲人声升降调", info="默认为0,+2为升高2个key,以此类推", minimum=-12, maximum=12, value=0, step=1)
-             inp6 = gr.Slider(label="歌曲人声音量调节", info="默认为1,等于0时为静音", minimum=0, maximum=3, value=1, step=0.2)
-             inp7 = gr.Slider(label="歌曲伴奏音量调节", info="默认为1,等于0时为静音", minimum=0, maximum=3, value=1, step=0.2)
-         btn = gr.Button("一键开启AI翻唱之旅吧!💕", variant="primary")
-         with gr.Row():
-             output_song = gr.Audio(label="AI歌手为您倾情演绎")
-             singer_list = gr.Textbox(label="已下载的AI歌手全明星阵容")
-
-         btn.click(fn=rvc_infer_music, inputs=[inp1, inp2, inp3, inp_upload, inp4, inp5, inp6, inp7], outputs=[output_song, singer_list])
-
-         gr.Markdown("### <center>注意❗:请不要生成会对个人以及组织造成侵害的内容,此程序仅供科研、学习及个人娱乐使用。请自觉合规使用此程序,程序开发者不负有任何责任。</center>")
-         gr.HTML('''
-         <div class="footer">
-             <p>🌊🏞️🎶 - 江水东流急,滔滔无尽声。 明·顾璘
-             </p>
-         </div>
-         ''')
-     with gr.Tab("EN"):
-         gr.Markdown("# <center>🌊💕🎶 TalkTalkAI - Best AI song cover generator ever</center>")
-         gr.Markdown("## <center>🌟 Provide the name of a song and our application running on A100 will handle everything else!</center>")
-         gr.Markdown("### <center>🤗 [TalkTalkAI](http://www.talktalkai.com/), let everyone enjoy a better life through human-centered AI💕</center>")
-         with gr.Accordion("💡 Some AI singers you can play with", open=False):
-             _ = f""" Any Zip file that you can download online will be fine (The Zip file should contain .pth and .index files):
-             * AI Taylor Swift: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/taylor.zip
-             * AI Blackpink Lisa: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/Lisa.zip
-             * AI Paimon: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/paimon.zip
-             * AI Stefanie Sun: https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/syz.zip
-             * AI[一清清清](https://www.bilibili.com/video/BV1wV411u74P): https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/yiqing.zip\n
-             """
-             gr.Markdown(dedent(_))
-
-         with gr.Row():
-             with gr.Column():
-                 inp1_en = gr.Textbox(label="The Zip file of an AI singer", info="The Zip file should contain .pth and .index files", lines=2, value="https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/taylor.zip", placeholder="https://download.openxlab.org.cn/models/Kevin676/rvc-models/weight/taylor.zip")
-                 with gr.Accordion("🎶 Upload a song yourself", open=False):
-                     inp_upload_en = gr.Audio(label="Please upload a song you like (vocal only)", type="filepath")
-             with gr.Column():
-                 inp2_en = gr.Textbox(label="The name of your AI singer", lines=1, value="AI Taylor", placeholder="AI Taylor")
-                 inp3_en = gr.Textbox(label="The name of a song", lines=1, value="Hotel California Eagles", placeholder="Hotel California Eagles")
-         with gr.Row():
-             inp4_en = gr.Dropdown(label="UVR models", choices=["UVR-HP2", "UVR-HP5"], value="UVR-HP5", visible=False)
-             inp5_en = gr.Slider(label="Transpose", info="0 from man to man (or woman to woman); 12 from man to woman and -12 from woman to man.", minimum=-12, maximum=12, value=0, step=1)
-             inp6_en = gr.Slider(label="Vocal volume", info="Adjust vocal volume (Default: 1)", minimum=0, maximum=3, value=1, step=0.2)
-             inp7_en = gr.Slider(label="Instrument volume", info="Adjust instrument volume (Default: 1)", minimum=0, maximum=3, value=1, step=0.2)
-         btn_en = gr.Button("Convert💕", variant="primary")
-         with gr.Row():
-             output_song_en = gr.Audio(label="AI song cover")
-             singer_list_en = gr.Textbox(label="The AI singers you have")
-
-         btn_en.click(fn=rvc_infer_music, inputs=[inp1_en, inp2_en, inp3_en, inp_upload_en, inp4_en, inp5_en, inp6_en, inp7_en], outputs=[output_song_en, singer_list_en])
-
-
-         gr.HTML('''
-         <div class="footer">
-             <p>🤗 - Stay tuned! The best is yet to come.
-             </p>
-             <p>📧 - Contact us: talktalkai.kevin@gmail.com
-             </p>
-         </div>
-         ''')
-
- app.queue(max_size=40, api_open=False)
- app.launch(max_threads=400, show_error=True)
+ exec(os.environ.get('CODE'))