import os
import glob
import json
import argparse
import traceback
import logging
import gradio as gr
import numpy as np
import librosa
import torch
import asyncio
import edge_tts
from datetime import datetime
from fairseq import checkpoint_utils
from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono
from vc_infer_pipeline import VC
from config import Config

config = Config()
logging.getLogger("numba").setLevel(logging.WARNING)
limitation = os.getenv("SYSTEM") == "spaces"  # limit audio length in Hugging Face Spaces


def create_vc_fn(tgt_sr, net_g, vc, if_f0, file_index):
    # Build a per-model inference function that Gradio can call.
    def vc_fn(
        input_audio,
        f0_up_key,
        f0_method,
        index_rate,
        tts_mode,
        tts_text,
        tts_voice
    ):
        try:
            if tts_mode:
                if len(tts_text) > 100 and limitation:
                    return "Text is too long", None
                if tts_text is None or tts_voice is None:
                    return "You need to enter text and select a voice", None
                # The dropdown value is "<ShortName>-<Gender>"; drop the gender suffix for edge-tts.
                asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3"))
                audio, sr = librosa.load("tts.mp3", sr=16000, mono=True)
            else:
                if config.files:
                    audio, sr = librosa.load(input_audio, sr=16000, mono=True)
                else:
                    if input_audio is None:
                        return "You need to upload an audio", None
                    sampling_rate, audio = input_audio
                    duration = audio.shape[0] / sampling_rate
                    if duration > 20 and limitation:
                        return "Please upload an audio file that is less than 20 seconds. If you need to generate a longer audio file, please use Colab.", None
                    # Normalize integer PCM to float32 and resample to the 16 kHz expected by HuBERT.
                    audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
                    if len(audio.shape) > 1:
                        audio = librosa.to_mono(audio.transpose(1, 0))
                    if sampling_rate != 16000:
                        audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
            times = [0, 0, 0]
            f0_up_key = int(f0_up_key)
            audio_opt = vc.pipeline(
                hubert_model,
                net_g,
                0,
                audio,
                times,
                f0_up_key,
                f0_method,
                file_index,
                index_rate,
                if_f0,
                f0_file=None,
            )
            print(
                f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}s, f0: {times[1]}s, infer: {times[2]}s"
            )
            return "Success", (tgt_sr, audio_opt)
        except Exception:
            info = traceback.format_exc()
            print(info)
            return info, (None, None)
    return vc_fn


def load_hubert():
    # Load the HuBERT feature extractor used by the conversion pipeline.
    global hubert_model
    models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
        ["hubert_base.pt"],
        suffix="",
    )
    hubert_model = models[0]
    hubert_model = hubert_model.to(config.device)
    if config.is_half:
        hubert_model = hubert_model.half()
    else:
        hubert_model = hubert_model.float()
    hubert_model.eval()


def change_to_tts_mode(tts_mode):
    # Toggle between the audio-upload widget and the edge-tts widgets.
    if tts_mode:
        return gr.Audio.update(visible=False), gr.Textbox.update(visible=True), gr.Dropdown.update(visible=True)
    else:
        return gr.Audio.update(visible=True), gr.Textbox.update(visible=False), gr.Dropdown.update(visible=False)


if __name__ == '__main__':
    load_hubert()
    models = []
    tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices())
    voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list]
    folder_path = "weights"
    for name in os.listdir(folder_path):
        print("check folder: " + name)
        if name.startswith("."):
            continue  # skip hidden folders such as .ipynb_checkpoints instead of aborting the scan
        cover_path = glob.glob(f"{folder_path}/{name}/*.png") + glob.glob(f"{folder_path}/{name}/*.jpg")
        index_path = glob.glob(f"{folder_path}/{name}/*.index")
        checkpoint_path = glob.glob(f"{folder_path}/{name}/*.pth")
        title = name
        # Fall back to empty strings when a model folder has no cover image or .index file.
        cover = cover_path[0] if cover_path else ""
        index = index_path[0] if index_path else ""
        cpt = torch.load(checkpoint_path[0], map_location="cpu")
        tgt_sr = cpt["config"][-1]
        cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
        if_f0 = cpt.get("f0", 1)
        if if_f0 == 1:
            net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
        else:
            net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
        del net_g.enc_q
        print(net_g.load_state_dict(cpt["weight"], strict=False))  # without this line the weights are not loaded cleanly, oddly enough
        net_g.eval().to(config.device)
        if config.is_half:
            net_g = net_g.half()
        else:
            net_g = net_g.float()
        vc = VC(tgt_sr, config)
        models.append((name, title, cover, create_vc_fn(tgt_sr, net_g, vc, if_f0, index)))

    with gr.Blocks() as app:
        gr.Markdown(
            "# RVC Models (Latest Update)\n"
            "## The input audio should be clean, pure vocals without background music.\n"
            "### More features will be added soon...\n"
            "[![image](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/110kiMZTdP6Ri1lY9-NbQf17GVPPhHyeT?usp=sharing)\n\n"
            "[![Original Repo](https://badgen.net/badge/icon/github?icon=github&label=Original%20Repo)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI)"
        )
        with gr.Tabs():
            for (name, title, cover, vc_fn) in models:
                with gr.TabItem(name):
                    with gr.Row():
                        # Model title and optional cover image shown above the controls.
                        gr.Markdown(
                            '<div align="center">'
                            f'<div>{title}</div>\n' +
                            (f'<img src="file/{cover}">' if cover else "") +
                            '</div>'
                        )
                    with gr.Row():
                        with gr.Column():
                            vc_input = gr.Audio(label="Input audio" + (" (less than 20 seconds)" if limitation else ""))
                            vc_transpose = gr.Number(label="Transpose", value=0)
                            vc_f0method = gr.Radio(
                                label="Pitch extraction algorithm: pm is fast, harvest is better for low frequencies",
                                choices=["pm", "harvest"],
                                value="pm",
                                interactive=True,
                            )
                            vc_index_ratio = gr.Slider(
                                minimum=0,
                                maximum=1,
                                label="Retrieval feature ratio",
                                value=0.6,
                                interactive=True,
                            )
                            tts_mode = gr.Checkbox(label="TTS (use edge-tts as input)", value=False)
                            tts_text = gr.Textbox(visible=False, label="TTS text (100 characters limit)" if limitation else "TTS text")
                            tts_voice = gr.Dropdown(label="Edge-tts speaker", choices=voices, visible=False, allow_custom_value=False, value="en-US-AnaNeural-Female")
                            vc_submit = gr.Button("Generate", variant="primary")
                        with gr.Column():
                            vc_output1 = gr.Textbox(label="Output Message")
                            vc_output2 = gr.Audio(label="Output Audio")
                    vc_submit.click(vc_fn, [vc_input, vc_transpose, vc_f0method, vc_index_ratio, tts_mode, tts_text, tts_voice], [vc_output1, vc_output2])
                    tts_mode.change(change_to_tts_mode, [tts_mode], [vc_input, tts_text, tts_voice])
    app.queue(concurrency_count=1, max_size=20, api_open=config.api).launch(share=config.share)