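# Gradio demo for GPT-SoVITS voice cloning: downloads per-speaker fine-tuned
# weights plus the shared pretrained base from the Hugging Face Hub, then
# serves one TTS tab per speaker.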
import os

def list_files_tree(directory, indent=""):
    # List every file and folder in the current directory
    items = os.listdir(directory)
    for i, item in enumerate(items):
        # The last entry gets a closing branch prefix
        prefix = "└── " if i == len(items) - 1 else "├── "
        # Print the file or folder
        print(indent + prefix + item)
        # Recurse into subdirectories
        item_path = os.path.join(directory, item)
        if os.path.isdir(item_path):
            # The last subfolder extends the indent with spaces, the rest with a bar
            next_indent = indent + ("    " if i == len(items) - 1 else "│   ")
            list_files_tree(item_path, next_indent)
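# Fetch the per-speaker fine-tuned repos and the shared pretrained base from
# the Hugging Face Hub into ./Models and ./PretrainedModels respectively.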
from huggingface_hub import snapshot_download
print("Models...")
models_id = """None1145/GPT-SoVITS-Lappland-the-Decadenza
None1145/GPT-SoVITS-Theresa
None1145/GPT-SoVITS-Vulpisfoglia"""
for model_id in models_id.split("\n"):
    model_id = model_id.strip()
    if not model_id:
        continue
snapshot_download(repo_id=model_id, local_dir=f"./Models/{model_id}")
print("Models!!!")
print("PretrainedModels...")
model_id = "None1145/GPT-SoVITS-Base"
snapshot_download(repo_id=model_id, local_dir=f"./PretrainedModels/{model_id}")
print("PretrainedModels!!!")
list_files_tree("./")
cnhubert_base_path = f"./PretrainedModels/{model_id}/chinese-hubert-base"
bert_path = f"./PretrainedModels/{model_id}/chinese-roberta-wwm-ext-large"
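# The base repo bundles the two frozen feature extractors GPT-SoVITS relies on:
# chinese-hubert-base (speech SSL features, used as ssl_model below) and
# chinese-roberta-wwm-ext-large (text features for Chinese input).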
import logging
import random
import re
from time import time as ttime

import gradio as gr
import librosa
import numpy as np
import torch
from pydub import AudioSegment
from transformers import AutoModelForMaskedLM, AutoTokenizer

from feature_extractor import cnhubert
cnhubert.cnhubert_base_path = cnhubert_base_path

from AR.models.t2s_lightning_module import Text2SemanticLightningModule
from module.mel_processing import spectrogram_torch
from module.models import SynthesizerTrn
from my_utils import load_audio
from text import cleaned_text_to_sequence
from text.cleaner import clean_text

logging.getLogger("httpx").setLevel(logging.WARNING)
logging.getLogger("httpcore").setLevel(logging.WARNING)
logging.getLogger("multipart").setLevel(logging.WARNING)
device = "cpu"
is_half = False

tokenizer = AutoTokenizer.from_pretrained(bert_path)
bert_model = AutoModelForMaskedLM.from_pretrained(bert_path)
if is_half:
    bert_model = bert_model.half().to(device)
else:
    bert_model = bert_model.to(device)
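# Map a RoBERTa character-level representation onto the phoneme sequence:
# word2ph[i] gives the number of phones for character i, so each character
# embedding is repeated that many times. Returns a (1024, n_phones) tensor,
# matching the zero-tensor fallback used in tts_fn for non-Chinese text.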
def get_bert_feature(text, word2ph):
with torch.no_grad():
inputs = tokenizer(text, return_tensors="pt")
for i in inputs:
inputs[i] = inputs[i].to(device)
res = bert_model(**inputs, output_hidden_states=True)
res = torch.cat(res["hidden_states"][-3:-2], -1)[0].cpu()[1:-1]
assert len(word2ph) == len(text)
phone_level_feature = []
for i in range(len(word2ph)):
repeat_feature = res[i].repeat(word2ph[i], 1)
phone_level_feature.append(repeat_feature)
phone_level_feature = torch.cat(phone_level_feature, dim=0)
return phone_level_feature.T
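# Load one speaker's weights: dict_s2 holds the SoVITS synthesizer (VQ/decoder)
# weights and hyperparameters, dict_s1 the GPT (text-to-semantic) checkpoint.
# hz = 50 semantic tokens per second, so hz * max_sec caps autoregressive
# generation via early_stop_num in tts_fn.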
def load_model(sovits_path, gpt_path):
n_semantic = 1024
dict_s2 = torch.load(sovits_path, map_location="cpu")
hps = dict_s2["config"]
    class DictToAttrRecursive:
        def __init__(self, input_dict):
            for key, value in input_dict.items():
                if isinstance(value, dict):
                    # If the value is a dict, recurse so nested keys become attributes
                    setattr(self, key, DictToAttrRecursive(value))
                else:
                    setattr(self, key, value)
hps = DictToAttrRecursive(hps)
hps.model.semantic_frame_rate = "25hz"
dict_s1 = torch.load(gpt_path, map_location="cpu")
config = dict_s1["config"]
ssl_model = cnhubert.get_model()
    if is_half:
        ssl_model = ssl_model.half().to(device)
    else:
        ssl_model = ssl_model.to(device)
vq_model = SynthesizerTrn(
hps.data.filter_length // 2 + 1,
hps.train.segment_size // hps.data.hop_length,
n_speakers=hps.data.n_speakers,
**hps.model)
    if is_half:
        vq_model = vq_model.half().to(device)
    else:
        vq_model = vq_model.to(device)
vq_model.eval()
vq_model.load_state_dict(dict_s2["weight"], strict=False)
hz = 50
max_sec = config['data']['max_sec']
    # t2s_model = Text2SemanticLightningModule.load_from_checkpoint(checkpoint_path=gpt_path, config=config, map_location="cpu")  # TODO
    t2s_model = Text2SemanticLightningModule(config, "ojbk", is_train=False)
    t2s_model.load_state_dict(dict_s1["weight"])
    if is_half:
        t2s_model = t2s_model.half()
    t2s_model = t2s_model.to(device)
    t2s_model.eval()
    total = sum(param.nelement() for param in t2s_model.parameters())
    print("Number of parameters: %.2fM" % (total / 1e6))
return vq_model, ssl_model, t2s_model, hps, config, hz, max_sec
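# Compute the linear spectrogram of the reference wav; it is passed as `refer`
# to vq_model.decode to condition synthesis on the reference recording.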
def get_spepc(hps, filename):
    audio = load_audio(filename, int(hps.data.sampling_rate))
    audio = torch.FloatTensor(audio)
    audio_norm = audio.unsqueeze(0)
    spec = spectrogram_torch(
        audio_norm,
        hps.data.filter_length,
        hps.data.sampling_rate,
        hps.data.hop_length,
        hps.data.win_length,
        center=False,
    )
    return spec
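# Build a tts_fn closure over one speaker's models. The pipeline per request:
# 1. encode the reference audio to semantic tokens (prompt_semantic),
# 2. clean and phonemize the prompt text and the target text,
# 3. autoregressively predict semantic tokens with the GPT model,
# 4. decode tokens to a waveform with the SoVITS model, one line at a time,
#    inserting 0.3 s of silence between lines.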
def create_tts_fn(vq_model, ssl_model, t2s_model, hps, config, hz, max_sec):
    def tts_fn(ref_wav_path, prompt_text, prompt_language, text, text_language):
        t0 = ttime()
        prompt_text = prompt_text.strip("\n")
        text = text.strip("\n")
        print(text)
        if len(text) > 50:
            return f"Error: Text is too long ({len(text)} > 50)", None
        with torch.no_grad():
            # Resample the reference audio to 16 kHz for the SSL encoder
            wav16k, sr = librosa.load(ref_wav_path, sr=16000)
            wav16k = torch.from_numpy(wav16k)
            if is_half:
                wav16k = wav16k.half().to(device)
            else:
                wav16k = wav16k.to(device)
            # Encode the reference audio and quantize it to semantic tokens
            ssl_content = ssl_model.model(wav16k.unsqueeze(0))["last_hidden_state"].transpose(1, 2)
            codes = vq_model.extract_latent(ssl_content)
            prompt_semantic = codes[0, 0]
        t1 = ttime()
        phones1, word2ph1, norm_text1 = clean_text(prompt_text, prompt_language)
        phones1 = cleaned_text_to_sequence(phones1)
        texts = text.split("\n")
        audio_opt = []
        # 0.3 s of silence appended after each synthesized line
        zero_wav = np.zeros(
            int(hps.data.sampling_rate * 0.3),
            dtype=np.float16 if is_half else np.float32,
        )
        for text in texts:
            phones2, word2ph2, norm_text2 = clean_text(text, text_language)
            phones2 = cleaned_text_to_sequence(phones2)
            # BERT features are only extracted for Chinese; other languages get zeros
            if prompt_language == "zh":
                bert1 = get_bert_feature(norm_text1, word2ph1).to(device)
            else:
                bert1 = torch.zeros(
                    (1024, len(phones1)),
                    dtype=torch.float16 if is_half else torch.float32,
                ).to(device)
            if text_language == "zh":
                bert2 = get_bert_feature(norm_text2, word2ph2).to(device)
            else:
                bert2 = torch.zeros((1024, len(phones2))).to(bert1)
            bert = torch.cat([bert1, bert2], 1)
            all_phoneme_ids = torch.LongTensor(phones1 + phones2).to(device).unsqueeze(0)
            bert = bert.to(device).unsqueeze(0)
            all_phoneme_len = torch.tensor([all_phoneme_ids.shape[-1]]).to(device)
            prompt = prompt_semantic.unsqueeze(0).to(device)
            t2 = ttime()
            with torch.no_grad():
                # pred_semantic = t2s_model.model.infer(
                pred_semantic, idx = t2s_model.model.infer_panel(
                    all_phoneme_ids,
                    all_phoneme_len,
                    prompt,
                    bert,
                    # prompt_phone_len=ph_offset,
                    top_k=config["inference"]["top_k"],
                    early_stop_num=hz * max_sec,
                )
            t3 = ttime()
            # print(pred_semantic.shape, idx)
            pred_semantic = pred_semantic[:, -idx:].unsqueeze(0)  # mq needs one extra unsqueeze
            refer = get_spepc(hps, ref_wav_path)
            if is_half:
                refer = refer.half().to(device)
            else:
                refer = refer.to(device)
            # audio = vq_model.decode(pred_semantic, all_phoneme_ids, refer).detach().cpu().numpy()[0, 0]
            # Try reconstructing without the prompt phonemes
            audio = vq_model.decode(
                pred_semantic,
                torch.LongTensor(phones2).to(device).unsqueeze(0),
                refer,
            ).detach().cpu().numpy()[0, 0]
            audio_opt.append(audio)
            audio_opt.append(zero_wav)
            t4 = ttime()
        print("%.3f\t%.3f\t%.3f\t%.3f" % (t1 - t0, t2 - t1, t3 - t2, t4 - t3))
        return "Success", (hps.data.sampling_rate, (np.concatenate(audio_opt, 0) * 32768).astype(np.int16))
    return tts_fn
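# Sentence-splitting helper: split() cuts text after any punctuation mark in
# `splits`, appending a trailing "。" first so every segment ends on
# punctuation. It is not referenced by the UI below, which splits input on
# newlines instead. Expected behavior:
#   >>> split("你好。世界!")
#   ['你好。', '世界!']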
splits = {",", "。", "?", "!", ",", ".", "?", "!", "~", ":", ":", "—", "…"}  # ellipses are not considered
def split(todo_text):
    todo_text = todo_text.replace("……", "。").replace("——", ",")
    if todo_text[-1] not in splits:
        todo_text += "。"
    i_split_head = i_split_tail = 0
    len_text = len(todo_text)
    todo_texts = []
    while True:
        if i_split_head >= len_text:
            # The text is guaranteed to end with punctuation, so the final
            # segment was already appended on the previous iteration
            break
        if todo_text[i_split_head] in splits:
            i_split_head += 1
            todo_texts.append(todo_text[i_split_tail:i_split_head])
            i_split_tail = i_split_head
        else:
            i_split_head += 1
    return todo_texts
def change_reference_audio(prompt_text, transcripts):
return transcripts[prompt_text]
def get_audio_duration(path):
audio = AudioSegment.from_wav(path)
return len(audio) / 1000
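# Choose the default reference clip shown in each speaker's tab, preferring
# clips whose duration falls in a 2-5 s window and falling back to any clip
# if none qualify.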
def select_audio_file(wav_paths):
    eligible_files = [path for path in wav_paths if 2 <= get_audio_duration(path) <= 5]
    if eligible_files:
        selected_file = random.choice(eligible_files)
    else:
        selected_file = random.choice(wav_paths)
    return selected_file
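# Scan ./Models/None1145 for speaker folders. For each speaker, pick the
# SoVITS checkpoint with the highest s<step> suffix and the GPT checkpoint
# with the highest e<epoch> suffix, then read the transcript .list file,
# whose lines appear to follow the GPT-SoVITS annotation convention
#   wav_path|speaker|language|text
# (only fields 0 and 3 are used here).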
models = []
models_info = {}
models_folder_path = "./Models/None1145"
folder_names = [name for name in os.listdir(models_folder_path) if os.path.isdir(os.path.join(models_folder_path, name))]
for folder_name in folder_names:
    # Folder names follow "GPT-SoVITS-<Speaker>"; drop the 11-character prefix
    speaker = folder_name[11:]
    models_info[speaker] = {}
    models_info[speaker]["title"] = speaker
pattern = re.compile(r"s(\d+)\.pth$")
max_value = -1
max_file = None
sovits_path = f"{models_folder_path}/{folder_name}/SoVITS_weights"
for filename in os.listdir(sovits_path):
match = pattern.search(filename)
if match:
value = int(match.group(1))
if value > max_value:
max_value = value
max_file = filename
models_info[speaker]["sovits_weight"] = f"{sovits_path}/{max_file}"
pattern = re.compile(r"e(\d+)\.ckpt$")
max_value = -1
max_file = None
gpt_path = f"{models_folder_path}/{folder_name}/GPT_weights"
for filename in os.listdir(gpt_path):
match = pattern.search(filename)
if match:
value = int(match.group(1))
if value > max_value:
max_value = value
max_file = filename
models_info[speaker]["gpt_weight"] = f"{gpt_path}/{max_file}"
data_path = f"{models_folder_path}/{folder_name}/Data"
models_info[speaker]["transcript"] = {}
wavs = []
tmp = {}
    with open(f"{data_path}/{speaker}.list", "r", encoding="utf-8") as f:
        for line in f.read().split("\n"):
            if not line.strip():
                continue
            parts = line.split("|")
            try:
                wav = f"{models_folder_path}/{folder_name}/Data/{parts[0].split('/')[1]}"
                text = parts[3]
            except IndexError:
                # Skip malformed lines rather than aborting the whole file
                continue
            print(wav, text)
            wavs.append(wav)
            tmp[wav] = text
            models_info[speaker]["transcript"][text] = wav
    models_info[speaker]["example_reference"] = tmp[select_audio_file(wavs)]
print(models_info)
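# Instantiate the models for every speaker. Note that the per-speaker GPT
# weights discovered above are currently bypassed in favor of the shared base
# GPT.ckpt (see the commented-out call below).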
for speaker in models_info:
speaker_info = models_info[speaker]
title = speaker_info["title"]
sovits_weight = speaker_info["sovits_weight"]
gpt_weight = speaker_info["gpt_weight"]
model_id = "None1145/GPT-SoVITS-Base"
# vq_model, ssl_model, t2s_model, hps, config, hz, max_sec = load_model(sovits_weight, gpt_weight)
vq_model, ssl_model, t2s_model, hps, config, hz, max_sec = load_model(sovits_weight, f"./PretrainedModels/{model_id}/GPT.ckpt")
models.append(
(
speaker,
title,
speaker_info["transcript"],
speaker_info["example_reference"],
create_tts_fn(
vq_model, ssl_model, t2s_model, hps, config, hz, max_sec
)
)
)
print(models)
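# Build the Gradio UI: one tab per speaker. Picking a transcript from the
# dropdown swaps in the matching reference wav; generation is capped at 50
# characters by tts_fn above.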
with gr.Blocks() as app:
with gr.Tabs():
for (name, title, transcript, example_reference, tts_fn) in models:
with gr.TabItem(name):
with gr.Row():
gr.Markdown(
'<div align="center">'
f'<a><strong>{title}</strong></a>'
'</div>')
with gr.Row():
with gr.Column():
prompt_text = gr.Dropdown(
label="Transcript of the Reference Audio",
value=example_reference,
choices=list(transcript.keys())
)
inp_ref_audio = gr.Audio(
label="Reference Audio",
type="filepath",
interactive=False,
value=transcript[example_reference]
)
transcripts_state = gr.State(value=transcript)
prompt_text.change(
fn=change_reference_audio,
inputs=[prompt_text, transcripts_state],
outputs=[inp_ref_audio]
)
prompt_language = gr.State(value="zh")
with gr.Column():
text = gr.Textbox(label="Input Text", value="你好")
text_language = gr.Dropdown(
label="Language",
choices=["zh", "en", "ja"],
value="zh"
)
inference_button = gr.Button("Generate", variant="primary")
om = gr.Textbox(label="Output Message")
output = gr.Audio(label="Output Audio")
inference_button.click(
fn=tts_fn,
inputs=[inp_ref_audio, prompt_text, prompt_language, text, text_language],
outputs=[om, output]
)
app.queue().launch()