import os

import gradio as gr
import numpy as np
import torch
import webbrowser

import re_matching
import utils
from config import config
from infer import infer, latest_version, get_net_g
from tools.sentence import split_by_language, sentence_split
from tools.translate import translate
from webui import reload_javascript

device = config.webui_config.device
if device == "mps":
    os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"

# `hps` and `net_g` were referenced below but never defined. The lines here
# follow the standard Bert-VITS2 webui loading pattern (matching the
# `latest_version` / `get_net_g` imports above) and assume that
# `config.webui_config.config_path` and `config.webui_config.model` point at
# the model config and checkpoint.
hps = utils.get_hparams_from_file(config.webui_config.config_path)
version = hps.version if hasattr(hps, "version") else latest_version
net_g = get_net_g(
    model_path=config.webui_config.model, version=version, device=device, hps=hps
)


def generate_audio(
    slices,
    sdp_ratio,
    noise_scale,
    noise_scale_w,
    length_scale,
    speaker,
    language,
):
    audio_list = []
    # Half a second of silence at the model's sampling rate.
    silence = np.zeros(hps.data.sampling_rate // 2, dtype=np.int16)
    with torch.no_grad():
        for piece in slices:
            audio = infer(
                piece,
                sdp_ratio=sdp_ratio,
                noise_scale=noise_scale,
                noise_scale_w=noise_scale_w,
                length_scale=length_scale,
                sid=speaker,
                language=language,
                hps=hps,
                net_g=net_g,
                device=device,
            )
            audio16bit = gr.processing_utils.convert_to_16_bit_wav(audio)
            audio_list.append(audio16bit)
            audio_list.append(silence)  # append silence after each slice
    return audio_list


def speak(
    text: str,
    speaker="TalkFlower_CNzh",
    sdp_ratio=0.2,  # SDP/DP mix ratio
    noise_scale=0.6,  # emotion
    noise_scale_w=0.6,  # phoneme length
    length_scale=0.9,  # speaking rate
    language="ZH",
):
    audio_list = []
    audio_list.extend(
        generate_audio(
            text.split("|"),
            sdp_ratio,
            noise_scale,
            noise_scale_w,
            length_scale,
            speaker,
            language,
        )
    )
    audio_concat = np.concatenate(audio_list)
    return (hps.data.sampling_rate, audio_concat)


with open("./css/style.css", "r", encoding="utf-8") as f:
    customCSS = f.read()

with gr.Blocks(css=customCSS) as demo:
    # talkingFlowerModel = gr.HTML("""