import os
import torch
import librosa
import gradio as gr
from scipy.io.wavfile import write
from transformers import WavLMModel
import utils
from models import SynthesizerTrn
from mel_processing import mel_spectrogram_torch
from speaker_encoder.voice_encoder import SpeakerEncoder
'''
def get_wavlm():
    os.system('gdown https://drive.google.com/uc?id=12-cB34qCTvByWT-QtOcZaqwwO21FLSqU')
    shutil.move('WavLM-Large.pt', 'wavlm')
'''
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Loading FreeVC...")
hps = utils.get_hparams_from_file("configs/freevc.json")
freevc = SynthesizerTrn(
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    **hps.model).to(device)
_ = freevc.eval()
_ = utils.load_checkpoint("checkpoints/freevc.pth", freevc, None)
smodel = SpeakerEncoder('speaker_encoder/ckpt/pretrained_bak_5805000.pt')
print("Loading FreeVC(24k)...")
hps = utils.get_hparams_from_file("configs/freevc-24.json")
freevc_24 = SynthesizerTrn(
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    **hps.model).to(device)
_ = freevc_24.eval()
_ = utils.load_checkpoint("checkpoints/freevc-24.pth", freevc_24, None)
print("Loading FreeVC-s...")
hps = utils.get_hparams_from_file("configs/freevc-s.json")
freevc_s = SynthesizerTrn(
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    **hps.model).to(device)
_ = freevc_s.eval()
_ = utils.load_checkpoint("checkpoints/freevc-s.pth", freevc_s, None)
print("Loading WavLM for content...")
cmodel = WavLMModel.from_pretrained("microsoft/wavlm-large").to(device)
import ffmpeg
import random
import numpy as np
from elevenlabs.client import ElevenLabs
def pad_buffer(audio):
    # Pad the byte buffer to a multiple of the 2-byte int16 element size
    buffer_size = len(audio)
    element_size = np.dtype(np.int16).itemsize
    if buffer_size % element_size != 0:
        audio = audio + b'\0' * (element_size - (buffer_size % element_size))
    return audio
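# A quick illustration of the padding (hypothetical bytes): a 3-byte buffer is
# not a multiple of the 2-byte int16 size, so one NUL byte is appended:
#   pad_buffer(b'\x01\x02\x03')  ->  b'\x01\x02\x03\x00'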
def generate_voice(api_key, text, voice):
    client = ElevenLabs(
        api_key=api_key,  # defaults to the ELEVEN_API_KEY environment variable
    )
    audio = client.generate(text=text, voice=voice)
    audio = b"".join(audio)
    with open("output.mp3", "wb") as f:
        f.write(audio)
    return "output.mp3"
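# Note: in the ElevenLabs Python SDK, client.generate() returns an iterator of
# MP3 byte chunks rather than a single bytes object, which is why the chunks
# are joined with b"".join() before being written to output.mp3.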
html_denoise = """
<html>
<head>
<link rel="stylesheet" href="https://gradio.s3-us-west-2.amazonaws.com/2.6.2/static/bundle.css">
</head>
<body>
<div id="target"></div>
<script src="https://gradio.s3-us-west-2.amazonaws.com/2.6.2/static/bundle.js"></script>
<script
    type="module"
    src="https://gradio.s3-us-west-2.amazonaws.com/4.15.0/gradio.js"
></script>
<iframe
    src="https://g-app-center-40055665-8145-0zp6jbv.openxlab.space"
    frameBorder="0"
    width="1280"
    height="700"
></iframe>
</body>
</html>
"""
def convert(api_key, text, tgt, voice, save_path):
    model = "FreeVC (24kHz)"
    with torch.no_grad():
        # Target speaker: load the reference clip and trim leading/trailing silence
        wav_tgt, _ = librosa.load(tgt, sr=hps.data.sampling_rate)
        wav_tgt, _ = librosa.effects.trim(wav_tgt, top_db=20)
        if model == "FreeVC" or model == "FreeVC (24kHz)":
            g_tgt = smodel.embed_utterance(wav_tgt)
            g_tgt = torch.from_numpy(g_tgt).unsqueeze(0).to(device)
        else:
            wav_tgt = torch.from_numpy(wav_tgt).unsqueeze(0).to(device)
            mel_tgt = mel_spectrogram_torch(
                wav_tgt,
                hps.data.filter_length,
                hps.data.n_mel_channels,
                hps.data.sampling_rate,
                hps.data.hop_length,
                hps.data.win_length,
                hps.data.mel_fmin,
                hps.data.mel_fmax
            )
        # Source speech: synthesize the text with ElevenLabs, then extract
        # WavLM content features
        src = generate_voice(api_key, text, voice)
        wav_src, _ = librosa.load(src, sr=hps.data.sampling_rate)
        wav_src = torch.from_numpy(wav_src).unsqueeze(0).to(device)
        c = cmodel(wav_src).last_hidden_state.transpose(1, 2).to(device)
        # Inference
        if model == "FreeVC":
            audio = freevc.infer(c, g=g_tgt)
        elif model == "FreeVC-s":
            audio = freevc_s.infer(c, mel=mel_tgt)
        else:
            audio = freevc_24.infer(c, g=g_tgt)
        audio = audio[0][0].data.cpu().float().numpy()
        if model == "FreeVC" or model == "FreeVC-s":
            write(f"output/{save_path}.wav", hps.data.sampling_rate, audio)
        else:
            write(f"output/{save_path}.wav", 24000, audio)
    return f"output/{save_path}.wav"
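# Usage sketch (hypothetical paths): with model pinned to "FreeVC (24kHz)"
# above, a call like
#   convert(api_key, "Hello there", "target_speaker.wav", "Chris", "demo")
# synthesizes the text with ElevenLabs, converts it toward the voice in
# target_speaker.wav, and writes output/demo.wav at 24000 Hz.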
class subtitle:
    def __init__(self, index: int, start_time, end_time, text: str):
        self.index = int(index)
        self.start_time = start_time
        self.end_time = end_time
        self.text = text.strip()

    def normalize(self, ntype: str, fps=30):
        if ntype == "prcsv":
            # Premiere CSV style: HH:MM:SS;FF, where FF is a frame count
            h, m, s, fs = (self.start_time.replace(';', ':')).split(":")
            self.start_time = int(h) * 3600 + int(m) * 60 + int(s) + round(int(fs) / fps, 5)
            h, m, s, fs = (self.end_time.replace(';', ':')).split(":")
            self.end_time = int(h) * 3600 + int(m) * 60 + int(s) + round(int(fs) / fps, 5)
        elif ntype == "srt":
            # SRT style: HH:MM:SS,mmm
            h, m, s = self.start_time.split(":")
            s = s.replace(",", ".")
            self.start_time = int(h) * 3600 + int(m) * 60 + round(float(s), 5)
            h, m, s = self.end_time.split(":")
            s = s.replace(",", ".")
            self.end_time = int(h) * 3600 + int(m) * 60 + round(float(s), 5)
        else:
            raise ValueError(f"Unknown normalize type: {ntype}")

    def add_offset(self, offset=0):
        self.start_time += offset
        if self.start_time < 0:
            self.start_time = 0
        self.end_time += offset
        if self.end_time < 0:
            self.end_time = 0

    def __str__(self) -> str:
        return f'id:{self.index},start:{self.start_time},end:{self.end_time},text:{self.text}'
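# Example: with ntype="srt", "00:01:02,500" becomes 0*3600 + 1*60 + 2.5 = 62.5 s:
#   s = subtitle(1, "00:01:02,500", "00:01:05,000", "Hello")
#   s.normalize(ntype="srt")  # s.start_time == 62.5, s.end_time == 65.0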
def read_srt(uploaded_file):
    offset = 0
    with open(uploaded_file.name, "r", encoding="utf-8") as f:
        file = f.readlines()
    subtitle_list = []
    indexlist = []
    filelength = len(file)
    for i in range(0, filelength):
        if " --> " in file[i]:
            # A timestamp line counts only if the preceding line is a numeric index
            is_st = True
            for char in file[i - 1].strip().replace("\ufeff", ""):
                if char not in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
                    is_st = False
                    break
            if is_st:
                indexlist.append(i)  # line number of each timestamp line
    listlength = len(indexlist)
    for i in range(0, listlength - 1):
        st, et = file[indexlist[i]].split(" --> ")
        id = int(file[indexlist[i] - 1].strip().replace("\ufeff", ""))
        text = ""
        for x in range(indexlist[i] + 1, indexlist[i + 1] - 2):
            text += file[x]
        st = subtitle(id, st, et, text)
        st.normalize(ntype="srt")
        st.add_offset(offset=offset)
        subtitle_list.append(st)
    # Last block: its text runs to the end of the file
    st, et = file[indexlist[-1]].split(" --> ")
    id = int(file[indexlist[-1] - 1].strip().replace("\ufeff", ""))
    text = ""
    for x in range(indexlist[-1] + 1, filelength):
        text += file[x]
    st = subtitle(id, st, et, text)
    st.normalize(ntype="srt")
    st.add_offset(offset=offset)
    subtitle_list.append(st)
    return subtitle_list
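# read_srt assumes standard SRT blocks: a numeric index line, a timestamp line,
# the subtitle text (one line, or two lines for bilingual Chinese/English
# files), and a blank separator line, e.g.:
#
#   1
#   00:00:01,000 --> 00:00:03,500
#   你好,世界
#   Hello, world
#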
import webrtcvad
from pydub import AudioSegment
from pydub.utils import make_chunks
def vad(audio_name, out_path_name):
    audio = AudioSegment.from_file(audio_name, format="wav")
    # WebRTC VAD supports only 8000, 16000, 32000, or 48000 Hz, mono 16-bit PCM
    audio = audio.set_frame_rate(48000)
    audio = audio.set_channels(1)
    audio = audio.set_sample_width(2)  # ensure 16-bit samples (no-op for most WAVs)
    # Initialize VAD with the most aggressive mode (0-3, 3 is most aggressive)
    vad = webrtcvad.Vad()
    vad.set_mode(3)
    frame_duration = 30  # frame length in ms (WebRTC VAD accepts 10, 20, or 30)
    frame_width = int(audio.frame_rate * frame_duration / 1000)  # frame length in samples
    frames = make_chunks(audio, frame_duration)
    # Keep only the frames classified as speech
    voiced_frames = []
    for frame in frames:
        if len(frame.raw_data) < frame_width * 2:  # drop the trailing short frame
            break
        is_speech = vad.is_speech(frame.raw_data, audio.frame_rate)
        if is_speech:
            voiced_frames.append(frame)
    # Concatenate the voiced frames back into a single segment
    voiced_audio = sum(voiced_frames, AudioSegment.silent(duration=0))
    voiced_audio.export(f"{out_path_name}.wav", format="wav")
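# Frame-size sanity check: at 48000 Hz, a 30 ms frame holds
# 48000 * 30 / 1000 = 1440 samples, i.e. 1440 * 2 = 2880 bytes of 16-bit PCM,
# which is the frame_width * 2 minimum length tested above.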
def trim_audio(intervals, input_file_path, output_file_path):
    # Load the audio file
    audio = AudioSegment.from_file(input_file_path)
    # Iterate over the list of (start, end) intervals, given in seconds
    for i, (start_time, end_time) in enumerate(intervals):
        # Extract the segment (pydub slices in milliseconds)
        segment = audio[start_time * 1000:end_time * 1000]
        output_file_path_i = f"increased_{i}.wav"
        if len(segment) < 5000:
            # Repeat the clip enough times to make it at least 5 seconds long
            repeat_count = (5000 // len(segment)) + 3
            longer_audio = segment * repeat_count
            print(f"Audio was less than 5 seconds. Extended to {len(longer_audio)} milliseconds.")
            longer_audio.export(output_file_path_i, format='wav')
            vad(output_file_path_i, f"{output_file_path}_{i}")
        else:
            print("Audio is already 5 seconds or longer.")
            segment.export(f"{output_file_path}_{i}.wav", format='wav')
import re

def sort_key(file_name):
    """Extract the last number in the file name for sorting."""
    numbers = re.findall(r'\d+', file_name)
    if numbers:
        return int(numbers[-1])
    return -1  # Files without a number sort to the start
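# Example: the per-subtitle output files end in their subtitle index, so they
# sort numerically rather than lexicographically:
#   sorted(["line 10.wav", "line 2.wav"], key=sort_key)
#   -> ["line 2.wav", "line 10.wav"]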
def merge_audios(folder_path):
    output_file = "AI配音版.wav"
    # Get all WAV files in the folder
    files = [f for f in os.listdir(folder_path) if f.endswith('.wav')]
    # Sort files based on the last number in their names
    sorted_files = sorted(files, key=sort_key)
    # Concatenate the files in order
    merged_audio = AudioSegment.empty()
    for file in sorted_files:
        audio = AudioSegment.from_wav(os.path.join(folder_path, file))
        merged_audio += audio
        print(f"Merged: {file}")
    # Export the merged audio to a new file
    merged_audio.export(output_file, format="wav")
    return output_file
import shutil
import zipfile

def zip_sliced_files(directory, zip_filename):
    # Pack every sliced reference clip into a single zip file
    with zipfile.ZipFile(zip_filename, 'w') as zipf:
        for foldername, subfolders, filenames in os.walk(directory):
            for filename in filenames:
                # Only include files that start with "sliced" and end in .wav
                if filename.startswith("sliced") and filename.endswith(".wav"):
                    file_path = os.path.join(foldername, filename)
                    zipf.write(file_path, arcname=filename)
                    print(f"Added {filename} to {zip_filename}")
# Change playback speed by altering the declared frame rate, then resampling back
def change_speed(audio_inp, speed=1.0):
    audio = AudioSegment.from_file(audio_inp)
    sound_with_altered_frame_rate = audio._spawn(audio.raw_data, overrides={
        "frame_rate": int(audio.frame_rate * speed)
    })
    adjusted_audio = sound_with_altered_frame_rate.set_frame_rate(audio.frame_rate)
    adjusted_audio.export("slower_speech.wav", format="wav")
    return "slower_speech.wav"
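# Note: this frame-rate trick changes pitch along with tempo: at speed=1.2 the
# samples are declared 1.2x faster and then resampled back to the original
# rate, so the result plays faster and sounds correspondingly higher-pitched.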
# Delete leftover sliced files from previous runs
def delete_sliced_files(directory):
    for foldername, subfolders, filenames in os.walk(directory):
        for filename in filenames:
            # Remove any file whose name starts with "sliced"
            if filename.startswith("sliced"):
                file_path = os.path.join(foldername, filename)
                os.remove(file_path)
                print(f"Deleted {filename}")
def convert_from_srt(api_key, filename, audio_full, voice, multilingual):
    subtitle_list = read_srt(filename)
    delete_sliced_files("./")
    # audio_data, sr = librosa.load(audio_full, sr=44100)
    # write("audio_full.wav", sr, audio_data.astype(np.int16))
    if os.path.isdir("output"):
        shutil.rmtree("output")
    if not multilingual:
        for i in subtitle_list:
            try:
                os.makedirs("output", exist_ok=True)
                trim_audio([[i.start_time, i.end_time]], audio_full, f"sliced_audio_{i.index}")
                print(f"正在合成第{i.index}条语音")  # Synthesizing subtitle {i.index}
                print(f"语音内容:{i.text}")  # Text to synthesize
                convert(api_key, i.text, f"sliced_audio_{i.index}_0.wav", voice, i.text + " " + str(i.index))
            except Exception:
                pass
    else:
        for i in subtitle_list:
            try:
                os.makedirs("output", exist_ok=True)
                trim_audio([[i.start_time, i.end_time]], audio_full, f"sliced_audio_{i.index}")
                print(f"正在合成第{i.index}条语音")  # Synthesizing subtitle {i.index}
                # For bilingual subtitles, the second line holds the English text
                print(f"语音内容:{i.text.splitlines()[1]}")
                convert(api_key, i.text.splitlines()[1], f"sliced_audio_{i.index}_0.wav", voice, i.text.splitlines()[1] + " " + str(i.index))
            except Exception:
                pass
    merge_audios("output")
    zip_sliced_files("./", "参考音频.zip")
    return "AI配音版.wav", "参考音频.zip"
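# Pipeline summary: read_srt parses the SRT file; for each subtitle,
# trim_audio + vad cut a clean "sliced_audio_*" reference clip from the
# original dub; convert re-voices the ElevenLabs TTS line toward that clip;
# finally merge_audios joins the per-line results from output/ and
# zip_sliced_files packs the reference clips into 参考音频.zip.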
restart_markdown = ("""
### 若此页面无法正常显示,请点击[此链接](https://openxlab.org.cn/apps/detail/Kevin676/OpenAI-TTS)唤醒该程序!谢谢🍻
""")
def denoise(video_full):
    # Extract the audio track from the uploaded video as a stereo 44.1 kHz WAV
    if os.path.exists("audio_full.wav"):
        os.remove("audio_full.wav")
    ffmpeg.input(video_full).output("audio_full.wav", ac=2, ar=44100).run()
    return "audio_full.wav"
with gr.Blocks() as app:
    gr.Markdown("# <center>🌊💕🎶 11Labs TTS - SRT文件一键AI配音</center>")
    gr.Markdown("### <center>🌟 只需上传SRT文件和原版配音文件即可,每次一集视频AI自动配音!Developed by Kevin Wang </center>")
    with gr.Tab("📺视频转音频"):
        with gr.Row():
            inp_video = gr.Video(label="请上传一集包含原声配音的视频(需要是.mp4视频文件)")
            btn_convert = gr.Button("视频文件转音频", variant="primary")
            out_audio = gr.Audio(label="视频对应的音频文件,可以下载至本地后进行降噪处理", type="filepath")
        btn_convert.click(denoise, [inp_video], [out_audio])
    with gr.Tab("🎶AI配音"):
        with gr.Row():
            with gr.Column():
                inp0 = gr.Textbox(type='password', label='请输入您的11Labs API Key')
                inp1 = gr.File(file_count="single", label="请上传一集视频对应的SRT文件")
                inp2 = gr.Audio(label="请上传一集视频的配音文件", type="filepath")
                inp3 = gr.Dropdown(choices=["Rachel", "Alice", "Chris", "Adam"], label='请选择一个说话人提供基础音色', info="试听音色链接:https://elevenlabs.io/app/speech-synthesis", value='Chris')
                # inp4 = gr.Dropdown(label="请选择用于分离伴奏的模型", info="UVR-HP5去除背景音乐效果更好,但会对人声造成一定的损伤", choices=["UVR-HP2", "UVR-HP5"], value="UVR-HP5")
                inp4 = gr.Checkbox(label="SRT文件是否为双语字幕", info="若为双语字幕,请打勾选择(SRT文件中需要先出现中文字幕,后英文字幕;中英字幕各占一行)")
                btn1 = gr.Button("一键开启AI配音吧💕", variant="primary")
            with gr.Column():
                out1 = gr.Audio(label="为您生成的AI完整配音", type="filepath")
                out2 = gr.File(label="包含所有参考音频的zip文件")
                inp_speed = gr.Slider(label="设置AI配音的速度", minimum=0.8, maximum=1.2, value=1.0, step=0.01)
                btn2 = gr.Button("一键改变AI配音速度")
                out3 = gr.Audio(label="变速后的AI配音", type="filepath")
        btn1.click(convert_from_srt, [inp0, inp1, inp2, inp3, inp4], [out1, out2])
        btn2.click(change_speed, [out1, inp_speed], [out3])
    gr.Markdown("### <center>注意❗:请勿生成会对任何个人或组织造成侵害的内容,请尊重他人的著作权和知识产权。用户对此程序的任何使用行为与程序开发者无关。</center>")
    gr.HTML('''
    <div class="footer">
        <p>🌊🏞️🎶 - 江水东流急,滔滔无尽声。 明·顾璘
        </p>
    </div>
    ''')

app.launch(share=True, show_error=True)