kevinwang676 committed on
Commit
37981bf
1 Parent(s): 0744265

Create app_share.py

Browse files
Files changed (1) hide show
  1. app_share.py +328 -0
app_share.py ADDED
@@ -0,0 +1,328 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import torch
import librosa
import gradio as gr
from scipy.io.wavfile import write
from transformers import WavLMModel

import utils
from models import SynthesizerTrn
from mel_processing import mel_spectrogram_torch
from speaker_encoder.voice_encoder import SpeakerEncoder

# Kept for reference: one-off helper that downloaded the WavLM-Large
# checkpoint via gdown; the content model is now pulled from the HF hub below.
'''
def get_wavlm():
os.system('gdown https://drive.google.com/uc?id=12-cB34qCTvByWT-QtOcZaqwwO21FLSqU')
shutil.move('WavLM-Large.pt', 'wavlm')
'''

# Prefer GPU when available; all models and tensors are moved to this device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the three FreeVC variants from local checkpoints.
# NOTE(review): `hps` is rebound on each load, so after this section it holds
# the freevc-s config — convert() below reads the module-level `hps`; confirm
# the three configs share sampling_rate/filter params.
print("Loading FreeVC...")
hps = utils.get_hparams_from_file("configs/freevc.json")
freevc = SynthesizerTrn(
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    **hps.model).to(device)
_ = freevc.eval()
_ = utils.load_checkpoint("checkpoints/freevc.pth", freevc, None)
# Speaker encoder used to embed the target voice for FreeVC / FreeVC (24kHz).
smodel = SpeakerEncoder('speaker_encoder/ckpt/pretrained_bak_5805000.pt')

print("Loading FreeVC(24k)...")
hps = utils.get_hparams_from_file("configs/freevc-24.json")
freevc_24 = SynthesizerTrn(
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    **hps.model).to(device)
_ = freevc_24.eval()
_ = utils.load_checkpoint("checkpoints/freevc-24.pth", freevc_24, None)

print("Loading FreeVC-s...")
hps = utils.get_hparams_from_file("configs/freevc-s.json")
freevc_s = SynthesizerTrn(
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    **hps.model).to(device)
_ = freevc_s.eval()
_ = utils.load_checkpoint("checkpoints/freevc-s.pth", freevc_s, None)

print("Loading WavLM for content...")
# WavLM-Large provides the content representation fed to the FreeVC decoders.
cmodel = WavLMModel.from_pretrained("microsoft/wavlm-large").to(device)


import ffmpeg

import random
import numpy as np
from elevenlabs import voices, generate, set_api_key, UnauthenticatedRateLimitError
60
def pad_buffer(audio):
    """Zero-pad a byte buffer so its length is a whole number of int16 samples."""
    width = np.dtype(np.int16).itemsize
    remainder = len(audio) % width
    if remainder:
        audio += b'\0' * (width - remainder)
    return audio
67
+
68
def generate_voice(text, voice_name):
    """Synthesize `text` with the ElevenLabs TTS API and write it to output.mp3.

    text: text to speak; truncated to 250 characters before the API call.
    voice_name: name of the ElevenLabs base voice to use.
    Returns the path of the written MP3 ("output.mp3").
    Raises gr.Error with a friendly message on the free-tier rate limit, or
    with the underlying error text for any other failure.
    """
    try:
        audio = generate(
            text[:250],  # Limit to 250 characters
            voice=voice_name,
            model="eleven_multilingual_v2"
        )
        with open("output.mp3", mode='wb') as f:
            f.write(audio)
        return "output.mp3"

    except UnauthenticatedRateLimitError:
        raise gr.Error("Thanks for trying out ElevenLabs TTS! You've reached the free tier limit. Please provide an API key to continue.")
    except Exception as e:
        # gr.Error expects a message string; passing the exception object
        # renders poorly in the UI. Preserve the cause chain for server logs.
        raise gr.Error(str(e)) from e
83
+
84
# Static HTML that embeds a hosted OpenXLab app in an iframe.
# NOTE(review): this constant is not referenced anywhere in this file, and the
# bare "</script>" inside <head> has no opening tag — looks like leftover
# markup from an earlier version; confirm before removing or reusing.
html_denoise = """
<html>
<head>
</script>
<link rel="stylesheet" href="https://gradio.s3-us-west-2.amazonaws.com/2.6.2/static/bundle.css">
</head>
<body>
<div id="target"></div>
<script src="https://gradio.s3-us-west-2.amazonaws.com/2.6.2/static/bundle.js"></script>
<script
type="module"
src="https://gradio.s3-us-west-2.amazonaws.com/4.15.0/gradio.js"
></script>
<iframe
src="https://g-app-center-40055665-8145-0zp6jbv.openxlab.space"
frameBorder="0"
width="1280"
height="700"
></iframe>

</body>
</html>
"""
107
+
108
def convert(api_key, text, tgt, voice, save_path):
    """Synthesize `text` with ElevenLabs, then voice-convert it toward the
    speaker heard in `tgt` using FreeVC, writing output/<save_path>.wav.

    api_key: ElevenLabs API key (exported via env for generate_voice).
    text: text to synthesize.
    tgt: path of the reference audio that provides the target timbre.
    voice: ElevenLabs base voice name.
    save_path: file stem for the converted wav under output/.
    Returns the path of the written wav file.
    """
    # Model choice is hard-coded; the "FreeVC" and "FreeVC-s" branches below
    # are currently dead code kept from an earlier multi-model UI.
    model = "FreeVC (24kHz)"
    with torch.no_grad():
        # tgt: load reference speech (resampled to the config rate) and trim
        # leading/trailing silence.
        # NOTE(review): module-level `hps` holds whichever config was loaded
        # last at import time (freevc-s) — confirm rates match freevc-24.
        wav_tgt, _ = librosa.load(tgt, sr=hps.data.sampling_rate)
        wav_tgt, _ = librosa.effects.trim(wav_tgt, top_db=20)
        if model == "FreeVC" or model == "FreeVC (24kHz)":
            # Speaker-encoder embedding of the target voice, shaped (1, emb).
            g_tgt = smodel.embed_utterance(wav_tgt)
            g_tgt = torch.from_numpy(g_tgt).unsqueeze(0).to(device)
        else:
            # FreeVC-s conditions on the target mel-spectrogram instead.
            wav_tgt = torch.from_numpy(wav_tgt).unsqueeze(0).to(device)
            mel_tgt = mel_spectrogram_torch(
                wav_tgt,
                hps.data.filter_length,
                hps.data.n_mel_channels,
                hps.data.sampling_rate,
                hps.data.hop_length,
                hps.data.win_length,
                hps.data.mel_fmin,
                hps.data.mel_fmax
            )
        # src: synthesize the source speech with ElevenLabs TTS.
        os.environ["ELEVEN_API_KEY"] = api_key
        src = generate_voice(text, voice)
        wav_src, _ = librosa.load(src, sr=hps.data.sampling_rate)
        wav_src = torch.from_numpy(wav_src).unsqueeze(0).to(device)
        # WavLM content features, transposed to (batch, hidden, frames).
        c = cmodel(wav_src).last_hidden_state.transpose(1, 2).to(device)
        # infer
        if model == "FreeVC":
            audio = freevc.infer(c, g=g_tgt)
        elif model == "FreeVC-s":
            audio = freevc_s.infer(c, mel=mel_tgt)
        else:
            audio = freevc_24.infer(c, g=g_tgt)
        audio = audio[0][0].data.cpu().float().numpy()
        # The 24kHz model always writes at a fixed 24000 Hz; the other
        # (currently unreachable) models write at the config rate.
        if model == "FreeVC" or model == "FreeVC-s":
            write(f"output/{save_path}.wav", hps.data.sampling_rate, audio)
        else:
            write(f"output/{save_path}.wav", 24000, audio)
    return f"output/{save_path}.wav"
149
+
150
+
151
class subtitle:
    """One subtitle cue: a numeric index, start/end times, and its text.

    Times are stored as whatever was passed in (typically timestamp strings)
    until normalize() converts them to float seconds.
    """

    def __init__(self, index: int, start_time, end_time, text: str):
        self.index = int(index)          # coerces str cue numbers too
        self.start_time = start_time
        self.end_time = end_time
        self.text = text.strip()

    def normalize(self, ntype: str, fps=30):
        """Convert start/end times in place to float seconds (2 decimals).

        ntype "prcsv": "H:M:S:frames" (';' tolerated as separator); the frame
        count is divided by `fps`.
        ntype "srt": "H:M:S,mmm" with a comma (or dot) decimal separator.
        Raises ValueError for any other ntype.
        """
        if ntype == "prcsv":
            h, m, s, fs = (self.start_time.replace(';', ':')).split(":")  # seconds
            self.start_time = int(h) * 3600 + int(m) * 60 + int(s) + round(int(fs) / fps, 2)
            h, m, s, fs = (self.end_time.replace(';', ':')).split(":")
            self.end_time = int(h) * 3600 + int(m) * 60 + int(s) + round(int(fs) / fps, 2)
        elif ntype == "srt":
            h, m, s = self.start_time.split(":")
            s = s.replace(",", ".")
            self.start_time = int(h) * 3600 + int(m) * 60 + round(float(s), 2)
            h, m, s = self.end_time.split(":")
            s = s.replace(",", ".")
            self.end_time = int(h) * 3600 + int(m) * 60 + round(float(s), 2)
        else:
            # Same exception type as before, now with a diagnostic message.
            raise ValueError(f"unsupported normalize type: {ntype!r} (expected 'prcsv' or 'srt')")

    def add_offset(self, offset=0):
        """Shift both times by `offset` seconds, clamping at zero."""
        self.start_time += offset
        if self.start_time < 0:
            self.start_time = 0
        self.end_time += offset
        if self.end_time < 0:
            self.end_time = 0

    def __str__(self) -> str:
        return f'id:{self.index},start:{self.start_time},end:{self.end_time},text:{self.text}'
181
+
182
def read_srt(uploaded_file):
    """Parse an SRT file into a list of `subtitle` objects.

    uploaded_file: object exposing `.name` (path), e.g. a Gradio File value.
    Returns cues in file order with times normalized to float seconds.
    """
    offset = 0  # global shift applied to every cue; currently always 0
    with open(uploaded_file.name, "r", encoding="utf-8") as f:
        file = f.readlines()
    subtitle_list = []
    indexlist = []
    filelength = len(file)
    # A line containing " --> " is a timing line only when the line directly
    # above is a pure cue number (a possible BOM is stripped first).
    for i in range(0, filelength):
        if " --> " in file[i]:
            is_st = True
            for char in file[i - 1].strip().replace("\ufeff", ""):
                if char not in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
                    is_st = False
                    break
            if is_st:
                indexlist.append(i)  # get line id
    listlength = len(indexlist)
    # Every cue except the last: text spans from the line after the timing
    # line up to (exclusive) the blank line two above the next timing line.
    for i in range(0, listlength - 1):
        st, et = file[indexlist[i]].split(" --> ")
        id = int(file[indexlist[i] - 1].strip().replace("\ufeff", ""))
        text = ""
        for x in range(indexlist[i] + 1, indexlist[i + 1] - 2):
            text += file[x]
        st = subtitle(id, st, et, text)
        st.normalize(ntype="srt")
        st.add_offset(offset=offset)
        subtitle_list.append(st)
    # Last cue: text runs to end of file.
    # NOTE(review): here `id` is the raw line (trailing newline kept) rather
    # than a stripped int; subtitle.__init__'s int() coercion absorbs the
    # difference, but a BOM-only file start would break it — confirm inputs.
    st, et = file[indexlist[-1]].split(" --> ")
    id = file[indexlist[-1] - 1]
    text = ""
    for x in range(indexlist[-1] + 1, filelength):
        text += file[x]
    st = subtitle(id, st, et, text)
    st.normalize(ntype="srt")
    st.add_offset(offset=offset)
    subtitle_list.append(st)
    return subtitle_list
219
+
220
+ from pydub import AudioSegment
221
+
222
def trim_audio(intervals, input_file_path, output_file_path):
    """Cut second-based (start, end) intervals from an audio file.

    Each interval i is exported as "<output_file_path>_<i>.wav".
    """
    source = AudioSegment.from_file(input_file_path)
    for idx, (begin, finish) in enumerate(intervals):
        # pydub slices in milliseconds
        clip = source[begin * 1000:finish * 1000]
        clip.export(f"{output_file_path}_{idx}.wav", format='wav')
236
+
237
+ import re
238
+
239
def sort_key(file_name):
    """Sort key: the last run of digits in the name, or -1 when there is none."""
    digit_runs = re.findall(r'\d+', file_name)
    return int(digit_runs[-1]) if digit_runs else -1
245
+
246
+
247
def merge_audios(folder_path):
    """Concatenate every .wav in `folder_path`, ordered by trailing number.

    Writes the result to "AI配音版.wav" in the working directory and returns
    that file name.
    """
    output_file = "AI配音版.wav"
    # Collect and order the segments by the last number in each file name.
    wav_names = sorted(
        (name for name in os.listdir(folder_path) if name.endswith('.wav')),
        key=sort_key,
    )
    combined = AudioSegment.empty()
    for file in wav_names:
        combined += AudioSegment.from_wav(os.path.join(folder_path, file))
        print(f"Merged: {file}")
    combined.export(output_file, format="wav")
    return "AI配音版.wav"
266
+
267
+ import shutil
268
+
269
def convert_from_srt(apikey, filename, audio_full, voice, multilingual):
    """Dub a whole episode: slice the original track per SRT cue, synthesize
    and voice-convert each line, then merge everything into one wav.

    apikey: ElevenLabs API key.
    filename: SRT file object (as produced by gr.File).
    audio_full: path of the original dubbing track.
    voice: ElevenLabs base voice name.
    multilingual: True when each cue holds two lines and only the second
    (English) line should be spoken.
    Returns the merged output file name.
    """
    cues = read_srt(filename)

    #audio_data, sr = librosa.load(audio_full, sr=44100)

    #write("audio_full.wav", sr, audio_data.astype(np.int16))

    # Start from a clean output directory each run.
    if os.path.isdir("output"):
        shutil.rmtree("output")
    for cue in cues:
        os.makedirs("output", exist_ok=True)
        trim_audio([[cue.start_time, cue.end_time]], audio_full, f"sliced_audio_{cue.index}")
        print(f"正在合成第{cue.index}条语音")
        # Bilingual subtitles carry the line to speak on their second row.
        line = cue.text if multilingual == False else cue.text.splitlines()[1]
        print(f"语音内容:{line}")
        convert(apikey, line, f"sliced_audio_{cue.index}_0.wav", voice, line + " " + str(cue.index))

    merge_audios("output")

    return "AI配音版.wav"
296
+
297
# Markdown shown when the hosted app needs to be woken up.
# NOTE(review): `restart_markdown` is not referenced anywhere in this file.
restart_markdown = ("""
### 若此页面无法正常显示,请点击[此链接](https://openxlab.org.cn/apps/detail/Kevin676/OpenAI-TTS)唤醒该程序!谢谢🍻
""")

# Fetch the available ElevenLabs voices once to populate the dropdown.
# NOTE(review): this performs a network call at import time.
all_voices = voices()

# Gradio UI: upload an SRT file and the original dubbing track, pick a base
# voice, optionally mark the SRT as bilingual, and receive the merged AI dub.
with gr.Blocks() as app:
    gr.Markdown("# <center>🌊💕🎶 11Labs TTS - SRT文件一键AI配音</center>")
    gr.Markdown("### <center>🌟 只需上传SRT文件和原版配音文件即可,每次一集视频AI自动配音!Developed by Kevin Wang </center>")
    with gr.Row():
        with gr.Column():
            inp0 = gr.Textbox(type='password', label='请输入您的11Labs API Key')
            inp1 = gr.File(file_count="single", label="请上传一集视频对应的SRT文件")
            inp2 = gr.Audio(label="请上传一集视频的配音文件", type="filepath")

            inp3 = gr.Dropdown(choices=[ voice.name for voice in all_voices ], label='请选择一个说话人提供基础音色', info="试听音色链接:https://huggingface.co/spaces/elevenlabs/tts", value='Rachel')
            #inp4 = gr.Dropdown(label="请选择用于分离伴奏的模型", info="UVR-HP5去除背景音乐效果更好,但会对人声造成一定的损伤", choices=["UVR-HP2", "UVR-HP5"], value="UVR-HP5")
            inp4 = gr.Checkbox(label="SRT文件是否为双语字幕", info="若为双语字幕,请打勾选择(SRT文件中需要先出现中文字幕,后英文字幕;中英字幕各占一行)")
            btn = gr.Button("一键开启AI配音吧💕", variant="primary")
        with gr.Column():
            out1 = gr.Audio(label="为您生成的AI完整配音", type="filepath")

    btn.click(convert_from_srt, [inp0, inp1, inp2, inp3, inp4], [out1])
    gr.Markdown("### <center>注意❗:请勿生成会对任何个人或组织造成侵害的内容,请尊重他人的著作权和知识产权。用户对此程序的任何使用行为与程序开发者无关。</center>")
    gr.HTML('''
<div class="footer">
<p>🌊🏞️🎶 - 江水东流急,滔滔无尽声。 明·顾璘
</p>
</div>
''')

app.launch(share=True, show_error=True)