Spaces:
Running
Running
kevinwang676
committed on
Commit
•
4f9cd10
1
Parent(s):
189ad62
Update app_multi.py
Browse files- app_multi.py +209 -4
app_multi.py
CHANGED
@@ -8,6 +8,12 @@ import os
|
|
8 |
import time
|
9 |
import random
|
10 |
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
import asyncio
|
12 |
import json
|
13 |
import hashlib
|
@@ -18,8 +24,6 @@ import gradio as gr
|
|
18 |
|
19 |
import torch
|
20 |
|
21 |
-
import numpy as np
|
22 |
-
|
23 |
import edge_tts
|
24 |
|
25 |
from datetime import datetime
|
@@ -138,6 +142,187 @@ print(f'Models loaded: {len(loaded_models)}')
|
|
138 |
# Edge TTS speakers
|
139 |
tts_speakers_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices()) # noqa
|
140 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
141 |
# mix vocal and non-vocal
|
142 |
def mix(audio1, audio2):
|
143 |
sound1 = AudioSegment.from_file(audio1)
|
@@ -442,7 +627,7 @@ with app:
|
|
442 |
|
443 |
ydl_url_submit.click(fn=youtube_downloader, inputs=[ydl_url_input, start, end], outputs=[ydl_audio_output])
|
444 |
as_audio_submit.click(fn=audio_separated, inputs=[as_audio_input], outputs=[as_audio_vocals, as_audio_no_vocals, as_audio_message], show_progress=True, queue=True)
|
445 |
-
|
446 |
with gr.Row():
|
447 |
with gr.Column():
|
448 |
with gr.Tab('Audio conversion'):
|
@@ -468,7 +653,19 @@ with app:
|
|
468 |
)
|
469 |
|
470 |
tts_convert_btn = gr.Button('Convert', variant='primary')
|
471 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
472 |
pitch_adjust = gr.Slider(
|
473 |
label='Pitch',
|
474 |
minimum=-24,
|
@@ -608,6 +805,14 @@ with app:
|
|
608 |
show_progress=False,
|
609 |
queue=False
|
610 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
611 |
|
612 |
app.queue(
|
613 |
concurrency_count=1,
|
|
|
8 |
import time
|
9 |
import random
|
10 |
|
11 |
+
import matplotlib.pyplot as plt
|
12 |
+
import numpy as np
|
13 |
+
from PIL import Image, ImageDraw, ImageFont
|
14 |
+
from moviepy.editor import *
|
15 |
+
from moviepy.video.io.VideoFileClip import VideoFileClip
|
16 |
+
|
17 |
import asyncio
|
18 |
import json
|
19 |
import hashlib
|
|
|
24 |
|
25 |
import torch
|
26 |
|
|
|
|
|
27 |
import edge_tts
|
28 |
|
29 |
from datetime import datetime
|
|
|
142 |
# Edge TTS speakers
|
143 |
tts_speakers_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices()) # noqa
|
144 |
|
145 |
+
# Make MV
|
146 |
+
def make_bars_image(height_values, index, new_height):
    """Render one visualizer frame: white vertical bars on a transparent
    512 x new_height canvas, centred horizontally, then flipped vertically
    (rotate 180 + mirror). The frame is saved as a PNG and its filename
    returned.

    height_values -- one bar height (pixels) per frequency bin
    index         -- frame number, used to build the output filename
    new_height    -- canvas height in pixels
    """
    canvas_width = 512
    bar_width = 2
    gap = 2
    top_margin = 80  # blank space above the bars before the flip

    frame = Image.new('RGBA', (canvas_width, new_height), color=(0, 0, 0, 0))
    pen = ImageDraw.Draw(frame)

    # Centre the whole group of bars on the canvas.
    bar_count = len(height_values)
    group_width = bar_count * bar_width + (bar_count - 1) * gap
    left_edge = int((canvas_width - group_width) / 2)

    # Draw one rectangle per height value, left to right.
    for i, bar_height in enumerate(height_values):
        x0 = left_edge + i * (bar_width + gap)
        pen.rectangle([x0, top_margin, x0 + bar_width, bar_height + top_margin],
                      fill='white')

    # Rotate by 180 degrees, then mirror horizontally: the net effect is a
    # vertical flip, so the bars hang down from the top of the canvas.
    frame = frame.rotate(180)
    frame = frame.transpose(Image.FLIP_LEFT_RIGHT)

    out_path = 'audio_bars_' + str(index) + '.png'
    frame.save(out_path)
    return out_path
|
200 |
+
|
201 |
+
def db_to_height(db_value):
    """Map a decibel value onto a bar height in pixels.

    A value of -80 dB maps to 0 and 0 dB maps to 50; values outside that
    range extrapolate linearly (the original comment claiming a 0-100
    range was inaccurate -- the scale factor is 50).
    """
    normalised = (db_value + 80) / 80  # 0.0 at -80 dB, 1.0 at 0 dB
    return normalised * 50
|
209 |
+
|
210 |
+
def infer(title, audio_in, image_in):
    """Build a music video: a spectrum-bar animation over a background image.

    Pipeline: load the audio, compute an STFT spectrogram, sample 114
    frequency bins per time frame, convert their dB values to bar heights,
    render one PNG frame per STFT column on top of the (resized, captioned)
    background, assemble the frames into an MP4 with the original audio,
    then re-encode at 25 fps.

    title    -- caption text drawn onto the background image
    audio_in -- path to the input audio file
    image_in -- path to the background image
    Returns the path of the final video, "my_video_retimed.mp4".

    NOTE(review): relies on `librosa`, PIL and moviepy names
    (`ImageSequenceClip`, `AudioFileClip`, `VideoFileClip`) plus the local
    files 'black_cache.png' and 'Lato-Regular.ttf' being available — none
    of these are visible in this chunk; confirm they are imported/present.
    Writes many intermediate files into the current working directory.
    """
    # Load the audio file (librosa resamples to its default sr unless told
    # otherwise — presumably 22050 Hz; verify if exact timing matters).
    audio_path = audio_in
    audio_data, sr = librosa.load(audio_path)

    # Get the duration in seconds
    duration = librosa.get_duration(y=audio_data, sr=sr)

    # Extract the audio data for the desired time window. With start=0 and
    # end=duration this slice is currently a no-op, but the variables are
    # reused below for the video fps computation.
    start_time = 0 # start time in seconds
    end_time = duration # end time in seconds

    start_index = int(start_time * sr)
    end_index = int(end_time * sr)

    audio_data = audio_data[start_index:end_index]

    # Compute the short-time Fourier transform
    hop_length = 512


    stft = librosa.stft(audio_data, hop_length=hop_length)
    # Convert magnitudes to dB relative to the loudest bin (range ~[-80, 0]).
    spectrogram = librosa.amplitude_to_db(np.abs(stft), ref=np.max)

    # Get the frequency values
    # NOTE(review): n_fft is usually 2*(stft.shape[0]-1); passing
    # stft.shape[0] here skews the frequency axis — harmless for the visual,
    # since only evenly spaced indices are used below, but worth confirming.
    freqs = librosa.fft_frequencies(sr=sr, n_fft=stft.shape[0])

    # Select the indices of the frequency values that correspond to the desired frequencies
    n_freqs = 114  # number of bars drawn per frame
    freq_indices = np.linspace(0, len(freqs) - 1, n_freqs, dtype=int)

    # Extract the dB values for the desired frequencies: one list of
    # (frequency, dB) pairs per STFT time frame.
    db_values = []
    for i in range(spectrogram.shape[1]):
        db_values.append(list(zip(freqs[freq_indices], spectrogram[freq_indices, i])))

    # Print the dB values for the first time frame (debug output)
    print(db_values[0])

    # Convert each frame's dB values to pixel bar heights (frequency value
    # `f` is ignored — only the dB level drives the bar height).
    proportional_values = []

    for frame in db_values:
        proportional_frame = [db_to_height(db) for f, db in frame]
        proportional_values.append(proportional_frame)

    print(proportional_values[0])
    print("AUDIO CHUNK: " + str(len(proportional_values)))

    # Open the background image
    background_image = Image.open(image_in)

    # Resize the image to 512px wide while keeping its aspect ratio
    bg_width, bg_height = background_image.size
    aspect_ratio = bg_width / bg_height
    new_width = 512
    new_height = int(new_width / aspect_ratio)
    resized_bg = background_image.resize((new_width, new_height))

    # Apply black cache (a semi-transparent dark strip pasted at the bottom)
    # for better visibility of the white text
    bg_cache = Image.open('black_cache.png')
    resized_bg.paste(bg_cache, (0, resized_bg.height - bg_cache.height), mask=bg_cache)

    # Create a new ImageDraw object
    draw = ImageDraw.Draw(resized_bg)

    # Define the text to be added
    text = title
    font = ImageFont.truetype("Lato-Regular.ttf", 16)
    text_color = (255, 255, 255) # white color

    # Calculate the position of the text
    # NOTE(review): ImageDraw.textsize was deprecated in Pillow 9.2 and
    # removed in Pillow 10 — this line breaks on current Pillow; the
    # replacement is draw.textbbox. The computed width/height are unused
    # anyway (x and y are hard-coded below).
    text_width, text_height = draw.textsize(text, font=font)
    x = 30
    y = new_height - 70

    # Draw the text on the image
    draw.text((x, y), text, fill=text_color, font=font)

    # Save the resized image; re-opened fresh for every frame below so the
    # pasted bars never accumulate.
    resized_bg.save('resized_background.jpg')

    # Render one composited JPEG per STFT frame (can be thousands of files
    # for a long clip — all written to the current directory).
    generated_frames = []
    for i, frame in enumerate(proportional_values):
        bars_img = make_bars_image(frame, i, new_height)
        bars_img = Image.open(bars_img)
        # Paste the audio bars image on top of the background image
        fresh_bg = Image.open('resized_background.jpg')
        fresh_bg.paste(bars_img, (0, 0), mask=bars_img)
        # Save the image
        fresh_bg.save('audio_bars_with_bg' + str(i) + '.jpg')
        generated_frames.append('audio_bars_with_bg' + str(i) + '.jpg')
    print(generated_frames)

    # Create a video clip from the images; fps is chosen so the frame
    # sequence spans exactly the audio duration.
    clip = ImageSequenceClip(generated_frames, fps=len(generated_frames)/(end_time-start_time))
    audio_clip = AudioFileClip(audio_in)
    clip = clip.set_audio(audio_clip)
    # Set the output codec
    codec = 'libx264'
    audio_codec = 'aac'
    # Save the video to a file
    clip.write_videofile("my_video.mp4", codec=codec, audio_codec=audio_codec)

    # Second encode pass: re-time the video to a standard frame rate.
    retimed_clip = VideoFileClip("my_video.mp4")

    # Set the desired frame rate
    new_fps = 25

    # Create a new clip with the new frame rate
    new_clip = retimed_clip.set_fps(new_fps)

    # Save the new clip as a new video file
    new_clip.write_videofile("my_video_retimed.mp4", codec=codec, audio_codec=audio_codec)

    return "my_video_retimed.mp4"
|
325 |
+
|
326 |
# mix vocal and non-vocal
|
327 |
def mix(audio1, audio2):
|
328 |
sound1 = AudioSegment.from_file(audio1)
|
|
|
627 |
|
628 |
ydl_url_submit.click(fn=youtube_downloader, inputs=[ydl_url_input, start, end], outputs=[ydl_audio_output])
|
629 |
as_audio_submit.click(fn=audio_separated, inputs=[as_audio_input], outputs=[as_audio_vocals, as_audio_no_vocals, as_audio_message], show_progress=True, queue=True)
|
630 |
+
|
631 |
with gr.Row():
|
632 |
with gr.Column():
|
633 |
with gr.Tab('Audio conversion'):
|
|
|
653 |
)
|
654 |
|
655 |
tts_convert_btn = gr.Button('Convert', variant='primary')
|
656 |
+
|
657 |
+
with gr.Tab("📺 - 音乐视频"):
|
658 |
+
with gr.Row():
|
659 |
+
with gr.Column():
|
660 |
+
inp1 = gr.Textbox(label="为视频配上精彩的文案吧(选填;英文)")
|
661 |
+
inp2 = new_song
|
662 |
+
inp3 = gr.Image(source='upload', type='filepath', label="上传一张背景图片吧")
|
663 |
+
btn = gr.Button("生成您的专属音乐视频吧", variant="primary")
|
664 |
+
|
665 |
+
with gr.Column():
|
666 |
+
out1 = gr.Video(label='您的专属音乐视频')
|
667 |
+
btn.click(fn=infer, inputs=[inp1, inp2, inp3], outputs=[out1])
|
668 |
+
|
669 |
pitch_adjust = gr.Slider(
|
670 |
label='Pitch',
|
671 |
minimum=-24,
|
|
|
805 |
show_progress=False,
|
806 |
queue=False
|
807 |
)
|
808 |
+
|
809 |
+
gr.Markdown("### <center>注意❗:请不要生成会对个人以及组织造成侵害的内容,此程序仅供科研、学习及个人娱乐使用。</center>")
|
810 |
+
gr.HTML('''
|
811 |
+
<div class="footer">
|
812 |
+
<p>🌊🏞️🎶 - 江水东流急,滔滔无尽声。 明·顾璘
|
813 |
+
</p>
|
814 |
+
</div>
|
815 |
+
''')
|
816 |
|
817 |
app.queue(
|
818 |
concurrency_count=1,
|