"""
Copyright (c) Meta Platforms, Inc. and affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import os
from tempfile import NamedTemporaryFile

import torch
import gradio as gr
from scipy.io.wavfile import write

from audiocraft.data.audio import audio_write
from audiocraft.models import MusicGen

# Lazily loaded MusicGen instance, cached at module level between requests.
MODEL = None
def split_process(audio, chosen_out_track):
    """Split the uploaded audio into stems with Demucs and return the chosen stem's path."""
    os.makedirs("out", exist_ok=True)
    # Gradio delivers numpy audio as (sample_rate, data).
    write('test.wav', audio[0], audio[1])
    # Run Demucs source separation on CPU; stems land in ./out/mdx_extra_q/test/.
    os.system("python3 -m demucs.separate -n mdx_extra_q -d cpu test.wav -o out")
    if chosen_out_track == "vocals":
        return "./out/mdx_extra_q/test/vocals.wav"
    elif chosen_out_track == "bass":
        return "./out/mdx_extra_q/test/bass.wav"
    elif chosen_out_track == "drums":
        return "./out/mdx_extra_q/test/drums.wav"
    elif chosen_out_track == "other":
        return "./out/mdx_extra_q/test/other.wav"
    elif chosen_out_track == "all-in":
        return "test.wav"
def load_model(version):
    print("Loading model", version)
    return MusicGen.get_pretrained(version)
def predict(music_prompt, melody, duration, model):
    global MODEL
    text = music_prompt
    # (Re)load the checkpoint only when the requested model changes.
    if MODEL is None or MODEL.name != model:
        MODEL = load_model(model)

    if duration > MODEL.lm.cfg.dataset.segment_duration:
        raise gr.Error("MusicGen currently supports durations of up to 30 seconds!")
    MODEL.set_generation_params(
        use_sampling=True,
        top_k=250,
        top_p=0,
        temperature=1.0,
        cfg_coef=3.0,
        duration=duration,
    )

    if melody:
        # Gradio numpy audio comes in as (sample_rate, data); convert the data to a
        # float tensor shaped [batch, channels, time] for the melody condition.
        sr, melody = melody[0], torch.from_numpy(melody[1]).to(MODEL.device).float().t().unsqueeze(0)
        print(melody.shape)
        if melody.dim() == 2:
            melody = melody[None]
        # Trim the melody to the maximum segment duration the model supports.
        melody = melody[..., :int(sr * MODEL.lm.cfg.dataset.segment_duration)]
        output = MODEL.generate_with_chroma(
            descriptions=[text],
            melody_wavs=melody,
            melody_sample_rate=sr,
            progress=False
        )
    else:
        output = MODEL.generate(descriptions=[text], progress=False)

    output = output.detach().cpu().float()[0]
    # Write the generated clip to a temp WAV with loudness normalization.
    with NamedTemporaryFile("wb", suffix=".wav", delete=False) as file:
        audio_write(file.name, output, MODEL.sample_rate, strategy="loudness", add_suffix=False)
    return file.name
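
# Hypothetical programmatic use of predict(), bypassing the UI (a sketch; the
# prompt and model choice below are illustrative only):
#   wav_path = predict("an upbeat electronic groove", None, 10, "small")
# With melody=None the plain text-to-music branch runs and the path of the
# generated temporary WAV is returned.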
css="""
#col-container {max-width: 510px; margin-left: auto; margin-right: auto;}
a {text-decoration-line: underline; font-weight: 600;}
"""
with gr.Blocks(css=css) as demo:
with gr.Column(elem_id="col-container"):
gr.Markdown(
"""
# Split Audio to MusicGen
Upload an audio file, split audio tracks with Demucs, choose a track as conditional sound for MusicGen, get a remix !
<br/>
<a href="https://huggingface.co/spaces/fffiloni/SplitTrack2MusicGen?duplicate=true" style="display: inline-block;margin-top: .5em;margin-right: .25em;" target="_blank">
<img style="margin-bottom: 0em;display: inline;margin-top: -.25em;" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
for longer sequences, more control and no queue.</p>
"""
)
with gr.Column():
uploaded_sound = gr.Audio(type="numpy", label="Input", source="upload")
chosen_track = gr.Radio(["vocals", "bass", "drums", "other", "all-in"], label="Track", info="Which track from your audio do you want to mashup ?", value="vocals")
load_sound_btn = gr.Button('Load your sound')
#split_vocals = gr.Audio(type="filepath", label="Vocals")
#split_bass = gr.Audio(type="filepath", label="Bass")
#split_drums = gr.Audio(type="filepath", label="Drums")
#split_others = gr.Audio(type="filepath", label="Other")
with gr.Row():
music_prompt = gr.Textbox(label="Musical Prompt", info="Describe what kind of music you wish for", interactive=True)
melody = gr.Audio(source="upload", type="numpy", label="Track Condition (from previous step)", interactive=False)
with gr.Row():
model = gr.Radio(["melody", "medium", "small", "large"], label="Model", value="melody", interactive=True)
with gr.Row():
duration = gr.Slider(minimum=1, maximum=30, value=10, step=1, label="Generated Music Duration", interactive=True)
with gr.Row():
submit = gr.Button("Submit")
#with gr.Row():
# topk = gr.Number(label="Top-k", value=250, interactive=True)
# topp = gr.Number(label="Top-p", value=0, interactive=True)
# temperature = gr.Number(label="Temperature", value=1.0, interactive=True)
# cfg_coef = gr.Number(label="Classifier Free Guidance", value=3.0, interactive=True)
output = gr.Audio(label="Generated Music")
load_sound_btn.click(split_process, inputs=[uploaded_sound, chosen_track], outputs=[melody])
submit.click(predict, inputs=[music_prompt, melody, duration, model], outputs=[output])
demo.queue(max_size=32).launch()
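
# To try the Space locally (assuming gradio, audiocraft, and demucs are installed),
# run this file directly:
#   python app.py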