Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -1,11 +1,333 @@
Removed (old app.py, 11 lines — the Gradio starter app; the middle lines are not recoverable from the diff view):

import gradio as gr
...
if __name__ == "__main__":
    demo.launch()

Added (new app.py):
import os
import subprocess

# Install flash attention (skip the CUDA build so the prebuilt wheel is used)
subprocess.run(
    "pip install flash-attn --no-build-isolation",
    env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
    shell=True,
)

import spaces
import torch
import numpy as np
from omegaconf import OmegaConf
import torchaudio
from torchaudio.transforms import Resample
import soundfile as sf
import uuid
import copy  # used by stage2_inference below
from tqdm import tqdm
from einops import rearrange
import gradio as gr
import re
from collections import Counter
from codecmanipulator import CodecManipulator
from mmtokenizer import _MMSentencePieceTokenizer
from transformers import AutoModelForCausalLM, LogitsProcessor, LogitsProcessorList
from models.soundstream_hubert_new import SoundStream
from vocoder import build_codec_model, process_audio
from post_process_audio import replace_low_freq_with_energy_matched

# Initialize global variables and models
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
mmtokenizer = _MMSentencePieceTokenizer("./mm_tokenizer_v0.2_hf/tokenizer.model")
codectool = CodecManipulator("xcodec", 0, 1)
codectool_stage2 = CodecManipulator("xcodec", 0, 8)

# Load models once at startup
def load_models():
    # Stage 1 Model
    stage1_model = AutoModelForCausalLM.from_pretrained(
        "m-a-p/YuE-s1-7B-anneal-en-cot",
        torch_dtype=torch.bfloat16,
        attn_implementation="flash_attention_2"
    ).to(device)
    stage1_model.eval()

    # Stage 2 Model
    stage2_model = AutoModelForCausalLM.from_pretrained(
        "m-a-p/YuE-s2-1B-general",
        torch_dtype=torch.float16,
        attn_implementation="flash_attention_2"
    ).to(device)
    stage2_model.eval()

    # Codec Model
    model_config = OmegaConf.load('./xcodec_mini_infer/final_ckpt/config.yaml')
    # eval() resolves the generator class named in the config (SoundStream, imported above)
    codec_model = eval(model_config.generator.name)(**model_config.generator.config).to(device)
    parameter_dict = torch.load('./xcodec_mini_infer/final_ckpt/ckpt_00360000.pth', map_location='cpu')
    codec_model.load_state_dict(parameter_dict['codec_model'])
    codec_model.eval()

    return stage1_model, stage2_model, codec_model

stage1_model, stage2_model, codec_model = load_models()

# Helper functions
def split_lyrics(lyrics):
    pattern = r"\[(\w+)\](.*?)\n(?=\[|\Z)"
    segments = re.findall(pattern, lyrics, re.DOTALL)
    return [f"[{seg[0]}]\n{seg[1].strip()}\n\n" for seg in segments]
72 |
+
|
73 |
+
def load_audio_mono(filepath, sampling_rate=16000):
|
74 |
+
audio, sr = torchaudio.load(filepath)
|
75 |
+
audio = torch.mean(audio, dim=0, keepdim=True) # Convert to mono
|
76 |
+
if sr != sampling_rate:
|
77 |
+
resampler = Resample(orig_freq=sr, new_freq=sampling_rate)
|
78 |
+
audio = resampler(audio)
|
79 |
+
return audio
|
80 |
+
|
81 |
+
def save_audio(wav: torch.Tensor, path, sample_rate: int, rescale: bool = False):
|
82 |
+
folder_path = os.path.dirname(path)
|
83 |
+
if not os.path.exists(folder_path):
|
84 |
+
os.makedirs(folder_path)
|
85 |
+
limit = 0.99
|
86 |
+
max_val = wav.abs().max()
|
87 |
+
wav = wav * min(limit / max_val, 1) if rescale else wav.clamp(-limit, limit)
|
88 |
+
torchaudio.save(str(path), wav, sample_rate=sample_rate, encoding='PCM_S', bits_per_sample=16)
|
89 |
+
|
90 |
+
# Stage 1 Generation
|
91 |
+
def stage1_generate(genres, lyrics_text, use_audio_prompt, audio_prompt_path, prompt_start_time, prompt_end_time):
|
92 |
+
structured_lyrics = split_lyrics(lyrics_text)
|
93 |
+
full_lyrics = "\n".join(structured_lyrics)
|
94 |
+
prompt_texts = [f"Generate music from the given lyrics segment by segment.\n[Genre] {genres}\n{full_lyrics}"] + structured_lyrics
|
95 |
+
|
96 |
+
random_id = str(uuid.uuid4())
|
97 |
+
output_dir = os.path.join("./output", random_id)
|
98 |
+
os.makedirs(output_dir, exist_ok=True)
|
99 |
+
|
100 |
+
stage1_output_set = []
|
101 |
+
for i, p in enumerate(tqdm(prompt_texts)):
|
102 |
+
section_text = p.replace('[start_of_segment]', '').replace('[end_of_segment]', '')
|
103 |
+
guidance_scale = 1.5 if i <= 1 else 1.2
|
104 |
+
|
105 |
+
if i == 0:
|
106 |
+
continue
|
107 |
+
|
108 |
+
if i == 1 and use_audio_prompt:
|
109 |
+
audio_prompt = load_audio_mono(audio_prompt_path)
|
110 |
+
audio_prompt.unsqueeze_(0)
|
111 |
+
with torch.no_grad():
|
112 |
+
raw_codes = codec_model.encode(audio_prompt.to(device), target_bw=0.5)
|
113 |
+
raw_codes = raw_codes.transpose(0, 1).cpu().numpy().astype(np.int16)
|
114 |
+
audio_prompt_codec = codectool.npy2ids(raw_codes[0])[int(prompt_start_time * 50): int(prompt_end_time * 50)]
|
115 |
+
audio_prompt_codec_ids = [mmtokenizer.soa] + codectool.sep_ids + audio_prompt_codec + [mmtokenizer.eoa]
|
116 |
+
sentence_ids = mmtokenizer.tokenize("[start_of_reference]") + audio_prompt_codec_ids + mmtokenizer.tokenize("[end_of_reference]")
|
117 |
+
head_id = mmtokenizer.tokenize(prompt_texts[0]) + sentence_ids
|
118 |
+
else:
|
119 |
+
head_id = mmtokenizer.tokenize(prompt_texts[0])
|
120 |
+
|
121 |
+
prompt_ids = head_id + mmtokenizer.tokenize("[start_of_segment]") + mmtokenizer.tokenize(section_text) + [mmtokenizer.soa] + codectool.sep_ids
|
122 |
+
prompt_ids = torch.as_tensor(prompt_ids).unsqueeze(0).to(device)
|
123 |
+
|
124 |
+
with torch.no_grad():
|
125 |
+
output_seq = stage1_model.generate(
|
126 |
+
input_ids=prompt_ids,
|
127 |
+
max_new_tokens=3000,
|
128 |
+
min_new_tokens=100,
|
129 |
+
do_sample=True,
|
130 |
+
top_p=0.93,
|
131 |
+
temperature=1.0,
|
132 |
+
repetition_penalty=1.2,
|
133 |
+
eos_token_id=mmtokenizer.eoa,
|
134 |
+
pad_token_id=mmtokenizer.eoa,
|
135 |
+
)
|
136 |
+
|
137 |
+
if i > 1:
|
138 |
+
raw_output = torch.cat([raw_output, prompt_ids, output_seq[:, prompt_ids.shape[-1]:]], dim=1)
|
139 |
+
else:
|
140 |
+
raw_output = output_seq
|
141 |
+
|
142 |
+
# Save Stage 1 outputs
|
143 |
+
ids = raw_output[0].cpu().numpy()
|
144 |
+
soa_idx = np.where(ids == mmtokenizer.soa)[0].tolist()
|
145 |
+
eoa_idx = np.where(ids == mmtokenizer.eoa)[0].tolist()
|
146 |
+
|
147 |
+
vocals = []
|
148 |
+
instrumentals = []
|
149 |
+
for i in range(len(soa_idx)):
|
150 |
+
codec_ids = ids[soa_idx[i] + 1:eoa_idx[i]]
|
151 |
+
if codec_ids[0] == 32016:
|
152 |
+
codec_ids = codec_ids[1:]
|
153 |
+
codec_ids = codec_ids[:2 * (codec_ids.shape[0] // 2)]
|
154 |
+
vocals_ids = codectool.ids2npy(rearrange(codec_ids, "(n b) -> b n", b=2)[0])
|
155 |
+
vocals.append(vocals_ids)
|
156 |
+
instrumentals_ids = codectool.ids2npy(rearrange(codec_ids, "(n b) -> b n", b=2)[1])
|
157 |
+
instrumentals.append(instrumentals_ids)
|
158 |
+
|
159 |
+
vocals = np.concatenate(vocals, axis=1)
|
160 |
+
instrumentals = np.concatenate(instrumentals, axis=1)
|
161 |
+
vocal_save_path = os.path.join(output_dir, f"vocal_{random_id}.npy")
|
162 |
+
inst_save_path = os.path.join(output_dir, f"instrumental_{random_id}.npy")
|
163 |
+
np.save(vocal_save_path, vocals)
|
164 |
+
np.save(inst_save_path, instrumentals)
|
165 |
+
stage1_output_set.append(vocal_save_path)
|
166 |
+
stage1_output_set.append(inst_save_path)
|
167 |
+
|
168 |
+
return stage1_output_set, output_dir
|
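The "(n b) -> b n" rearrange above deinterleaves the alternating vocal/instrumental token stream that stage 1 emits; a toy check with made-up ids:

flat = np.array([10, 20, 11, 21, 12, 22])   # v0, i0, v1, i1, v2, i2
pair = rearrange(flat, "(n b) -> b n", b=2)
# pair[0] -> [10, 11, 12] (vocal stream); pair[1] -> [20, 21, 22] (instrumental)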
# Stage 2 Generation
class BlockTokenRangeProcessor(LogitsProcessor):
    # Bans a contiguous range of token ids by forcing their logits to -inf,
    # keeping stage-2 sampling inside the codec vocabulary. The class was
    # referenced but not defined in this file; this definition follows the
    # YuE reference inference script.
    def __init__(self, start_id, end_id):
        self.blocked_token_ids = list(range(start_id, end_id))

    def __call__(self, input_ids, scores):
        scores[:, self.blocked_token_ids] = -float("inf")
        return scores

def stage2_generate(model, prompt, batch_size=16):
    codec_ids = codectool.unflatten(prompt, n_quantizer=1)
    codec_ids = codectool.offset_tok_ids(
        codec_ids,
        global_offset=codectool.global_offset,
        codebook_size=codectool.codebook_size,
        num_codebooks=codectool.num_codebooks,
    ).astype(np.int32)

    # Split the prompt into 300-frame (6 s) windows and batch them
    if batch_size > 1:
        codec_list = []
        for i in range(batch_size):
            idx_begin = i * 300
            idx_end = (i + 1) * 300
            codec_list.append(codec_ids[:, idx_begin:idx_end])
        codec_ids = np.concatenate(codec_list, axis=0)
        prompt_ids = np.concatenate(
            [
                np.tile([mmtokenizer.soa, mmtokenizer.stage_1], (batch_size, 1)),
                codec_ids,
                np.tile([mmtokenizer.stage_2], (batch_size, 1)),
            ],
            axis=1
        )
    else:
        prompt_ids = np.concatenate([
            np.array([mmtokenizer.soa, mmtokenizer.stage_1]),
            codec_ids.flatten(),
            np.array([mmtokenizer.stage_2])
        ]).astype(np.int32)
        prompt_ids = prompt_ids[np.newaxis, ...]

    codec_ids = torch.as_tensor(codec_ids).to(device)
    prompt_ids = torch.as_tensor(prompt_ids).to(device)
    len_prompt = prompt_ids.shape[-1]

    block_list = LogitsProcessorList([BlockTokenRangeProcessor(0, 46358), BlockTokenRangeProcessor(53526, mmtokenizer.vocab_size)])

    # Per frame: teacher-force one stage-1 id, then generate the 7 remaining codebook ids
    for frames_idx in range(codec_ids.shape[1]):
        cb0 = codec_ids[:, frames_idx:frames_idx + 1]
        prompt_ids = torch.cat([prompt_ids, cb0], dim=1)
        input_ids = prompt_ids

        with torch.no_grad():
            stage2_output = model.generate(
                input_ids=input_ids,
                min_new_tokens=7,
                max_new_tokens=7,
                eos_token_id=mmtokenizer.eoa,
                pad_token_id=mmtokenizer.eoa,
                logits_processor=block_list,
            )

        assert stage2_output.shape[1] - prompt_ids.shape[1] == 7, f"output new tokens={stage2_output.shape[1] - prompt_ids.shape[1]}"
        prompt_ids = stage2_output

    if batch_size > 1:
        output = prompt_ids.cpu().numpy()[:, len_prompt:]
        output_list = [output[i] for i in range(batch_size)]
        output = np.concatenate(output_list, axis=0)
    else:
        output = prompt_ids[0].cpu().numpy()[len_prompt:]

    return output
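Token accounting for the frame loop above, sketched with this file's 50 Hz frame rate and 300-frame windows:

frames = 300                  # one 6-second window at 50 frames/s
tokens_per_frame = 1 + 7      # 1 teacher-forced stage-1 id + 7 generated ids
assert frames * tokens_per_frame == 2400   # flat ids appended past len_prompt
# codectool_stage2.ids2npy is then expected to reshape this
# 8-ids-per-frame stream to an (8, 300) codebook array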
def stage2_inference(model, stage1_output_set, output_dir, batch_size=4):
    # Write stage-2 tokens to a subdirectory so they don't collide with the
    # stage-1 .npy files of the same basename (a collision would make the
    # os.path.exists check below skip every file).
    stage2_dir = os.path.join(output_dir, "stage2")
    os.makedirs(stage2_dir, exist_ok=True)
    stage2_result = []
    for i in tqdm(range(len(stage1_output_set))):
        output_filename = os.path.join(stage2_dir, os.path.basename(stage1_output_set[i]))
        if os.path.exists(output_filename):
            stage2_result.append(output_filename)
            continue

        prompt = np.load(stage1_output_set[i]).astype(np.int32)
        # Durations are floored to a multiple of 6 s (one 300-frame window per 6 s)
        output_duration = prompt.shape[-1] // 50 // 6 * 6
        num_batch = output_duration // 6

        if num_batch <= batch_size:
            output = stage2_generate(model, prompt[:, :output_duration * 50], batch_size=num_batch)
        else:
            segments = []
            num_segments = (num_batch // batch_size) + (1 if num_batch % batch_size != 0 else 0)
            for seg in range(num_segments):
                start_idx = seg * batch_size * 300
                end_idx = min((seg + 1) * batch_size * 300, output_duration * 50)
                current_batch_size = batch_size if seg != num_segments - 1 or num_batch % batch_size == 0 else num_batch % batch_size
                segment = stage2_generate(model, prompt[:, start_idx:end_idx], batch_size=current_batch_size)
                segments.append(segment)
            output = np.concatenate(segments, axis=0)

        if output_duration * 50 != prompt.shape[-1]:
            ending = stage2_generate(model, prompt[:, output_duration * 50:], batch_size=1)
            output = np.concatenate([output, ending], axis=0)
        output = codectool_stage2.ids2npy(output)

        # Replace out-of-range codec ids with the most frequent id in that row
        fixed_output = copy.deepcopy(output)
        for row_idx, line in enumerate(output):
            for col_idx, element in enumerate(line):
                if element < 0 or element > 1023:
                    counter = Counter(line)
                    most_frequent = sorted(counter.items(), key=lambda x: x[1], reverse=True)[0][0]
                    fixed_output[row_idx, col_idx] = most_frequent
        np.save(output_filename, fixed_output)
        stage2_result.append(output_filename)
    return stage2_result
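A worked example of the windowing arithmetic above, with illustrative numbers:

frames = 2600                             # 52 s of stage-1 tokens at 50 frames/s
output_duration = frames // 50 // 6 * 6   # 48 s, floored to a multiple of 6
num_batch = output_duration // 6          # 8 six-second windows
# with batch_size=4: two segments of 4 windows (1200 frames each), then the
# leftover 2600 - 48 * 50 = 200 frames are generated separately with batch_size=1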
# Main Gradio function
@spaces.GPU()
def generate_music(genres, lyrics_text, use_audio_prompt, audio_prompt, start_time, end_time, progress=gr.Progress()):
    progress(0.1, "Running Stage 1 Generation...")
    stage1_output_set, output_dir = stage1_generate(genres, lyrics_text, use_audio_prompt, audio_prompt, start_time, end_time)

    progress(0.6, "Running Stage 2 Refinement...")
    stage2_result = stage2_inference(stage2_model, stage1_output_set, output_dir)

    progress(0.8, "Processing Audio...")
    vocal_decoder, inst_decoder = build_codec_model('./xcodec_mini_infer/decoders/config.yaml', './xcodec_mini_infer/decoders/decoder_131000.pth', './xcodec_mini_infer/decoders/decoder_151000.pth')
    vocoder_output_dir = os.path.join(output_dir, "vocoder")
    os.makedirs(vocoder_output_dir, exist_ok=True)

    for npy in stage2_result:
        if 'instrumental' in npy:
            process_audio(npy, os.path.join(vocoder_output_dir, 'instrumental.mp3'), False, None, inst_decoder, codec_model)
        else:
            process_audio(npy, os.path.join(vocoder_output_dir, 'vocal.mp3'), False, None, vocal_decoder, codec_model)

    # Return order matches the click() outputs below: vocal first, then instrumental
    return [
        os.path.join(vocoder_output_dir, 'vocal.mp3'),
        os.path.join(vocoder_output_dir, 'instrumental.mp3')
    ]
# Gradio UI
with gr.Blocks(title="AI Music Generation") as demo:
    gr.Markdown("# 🎵 AI Music Generation Pipeline")

    with gr.Row():
        with gr.Column():
            genre_input = gr.Textbox(label="Genre Tags", placeholder="e.g., Pop, Happy, Female Vocal")
            lyrics_input = gr.Textbox(label="Lyrics", lines=10, placeholder="Enter lyrics with segments...")
            use_audio_prompt = gr.Checkbox(label="Use Audio Prompt")
            audio_input = gr.Audio(label="Reference Audio", type="filepath", visible=False)
            start_time = gr.Number(label="Start Time (sec)", value=0.0, visible=False)
            end_time = gr.Number(label="End Time (sec)", value=30.0, visible=False)

            generate_btn = gr.Button("Generate Music", variant="primary")

        with gr.Column():
            vocal_output = gr.Audio(label="Vocal Track", interactive=False)
            inst_output = gr.Audio(label="Instrumental Track", interactive=False)

    # Show the reference-audio controls only when the checkbox is ticked
    use_audio_prompt.change(
        lambda x: [gr.update(visible=x), gr.update(visible=x), gr.update(visible=x)],
        inputs=use_audio_prompt,
        outputs=[audio_input, start_time, end_time]
    )

    generate_btn.click(
        generate_music,
        inputs=[genre_input, lyrics_input, use_audio_prompt, audio_input, start_time, end_time],
        outputs=[vocal_output, inst_output]
    )

if __name__ == "__main__":
    demo.launch()
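To exercise the pipeline without the UI, a hypothetical smoke test could look like this; the tags and lyrics are placeholders, and a plain callable stands in for gr.Progress so no Gradio event context is needed:

vocal_path, inst_path = generate_music(
    genres="Pop Happy Female Vocal",
    lyrics_text="[verse]\nFirst line\nSecond line\n\n[chorus]\nHook line\n",
    use_audio_prompt=False,
    audio_prompt=None,
    start_time=0.0,
    end_time=30.0,
    progress=lambda *args, **kwargs: None,  # stand-in for gr.Progress
)
print(vocal_path, inst_path)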