TheStinger committed on
Commit 90770b9
1 Parent(s): 15e264a

Update app.py

Files changed (1)
  1. app.py +552 -23
app.py CHANGED
@@ -1,26 +1,555 @@
  import os
- import spaces
- import gradio as gr
  from scipy.io.wavfile import write
- @spaces.GPU()
- def inference(audio):
-     os.makedirs("out", exist_ok=True)
-     write('test.wav', audio[0], audio[1])
-     os.system("python3 -m demucs.separate -n htdemucs --two-stems=vocals -d cpu test.wav -o out")
-     return "./out/htdemucs/test/vocals.wav","./out/htdemucs/test/no_vocals.wav"
-
- title = "Ilaria UVR 💖"
- description = "Drag and drop an audio file to easily separate it! [Join AI Hub Discord Server](https://discord.gg/aihub).</p>"
- article = "Made with 💖 by Ilaria"
-
- examples=[['test.mp3']]
- gr.Interface(
-     inference,
-     gr.Audio(type="numpy", label="Song"),
-     [gr.Audio(type="filepath", label="Vocals"),gr.Audio(type="filepath", label="Instrumentals")],
-     title=title,
-     description=description,
-     article=article,
-     examples=examples
- ).launch()
 
  import os
+ import re
+ import random
  from scipy.io.wavfile import write
+ import gradio as gr
+ import spaces
+
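+ # Model choices for each separation architecture. roformer_models maps UI-facing
+ # names to audio-separator checkpoint filenames; the remaining lists are passed to
+ # the CLI as-is.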
+ roformer_models = {
+     'BS-Roformer-Viperx-1297.ckpt': 'model_bs_roformer_ep_317_sdr_12.9755.ckpt',
+     'BS-Roformer-Viperx-1296.ckpt': 'model_bs_roformer_ep_368_sdr_12.9628.ckpt',
+     'BS-Roformer-Viperx-1053.ckpt': 'model_bs_roformer_ep_937_sdr_10.5309.ckpt',
+     'Mel-Roformer-Viperx-1143.ckpt': 'model_mel_band_roformer_ep_3005_sdr_11.4360.ckpt'
+ }
+
+ mdx23c_models = [
+     'MDX23C_D1581.ckpt',
+     'MDX23C-8KFFT-InstVoc_HQ.ckpt',
+     'MDX23C-8KFFT-InstVoc_HQ_2.ckpt',
+ ]
+
+ mdxnet_models = [
+     'UVR-MDX-NET-Inst_full_292.onnx',
+     'UVR-MDX-NET_Inst_187_beta.onnx',
+     'UVR-MDX-NET_Inst_82_beta.onnx',
+     'UVR-MDX-NET_Inst_90_beta.onnx',
+     'UVR-MDX-NET_Main_340.onnx',
+     'UVR-MDX-NET_Main_390.onnx',
+     'UVR-MDX-NET_Main_406.onnx',
+     'UVR-MDX-NET_Main_427.onnx',
+     'UVR-MDX-NET_Main_438.onnx',
+     'UVR-MDX-NET-Inst_HQ_1.onnx',
+     'UVR-MDX-NET-Inst_HQ_2.onnx',
+     'UVR-MDX-NET-Inst_HQ_3.onnx',
+     'UVR-MDX-NET-Inst_HQ_4.onnx',
+     'UVR_MDXNET_Main.onnx',
+     'UVR-MDX-NET-Inst_Main.onnx',
+     'UVR_MDXNET_1_9703.onnx',
+     'UVR_MDXNET_2_9682.onnx',
+     'UVR_MDXNET_3_9662.onnx',
+     'UVR-MDX-NET-Inst_1.onnx',
+     'UVR-MDX-NET-Inst_2.onnx',
+     'UVR-MDX-NET-Inst_3.onnx',
+     'UVR_MDXNET_KARA.onnx',
+     'UVR_MDXNET_KARA_2.onnx',
+     'UVR_MDXNET_9482.onnx',
+     'UVR-MDX-NET-Voc_FT.onnx',
+     'Kim_Vocal_1.onnx',
+     'Kim_Vocal_2.onnx',
+     'Kim_Inst.onnx',
+     'Reverb_HQ_By_FoxJoy.onnx',
+     'UVR-MDX-NET_Crowd_HQ_1.onnx',
+     'kuielab_a_vocals.onnx',
+     'kuielab_a_other.onnx',
+     'kuielab_a_bass.onnx',
+     'kuielab_a_drums.onnx',
+     'kuielab_b_vocals.onnx',
+     'kuielab_b_other.onnx',
+     'kuielab_b_bass.onnx',
+     'kuielab_b_drums.onnx',
+ ]
+
+ vrarch_models = [
+     '1_HP-UVR.pth',
+     '2_HP-UVR.pth',
+     '3_HP-Vocal-UVR.pth',
+     '4_HP-Vocal-UVR.pth',
+     '5_HP-Karaoke-UVR.pth',
+     '6_HP-Karaoke-UVR.pth',
+     '7_HP2-UVR.pth',
+     '8_HP2-UVR.pth',
+     '9_HP2-UVR.pth',
+     '10_SP-UVR-2B-32000-1.pth',
+     '11_SP-UVR-2B-32000-2.pth',
+     '12_SP-UVR-3B-44100.pth',
+     '13_SP-UVR-4B-44100-1.pth',
+     '14_SP-UVR-4B-44100-2.pth',
+     '15_SP-UVR-MID-44100-1.pth',
+     '16_SP-UVR-MID-44100-2.pth',
+     '17_HP-Wind_Inst-UVR.pth',
+     'UVR-De-Echo-Aggressive.pth',
+     'UVR-De-Echo-Normal.pth',
+     'UVR-DeEcho-DeReverb.pth',
+     'UVR-DeNoise-Lite.pth',
+     'UVR-DeNoise.pth',
+     'UVR-BVE-4B_SN-44100-1.pth',
+     'MGM_HIGHEND_v4.pth',
+     'MGM_LOWEND_A_v4.pth',
+     'MGM_LOWEND_B_v4.pth',
+     'MGM_MAIN_v4.pth',
+ ]
+
+ demucs_models = [
+     'htdemucs_ft.yaml',
+     'htdemucs.yaml',
+     'hdemucs_mmi.yaml',
+ ]
+
+ output_format = [
+     'wav',
+     'flac',
+     'mp3',
+ ]
+
+ mdxnet_overlap_values = [
+     '0.25',
+     '0.5',
+     '0.75',
+     '0.99',
+ ]
+
+ vrarch_window_size_values = [
+     '320',
+     '512',
+     '1024',
+ ]
+
+ demucs_overlap_values = [
+     '0.25',
+     '0.50',
+     '0.75',
+     '0.99',
+ ]
+
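+ # Each separator below follows the same pattern: write the uploaded audio to a
+ # <random_id>.wav file, shell out to the audio-separator CLI, then collect the
+ # resulting stems from ./outputs by matching that random id in the filenames.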
+ @spaces.GPU(duration=300)
+ def roformer_separator(roformer_audio, roformer_model, roformer_output_format, roformer_overlap):
+     files_list = []
+     files_list.clear()
+     directory = "./outputs"
+     random_id = str(random.randint(10000, 99999))
+     pattern = f"{random_id}"
+     os.makedirs("outputs", exist_ok=True)
+     write(f'{random_id}.wav', roformer_audio[0], roformer_audio[1])
+     full_roformer_model = roformer_models[roformer_model]
+     prompt = f"audio-separator {random_id}.wav --model_filename {full_roformer_model} --output_dir=./outputs --output_format={roformer_output_format} --normalization=0.9 --mdxc_overlap={roformer_overlap}"
+     os.system(prompt)
+
+     for file in os.listdir(directory):
+         if re.search(pattern, file):
+             files_list.append(os.path.join(directory, file))
+
+     stem1_file = files_list[0]
+     stem2_file = files_list[1]
+
+     return stem1_file, stem2_file
+
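+ # MDX23C separation: segment size and overlap are forwarded as CLI options.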
+ @spaces.GPU(duration=300)
+ def mdxc_separator(mdx23c_audio, mdx23c_model, mdx23c_output_format, mdx23c_segment_size, mdx23c_overlap):
+     files_list = []
+     files_list.clear()
+     directory = "./outputs"
+     random_id = str(random.randint(10000, 99999))
+     pattern = f"{random_id}"
+     os.makedirs("outputs", exist_ok=True)
+     write(f'{random_id}.wav', mdx23c_audio[0], mdx23c_audio[1])
+     prompt = f"audio-separator {random_id}.wav --model_filename {mdx23c_model} --output_dir=./outputs --output_format={mdx23c_output_format} --normalization=0.9 --mdxc_segment_size={mdx23c_segment_size} --mdxc_overlap={mdx23c_overlap}"
+     os.system(prompt)
+
+     for file in os.listdir(directory):
+         if re.search(pattern, file):
+             files_list.append(os.path.join(directory, file))
+
+     stem1_file = files_list[0]
+     stem2_file = files_list[1]
+
+     return stem1_file, stem2_file
+
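+ # MDX-NET separation: appends --mdx_enable_denoise only when denoising is requested.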
+ @spaces.GPU(duration=300)
+ def mdxnet_separator(mdxnet_audio, mdxnet_model, mdxnet_output_format, mdxnet_segment_size, mdxnet_overlap, mdxnet_denoise):
+     files_list = []
+     files_list.clear()
+     directory = "./outputs"
+     random_id = str(random.randint(10000, 99999))
+     pattern = f"{random_id}"
+     os.makedirs("outputs", exist_ok=True)
+     write(f'{random_id}.wav', mdxnet_audio[0], mdxnet_audio[1])
+     prompt = f"audio-separator {random_id}.wav --model_filename {mdxnet_model} --output_dir=./outputs --output_format={mdxnet_output_format} --normalization=0.9 --mdx_segment_size={mdxnet_segment_size} --mdx_overlap={mdxnet_overlap}"
+
+     if mdxnet_denoise:
+         prompt += " --mdx_enable_denoise"
+
+     os.system(prompt)
+
+     for file in os.listdir(directory):
+         if re.search(pattern, file):
+             files_list.append(os.path.join(directory, file))
+
+     stem1_file = files_list[0]
+     stem2_file = files_list[1]
+
+     return stem1_file, stem2_file
+
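+ # VR Arch separation: window size and aggression are always passed; the TTA and
+ # high-end-process flags are appended only when enabled.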
+ @spaces.GPU(duration=300)
+ def vrarch_separator(vrarch_audio, vrarch_model, vrarch_output_format, vrarch_window_size, vrarch_agression, vrarch_tta, vrarch_high_end_process):
+     files_list = []
+     files_list.clear()
+     directory = "./outputs"
+     random_id = str(random.randint(10000, 99999))
+     pattern = f"{random_id}"
+     os.makedirs("outputs", exist_ok=True)
+     write(f'{random_id}.wav', vrarch_audio[0], vrarch_audio[1])
+     prompt = f"audio-separator {random_id}.wav --model_filename {vrarch_model} --output_dir=./outputs --output_format={vrarch_output_format} --normalization=0.9 --vr_window_size={vrarch_window_size} --vr_aggression={vrarch_agression}"
+
+     if vrarch_tta:
+         prompt += " --vr_enable_tta"
+     if vrarch_high_end_process:
+         prompt += " --vr_high_end_process"
+
+     os.system(prompt)
+
+     for file in os.listdir(directory):
+         if re.search(pattern, file):
+             files_list.append(os.path.join(directory, file))
+
+     stem1_file = files_list[0]
+     stem2_file = files_list[1]
+
+     return stem1_file, stem2_file
+
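+ # Demucs separation: unlike the two-stem models above, it returns four stems.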
+ @spaces.GPU(duration=300)
+ def demucs_separator(demucs_audio, demucs_model, demucs_output_format, demucs_shifts, demucs_overlap):
+     files_list = []
+     files_list.clear()
+     directory = "./outputs"
+     random_id = str(random.randint(10000, 99999))
+     pattern = f"{random_id}"
+     os.makedirs("outputs", exist_ok=True)
+     write(f'{random_id}.wav', demucs_audio[0], demucs_audio[1])
+     prompt = f"audio-separator {random_id}.wav --model_filename {demucs_model} --output_dir=./outputs --output_format={demucs_output_format} --normalization=0.9 --demucs_shifts={demucs_shifts} --demucs_overlap={demucs_overlap}"
+
+     os.system(prompt)
+
+     for file in os.listdir(directory):
+         if re.search(pattern, file):
+             files_list.append(os.path.join(directory, file))
+
+     stem1_file = files_list[0]
+     stem2_file = files_list[1]
+     stem3_file = files_list[2]
+     stem4_file = files_list[3]
+
+     return stem1_file, stem2_file, stem3_file, stem4_file
+
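+ # Gradio Blocks UI: one tab per architecture, each wiring its inputs and
+ # "Separate!" button to the corresponding separator function above.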
+ with gr.Blocks(theme="NoCrypt/miku@1.2.2", title="🎵 UVR5 UI 🎵") as app:
+     gr.Markdown("<h1> 🎵 UVR5 UI 🎵 </h1>")
+     gr.Markdown("If you liked this HF Space, you can give me a ❤️")
+     gr.Markdown("Try UVR5 UI using Colab [here](https://colab.research.google.com/github/Eddycrack864/UVR5-UI/blob/main/UVR_UI.ipynb)")
+     with gr.Tabs():
+         with gr.TabItem("BS/Mel Roformer"):
+             with gr.Row():
+                 roformer_model = gr.Dropdown(
+                     label = "Select the Model",
+                     choices = list(roformer_models.keys()),
+                     interactive = True
+                 )
+                 roformer_output_format = gr.Dropdown(
+                     label = "Select the Output Format",
+                     choices = output_format,
+                     interactive = True
+                 )
+             with gr.Row():
+                 roformer_overlap = gr.Slider(
+                     minimum = 2,
+                     maximum = 4,
+                     step = 1,
+                     label = "Overlap",
+                     info = "Amount of overlap between prediction windows.",
+                     value = 4,
+                     interactive = True
+                 )
+             with gr.Row():
+                 roformer_audio = gr.Audio(
+                     label = "Input Audio",
+                     type = "numpy",
+                     interactive = True
+                 )
+             with gr.Row():
+                 roformer_button = gr.Button("Separate!", variant = "primary")
+             with gr.Row():
+                 roformer_stem1 = gr.Audio(
+                     show_download_button = True,
+                     interactive = False,
+                     label = "Stem 1",
+                     type = "filepath"
+                 )
+                 roformer_stem2 = gr.Audio(
+                     show_download_button = True,
+                     interactive = False,
+                     label = "Stem 2",
+                     type = "filepath"
+                 )
+
+             roformer_button.click(roformer_separator, [roformer_audio, roformer_model, roformer_output_format, roformer_overlap], [roformer_stem1, roformer_stem2])
+
+         with gr.TabItem("MDX23C"):
+             with gr.Row():
+                 mdx23c_model = gr.Dropdown(
+                     label = "Select the Model",
+                     choices = mdx23c_models,
+                     interactive = True
+                 )
+                 mdx23c_output_format = gr.Dropdown(
+                     label = "Select the Output Format",
+                     choices = output_format,
+                     interactive = True
+                 )
+             with gr.Row():
+                 mdx23c_segment_size = gr.Slider(
+                     minimum = 32,
+                     maximum = 4000,
+                     step = 32,
+                     label = "Segment Size",
+                     info = "Larger consumes more resources, but may give better results.",
+                     value = 256,
+                     interactive = True
+                 )
+                 mdx23c_overlap = gr.Slider(
+                     minimum = 2,
+                     maximum = 50,
+                     step = 1,
+                     label = "Overlap",
+                     info = "Amount of overlap between prediction windows.",
+                     value = 8,
+                     interactive = True
+                 )
+             with gr.Row():
+                 mdx23c_audio = gr.Audio(
+                     label = "Input Audio",
+                     type = "numpy",
+                     interactive = True
+                 )
+             with gr.Row():
+                 mdx23c_button = gr.Button("Separate!", variant = "primary")
+             with gr.Row():
+                 mdx23c_stem1 = gr.Audio(
+                     show_download_button = True,
+                     interactive = False,
+                     label = "Stem 1",
+                     type = "filepath"
+                 )
+                 mdx23c_stem2 = gr.Audio(
+                     show_download_button = True,
+                     interactive = False,
+                     label = "Stem 2",
+                     type = "filepath"
+                 )
+
+             mdx23c_button.click(mdxc_separator, [mdx23c_audio, mdx23c_model, mdx23c_output_format, mdx23c_segment_size, mdx23c_overlap], [mdx23c_stem1, mdx23c_stem2])
+
+         with gr.TabItem("MDX-NET"):
+             with gr.Row():
+                 mdxnet_model = gr.Dropdown(
+                     label = "Select the Model",
+                     choices = mdxnet_models,
+                     interactive = True
+                 )
+                 mdxnet_output_format = gr.Dropdown(
+                     label = "Select the Output Format",
+                     choices = output_format,
+                     interactive = True
+                 )
+             with gr.Row():
+                 mdxnet_segment_size = gr.Slider(
+                     minimum = 32,
+                     maximum = 4000,
+                     step = 32,
+                     label = "Segment Size",
+                     info = "Larger consumes more resources, but may give better results.",
+                     value = 256,
+                     interactive = True
+                 )
+                 mdxnet_overlap = gr.Dropdown(
+                     label = "Overlap",
+                     choices = mdxnet_overlap_values,
+                     value = mdxnet_overlap_values[0],
+                     interactive = True
+                 )
+                 mdxnet_denoise = gr.Checkbox(
+                     label = "Denoise",
+                     info = "Enable denoising during separation.",
+                     value = True,
+                     interactive = True
+                 )
+             with gr.Row():
+                 mdxnet_audio = gr.Audio(
+                     label = "Input Audio",
+                     type = "numpy",
+                     interactive = True
+                 )
+             with gr.Row():
+                 mdxnet_button = gr.Button("Separate!", variant = "primary")
+             with gr.Row():
+                 mdxnet_stem1 = gr.Audio(
+                     show_download_button = True,
+                     interactive = False,
+                     label = "Stem 1",
+                     type = "filepath"
+                 )
+                 mdxnet_stem2 = gr.Audio(
+                     show_download_button = True,
+                     interactive = False,
+                     label = "Stem 2",
+                     type = "filepath"
+                 )
+
+             mdxnet_button.click(mdxnet_separator, [mdxnet_audio, mdxnet_model, mdxnet_output_format, mdxnet_segment_size, mdxnet_overlap, mdxnet_denoise], [mdxnet_stem1, mdxnet_stem2])
+
+         with gr.TabItem("VR ARCH"):
+             with gr.Row():
+                 vrarch_model = gr.Dropdown(
+                     label = "Select the Model",
+                     choices = vrarch_models,
+                     interactive = True
+                 )
+                 vrarch_output_format = gr.Dropdown(
+                     label = "Select the Output Format",
+                     choices = output_format,
+                     interactive = True
+                 )
+             with gr.Row():
+                 vrarch_window_size = gr.Dropdown(
+                     label = "Window Size",
+                     choices = vrarch_window_size_values,
+                     value = vrarch_window_size_values[0],
+                     interactive = True
+                 )
+                 vrarch_agression = gr.Slider(
+                     minimum = 1,
+                     maximum = 50,
+                     step = 1,
+                     label = "Aggression",
+                     info = "Intensity of primary stem extraction.",
+                     value = 5,
+                     interactive = True
+                 )
+                 vrarch_tta = gr.Checkbox(
+                     label = "TTA",
+                     info = "Enable Test-Time-Augmentation; slow but improves quality.",
+                     value = True,
+                     visible = True,
+                     interactive = True,
+                 )
+                 vrarch_high_end_process = gr.Checkbox(
+                     label = "High End Process",
+                     info = "Mirror the missing frequency range of the output.",
+                     value = False,
+                     visible = True,
+                     interactive = True,
+                 )
+             with gr.Row():
+                 vrarch_audio = gr.Audio(
+                     label = "Input Audio",
+                     type = "numpy",
+                     interactive = True
+                 )
+             with gr.Row():
+                 vrarch_button = gr.Button("Separate!", variant = "primary")
+             with gr.Row():
+                 vrarch_stem1 = gr.Audio(
+                     show_download_button = True,
+                     interactive = False,
+                     type = "filepath",
+                     label = "Stem 1"
+                 )
+                 vrarch_stem2 = gr.Audio(
+                     show_download_button = True,
+                     interactive = False,
+                     type = "filepath",
+                     label = "Stem 2"
+                 )
+
+             vrarch_button.click(vrarch_separator, [vrarch_audio, vrarch_model, vrarch_output_format, vrarch_window_size, vrarch_agression, vrarch_tta, vrarch_high_end_process], [vrarch_stem1, vrarch_stem2])
+
+         with gr.TabItem("Demucs"):
+             with gr.Row():
+                 demucs_model = gr.Dropdown(
+                     label = "Select the Model",
+                     choices = demucs_models,
+                     interactive = True
+                 )
+                 demucs_output_format = gr.Dropdown(
+                     label = "Select the Output Format",
+                     choices = output_format,
+                     interactive = True
+                 )
+             with gr.Row():
+                 demucs_shifts = gr.Slider(
+                     minimum = 1,
+                     maximum = 20,
+                     step = 1,
+                     label = "Shifts",
+                     info = "Number of predictions with random shifts; higher is slower but gives better quality.",
+                     value = 2,
+                     interactive = True
+                 )
+                 demucs_overlap = gr.Dropdown(
+                     label = "Overlap",
+                     choices = demucs_overlap_values,
+                     value = demucs_overlap_values[0],
+                     interactive = True
+                 )
+             with gr.Row():
+                 demucs_audio = gr.Audio(
+                     label = "Input Audio",
+                     type = "numpy",
+                     interactive = True
+                 )
+             with gr.Row():
+                 demucs_button = gr.Button("Separate!", variant = "primary")
+             with gr.Row():
+                 demucs_stem1 = gr.Audio(
+                     show_download_button = True,
+                     interactive = False,
+                     type = "filepath",
+                     label = "Stem 1"
+                 )
+                 demucs_stem2 = gr.Audio(
+                     show_download_button = True,
+                     interactive = False,
+                     type = "filepath",
+                     label = "Stem 2"
+                 )
+             with gr.Row():
+                 demucs_stem3 = gr.Audio(
+                     show_download_button = True,
+                     interactive = False,
+                     type = "filepath",
+                     label = "Stem 3"
+                 )
+                 demucs_stem4 = gr.Audio(
+                     show_download_button = True,
+                     interactive = False,
+                     type = "filepath",
+                     label = "Stem 4"
+                 )
+
+             demucs_button.click(demucs_separator, [demucs_audio, demucs_model, demucs_output_format, demucs_shifts, demucs_overlap], [demucs_stem1, demucs_stem2, demucs_stem3, demucs_stem4])
+
+         with gr.TabItem("Credits"):
+             gr.Markdown(
+                 """
+                 UVR5 UI created by **[Eddycrack 864](https://github.com/Eddycrack864)**. Join the **[AI HUB](https://discord.gg/aihub)** community.
+
+                 * python-audio-separator by [beveradb](https://github.com/beveradb).
+                 * Special thanks to [Ilaria](https://github.com/TheStingerX) for hosting this Space and for their help.
+                 * Thanks to [Mikus](https://github.com/cappuch) for helping with the code.
+                 * Thanks to [Nick088](https://huggingface.co/Nick088) for helping fix the Roformer models.
+                 * Improvements by [Blane187](https://github.com/Blane187).
+
+                 You can donate to the original UVR5 project here:
+                 [!["Buy Me A Coffee"](https://www.buymeacoffee.com/assets/img/custom_images/orange_img.png)](https://www.buymeacoffee.com/uvr5)
+                 """
+             )
 
+ app.queue()
+ app.launch()