j committed · Commit 5cbf40c · 1 Parent(s): 3fe568c
initial commit

Browse files:
- README.md +1 -1
- app.py +72 -0
- requirements.txt +3 -0
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title:
+title: AudioLDM Variations HARP plugin
 emoji: 💻
 colorFrom: blue
 colorTo: yellow
app.py ADDED
@@ -0,0 +1,72 @@
+from pyharp import ModelCard, build_endpoint, save_and_return_filepath
+from audiotools import AudioSignal
+from audioldm import build_model, save_wave, text_to_audio
+import gradio as gr
+
+audioldm = build_model(model_name="audioldm-m-full")
+
+def process_fn(input_audio_path, seed, guidance_scale, num_inference_steps, num_candidates, audio_length_in_s):
+    waveform = text_to_audio(
+        audioldm,
+        'placeholder',
+        input_audio_path,
+        seed = int(seed),
+        duration = audio_length_in_s,
+        guidance_scale = guidance_scale,
+        n_candidate_gen_per_text = int(num_candidates),
+        ddim_steps = int(num_inference_steps)
+    )
+
+    save_wave(waveform, "./", name="output.wav")
+
+
+card = ModelCard(
+    name='AudioLDM Variations',
+    description='AudioLDM Variation Generator, operates on region selected in track.',
+    author='Team Audio',
+    tags=['AudioLDM', 'Variations', 'audio-to-audio']
+)
+
+with gr.Blocks() as webapp:
+    # Define your Gradio interface
+    inputs = [
+        gr.Audio(
+            label="Audio Input",
+            type="filepath"
+        ),
+        gr.Slider(
+            label="seed",
+            minimum="0",
+            maximum="65535",
+            value="43534",
+            step="1"
+        ),
+        gr.Slider(
+            minimum=0, maximum=10,
+            step=0.1, value=2.5,
+            label="Guidance Scale"
+        ),
+        gr.Slider(
+            minimum=1, maximum=500,
+            step=1, value=200,
+            label="Inference Steps"
+        ),
+        gr.Slider(
+            minimum=1, maximum=10,
+            step=1, value=1,
+            label="Candidates"
+        ),
+        gr.Slider(
+            minimum=2.5, maximum=10.0,
+            step=2.5, value=5,
+            label="Duration"
+        ),
+    ]
+
+    output = gr.Audio(label="Audio Output", type="filepath")
+
+    ctrls_data, ctrls_button, process_button, cancel_button = build_endpoint(inputs, output, process_fn, card)
+
+    # queue the webapp: https://www.gradio.app/guides/setting-up-a-demo-for-maximum-performance
+    #webapp.queue()
+    webapp.launch(share=True)
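As committed, process_fn writes output.wav to disk but returns nothing, so the gr.Audio output component never receives a filepath; the unused AudioSignal and save_and_return_filepath imports hint at the intended pattern. Below is a minimal sketch (not part of the commit) of how the function body could hand a path back, assuming save_and_return_filepath accepts an AudioSignal and returns the path it saved to, and that text_to_audio returns an array of generated candidates at AudioLDM's 16 kHz output rate; both are assumptions, not verified against the installed libraries.

def process_fn(input_audio_path, seed, guidance_scale, num_inference_steps, num_candidates, audio_length_in_s):
    waveform = text_to_audio(
        audioldm,
        'placeholder',
        input_audio_path,
        seed=int(seed),
        duration=audio_length_in_s,
        guidance_scale=guidance_scale,
        n_candidate_gen_per_text=int(num_candidates),
        ddim_steps=int(num_inference_steps)
    )
    # Assumption: take the first generated candidate and wrap it at 16 kHz,
    # AudioLDM's nominal output sample rate.
    sig = AudioSignal(waveform[0], sample_rate=16000)
    # Assumption: pyharp's save_and_return_filepath saves the signal and returns
    # the saved path, which the HARP endpoint forwards to the gr.Audio output.
    return save_and_return_filepath(sig)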
requirements.txt ADDED
@@ -0,0 +1,3 @@
+-e git+https://github.com/audacitorch/pyharp.git#egg=pyharp
+descript-audiotools
+audioldm