Commit 7b539b5 by j
1 Parent(s): 62d3af7

initial commit
Files changed:
- app.py +125 -0
- requirements.txt +10 -0
app.py (ADDED, +125 lines):
import sys
print(sys.path)
sys.path.append('/home/user/audio_ai/diffusers_harp/venv/src')

from pyharp import ModelCard, build_endpoint, save_and_return_filepath

from audiotools import AudioSignal
import scipy.io.wavfile  # import the submodule explicitly; `import scipy` alone does not guarantee scipy.io.wavfile is available
import torch
import gradio as gr
from diffusers import AudioLDM2Pipeline
import subprocess as sp


# Fallback installer for the pyharp dependencies, kept for reference:
#harp_deps = ["descript-audiotools"]
#
#try:
#    from pyharp import ModelCard, build_endpoint, save_and_return_filepath
#except ImportError:
#    print("Installing harp dependencies...")
#    sp.check_call(["pip", "install", *harp_deps])
#    sp.check_call(["pip", "install", "-e git+https://github.com/audacitorch/pyharp.git#egg=pyharp"])
#    sp.check_call(["pip", "install", "pydantic<2.0.0"])
#    from pyharp import ModelCard, build_endpoint, save_and_return_filepath
# Create a Model Card
card = ModelCard(
    name='Diffusers AudioLDM2 Style Transfer',
    description='AudioLDM2 style transfer, operates on region selected in track.',
    author='Team Audio',
    tags=['AudioLDM', 'Diffusers', 'Style Transfer']
)

# Load the model in half precision and move it to the GPU
repo_id = "cvssp/audioldm2"
pipe = AudioLDM2Pipeline.from_pretrained(repo_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
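As written, the pipeline is pinned to CUDA and will fail on CPU-only hardware. A minimal device-fallback sketch, assuming you would rather degrade to CPU than crash at startup (the device and dtype names are illustrative, not part of this commit):

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32  # fp16 inference is unreliable on CPU
pipe = AudioLDM2Pipeline.from_pretrained(repo_id, torch_dtype=dtype).to(device)

The same device string would then replace the hard-coded "cuda" passed to torch.Generator inside process_fn below.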
def process_fn(input_audio_path, prompt, negative_prompt, seed, num_inference_steps, audio_length_in_s, num_waveforms_per_prompt):
    """
    This function defines the audio processing steps.

    Args:
        input_audio_path (str): the audio filepath to be processed.
        <YOUR_KWARGS>: additional keyword arguments necessary for processing.
            NOTE: These should correspond to, and match the order of, the UI elements defined below.

    Returns:
        output_audio_path (str): the filepath of the processed audio.
    """
    sig = AudioSignal(input_audio_path)  # loaded from the selected region, but not yet passed to the pipeline
    outfile = "./output.wav"

    #prompt = "The sound of a hammer hitting a wooden surface."
    #negative_prompt = "Low quality."

    # Seed the generator so results are reproducible
    generator = torch.Generator("cuda").manual_seed(int(seed))

    audio = pipe(
        prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=int(num_inference_steps),  # cast: sliders may deliver floats
        audio_length_in_s=audio_length_in_s,
        num_waveforms_per_prompt=int(num_waveforms_per_prompt),
        generator=generator,
    ).audios

    # AudioLDM2 generates 16 kHz audio; write the first waveform to disk
    scipy.io.wavfile.write(outfile, rate=16000, data=audio[0])
    return outfile
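A quick way to sanity-check process_fn outside the UI is to call it directly. A hypothetical smoke test, reusing the commented-out example prompts above (it assumes a local input.wav exists, even though the current pipeline call does not consume it):

out_path = process_fn(
    "input.wav",
    prompt="The sound of a hammer hitting a wooden surface.",
    negative_prompt="Low quality.",
    seed=0,
    num_inference_steps=50,
    audio_length_in_s=5.0,
    num_waveforms_per_prompt=1,
)
print(out_path)  # ./output.wav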
# Build the endpoint
with gr.Blocks() as webapp:
    # Define your Gradio interface
    inputs = [
        gr.Audio(
            label="Audio Input",
            type="filepath"
        ),
        gr.Text(
            label="Prompt",
            interactive=True
        ),
        gr.Text(
            label="Negative Prompt",
            interactive=True
        ),
        gr.Slider(
            label="Seed",
            minimum=0,
            maximum=65535,
            value=0,
            step=1
        ),
        gr.Slider(
            minimum=1, maximum=500,
            step=1, value=1,
            label="Inference Steps"
        ),
        gr.Slider(
            minimum=2.5, maximum=10.0,
            step=2.5, value=2.5,
            label="Duration (s)"
        ),
        gr.Slider(
            minimum=1, maximum=10,
            step=1, value=1,
            label="Waveforms Per Prompt"
        ),
    ]

    # Make an output audio widget
    output = gr.Audio(label="Audio Output", type="filepath")

    # Wire the widgets to process_fn via pyharp's build_endpoint
    ctrls_data, ctrls_button, process_button, cancel_button = build_endpoint(inputs, output, process_fn, card)

#webapp.queue()
webapp.launch(share=True)
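One deployment note: with share=True and no queue, long diffusion runs can time out over the share link in some Gradio versions. Re-enabling the commented-out queue call before launch is the usual mitigation for long-running GPU jobs:

webapp.queue()  # serialize long-running generation requests
webapp.launch(share=True)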
requirements.txt (ADDED, +10 lines):
-e git+https://github.com/audacitorch/pyharp.git#egg=pyharp
descript-audiotools
diffusers
torch
scipy
transformers
accelerate
datetime
numpy
gradio
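These dependencies can be installed with pip install -r requirements.txt. One caveat worth noting: datetime is part of the Python standard library, so that line actually pulls an unrelated PyPI distribution of the same name and can almost certainly be dropped.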