Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -9,6 +9,9 @@ from diffusers import DDPMScheduler
 from pico_model import PicoDiffusion
 from audioldm.variational_autoencoder.autoencoder import AutoencoderKL
 from llm_preprocess import get_event, preprocess_gemini, preprocess_gpt
+
+import spaces
+
 class dotdict(dict):
     """dot.notation access to dictionary attributes"""
     __getattr__ = dict.get
@@ -35,6 +38,8 @@ class InferRunner:
 device = "cuda" if torch.cuda.is_available() else "cpu"
 runner = InferRunner(device)
 event_list = get_event()
+
+@spaces.GPU(duration=240)
 def infer(caption, num_steps=200, guidance_scale=3.0, audio_len=16000*10):
     with torch.no_grad():
         latents = runner.pico_model.demo_inference(caption, runner.scheduler, num_steps=num_steps, guidance_scale=guidance_scale, num_samples_per_prompt=1, disable_progress=True)
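For context: this commit wires the demo into Hugging Face ZeroGPU. Importing spaces and decorating the inference entry point with @spaces.GPU(duration=240) asks the Space to attach a GPU only while a decorated call is running (here for up to 240 seconds per call), while the rest of the app stays on CPU. Below is a minimal, self-contained sketch of that pattern, not taken from this repo's app.py; gpu_task and the toy tensor are illustrative stand-ins.

import gradio as gr
import spaces
import torch

@spaces.GPU(duration=240)  # request a GPU for up to 240 s per call
def gpu_task(prompt: str) -> str:
    # Inside a @spaces.GPU-decorated call, ZeroGPU attaches a CUDA device,
    # so CUDA ops work here even though the Space idles on CPU.
    x = torch.rand(4, 4, device="cuda")
    return f"{prompt!r} was processed on {x.device}"

demo = gr.Interface(fn=gpu_task, inputs="text", outputs="text")
demo.launch()

In this app the decorator sits on infer, so a device is allocated right before runner.pico_model.demo_inference runs and released once the call returns.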