artificialguybr committed
Commit a17cbc4 · 1 Parent(s): 456ed62

Update app.py

Files changed (1): app.py (+13, -5)
app.py CHANGED
@@ -1,9 +1,10 @@
 import torch
 import torchaudio
 from einops import rearrange
-
 import gradio as gr
 import spaces
+import os
+import uuid
 
 # Importing the model-related functions
 from stable_audio_tools import get_pretrained_model
@@ -13,9 +14,14 @@ from stable_audio_tools.inference.generation import generate_diffusion_cond
 @spaces.GPU(duration=120) # Allocate GPU only when this function is called
 def generate_audio(prompt, seconds_total=30, steps=100, cfg_scale=7):
     device = "cuda" if torch.cuda.is_available() else "cpu"
+
+    # Fetch the Hugging Face token from the environment variable
+    hf_token = os.getenv('HF_TOKEN')
+    if not hf_token:
+        raise EnvironmentError("HF_TOKEN environment variable not set")
 
     # Download and set up the model
-    model, model_config = get_pretrained_model("stabilityai/stable-audio-open-1.0")
+    model, model_config = get_pretrained_model("stabilityai/stable-audio-open-1.0", use_auth_token=hf_token)
     sample_rate = model_config["sample_rate"]
     sample_size = model_config["sample_size"]
 
@@ -46,12 +52,14 @@ def generate_audio(prompt, seconds_total=30, steps=100, cfg_scale=7):
 
     # Peak normalize, clip, convert to int16
     output = output.to(torch.float32).div(torch.max(torch.abs(output))).clamp(-1, 1).mul(32767).to(torch.int16).cpu()
-
+
+    # Generate a unique filename for the output
+    unique_filename = f"output_{uuid.uuid4().hex}.wav"
     # Save to file
-    torchaudio.save("output.wav", output, sample_rate)
+    torchaudio.save(unique_filename, output, sample_rate)
 
     # Return the path to the generated audio file
-    return unique_filename
+    return unique_filename
 
 # Setting up the Gradio Interface
 interface = gr.Interface(
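
For reference, a minimal sketch of how the two changes in this commit fit together outside the diff context: the gated-repo token is read from the HF_TOKEN environment variable before the model download, and each result is written to a per-request filename instead of a shared output.wav. The helper names load_model and save_output below are illustrative, not part of the commit; the use_auth_token keyword is taken from the diff itself, and the rest of generate_audio (conditioning and diffusion sampling) is unchanged and not repeated here.

import os
import uuid

import torch
import torchaudio
from stable_audio_tools import get_pretrained_model  # same import as in app.py


def load_model():
    """Download the gated Stable Audio Open checkpoint using a Hub token."""
    # The token is expected to be set as an environment variable (e.g. a Space secret).
    hf_token = os.getenv("HF_TOKEN")
    if not hf_token:
        raise EnvironmentError("HF_TOKEN environment variable not set")
    # Forward the token to the download call, using the keyword as it appears in the commit.
    model, model_config = get_pretrained_model(
        "stabilityai/stable-audio-open-1.0", use_auth_token=hf_token
    )
    return model, model_config


def save_output(output: torch.Tensor, sample_rate: int) -> str:
    """Write the int16 waveform to a per-request WAV file and return its path."""
    # uuid4().hex gives a collision-resistant name, so concurrent Gradio requests
    # no longer race on a single shared "output.wav".
    unique_filename = f"output_{uuid.uuid4().hex}.wav"
    torchaudio.save(unique_filename, output, sample_rate)
    return unique_filename

On Hugging Face Spaces, HF_TOKEN would typically be added as a secret in the Space settings, which the runtime exposes to app.py as an environment variable.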