reach-vb (HF staff) committed
Commit 2268e8d · 1 Parent(s): e5c03a9

Update app.py

Files changed (1)
  1. app.py +6 -3
app.py CHANGED
@@ -2,6 +2,7 @@ import torch
 
 import numpy as np
 import gradio as gr
+import soundfile as sf
 
 from transformers import pipeline
 from huggingface_hub import InferenceClient
@@ -30,9 +31,11 @@ def generate_audio(text,):
     prompt = f"Take the next sentence and enrich it with details. Keep it compact. {text}"
     output = client.text_generation(prompt, max_new_tokens=100)
     out = vibes(output)
-    np_audio = out["audio"][0]
+
+    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
+        sf.write(f.name, out["audio"][0].T, out["sampling_rate"])
 
-    return np_audio
+    return f.name
 
 css = """
 #container{
@@ -56,7 +59,7 @@ with gr.Blocks(css=css) as demo_blocks:
         btn = gr.Button("Generate Music!🎶")
 
     with gr.Column():
-        out = gr.Audio(type="numpy", autoplay=False, label=f"Generated Music", show_label=True,)
+        out = gr.Audio(autoplay=False, label=f"Generated Music", show_label=True,)
 
 
     with gr.Accordion("Run MusicGen with Transformers 🤗", open=False):
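
Taken together, the commit stops generate_audio from returning a raw NumPy waveform and instead writes the generated audio to a temporary WAV file with soundfile, returning the file path; accordingly, type="numpy" is dropped from the gr.Audio output component. Below is a minimal sketch of what app.py could look like after this commit. The tempfile import, the vibes pipeline, the client setup, the model names, and the simplified Blocks layout are not shown in the diff and are assumptions made here for illustration.

import tempfile  # assumption: not visible in the diff; presumably imported elsewhere in app.py

import torch        # present in app.py; unused in this sketch
import numpy as np  # present in app.py; unused in this sketch
import gradio as gr
import soundfile as sf

from transformers import pipeline
from huggingface_hub import InferenceClient

# Assumption: the diff does not show how `vibes` and `client` are constructed;
# these are hypothetical choices consistent with the MusicGen theme of the Space.
vibes = pipeline("text-to-audio", model="facebook/musicgen-small")
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")  # hypothetical LLM endpoint


def generate_audio(text):
    # Enrich the user prompt with an LLM, then synthesize music from the result.
    prompt = f"Take the next sentence and enrich it with details. Keep it compact. {text}"
    output = client.text_generation(prompt, max_new_tokens=100)
    out = vibes(output)

    # New behaviour from this commit: write the waveform to a temporary WAV file
    # and return its path instead of the raw NumPy array.
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
        sf.write(f.name, out["audio"][0].T, out["sampling_rate"])

    return f.name


# Simplified layout (the real app.py also defines custom CSS and an accordion).
with gr.Blocks() as demo_blocks:
    inp = gr.Textbox(label="Prompt")
    btn = gr.Button("Generate Music!🎶")
    with gr.Column():
        # type="numpy" was dropped: a string return value is treated by
        # gr.Audio as a filepath to the generated audio file.
        out = gr.Audio(autoplay=False, label="Generated Music", show_label=True)
    btn.click(generate_audio, inputs=inp, outputs=out)

demo_blocks.launch()

A likely motivation for the change: with type="numpy", gr.Audio expects a (sampling_rate, array) tuple, whereas the previous code returned only the bare array without its sampling rate; writing a WAV file and returning the path sidesteps that mismatch.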