Spaces:
Running
on
Zero
Commit
•
ea17fc9
1
Parent(s):
c5a477d
Update app.py
Browse files
app.py
CHANGED
@@ -9,17 +9,12 @@ from transformers import MusicgenForConditionalGeneration, MusicgenProcessor, se
|
|
9 |
from transformers.generation.streamers import BaseStreamer
|
10 |
|
11 |
import gradio as gr
|
|
|
12 |
|
13 |
|
14 |
model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
|
15 |
processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")
|
16 |
|
17 |
-
device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
18 |
-
if device != model.device:
|
19 |
-
model.to(device)
|
20 |
-
if device == "cuda:0":
|
21 |
-
model.half()
|
22 |
-
|
23 |
title = "MusicGen Streaming"
|
24 |
|
25 |
description = """
|
@@ -49,7 +44,7 @@ particularly when the chunk size is chosen to be small. In practice, the chunk s
|
|
49 |
smaller chunk size will mean that the first chunk is ready faster, but should not be chosen so small that the model generates slower
|
50 |
than the time it takes to play the audio.
|
51 |
|
52 |
-
For details on how the streaming class works, check out the source code for the [MusicgenStreamer](https://huggingface.co/spaces/sanchit-gandhi/musicgen-streaming/blob/main/app.py#
|
53 |
"""
|
54 |
|
55 |
|
@@ -180,10 +175,17 @@ target_dtype = np.int16
|
|
180 |
max_range = np.iinfo(target_dtype).max
|
181 |
|
182 |
|
|
|
183 |
def generate_audio(text_prompt, audio_length_in_s=10.0, play_steps_in_s=2.0, seed=0):
|
184 |
max_new_tokens = int(frame_rate * audio_length_in_s)
|
185 |
play_steps = int(frame_rate * play_steps_in_s)
|
186 |
|
|
|
|
|
|
|
|
|
|
|
|
|
187 |
inputs = processor(
|
188 |
text=text_prompt,
|
189 |
padding=True,
|
|
|
9 |
from transformers.generation.streamers import BaseStreamer
|
10 |
|
11 |
import gradio as gr
|
12 |
+
import spaces
|
13 |
|
14 |
|
15 |
model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
|
16 |
processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")
|
17 |
|
|
|
|
|
|
|
|
|
|
|
|
|
18 |
title = "MusicGen Streaming"
|
19 |
|
20 |
description = """
|
|
|
44 |
smaller chunk size will mean that the first chunk is ready faster, but should not be chosen so small that the model generates slower
|
45 |
than the time it takes to play the audio.
|
46 |
|
47 |
+
For details on how the streaming class works, check out the source code for the [MusicgenStreamer](https://huggingface.co/spaces/sanchit-gandhi/musicgen-streaming/blob/main/app.py#L50).
|
48 |
"""
|
49 |
|
50 |
|
|
|
175 |
max_range = np.iinfo(target_dtype).max
|
176 |
|
177 |
|
178 |
+
@spaces.GPU
|
179 |
def generate_audio(text_prompt, audio_length_in_s=10.0, play_steps_in_s=2.0, seed=0):
|
180 |
max_new_tokens = int(frame_rate * audio_length_in_s)
|
181 |
play_steps = int(frame_rate * play_steps_in_s)
|
182 |
|
183 |
+
device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
184 |
+
if device != model.device:
|
185 |
+
model.to(device)
|
186 |
+
if device == "cuda:0":
|
187 |
+
model.half()
|
188 |
+
|
189 |
inputs = processor(
|
190 |
text=text_prompt,
|
191 |
padding=True,
|