Spaces • Runtime error

Kvikontent committed
Commit • 3c33543 • 1 Parent(s): e16f7e6
Update app.py
app.py CHANGED
@@ -12,18 +12,29 @@ pipe.enable_model_cpu_offload()
 pipe.enable_vae_slicing()
 
 @spaces.GPU(duration=250)
-def generate(prompt):
-    video_frames = pipe(prompt, num_inference_steps
+def generate(prompt, num_inference_steps, num_frames):
+    video_frames = pipe(prompt, num_inference_steps=num_inference_steps, num_frames=num_frames).frames[0]
     video_path = export_to_video(video_frames, fps=10)
     return video_path
 
 prompt = gr.Textbox(label="Enter prompt to generate a video", info="Based on this prompt the AI will generate a video")
+description = """
+This is an **unofficial** demo of OpenAI's Sora, which has not been released yet.
+This Space was made using [ali-vilab/text-to-video-ms-1.7b](https://huggingface.co/ali-vilab/text-to-video-ms-1.7b).
+Estimated generation time is **150 seconds**.
+This Space runs on ZeroGPU; for faster generation, duplicate the Space and choose a faster GPU.
+"""
+num_inference_steps = gr.Slider(8, 64, step=8, value=24, label="Num Inference Steps", info="More steps give better quality")
+num_frames = gr.Slider(8, 640, step=8, value=200, label="Num of Frames", info="Controls the duration of the video")
 
 interface = gr.Interface(
     generate,
     inputs=[prompt],
+    additional_inputs=[num_inference_steps, num_frames],
     examples=[["Astronaut riding a horse"], ["Darth vader surfing in waves"]],
     outputs="video",
+    title="OpenAI Sora (Unofficial)",
+    description=description,
     cache_examples=False,
     theme="soft"
 ).launch()
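
For context, the hunk header shows that pipe.enable_model_cpu_offload() sits just above the changed region, and the new description names ali-vilab/text-to-video-ms-1.7b as the underlying model. A minimal sketch of the setup that likely precedes this hunk in app.py follows; the imports, dtype, and variant below are assumptions and are not part of the commit.

# Sketch of the setup assumed above this hunk (lines 1-11 of app.py are not
# shown in the diff); the fp16 settings are guesses, not the committed code.
import gradio as gr
import spaces
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video

pipe = DiffusionPipeline.from_pretrained(
    "ali-vilab/text-to-video-ms-1.7b",  # model named in the Space description
    torch_dtype=torch.float16,          # assumption: fp16 weights for ZeroGPU memory
    variant="fp16",                     # assumption
)
pipe.enable_model_cpu_offload()  # visible in the hunk header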
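
As a quick sanity check of the new generate() signature, a hypothetical local smoke test is sketched below; it is not part of the commit. The prompt comes from the examples list, the step count matches the slider default, and the frame count is kept small for speed. Outside a ZeroGPU Space the spaces.GPU decorator should act as a passthrough, so generate() can be called directly.

# Hypothetical smoke test: call generate() directly, bypassing the Gradio UI,
# to confirm the keyword arguments reach the pipeline and a video file is written.
if __name__ == "__main__":
    path = generate("Astronaut riding a horse", num_inference_steps=24, num_frames=16)
    print("video written to:", path)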