Create ani
ani
ADDED
@@ -0,0 +1,30 @@
+import gradio as gr
+import torch
+from diffusers import DiffusionPipeline
+from diffusers.utils import export_to_video
+
+# Load the model
+pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
+pipe = pipe.to("cuda")
+
+# Define the video-generation function
+def text_to_video(prompt):
+    # In recent diffusers releases .frames is batched, so take the first (only) clip
+    video_frames = pipe(prompt, num_inference_steps=50).frames[0]
+    # Convert the frames to a video file and return its path for gr.Video
+    return export_to_video(video_frames)
+
+# Gradio UI setup
+with gr.Blocks() as demo:
+    gr.Markdown("# Text to Video Generator")
+    with gr.Row():
+        with gr.Column():
+            prompt = gr.Textbox(label="Enter your prompt")
+            generate_btn = gr.Button("Generate Video")
+        with gr.Column():
+            video_output = gr.Video(label="Generated Video")
+
+    generate_btn.click(fn=text_to_video, inputs=prompt, outputs=video_output)
+
+# Launch the Gradio app
+demo.launch()