import gradio as gr
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video

# Use a more stable text-to-video model.
# NOTE(review): float32 is kept so the pipeline also runs on CPU; on a CUDA
# device float16 would roughly halve memory use — confirm before changing.
pipe = DiffusionPipeline.from_pretrained(
    "cerspense/zeroscope_v2_576w", torch_dtype=torch.float32
)
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")


def text_to_video(prompt):
    """Generate a short video from *prompt* and return a playable file path.

    gr.Video expects a file path (or URL), not raw frame arrays, so the
    frames produced by the pipeline are encoded to an .mp4 first.

    Args:
        prompt: Text description of the video to generate.

    Returns:
        Path to the exported .mp4 file.
    """
    # pipe(...).frames is a batch: one frame sequence per prompt.
    # Take the first (and only) sequence, then encode it to a video file.
    video_frames = pipe(prompt, num_inference_steps=50).frames[0]
    return export_to_video(video_frames)


# Gradio UI setup
with gr.Blocks() as demo:
    gr.Markdown("# Text to Video Generator")
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Enter your prompt")
            generate_btn = gr.Button("Generate Video")
        with gr.Column():
            video_output = gr.Video(label="Generated Video")
    generate_btn.click(fn=text_to_video, inputs=prompt, outputs=video_output)

# Launch the Gradio app only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()