import gradio as gr
import torch
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import export_to_video
import spaces

# Load the pre-trained image-to-video pipeline.
# fp16 weights and a CUDA device are assumed here, as on a GPU-backed Space.
pipeline = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt",
    torch_dtype=torch.float16,
    variant="fp16",
)
pipeline.to("cuda")
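# Optional: on memory-constrained GPUs, pipeline.enable_model_cpu_offload()
# can replace the .to("cuda") call above to reduce peak VRAM usage.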
# Generate a short video clip from a single conditioning image.
@spaces.GPU(duration=250)
def generate_video(image):
    """
    Generates a video from an input image using the pipeline.

    Args:
        image: A PIL Image object representing the input image.

    Returns:
        The path to an MP4 file containing the generated video frames.
    """
    # SVD-XT expects a 1024x576 RGB conditioning image.
    image = image.convert("RGB").resize((1024, 576))
    frames = pipeline(image, num_inference_steps=20, decode_chunk_size=8).frames[0]
    return export_to_video(frames, "generated.mp4", fps=7)


# Define the Gradio interface
interface = gr.Interface(
    fn=generate_video,
    inputs=gr.Image(type="pil"),
    outputs=gr.Video(),
    title="Stable Video Diffusion",
    description="Upload an image to generate a video",
    theme="soft",
)
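# Generation can take a while; on Gradio versions that do not queue events by
# default, interface.queue() can be called before launch so requests run in order.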
# Launch the Gradio app
interface.launch()