import gradio as gr
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import export_to_video
import torch
import os
from glob import glob
from PIL import Image
import spaces

# Load the pre-trained image-to-video pipeline in half precision and move it to the GPU
pipeline = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt",
    torch_dtype=torch.float16,
    variant="fp16",
)
pipeline.to("cuda")

# Define the Gradio interface
interface = gr.Interface(
    fn=lambda img: generate_video(img),  # lambda defers the lookup; generate_video is defined below
    inputs=gr.Image(type="pil"),
    outputs=gr.Video(),
    title="Stable Video Diffusion",
    description="Upload an image to generate a video",
    theme="soft"
)

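# Request a ZeroGPU allocation for up to 250 seconds per call (video generation can take a few minutes)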
@spaces.GPU(duration=250)
def generate_video(image):
  """
  Generates a video from an input image using the pipeline.

  Args:
      image: A PIL Image object representing the input image.

  Returns:
      The file path of the generated MP4 video.
  """
  # Run the pipeline; decode_chunk_size limits how many frames the VAE decodes at once to save memory
  video_frames = pipeline(image=image, num_inference_steps=20, decode_chunk_size=8).frames[0]

  # Frames to Video
  os.makedirs("outputs", exist_ok=True)
  base_count = len(glob(os.path.join("outputs", "*.mp4")))
  video_path = os.path.join("outputs", f"{base_count:06d}.mp4")
  export_to_video(video_frames, video_path, fps=6)

  return video_path

# Launch the Gradio app
interface.launch()
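
# Note: to run this app outside a Hugging Face Space, install gradio, torch, diffusers and the
# Hugging Face `spaces` package, then start it with: python app.py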