"""Generate video frames from a still image with Stable Video Diffusion.

Loads the SVD img2vid-xt pipeline once at import time and exposes
``Video(image)``, which returns the generated frames for one image.
"""

import torch
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video

# Prefer GPU when available.
# NOTE(review): fp16 weights are loaded below even on the CPU fallback —
# half precision on CPU is often unsupported/slow; confirm the CPU path works.
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using device for video:", device)

# Heavy module-level side effect: downloads/loads the model at import time.
pipeline = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt-1-1",
    torch_dtype=torch.float16,
    variant="fp16",
).to(device)
# pipeline.enable_model_cpu_offload()  # uncomment to trade speed for VRAM


def Video(image, seed=42, size=(1024, 576), decode_chunk_size=8):
    """Generate video frames from *image*.

    Parameters
    ----------
    image : str or PIL.Image.Image
        Path/URL or image object; passed to ``diffusers.utils.load_image``.
    seed : int, optional
        RNG seed for reproducible generation (default 42, the original
        hard-coded value).
    size : tuple[int, int], optional
        (width, height) the input is resized to before inference.
        Defaults to (1024, 576), the original hard-coded resolution.
    decode_chunk_size : int, optional
        Frames decoded per VAE chunk; lower values use less VRAM.

    Returns
    -------
    list
        The first (only) batch of generated frames from the pipeline.
    """
    image = load_image(image)
    image = image.resize(size)
    # Seeded generator on the pipeline's device for reproducible output.
    generator = torch.Generator(device=device).manual_seed(seed)
    frames = pipeline(
        image, decode_chunk_size=decode_chunk_size, generator=generator
    ).frames[0]
    # export_to_video(frames, "generated.mp4", fps=7)  # uncomment to save to disk
    return frames