#!/usr/bin/env python
"""Gradio demo: generate a short video from a still image with ModelScope MS-Image2Video.

Downloads the model snapshot into a local cache directory on startup, builds an
image-to-video pipeline on CPU, and serves a simple Blocks UI.
"""

import os
import pathlib
import tempfile

import gradio as gr
import torch
from huggingface_hub import snapshot_download
from modelscope.outputs import OutputKeys
from modelscope.pipelines import pipeline

DESCRIPTION = "# ModelScope-Image2Video"
DESCRIPTION += "\nRunning on CPU. Performance may be slower compared to GPU."

# Model weights are cached locally; the cache root is configurable via env var
# so repeated launches skip the download.
model_cache_dir = os.getenv("MODEL_CACHE_DIR", "./models")
model_dir = pathlib.Path(model_cache_dir) / "MS-Image2Video"
snapshot_download(
    repo_id="damo-vilab/MS-Image2Video",
    repo_type="model",
    local_dir=model_dir,
)

# CPU-only pipeline (see DESCRIPTION); pinned model revision for reproducibility.
pipe = pipeline(
    task="image-to-video",
    model=model_dir.as_posix(),
    model_revision="v1.1.0",
    device="cpu",
)


def image_to_video(image_path: str) -> str:
    """Run the image-to-video pipeline on *image_path*.

    Args:
        image_path: Filesystem path of the input image (Gradio passes a filepath).

    Returns:
        Path of the generated ``.mp4`` file. The file is intentionally not
        deleted here — Gradio serves it to the client afterwards.
    """
    # mkstemp + os.close avoids leaking an open file descriptor: the original
    # NamedTemporaryFile handle was never closed, and the pipeline only needs
    # the path, not the handle.
    fd, output_path = tempfile.mkstemp(suffix=".mp4")
    os.close(fd)
    pipe(image_path, output_video=output_path)[OutputKeys.OUTPUT_VIDEO]
    return output_path


with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(
        value="Duplicate Space for private use",
        elem_id="duplicate-button",
        visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
    )
    with gr.Group():
        input_image = gr.Image(label="Input image", type="filepath")
        run_button = gr.Button()
        output_video = gr.Video(label="Output video")
    run_button.click(
        fn=image_to_video,
        inputs=input_image,
        outputs=output_video,
        api_name="run",
    )

if __name__ == "__main__":
    demo.queue(max_size=10).launch()