import os
import pathlib

import gradio as gr
from huggingface_hub import snapshot_download
from modelscope.outputs import OutputKeys
from modelscope.pipelines import pipeline

# Download the ModelScope text-to-video weights into a local directory.
# HUB_TOKEN must be set in the environment to authenticate against the Hub.
model_dir = pathlib.Path("weights")
snapshot_download(
    "damo-vilab/modelscope-damo-text-to-video-synthesis",
    repo_type="model",
    local_dir=model_dir,
    use_auth_token=os.environ["HUB_TOKEN"],
)

# Build the text-to-video synthesis pipeline from the downloaded weights.
pipe = pipeline("text-to-video-synthesis", model_dir.as_posix())


def generate_video(prompt):
    """Run the pipeline on a text prompt and return the path to the rendered video."""
    output = pipe({"text": prompt})[OutputKeys.OUTPUT_VIDEO]
    # Write the generated video bytes to a file in the working directory.
    video_path = os.path.join(os.getcwd(), "output.mp4")
    with open(video_path, "wb") as video_file:
        video_file.write(output)
    return video_path


iface = gr.Interface(fn=generate_video, inputs="text", outputs="video")
iface.launch()