import torch
import gradio as gr
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video
# A CUDA (GPU) device is required for the fp16 inference and CPU offload used below
device = torch.device("cuda")
def generate_video(prompt):
    # Load the ModelScope text-to-video pipeline in half precision (fp16)
    pipe = DiffusionPipeline.from_pretrained(
        "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16"
    )
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    # Reduce GPU memory use: offload idle sub-models to the CPU and decode the VAE in slices
    pipe.enable_model_cpu_offload()
    pipe.enable_vae_slicing()
    # Generate the video frames (25 denoising steps, 200 frames)
    video_frames = pipe(prompt, num_inference_steps=25, num_frames=200).frames
    # Write the frames out as a video file and return its path
    # (on recent diffusers releases .frames is a list of per-prompt frame arrays;
    #  in that case pass video_frames[0] to export_to_video)
    video_path = export_to_video(video_frames)
    return video_path
# Expose the generator through a simple Gradio text-in, file-out interface
demo = gr.Interface(fn=generate_video, inputs="text", outputs="file")
demo.launch(share=True)