import torch
from xora.models.autoencoders.causal_video_autoencoder import CausalVideoAutoencoder
from xora.models.transformers.transformer3d import Transformer3DModel
from xora.models.transformers.symmetric_patchifier import SymmetricPatchifier
from xora.schedulers.rf import RectifiedFlowScheduler
from xora.pipelines.pipeline_video_pixart_alpha import VideoPixArtAlphaPipeline
from pathlib import Path
from transformers import T5EncoderModel
import safetensors.torch
import json
# Paths to the separately saved model components
separate_dir = Path("/opt/models/xora-txt2video")
unet_dir = separate_dir / "unet"
vae_dir = separate_dir / "vae"
scheduler_dir = separate_dir / "scheduler"
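# If the checkpoints are not already on disk, one way to populate separate_dir is
# huggingface_hub.snapshot_download. This is only a sketch: the repo id below is a
# placeholder, not the actual repository name.
# from huggingface_hub import snapshot_download
# snapshot_download(repo_id="<your-xora-txt2video-repo>", local_dir=separate_dir)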
# Load the VAE from its directory
vae_ckpt_path = vae_dir / "diffusion_pytorch_model.safetensors"
vae_config_path = vae_dir / "config.json"
with open(vae_config_path, "r") as f:
    vae_config = json.load(f)
vae_state_dict = safetensors.torch.load_file(vae_ckpt_path)
vae = CausalVideoAutoencoder.from_pretrained_conf(
    config=vae_config,
    state_dict=vae_state_dict,
    torch_dtype=torch.bfloat16,
).cuda()
# Load the denoising transformer (used in place of a UNet) from its directory
unet_ckpt_path = unet_dir / "diffusion_pytorch_model.safetensors"
unet_config_path = unet_dir / "config.json"
transformer_config = Transformer3DModel.load_config(unet_config_path)
transformer = Transformer3DModel.from_config(transformer_config)
unet_state_dict = safetensors.torch.load_file(unet_ckpt_path)
transformer.load_state_dict(unet_state_dict, strict=True)
transformer = transformer.cuda()
unet = transformer  # same model, exposed under both names for the pipeline
# Load the scheduler from its directory
scheduler_config_path = scheduler_dir / "scheduler_config.json"
scheduler_config = RectifiedFlowScheduler.load_config(scheduler_config_path)
scheduler = RectifiedFlowScheduler.from_config(scheduler_config)
# Patchifier (unchanged)
patchifier = SymmetricPatchifier(patch_size=1)
# Use submodels for the pipeline
submodel_dict = {
    "unet": unet,
    "transformer": transformer,
    "patchifier": patchifier,
    "scheduler": scheduler,
    "vae": vae,
}
model_name_or_path = "PixArt-alpha/PixArt-XL-2-1024-MS"
pipeline = VideoPixArtAlphaPipeline.from_pretrained(
    model_name_or_path,
    safety_checker=None,
    revision=None,
    torch_dtype=torch.float32,
    **submodel_dict,
).to("cuda")
# Sampling parameters
num_inference_steps = 20
num_images_per_prompt = 2
guidance_scale = 3
height = 512
width = 768
num_frames = 57
frame_rate = 25
sample = {
    "prompt": "A middle-aged man with glasses and a salt-and-pepper beard is driving a car and talking, gesturing with his right hand. "
    "The man is wearing a dark blue zip-up jacket and a light blue collared shirt. He is sitting in the driver's seat of a car with a black interior. The car is moving on a road with trees and bushes on either side. The man has a serious expression on his face and is looking straight ahead.",
    "prompt_attention_mask": None,  # Adjust attention masks as needed
    "negative_prompt": "Ugly deformed",
    "negative_prompt_attention_mask": None,
}
# Generate images (video frames)
images = pipeline(
    num_inference_steps=num_inference_steps,
    num_images_per_prompt=num_images_per_prompt,
    guidance_scale=guidance_scale,
    generator=None,
    output_type="pt",
    callback_on_step_end=None,
    height=height,
    width=width,
    num_frames=num_frames,
    frame_rate=frame_rate,
    **sample,
    is_video=True,
    vae_per_channel_normalize=True,
).images
print("Generated images (video frames).")