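# Assemble the Xora image-to-video pipeline from separately saved
# components (unet / vae / scheduler), then run inference on
# precomputed conditioning tensors.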
import json
from pathlib import Path

import safetensors.torch
import torch

from xora.models.autoencoders.causal_video_autoencoder import CausalVideoAutoencoder
from xora.models.transformers.transformer3d import Transformer3DModel
from xora.models.transformers.symmetric_patchifier import SymmetricPatchifier
from xora.schedulers.rf import RectifiedFlowScheduler
from xora.pipelines.pipeline_video_pixart_alpha import VideoPixArtAlphaPipeline
# Paths to the separately saved model components
separate_dir = Path("/opt/models/xora-img2video")
unet_dir = separate_dir / "unet"
vae_dir = separate_dir / "vae"
scheduler_dir = separate_dir / "scheduler"
# Load the VAE: instantiate from its config, then load the safetensors
# weights (mirrors how the transformer is loaded below)
vae_ckpt_path = vae_dir / "diffusion_pytorch_model.safetensors"
vae_config_path = vae_dir / "config.json"
with open(vae_config_path, "r") as f:
    vae_config = json.load(f)
vae = CausalVideoAutoencoder.from_config(vae_config)
vae_state_dict = safetensors.torch.load_file(vae_ckpt_path)
vae.load_state_dict(vae_state_dict)
vae = vae.to(torch.bfloat16).cuda()
# Load the transformer (the denoiser, passed to the pipeline as "unet")
unet_ckpt_path = unet_dir / "diffusion_pytorch_model.safetensors"
unet_config_path = unet_dir / "config.json"
transformer_config = Transformer3DModel.load_config(unet_config_path)
transformer = Transformer3DModel.from_config(transformer_config)
unet_state_dict = safetensors.torch.load_file(unet_ckpt_path)
transformer.load_state_dict(unet_state_dict, strict=True)
transformer = transformer.cuda()
unet = transformer  # same module, exposed under both keys below
# Load the rectified-flow scheduler from its config
scheduler_config_path = scheduler_dir / "scheduler_config.json"
scheduler_config = RectifiedFlowScheduler.load_config(scheduler_config_path)
scheduler = RectifiedFlowScheduler.from_config(scheduler_config)
# Patchifier (stateless, no weights to load)
patchifier = SymmetricPatchifier(patch_size=1)
# Submodels passed into the pipeline; text_encoder=None means prompts
# must be supplied as precomputed embeddings (see `sample` below)
submodel_dict = {
    "unet": unet,
    "transformer": transformer,
    "patchifier": patchifier,
    "text_encoder": None,
    "scheduler": scheduler,
    "vae": vae,
}
model_name_or_path = "PixArt-alpha/PixArt-XL-2-1024-MS"
pipeline = VideoPixArtAlphaPipeline.from_pretrained(
    model_name_or_path,
    safety_checker=None,
    revision=None,
    torch_dtype=torch.float32,  # dtype for hub-loaded parts; the VAE above stays bfloat16
    **submodel_dict,
).to("cuda")
num_inference_steps = 20
num_images_per_prompt = 2
guidance_scale = 3
height = 512
width = 768
num_frames = 57
frame_rate = 25
# `sample` is a dict of precomputed conditioning tensors saved with torch.save
sample = torch.load("/opt/sample.pt")
for key, item in sample.items():
    if item is not None:
        sample[key] = item.cuda()
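# Because text_encoder is None above, `sample` must already carry the prompt
# conditioning the pipeline expects. The exact key names (e.g. prompt_embeds /
# prompt_attention_mask) are an assumption based on the PixArt-alpha pipeline
# signature; check the keys in your sample file.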
media_items = torch.load("/opt/sample_media.pt")
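# NOTE: media_items is loaded here but never passed to the pipeline call
# below. If your sample dict does not already include the conditioning
# media, you likely want to add media_items=media_items to the call
# (an assumption; the image-to-video path conditions on input media).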
# Generate the video frames
images = pipeline(
    num_inference_steps=num_inference_steps,
    num_images_per_prompt=num_images_per_prompt,
    guidance_scale=guidance_scale,
    generator=None,
    output_type="pt",
    callback_on_step_end=None,
    height=height,
    width=width,
    num_frames=num_frames,
    frame_rate=frame_rate,
    **sample,
    is_video=True,
    vae_per_channel_normalize=True,
).images
print("Generated video frames.")
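
# Optional: write the first generated video to disk. A minimal sketch,
# assuming output_type="pt" returns a float tensor in [0, 1] shaped
# (batch, channels, frames, height, width); verify against your pipeline
# version before relying on it. Requires imageio[ffmpeg].
import imageio

video = images[0].permute(1, 2, 3, 0)  # -> (frames, height, width, channels)
frames = (video.clamp(0, 1) * 255).to(torch.uint8).cpu().numpy()
imageio.mimwrite("/opt/output.mp4", list(frames), fps=frame_rate)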