|
import copy |
|
import inspect |
|
from dataclasses import dataclass |
|
from typing import Callable, List, Optional, Union |
|
|
|
import numpy as np |
|
import PIL.Image |
|
import torch |
|
import torch.nn.functional as F |
|
from torch.nn.functional import grid_sample |
|
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer |
|
|
|
from ...image_processor import VaeImageProcessor |
|
from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin |
|
from ...models import AutoencoderKL, UNet2DConditionModel |
|
from ...models.lora import adjust_lora_scale_text_encoder |
|
from ...schedulers import KarrasDiffusionSchedulers |
|
from ...utils import USE_PEFT_BACKEND, BaseOutput, logging, scale_lora_layers, unscale_lora_layers |
|
from ...utils.torch_utils import randn_tensor |
|
from ..pipeline_utils import DiffusionPipeline |
|
from ..stable_diffusion import StableDiffusionSafetyChecker |
|
|
|
|
|
logger = logging.get_logger(__name__) |
|
|
|
|
|
def rearrange_0(tensor, f): |
|
F, C, H, W = tensor.size() |
|
tensor = torch.permute(torch.reshape(tensor, (F // f, f, C, H, W)), (0, 2, 1, 3, 4)) |
|
return tensor |
|
|
|
|
|
def rearrange_1(tensor): |
|
B, C, F, H, W = tensor.size() |
|
return torch.reshape(torch.permute(tensor, (0, 2, 1, 3, 4)), (B * F, C, H, W)) |
|
|
|
|
|
def rearrange_3(tensor, f): |
|
F, D, C = tensor.size() |
|
return torch.reshape(tensor, (F // f, f, D, C)) |
|
|
|
|
|
def rearrange_4(tensor): |
|
B, F, D, C = tensor.size() |
|
return torch.reshape(tensor, (B * F, D, C)) |
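# The four helpers above are minimal reshape/permute utilities. For reference (einops is not a dependency
# here), they correspond to the einops patterns:
#   rearrange_0: "(b f) c h w -> b c f h w"
#   rearrange_1: "b c f h w -> (b f) c h w"
#   rearrange_3: "(b f) d c -> b f d c"
#   rearrange_4: "b f d c -> (b f) d c"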
|
|
|
|
|
class CrossFrameAttnProcessor: |
|
""" |
|
Cross frame attention processor. Each frame attends to the first frame.
|
|
|
Args: |
|
batch_size: The effective batch size, not counting the frame dimension.
For example, when calling the UNet with a single prompt and `num_images_per_prompt=1`, `batch_size` should
be 2 because of classifier-free guidance.
|
""" |
|
|
|
def __init__(self, batch_size=2): |
|
self.batch_size = batch_size |
|
|
|
def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): |
|
batch_size, sequence_length, _ = hidden_states.shape |
|
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) |
|
query = attn.to_q(hidden_states) |
|
|
|
is_cross_attention = encoder_hidden_states is not None |
|
if encoder_hidden_states is None: |
|
encoder_hidden_states = hidden_states |
|
elif attn.norm_cross: |
|
encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) |
|
|
|
key = attn.to_k(encoder_hidden_states) |
|
value = attn.to_v(encoder_hidden_states) |
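# Cross-frame attention: for self-attention layers (no `encoder_hidden_states`), replace every frame's
# keys and values with those of the first frame so that each frame attends to frame 0.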
|
|
|
|
|
if not is_cross_attention: |
|
video_length = key.size()[0] // self.batch_size |
|
first_frame_index = [0] * video_length |
|
|
|
|
|
key = rearrange_3(key, video_length) |
|
key = key[:, first_frame_index] |
|
|
|
value = rearrange_3(value, video_length) |
|
value = value[:, first_frame_index] |
|
|
|
|
|
key = rearrange_4(key) |
|
value = rearrange_4(value) |
|
|
|
query = attn.head_to_batch_dim(query) |
|
key = attn.head_to_batch_dim(key) |
|
value = attn.head_to_batch_dim(value) |
|
|
|
attention_probs = attn.get_attention_scores(query, key, attention_mask) |
|
hidden_states = torch.bmm(attention_probs, value) |
|
hidden_states = attn.batch_to_head_dim(hidden_states) |
|
|
|
|
|
hidden_states = attn.to_out[0](hidden_states) |
|
|
|
hidden_states = attn.to_out[1](hidden_states) |
|
|
|
return hidden_states |
|
|
|
|
|
class CrossFrameAttnProcessor2_0: |
|
""" |
|
Cross frame attention processor using the scaled dot-product attention of PyTorch 2.0.
|
|
|
Args: |
|
batch_size: The effective batch size, not counting the frame dimension.
For example, when calling the UNet with a single prompt and `num_images_per_prompt=1`, `batch_size` should
be 2 because of classifier-free guidance.
|
""" |
|
|
|
def __init__(self, batch_size=2): |
|
if not hasattr(F, "scaled_dot_product_attention"): |
|
raise ImportError("CrossFrameAttnProcessor2_0 requires PyTorch 2.0. To use it, please upgrade PyTorch to 2.0.")
|
self.batch_size = batch_size |
|
|
|
def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): |
|
batch_size, sequence_length, _ = ( |
|
hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape |
|
) |
|
inner_dim = hidden_states.shape[-1] |
|
|
|
if attention_mask is not None: |
|
attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) |
|
|
|
|
|
attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) |
|
|
|
query = attn.to_q(hidden_states) |
|
|
|
is_cross_attention = encoder_hidden_states is not None |
|
if encoder_hidden_states is None: |
|
encoder_hidden_states = hidden_states |
|
elif attn.norm_cross: |
|
encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) |
|
|
|
key = attn.to_k(encoder_hidden_states) |
|
value = attn.to_v(encoder_hidden_states) |
|
|
|
|
|
if not is_cross_attention: |
|
video_length = max(1, key.size()[0] // self.batch_size) |
|
first_frame_index = [0] * video_length |
|
|
|
|
|
key = rearrange_3(key, video_length) |
|
key = key[:, first_frame_index] |
|
|
|
value = rearrange_3(value, video_length) |
|
value = value[:, first_frame_index] |
|
|
|
|
|
key = rearrange_4(key) |
|
value = rearrange_4(value) |
|
|
|
head_dim = inner_dim // attn.heads |
|
query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) |
|
key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) |
|
value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) |
|
|
|
|
|
|
|
hidden_states = F.scaled_dot_product_attention( |
|
query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False |
|
) |
|
|
|
hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) |
|
hidden_states = hidden_states.to(query.dtype) |
|
|
|
|
|
hidden_states = attn.to_out[0](hidden_states) |
|
|
|
hidden_states = attn.to_out[1](hidden_states) |
|
return hidden_states |
|
|
|
|
|
@dataclass |
|
class TextToVideoPipelineOutput(BaseOutput): |
|
r""" |
|
Output class for zero-shot text-to-video pipeline. |
|
|
|
Args: |
|
images (`List[PIL.Image.Image]` or `np.ndarray`):
|
List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, |
|
num_channels)`. |
|
nsfw_content_detected (`List[bool]`):
|
List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or |
|
`None` if safety checking could not be performed. |
|
""" |
|
|
|
images: Union[List[PIL.Image.Image], np.ndarray] |
|
nsfw_content_detected: Optional[List[bool]] |
|
|
|
|
|
def coords_grid(batch, ht, wd, device): |
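# Build a pixel coordinate grid of shape (batch, 2, ht, wd), where channel 0 holds the x (column)
# coordinate and channel 1 holds the y (row) coordinate.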
|
|
|
coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device), indexing="ij")
|
coords = torch.stack(coords[::-1], dim=0).float() |
|
return coords[None].repeat(batch, 1, 1, 1) |
|
|
|
|
|
def warp_single_latent(latent, reference_flow): |
|
""" |
|
Warp latent of a single frame with given flow |
|
|
|
Args: |
|
latent: latent code of a single frame |
|
reference_flow: flow which to warp the latent with |
|
|
|
Returns: |
|
warped: warped latent |
|
""" |
|
_, _, H, W = reference_flow.size() |
|
_, _, h, w = latent.size() |
|
coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) |
|
|
|
coords_t0 = coords0 + reference_flow |
|
coords_t0[:, 0] /= W |
|
coords_t0[:, 1] /= H |
|
|
|
coords_t0 = coords_t0 * 2.0 - 1.0 |
|
coords_t0 = F.interpolate(coords_t0, size=(h, w), mode="bilinear") |
|
coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) |
|
|
|
warped = grid_sample(latent, coords_t0, mode="nearest", padding_mode="reflection") |
|
return warped |
|
|
|
|
|
def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): |
|
""" |
|
Create translation motion field |
|
|
|
Args: |
|
motion_field_strength_x: motion strength along x-axis |
|
motion_field_strength_y: motion strength along y-axis |
|
frame_ids: Indexes of the frames whose latents are being processed.
|
This is needed when we perform chunk-by-chunk inference |
|
device: device |
|
dtype: dtype |
|
|
|
Returns:
reference_flow: A translation-only flow field of shape `(len(frame_ids), 2, 512, 512)`.
|
""" |
|
seq_length = len(frame_ids) |
|
reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) |
|
for fr_idx in range(seq_length): |
|
reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_ids[fr_idx]) |
|
reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_ids[fr_idx]) |
|
return reference_flow |
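# Example: with motion_field_strength_x = motion_field_strength_y = 12 and frame_ids = [0, 1, 2], frame k
# receives a constant flow of (12 * k, 12 * k) on the 512x512 reference grid, i.e. a global translation
# that grows linearly with the frame index.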
|
|
|
|
|
def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): |
|
""" |
|
Creates translation motion and warps the latents accordingly |
|
|
|
Args: |
|
motion_field_strength_x: motion strength along x-axis |
|
motion_field_strength_y: motion strength along y-axis |
|
frame_ids: Indexes of the frames whose latents are being processed.
|
This is needed when we perform chunk-by-chunk inference |
|
latents: latent codes of frames |
|
|
|
Returns: |
|
warped_latents: warped latents |
|
""" |
|
motion_field = create_motion_field( |
|
motion_field_strength_x=motion_field_strength_x, |
|
motion_field_strength_y=motion_field_strength_y, |
|
frame_ids=frame_ids, |
|
device=latents.device, |
|
dtype=latents.dtype, |
|
) |
|
warped_latents = latents.clone().detach() |
|
for i in range(len(warped_latents)): |
|
warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) |
|
return warped_latents |
|
|
|
|
|
class TextToVideoZeroPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin): |
|
r""" |
|
Pipeline for zero-shot text-to-video generation using Stable Diffusion. |
|
|
|
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods |
|
implemented for all pipelines (downloading, saving, running on a particular device, etc.). |
|
|
|
Args: |
|
vae ([`AutoencoderKL`]): |
|
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
|
text_encoder ([`CLIPTextModel`]): |
|
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). |
|
tokenizer (`CLIPTokenizer`): |
|
A [`~transformers.CLIPTokenizer`] to tokenize text. |
|
unet ([`UNet2DConditionModel`]): |
|
A [`UNet2DConditionModel`] to denoise the encoded video latents.
|
scheduler ([`SchedulerMixin`]): |
|
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of |
|
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. |
|
safety_checker ([`StableDiffusionSafetyChecker`]): |
|
Classification module that estimates whether generated images could be considered offensive or harmful. |
|
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details |
|
about a model's potential harms. |
|
feature_extractor ([`CLIPImageProcessor`]): |
|
A [`CLIPImageProcessor`] to extract features from generated images; used as inputs to the `safety_checker`. |
|
""" |
|
|
|
def __init__( |
|
self, |
|
vae: AutoencoderKL, |
|
text_encoder: CLIPTextModel, |
|
tokenizer: CLIPTokenizer, |
|
unet: UNet2DConditionModel, |
|
scheduler: KarrasDiffusionSchedulers, |
|
safety_checker: StableDiffusionSafetyChecker, |
|
feature_extractor: CLIPImageProcessor, |
|
requires_safety_checker: bool = True, |
|
): |
|
super().__init__() |
|
self.register_modules( |
|
vae=vae, |
|
text_encoder=text_encoder, |
|
tokenizer=tokenizer, |
|
unet=unet, |
|
scheduler=scheduler, |
|
safety_checker=safety_checker, |
|
feature_extractor=feature_extractor, |
|
) |
|
processor = ( |
|
CrossFrameAttnProcessor2_0(batch_size=2) |
|
if hasattr(F, "scaled_dot_product_attention") |
|
else CrossFrameAttnProcessor(batch_size=2) |
|
) |
|
self.unet.set_attn_processor(processor) |
|
|
|
if safety_checker is None and requires_safety_checker: |
|
logger.warning( |
|
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" |
|
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" |
|
" results in services or applications open to the public. Both the diffusers team and Hugging Face" |
|
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" |
|
" it only for use-cases that involve analyzing network behavior or auditing its results. For more" |
|
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." |
|
) |
|
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) |
|
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) |
|
|
|
def forward_loop(self, x_t0, t0, t1, generator): |
|
""" |
|
Perform DDPM forward process from time t0 to t1. This is the same as adding noise with corresponding variance. |
|
|
|
Args: |
|
x_t0: |
|
Latent code at time t0. |
|
t0: |
|
Timestep at t0. |
|
t1: |
|
Timestep at t1.
|
generator (`torch.Generator` or `List[torch.Generator]`, *optional*): |
|
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make |
|
generation deterministic. |
|
|
|
Returns: |
|
x_t1: |
|
Forward process applied to x_t0 from time t0 to t1. |
|
""" |
|
eps = randn_tensor(x_t0.size(), generator=generator, dtype=x_t0.dtype, device=x_t0.device) |
|
alpha_vec = torch.prod(self.scheduler.alphas[t0:t1]) |
|
x_t1 = torch.sqrt(alpha_vec) * x_t0 + torch.sqrt(1 - alpha_vec) * eps |
|
return x_t1 |
|
|
|
def backward_loop( |
|
self, |
|
latents, |
|
timesteps, |
|
prompt_embeds, |
|
guidance_scale, |
|
callback, |
|
callback_steps, |
|
num_warmup_steps, |
|
extra_step_kwargs, |
|
cross_attention_kwargs=None, |
|
): |
|
""" |
|
Perform backward process given list of time steps. |
|
|
|
Args: |
|
latents: |
|
Latents at time timesteps[0]. |
|
timesteps: |
|
Time steps along which to perform backward process. |
|
prompt_embeds: |
|
Pre-generated text embeddings. |
|
guidance_scale: |
|
A higher guidance scale value encourages the model to generate images closely linked to the text |
|
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. |
|
callback (`Callable`, *optional*): |
|
A function that is called every `callback_steps` steps during inference. The function is called with the
|
following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. |
|
callback_steps (`int`, *optional*, defaults to 1): |
|
The frequency at which the `callback` function is called. If not specified, the callback is called at |
|
every step. |
|
extra_step_kwargs: |
|
Extra keyword arguments to pass to `scheduler.step`, such as `eta` or `generator`.
|
cross_attention_kwargs: |
|
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in |
|
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). |
|
num_warmup_steps: |
|
Number of warmup steps.
|
|
|
Returns: |
|
latents: |
|
Latents of backward process output at time timesteps[-1]. |
|
""" |
|
do_classifier_free_guidance = guidance_scale > 1.0 |
|
num_steps = (len(timesteps) - num_warmup_steps) // self.scheduler.order |
|
with self.progress_bar(total=num_steps) as progress_bar: |
|
for i, t in enumerate(timesteps): |
|
|
|
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents |
|
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) |
|
|
|
|
|
noise_pred = self.unet( |
|
latent_model_input, |
|
t, |
|
encoder_hidden_states=prompt_embeds, |
|
cross_attention_kwargs=cross_attention_kwargs, |
|
).sample |
|
|
|
|
|
if do_classifier_free_guidance: |
|
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) |
|
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) |
|
|
|
|
|
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample |
|
|
|
|
|
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): |
|
progress_bar.update() |
|
if callback is not None and i % callback_steps == 0: |
|
step_idx = i // getattr(self.scheduler, "order", 1) |
|
callback(step_idx, t, latents) |
|
return latents.clone().detach() |
|
|
|
|
|
def check_inputs( |
|
self, |
|
prompt, |
|
height, |
|
width, |
|
callback_steps, |
|
negative_prompt=None, |
|
prompt_embeds=None, |
|
negative_prompt_embeds=None, |
|
callback_on_step_end_tensor_inputs=None, |
|
): |
|
if height % 8 != 0 or width % 8 != 0: |
|
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") |
|
|
|
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): |
|
raise ValueError( |
|
f"`callback_steps` has to be a positive integer but is {callback_steps} of type" |
|
f" {type(callback_steps)}." |
|
) |
|
if callback_on_step_end_tensor_inputs is not None and not all( |
|
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs |
|
): |
|
raise ValueError( |
|
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" |
|
) |
|
|
|
if prompt is not None and prompt_embeds is not None: |
|
raise ValueError( |
|
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" |
|
" only forward one of the two." |
|
) |
|
elif prompt is None and prompt_embeds is None: |
|
raise ValueError( |
|
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." |
|
) |
|
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): |
|
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") |
|
|
|
if negative_prompt is not None and negative_prompt_embeds is not None: |
|
raise ValueError( |
|
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" |
|
f" {negative_prompt_embeds}. Please make sure to only forward one of the two." |
|
) |
|
|
|
if prompt_embeds is not None and negative_prompt_embeds is not None: |
|
if prompt_embeds.shape != negative_prompt_embeds.shape: |
|
raise ValueError( |
|
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" |
|
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" |
|
f" {negative_prompt_embeds.shape}." |
|
) |
|
|
|
|
|
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): |
|
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) |
|
if isinstance(generator, list) and len(generator) != batch_size: |
|
raise ValueError( |
|
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" |
|
f" size of {batch_size}. Make sure the batch size matches the length of the generators." |
|
) |
|
|
|
if latents is None: |
|
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) |
|
else: |
|
latents = latents.to(device) |
|
|
|
|
|
latents = latents * self.scheduler.init_noise_sigma |
|
return latents |
|
|
|
@torch.no_grad() |
|
def __call__( |
|
self, |
|
prompt: Union[str, List[str]], |
|
video_length: Optional[int] = 8, |
|
height: Optional[int] = None, |
|
width: Optional[int] = None, |
|
num_inference_steps: int = 50, |
|
guidance_scale: float = 7.5, |
|
negative_prompt: Optional[Union[str, List[str]]] = None, |
|
num_videos_per_prompt: Optional[int] = 1, |
|
eta: float = 0.0, |
|
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, |
|
latents: Optional[torch.FloatTensor] = None, |
|
motion_field_strength_x: float = 12, |
|
motion_field_strength_y: float = 12, |
|
output_type: Optional[str] = "tensor", |
|
return_dict: bool = True, |
|
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, |
|
callback_steps: Optional[int] = 1, |
|
t0: int = 44, |
|
t1: int = 47, |
|
frame_ids: Optional[List[int]] = None, |
|
): |
|
""" |
|
The call function to the pipeline for generation. |
|
|
|
Args: |
|
prompt (`str` or `List[str]`, *optional*): |
|
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. |
|
video_length (`int`, *optional*, defaults to 8): |
|
The number of generated video frames. |
|
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): |
|
The height in pixels of the generated image. |
|
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): |
|
The width in pixels of the generated image. |
|
num_inference_steps (`int`, *optional*, defaults to 50): |
|
The number of denoising steps. More denoising steps usually lead to a higher quality image at the |
|
expense of slower inference. |
|
guidance_scale (`float`, *optional*, defaults to 7.5): |
|
A higher guidance scale value encourages the model to generate images closely linked to the text |
|
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. |
|
negative_prompt (`str` or `List[str]`, *optional*): |
|
The prompt or prompts to guide what to not include in video generation. If not defined, you need to |
|
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). |
|
num_videos_per_prompt (`int`, *optional*, defaults to 1): |
|
The number of videos to generate per prompt. |
|
eta (`float`, *optional*, defaults to 0.0): |
|
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies |
|
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. |
|
generator (`torch.Generator` or `List[torch.Generator]`, *optional*): |
|
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make |
|
generation deterministic. |
|
latents (`torch.FloatTensor`, *optional*): |
|
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video |
|
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents |
|
tensor is generated by sampling using the supplied random `generator`. |
|
output_type (`str`, *optional*, defaults to `"tensor"`):
The output format of the generated video. Choose `"latent"` to return the latent codes; any other value
returns the decoded video as a NumPy array.
|
return_dict (`bool`, *optional*, defaults to `True`): |
|
Whether or not to return a |
|
[`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoPipelineOutput`] instead of |
|
a plain tuple. |
|
callback (`Callable`, *optional*): |
|
A function that is called every `callback_steps` steps during inference. The function is called with the
|
following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. |
|
callback_steps (`int`, *optional*, defaults to 1): |
|
The frequency at which the `callback` function is called. If not specified, the callback is called at |
|
every step. |
|
motion_field_strength_x (`float`, *optional*, defaults to 12): |
|
Strength of motion in generated video along x-axis. See the [paper](https://arxiv.org/abs/2303.13439), |
|
Sect. 3.3.1. |
|
motion_field_strength_y (`float`, *optional*, defaults to 12): |
|
Strength of motion in generated video along y-axis. See the [paper](https://arxiv.org/abs/2303.13439), |
|
Sect. 3.3.1. |
|
t0 (`int`, *optional*, defaults to 44): |
|
Timestep t0. Should be in the range [0, num_inference_steps - 1]. See the |
|
[paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1. |
|
t1 (`int`, *optional*, defaults to 47): |
|
Timestep t1. Should be in the range [t0 + 1, num_inference_steps - 1]. See the
|
[paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1. |
|
frame_ids (`List[int]`, *optional*): |
|
Indexes of the frames that are being generated. This is used when generating longer videos |
|
chunk-by-chunk. |
|
|
|
Returns: |
|
[`~pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoPipelineOutput`]: |
|
The output contains an `ndarray` of the generated video when `output_type` is not `"latent"`, otherwise a
latent code of the generated video, and a list of `bool`s indicating whether the corresponding generated
video contains "not-safe-for-work" (nsfw) content.
|
""" |
|
assert video_length > 0 |
|
if frame_ids is None: |
|
frame_ids = list(range(video_length)) |
|
assert len(frame_ids) == video_length |
|
|
|
assert num_videos_per_prompt == 1 |
|
|
|
if isinstance(prompt, str): |
|
prompt = [prompt] |
|
if isinstance(negative_prompt, str): |
|
negative_prompt = [negative_prompt] |
|
|
|
|
|
height = height or self.unet.config.sample_size * self.vae_scale_factor |
|
width = width or self.unet.config.sample_size * self.vae_scale_factor |
|
|
|
|
|
self.check_inputs(prompt, height, width, callback_steps) |
|
|
|
|
|
batch_size = 1 if isinstance(prompt, str) else len(prompt) |
|
device = self._execution_device |
|
|
|
|
|
|
|
do_classifier_free_guidance = guidance_scale > 1.0 |
|
|
|
|
|
prompt_embeds_tuple = self.encode_prompt( |
|
prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt |
|
) |
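# `encode_prompt` returns (prompt_embeds, negative_prompt_embeds); for classifier-free guidance the
# unconditional (negative) embeddings are concatenated first so they line up with the first chunk of
# `noise_pred` in `backward_loop`.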
|
prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]]) |
|
|
|
|
|
self.scheduler.set_timesteps(num_inference_steps, device=device) |
|
timesteps = self.scheduler.timesteps |
|
|
|
|
|
num_channels_latents = self.unet.config.in_channels |
|
latents = self.prepare_latents( |
|
batch_size * num_videos_per_prompt, |
|
num_channels_latents, |
|
height, |
|
width, |
|
prompt_embeds.dtype, |
|
device, |
|
generator, |
|
latents, |
|
) |
|
|
|
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) |
|
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order |
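# Text2Video-Zero generation procedure:
#   1. Denoise the first frame's latent from T down to timestep t1 (x_1_t1), then further down to t0 (x_1_t0).
#   2. Replicate x_1_t0 for the remaining frames and warp each copy with a translation motion field.
#   3. Apply the DDPM forward process to bring the warped latents back up to timestep t1.
#   4. Jointly denoise all frame latents from t1 to 0 with cross-frame attention enabled.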
|
|
|
|
|
x_1_t1 = self.backward_loop( |
|
timesteps=timesteps[: -t1 - 1], |
|
prompt_embeds=prompt_embeds, |
|
latents=latents, |
|
guidance_scale=guidance_scale, |
|
callback=callback, |
|
callback_steps=callback_steps, |
|
extra_step_kwargs=extra_step_kwargs, |
|
num_warmup_steps=num_warmup_steps, |
|
) |
|
scheduler_copy = copy.deepcopy(self.scheduler) |
|
|
|
|
|
x_1_t0 = self.backward_loop( |
|
timesteps=timesteps[-t1 - 1 : -t0 - 1], |
|
prompt_embeds=prompt_embeds, |
|
latents=x_1_t1, |
|
guidance_scale=guidance_scale, |
|
callback=callback, |
|
callback_steps=callback_steps, |
|
extra_step_kwargs=extra_step_kwargs, |
|
num_warmup_steps=0, |
|
) |
|
|
|
|
|
x_2k_t0 = x_1_t0.repeat(video_length - 1, 1, 1, 1) |
|
|
|
|
|
x_2k_t0 = create_motion_field_and_warp_latents( |
|
motion_field_strength_x=motion_field_strength_x, |
|
motion_field_strength_y=motion_field_strength_y, |
|
latents=x_2k_t0, |
|
frame_ids=frame_ids[1:], |
|
) |
|
|
|
|
|
x_2k_t1 = self.forward_loop( |
|
x_t0=x_2k_t0, |
|
t0=timesteps[-t0 - 1].item(), |
|
t1=timesteps[-t1 - 1].item(), |
|
generator=generator, |
|
) |
|
|
|
|
|
x_1k_t1 = torch.cat([x_1_t1, x_2k_t1]) |
|
b, l, d = prompt_embeds.size() |
|
prompt_embeds = prompt_embeds[:, None].repeat(1, video_length, 1, 1).reshape(b * video_length, l, d) |
|
|
|
self.scheduler = scheduler_copy |
|
x_1k_0 = self.backward_loop( |
|
timesteps=timesteps[-t1 - 1 :], |
|
prompt_embeds=prompt_embeds, |
|
latents=x_1k_t1, |
|
guidance_scale=guidance_scale, |
|
callback=callback, |
|
callback_steps=callback_steps, |
|
extra_step_kwargs=extra_step_kwargs, |
|
num_warmup_steps=0, |
|
) |
|
latents = x_1k_0 |
|
|
|
|
|
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: |
|
self.unet.to("cpu") |
|
torch.cuda.empty_cache() |
|
|
|
if output_type == "latent": |
|
image = latents |
|
has_nsfw_concept = None |
|
else: |
|
image = self.decode_latents(latents) |
|
|
|
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) |
|
|
|
|
|
self.maybe_free_model_hooks() |
|
|
|
if not return_dict: |
|
return (image, has_nsfw_concept) |
|
|
|
return TextToVideoPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) |
|
|
|
|
|
def run_safety_checker(self, image, device, dtype): |
|
if self.safety_checker is None: |
|
has_nsfw_concept = None |
|
else: |
|
if torch.is_tensor(image): |
|
feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") |
|
else: |
|
feature_extractor_input = self.image_processor.numpy_to_pil(image) |
|
safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) |
|
image, has_nsfw_concept = self.safety_checker( |
|
images=image, clip_input=safety_checker_input.pixel_values.to(dtype) |
|
) |
|
return image, has_nsfw_concept |
|
|
|
|
|
def prepare_extra_step_kwargs(self, generator, eta): |
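# Prepare extra kwargs for the scheduler step, since not all schedulers have the same signature:
# `eta` is only used with the DDIMScheduler and is ignored by other schedulers; `generator` is only
# forwarded when the scheduler's `step` method accepts it.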
|
|
|
|
|
|
|
|
|
|
|
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) |
|
extra_step_kwargs = {} |
|
if accepts_eta: |
|
extra_step_kwargs["eta"] = eta |
|
|
|
|
|
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) |
|
if accepts_generator: |
|
extra_step_kwargs["generator"] = generator |
|
return extra_step_kwargs |
|
|
|
|
|
def encode_prompt( |
|
self, |
|
prompt, |
|
device, |
|
num_images_per_prompt, |
|
do_classifier_free_guidance, |
|
negative_prompt=None, |
|
prompt_embeds: Optional[torch.FloatTensor] = None, |
|
negative_prompt_embeds: Optional[torch.FloatTensor] = None, |
|
lora_scale: Optional[float] = None, |
|
clip_skip: Optional[int] = None, |
|
): |
|
r""" |
|
Encodes the prompt into text encoder hidden states. |
|
|
|
Args: |
|
prompt (`str` or `List[str]`, *optional*): |
|
prompt to be encoded |
|
device: (`torch.device`): |
|
torch device |
|
num_images_per_prompt (`int`): |
|
number of images that should be generated per prompt |
|
do_classifier_free_guidance (`bool`): |
|
whether to use classifier free guidance or not |
|
negative_prompt (`str` or `List[str]`, *optional*): |
|
The prompt or prompts not to guide the image generation. If not defined, one has to pass |
|
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is |
|
less than `1`). |
|
prompt_embeds (`torch.FloatTensor`, *optional*): |
|
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not |
|
provided, text embeddings will be generated from `prompt` input argument. |
|
negative_prompt_embeds (`torch.FloatTensor`, *optional*): |
|
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt |
|
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input |
|
argument. |
|
lora_scale (`float`, *optional*): |
|
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. |
|
clip_skip (`int`, *optional*): |
|
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that |
|
the output of the pre-final layer will be used for computing the prompt embeddings. |
|
""" |
|
|
|
|
|
if lora_scale is not None and isinstance(self, LoraLoaderMixin): |
|
self._lora_scale = lora_scale |
|
|
|
|
|
if not USE_PEFT_BACKEND: |
|
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) |
|
else: |
|
scale_lora_layers(self.text_encoder, lora_scale) |
|
|
|
if prompt is not None and isinstance(prompt, str): |
|
batch_size = 1 |
|
elif prompt is not None and isinstance(prompt, list): |
|
batch_size = len(prompt) |
|
else: |
|
batch_size = prompt_embeds.shape[0] |
|
|
|
if prompt_embeds is None: |
|
|
|
if isinstance(self, TextualInversionLoaderMixin): |
|
prompt = self.maybe_convert_prompt(prompt, self.tokenizer) |
|
|
|
text_inputs = self.tokenizer( |
|
prompt, |
|
padding="max_length", |
|
max_length=self.tokenizer.model_max_length, |
|
truncation=True, |
|
return_tensors="pt", |
|
) |
|
text_input_ids = text_inputs.input_ids |
|
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids |
|
|
|
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( |
|
text_input_ids, untruncated_ids |
|
): |
|
removed_text = self.tokenizer.batch_decode( |
|
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] |
|
) |
|
logger.warning( |
|
"The following part of your input was truncated because CLIP can only handle sequences up to" |
|
f" {self.tokenizer.model_max_length} tokens: {removed_text}" |
|
) |
|
|
|
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: |
|
attention_mask = text_inputs.attention_mask.to(device) |
|
else: |
|
attention_mask = None |
|
|
|
if clip_skip is None: |
|
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) |
|
prompt_embeds = prompt_embeds[0] |
|
else: |
|
prompt_embeds = self.text_encoder( |
|
text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True |
|
) |
|
|
|
|
|
|
|
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] |
|
|
|
|
|
|
|
|
|
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) |
|
|
|
if self.text_encoder is not None: |
|
prompt_embeds_dtype = self.text_encoder.dtype |
|
elif self.unet is not None: |
|
prompt_embeds_dtype = self.unet.dtype |
|
else: |
|
prompt_embeds_dtype = prompt_embeds.dtype |
|
|
|
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) |
|
|
|
bs_embed, seq_len, _ = prompt_embeds.shape |
|
|
|
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) |
|
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) |
|
|
|
|
|
if do_classifier_free_guidance and negative_prompt_embeds is None: |
|
uncond_tokens: List[str] |
|
if negative_prompt is None: |
|
uncond_tokens = [""] * batch_size |
|
elif prompt is not None and type(prompt) is not type(negative_prompt): |
|
raise TypeError( |
|
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" |
|
f" {type(prompt)}." |
|
) |
|
elif isinstance(negative_prompt, str): |
|
uncond_tokens = [negative_prompt] |
|
elif batch_size != len(negative_prompt): |
|
raise ValueError( |
|
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" |
|
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" |
|
" the batch size of `prompt`." |
|
) |
|
else: |
|
uncond_tokens = negative_prompt |
|
|
|
|
|
if isinstance(self, TextualInversionLoaderMixin): |
|
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) |
|
|
|
max_length = prompt_embeds.shape[1] |
|
uncond_input = self.tokenizer( |
|
uncond_tokens, |
|
padding="max_length", |
|
max_length=max_length, |
|
truncation=True, |
|
return_tensors="pt", |
|
) |
|
|
|
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: |
|
attention_mask = uncond_input.attention_mask.to(device) |
|
else: |
|
attention_mask = None |
|
|
|
negative_prompt_embeds = self.text_encoder( |
|
uncond_input.input_ids.to(device), |
|
attention_mask=attention_mask, |
|
) |
|
negative_prompt_embeds = negative_prompt_embeds[0] |
|
|
|
if do_classifier_free_guidance: |
|
|
|
seq_len = negative_prompt_embeds.shape[1] |
|
|
|
negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) |
|
|
|
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) |
|
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) |
|
|
|
if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: |
|
|
|
unscale_lora_layers(self.text_encoder, lora_scale) |
|
|
|
return prompt_embeds, negative_prompt_embeds |
|
|
|
def decode_latents(self, latents): |
|
latents = 1 / self.vae.config.scaling_factor * latents |
|
image = self.vae.decode(latents, return_dict=False)[0] |
|
image = (image / 2 + 0.5).clamp(0, 1) |
|
|
|
image = image.cpu().permute(0, 2, 3, 1).float().numpy() |
|
return image |
|
|