import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import paddle
import paddle.nn.functional as F

from ..configuration_utils import ConfigMixin, FrozenDict, register_to_config
from ..utils import _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS, BaseOutput, deprecate
from .scheduling_utils import SchedulerMixin


@dataclass
class DDPMSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of the previous timestep. `prev_sample` should be used as the next model input
            in the denoising loop.
        pred_original_sample (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample (x_{0}) based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    """

    prev_sample: paddle.Tensor
    pred_original_sample: Optional[paddle.Tensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time from t = [0,1].

    Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
    to that part of the diffusion process.

    Args:
        num_diffusion_timesteps (`int`): the number of betas to produce.
        max_beta (`float`): the maximum beta to use; use values lower than 1 to
                     prevent singularities.

    Returns:
        betas (`paddle.Tensor`): the betas used by the scheduler to step the model outputs
    """

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return paddle.to_tensor(betas, dtype="float32")
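# (illustrative) `betas_for_alpha_bar(1000)` yields a length-1000 float32 tensor of
# betas, each capped at `max_beta`; `DDPMScheduler` below uses this helper for the
# "squaredcos_cap_v2" schedule.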


class DDPMScheduler(SchedulerMixin, ConfigMixin):
    """
    Denoising diffusion probabilistic models (DDPMs) explore the connections between denoising score matching and
    Langevin dynamics sampling.

    [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
    function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
    [`SchedulerMixin`] provides general loading and saving functionality via the [`~SchedulerMixin.save_pretrained`]
    and [`~SchedulerMixin.from_pretrained`] functions.

    For more details, see the original paper: https://arxiv.org/abs/2006.11239

    Args:
        num_train_timesteps (`int`): number of diffusion steps used to train the model.
        beta_start (`float`): the starting `beta` value of inference.
        beta_end (`float`): the final `beta` value.
        beta_schedule (`str`):
            the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
            `linear`, `scaled_linear`, `squaredcos_cap_v2`, or `sigmoid`.
        trained_betas (`np.ndarray`, optional):
            option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end`, etc.
        variance_type (`str`):
            options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`,
            `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`.
        clip_sample (`bool`, default `True`):
            option to clip the predicted sample between -1 and 1 for numerical stability.
        prediction_type (`str`, default `epsilon`, optional):
            prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion
            process), `sample` (directly predicting the denoised sample) or `v_prediction` (see section 2.4 of
            https://imagen.research.google/video/paper.pdf)
    """

    _compatibles = _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS.copy()
    _deprecated_kwargs = ["predict_epsilon"]
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        **kwargs,
    ):
        message = (
            "Please make sure to instantiate your scheduler with `prediction_type` instead. E.g. `scheduler ="
            " DDPMScheduler.from_pretrained(<model_id>, prediction_type='epsilon')`."
        )
        predict_epsilon = deprecate("predict_epsilon", "0.13.0", message, take_from=kwargs)
        if predict_epsilon is not None:
            self.register_to_config(prediction_type="epsilon" if predict_epsilon else "sample")

        if trained_betas is not None:
            self.betas = paddle.to_tensor(trained_betas, dtype="float32")
        elif beta_schedule == "linear":
            self.betas = paddle.linspace(beta_start, beta_end, num_train_timesteps, dtype="float32")
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model
            self.betas = paddle.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype="float32") ** 2
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        elif beta_schedule == "sigmoid":
            # GeoDiff sigmoid schedule
            betas = paddle.linspace(-6, 6, num_train_timesteps)
            self.betas = F.sigmoid(betas) * (beta_end - beta_start) + beta_start
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = paddle.cumprod(self.alphas, 0)
        self.one = paddle.to_tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = paddle.to_tensor(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: paddle.Tensor, timestep: Optional[int] = None) -> paddle.Tensor:
        """
        Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
        current timestep.

        Args:
            sample (`paddle.Tensor`): input sample
            timestep (`int`, optional): current timestep

        Returns:
            `paddle.Tensor`: scaled input sample
        """
        return sample

    def set_timesteps(self, num_inference_steps: int):
        """
        Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.

        Args:
            num_inference_steps (`int`):
                the number of diffusion steps used when generating samples with a pre-trained model.
        """
        num_inference_steps = min(self.config.num_train_timesteps, num_inference_steps)
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(
            0, self.config.num_train_timesteps, self.config.num_train_timesteps // self.num_inference_steps
        )[::-1].copy()
        self.timesteps = paddle.to_tensor(timesteps)
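        # e.g. (illustrative): with `num_train_timesteps=1000`, `set_timesteps(50)`
        # uses a stride of 1000 // 50 = 20, so `self.timesteps` becomes
        # [980, 960, ..., 20, 0].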
    def _get_variance(self, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one

        # For t > 0, compute the predicted variance (see formula (6) and (7) from
        # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get the
        # previous sample: x_{t-1} ~ N(pred_prev_sample, variance)
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * self.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = paddle.clip(variance, min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = paddle.log(paddle.clip(variance, min=1e-20))
            variance = paddle.exp(0.5 * variance)
        elif variance_type == "fixed_large":
            variance = self.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = paddle.log(self.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # interpolate between the posterior variance (min) and beta_t (max)
            # using the model's variance output
            min_log = variance
            max_log = self.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: paddle.Tensor,
        timestep: int,
        sample: paddle.Tensor,
        generator=None,
        return_dict: bool = True,
        **kwargs,
    ) -> Union[DDPMSchedulerOutput, Tuple]:
        """
        Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            model_output (`paddle.Tensor`): direct output from learned diffusion model.
            timestep (`int`): current discrete timestep in the diffusion chain.
            sample (`paddle.Tensor`):
                current instance of sample being created by diffusion process.
            generator: random number generator.
            return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class

        Returns:
            [`~schedulers.scheduling_ddpm.DDPMSchedulerOutput`] or `tuple`:
            [`~schedulers.scheduling_ddpm.DDPMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
            returning a tuple, the first element is the sample tensor.
        """
        message = (
            "Please make sure to instantiate your scheduler with `prediction_type` instead. E.g. `scheduler ="
            " DDPMScheduler.from_pretrained(<model_id>, prediction_type='epsilon')`."
        )
        predict_epsilon = deprecate("predict_epsilon", "0.13.0", message, take_from=kwargs)
        if predict_epsilon is not None:
            new_config = dict(self.config)
            new_config["prediction_type"] = "epsilon" if predict_epsilon else "sample"
            self._internal_dict = FrozenDict(new_config)

        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
            # `paddle.split` takes the *number* of sections (unlike `torch.split`,
            # which takes the section size), so split the 2*C channels into 2 parts
            model_output, predicted_variance = paddle.split(model_output, 2, axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise, also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or"
                " `v_prediction` for the DDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = paddle.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * self.betas[t]) / beta_prod_t
        current_sample_coeff = self.alphas[t] ** (0.5) * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            # NOTE: `paddle.randn` does not accept a `generator` argument; for
            # reproducibility, seed Paddle's global RNG (e.g. `paddle.seed`) instead.
            variance_noise = paddle.randn(model_output.shape, dtype=model_output.dtype)
            if self.variance_type == "fixed_small_log":
                variance = self._get_variance(t, predicted_variance=predicted_variance) * variance_noise
            else:
                variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
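
    # (illustrative) with `return_dict=False`, `step` instead returns a plain 1-tuple:
    #
    #     (prev_sample,) = scheduler.step(noise_pred, t, sample, return_dict=False)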
    def add_noise(
        self,
        original_samples: paddle.Tensor,
        noise: paddle.Tensor,
        timesteps: paddle.Tensor,
    ) -> paddle.Tensor:
        # make sure alphas_cumprod has the same dtype as original_samples
        self.alphas_cumprod = self.alphas_cumprod.cast(original_samples.dtype)

        sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        # broadcast the per-timestep scalars over the sample dimensions
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
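
    # `add_noise` applies the closed-form forward process q(x_t | x_0):
    #     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise
    # e.g. (illustrative) a one-step noising at t=999:
    #     noisy = scheduler.add_noise(x0, paddle.randn(x0.shape), paddle.to_tensor([999]))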
    def get_velocity(self, sample: paddle.Tensor, noise: paddle.Tensor, timesteps: paddle.Tensor) -> paddle.Tensor:
        # make sure alphas_cumprod has the same dtype as sample
        self.alphas_cumprod = self.alphas_cumprod.cast(sample.dtype)

        sqrt_alpha_prod = self.alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(sample.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - self.alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
        return velocity
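
    # `get_velocity` computes the v-prediction target of Salimans & Ho (2022),
    # https://arxiv.org/abs/2202.00512:
    #     v = sqrt(alpha_bar_t) * noise - sqrt(1 - alpha_bar_t) * sample
    # i.e. the quantity a model trained with `prediction_type="v_prediction"` predicts.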
    def __len__(self):
        return self.config.num_train_timesteps