""" | |
wild mixture of | |
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py | |
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py | |
https://github.com/CompVis/taming-transformers | |
-- merci | |
""" | |
import sys | |
import os | |
import torch | |
import torch.nn as nn | |
import numpy as np | |
from contextlib import contextmanager | |
from functools import partial | |
from tqdm import tqdm | |
from audioldm.utils import exists, default, count_params, instantiate_from_config | |
from audioldm.latent_diffusion.ema import LitEma | |
from audioldm.latent_diffusion.util import ( | |
make_beta_schedule, | |
extract_into_tensor, | |
noise_like, | |
) | |
import soundfile as sf | |
import os | |

__conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"}


def disabled_train(self, mode=True):
    """Overwrite model.train with this function to make sure train/eval mode
    does not change anymore."""
    return self


def uniform_on_device(r1, r2, shape, device):
    return (r1 - r2) * torch.rand(*shape, device=device) + r2

class DiffusionWrapper(nn.Module):
    def __init__(self, diff_model_config, conditioning_key):
        super().__init__()
        self.diffusion_model = instantiate_from_config(diff_model_config)
        self.conditioning_key = conditioning_key
        assert self.conditioning_key in [
            None,
            "concat",
            "crossattn",
            "hybrid",
            "adm",
            "film",
        ]

    def forward(
        self, x, t, c_concat: list = None, c_crossattn: list = None, c_film: list = None
    ):
        x = x.contiguous()
        t = t.contiguous()
        if self.conditioning_key is None:
            out = self.diffusion_model(x, t)
        elif self.conditioning_key == "concat":
            xc = torch.cat([x] + c_concat, dim=1)
            out = self.diffusion_model(xc, t)
        elif self.conditioning_key == "crossattn":
            cc = torch.cat(c_crossattn, 1)
            out = self.diffusion_model(x, t, context=cc)
        elif self.conditioning_key == "hybrid":
            xc = torch.cat([x] + c_concat, dim=1)
            cc = torch.cat(c_crossattn, 1)
            out = self.diffusion_model(xc, t, context=cc)
        elif self.conditioning_key == "film":
            # The condition is assumed to be a single global token; it will pass
            # through a linear layer and be added to the time embedding (FiLM).
            cc = c_film[0].squeeze(1)  # only has one token
            out = self.diffusion_model(x, t, y=cc)
        elif self.conditioning_key == "adm":
            cc = c_crossattn[0]
            out = self.diffusion_model(x, t, y=cc)
        else:
            raise NotImplementedError()
        return out
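

# Illustrative call conventions for DiffusionWrapper (the tensors named below
# are hypothetical placeholders, not defined in this file):
#   wrapper(x, t)                          # unconditional
#   wrapper(x, t, c_concat=[cond_map])     # channel-wise concatenation
#   wrapper(x, t, c_crossattn=[text_emb])  # cross-attention context
#   wrapper(x, t, c_film=[global_token])   # FiLM via the time embedding
# Conditioning tensors are always wrapped in lists and concatenated inside.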


class DDPM(nn.Module):
    # classic DDPM with Gaussian diffusion, in image space
    def __init__(
        self,
        unet_config,
        timesteps=1000,
        beta_schedule="linear",
        loss_type="l2",
        ckpt_path=None,
        ignore_keys=[],
        load_only_unet=False,
        monitor="val/loss",
        use_ema=True,
        first_stage_key="image",
        latent_t_size=256,
        latent_f_size=16,
        channels=3,
        log_every_t=100,
        clip_denoised=True,
        linear_start=1e-4,
        linear_end=2e-2,
        cosine_s=8e-3,
        given_betas=None,
        original_elbo_weight=0.0,
        v_posterior=0.0,  # weight for choosing posterior variance as sigma = (1 - v) * beta_tilde + v * beta
        l_simple_weight=1.0,
        conditioning_key=None,
        parameterization="eps",  # all assuming fixed variance schedules
        scheduler_config=None,
        use_positional_encodings=False,
        learn_logvar=False,
        logvar_init=0.0,
    ):
        super().__init__()
        assert parameterization in [
            "eps",
            "x0",
        ], 'currently only supporting "eps" and "x0"'
        self.parameterization = parameterization
        self.state = None
# print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") | |
self.cond_stage_model = None | |
self.clip_denoised = clip_denoised | |
self.log_every_t = log_every_t | |
self.first_stage_key = first_stage_key | |
self.latent_t_size = latent_t_size | |
self.latent_f_size = latent_f_size | |
self.channels = channels | |
self.use_positional_encodings = use_positional_encodings | |
self.model = DiffusionWrapper(unet_config, conditioning_key) | |
count_params(self.model, verbose=True) | |
self.use_ema = use_ema | |
if self.use_ema: | |
self.model_ema = LitEma(self.model) | |
# print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") | |
self.use_scheduler = scheduler_config is not None | |
if self.use_scheduler: | |
self.scheduler_config = scheduler_config | |
self.v_posterior = v_posterior | |
self.original_elbo_weight = original_elbo_weight | |
self.l_simple_weight = l_simple_weight | |
if monitor is not None: | |
self.monitor = monitor | |
self.register_schedule( | |
given_betas=given_betas, | |
beta_schedule=beta_schedule, | |
timesteps=timesteps, | |
linear_start=linear_start, | |
linear_end=linear_end, | |
cosine_s=cosine_s, | |
) | |
self.loss_type = loss_type | |
self.learn_logvar = learn_logvar | |
self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) | |
if self.learn_logvar: | |
self.logvar = nn.Parameter(self.logvar, requires_grad=True) | |
else: | |
self.logvar = nn.Parameter(self.logvar, requires_grad=False) | |
self.logger_save_dir = None | |
self.logger_project = None | |
self.logger_version = None | |
self.label_indices_total = None | |
        # Default metric values, so checkpoint selection never fails to find a monitored metric
        self.metrics_buffer = {
            "val/kullback_leibler_divergence_sigmoid": 15.0,
            "val/kullback_leibler_divergence_softmax": 10.0,
            "val/psnr": 0.0,
            "val/ssim": 0.0,
            "val/inception_score_mean": 1.0,
            "val/inception_score_std": 0.0,
            "val/kernel_inception_distance_mean": 0.0,
            "val/kernel_inception_distance_std": 0.0,
            "val/frechet_inception_distance": 133.0,
            "val/frechet_audio_distance": 32.0,
        }
        self.initial_learning_rate = None

    def get_log_dir(self):
        if (
            self.logger_save_dir is None
            and self.logger_project is None
            and self.logger_version is None
        ):
            return os.path.join(
                self.logger.save_dir, self.logger._project, self.logger.version
            )
        else:
            return os.path.join(
                self.logger_save_dir, self.logger_project, self.logger_version
            )

    def set_log_dir(self, save_dir, project, version):
        self.logger_save_dir = save_dir
        self.logger_project = project
        self.logger_version = version

    def register_schedule(
        self,
        given_betas=None,
        beta_schedule="linear",
        timesteps=1000,
        linear_start=1e-4,
        linear_end=2e-2,
        cosine_s=8e-3,
    ):
        if exists(given_betas):
            betas = given_betas
        else:
            betas = make_beta_schedule(
                beta_schedule,
                timesteps,
                linear_start=linear_start,
                linear_end=linear_end,
                cosine_s=cosine_s,
            )
        alphas = 1.0 - betas
        alphas_cumprod = np.cumprod(alphas, axis=0)
        alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])

        (timesteps,) = betas.shape
        self.num_timesteps = int(timesteps)
        self.linear_start = linear_start
        self.linear_end = linear_end
        assert (
            alphas_cumprod.shape[0] == self.num_timesteps
        ), "alphas have to be defined for each timestep"

        to_torch = partial(torch.tensor, dtype=torch.float32)

        self.register_buffer("betas", to_torch(betas))
        self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod))
        self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod)))
        self.register_buffer(
            "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod))
        )
        self.register_buffer(
            "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod))
        )
        self.register_buffer(
            "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod))
        )
        self.register_buffer(
            "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1))
        )

        # calculations for posterior q(x_{t-1} | x_t, x_0)
        posterior_variance = (1 - self.v_posterior) * betas * (
            1.0 - alphas_cumprod_prev
        ) / (1.0 - alphas_cumprod) + self.v_posterior * betas
        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
        self.register_buffer("posterior_variance", to_torch(posterior_variance))
        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
        self.register_buffer(
            "posterior_log_variance_clipped",
            to_torch(np.log(np.maximum(posterior_variance, 1e-20))),
        )
        self.register_buffer(
            "posterior_mean_coef1",
            to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)),
        )
        self.register_buffer(
            "posterior_mean_coef2",
            to_torch(
                (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod)
            ),
        )
        if self.parameterization == "eps":
            lvlb_weights = self.betas**2 / (
                2
                * self.posterior_variance
                * to_torch(alphas)
                * (1 - self.alphas_cumprod)
            )
        elif self.parameterization == "x0":
            lvlb_weights = (
                0.5
                * np.sqrt(torch.Tensor(alphas_cumprod))
                / (2.0 * 1 - torch.Tensor(alphas_cumprod))
            )
        else:
            raise NotImplementedError("mu not supported")
        # TODO how to choose this term
        lvlb_weights[0] = lvlb_weights[1]
        self.register_buffer("lvlb_weights", lvlb_weights, persistent=False)
        assert not torch.isnan(self.lvlb_weights).any()

    @contextmanager
    def ema_scope(self, context=None):
        if self.use_ema:
            self.model_ema.store(self.model.parameters())
            self.model_ema.copy_to(self.model)
            if context is not None:
                # print(f"{context}: Switched to EMA weights")
                pass
        try:
            yield None
        finally:
            if self.use_ema:
                self.model_ema.restore(self.model.parameters())
                if context is not None:
                    # print(f"{context}: Restored training weights")
                    pass

    def q_mean_variance(self, x_start, t):
        """
        Get the distribution q(x_t | x_0).
        :param x_start: the [N x C x ...] tensor of noiseless inputs.
        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
        :return: A tuple (mean, variance, log_variance), all of x_start's shape.
        """
        mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
        variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
        log_variance = extract_into_tensor(
            self.log_one_minus_alphas_cumprod, t, x_start.shape
        )
        return mean, variance, log_variance
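
    # Example (illustrative shapes, not defined in this file): for x_start of
    # shape (N, C, T, F) and t = torch.full((N,), 10, dtype=torch.long),
    # q_mean_variance returns the moments of
    #   q(x_t | x_0) = N(sqrt(alpha_bar_t) * x_0, (1 - alpha_bar_t) * I).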

    def predict_start_from_noise(self, x_t, t, noise):
        # Invert q(x_t | x_0): x_0 = sqrt(1 / alpha_bar_t) * x_t - sqrt(1 / alpha_bar_t - 1) * noise
        return (
            extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
            - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
            * noise
        )

    def q_posterior(self, x_start, x_t, t):
        posterior_mean = (
            extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
            + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
        )
        posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = extract_into_tensor(
            self.posterior_log_variance_clipped, t, x_t.shape
        )
        return posterior_mean, posterior_variance, posterior_log_variance_clipped

    def p_mean_variance(self, x, t, clip_denoised: bool):
        model_out = self.model(x, t)
        if self.parameterization == "eps":
            x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
        elif self.parameterization == "x0":
            x_recon = model_out
        if clip_denoised:
            x_recon.clamp_(-1.0, 1.0)
        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(
            x_start=x_recon, x_t=x, t=t
        )
        return model_mean, posterior_variance, posterior_log_variance

    @torch.no_grad()
    def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
        b, *_, device = *x.shape, x.device
        model_mean, _, model_log_variance = self.p_mean_variance(
            x=x, t=t, clip_denoised=clip_denoised
        )
        noise = noise_like(x.shape, device, repeat_noise)
        # no noise when t == 0
        nonzero_mask = (
            (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))).contiguous()
        )
        return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
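
    # Sketch of a single reverse step (the names below are illustrative, not
    # defined in this file): for a batch x_t of shape (B, C, T, F) and a step
    # index k in [0, num_timesteps),
    #   t = torch.full((x_t.shape[0],), k, device=x_t.device, dtype=torch.long)
    #   x_tm1 = self.p_sample(x_t, t)  # x_{t-1} ~ N(mu_theta(x_t, t), sigma_t^2 I)
    # nonzero_mask above zeroes the noise term at t == 0, so the final step is
    # deterministic.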

    @torch.no_grad()
    def p_sample_loop(self, shape, return_intermediates=False):
        device = self.betas.device
        b = shape[0]
        img = torch.randn(shape, device=device)
        intermediates = [img]
        for i in tqdm(
            reversed(range(0, self.num_timesteps)),
            desc="Sampling t",
            total=self.num_timesteps,
        ):
            img = self.p_sample(
                img,
                torch.full((b,), i, device=device, dtype=torch.long),
                clip_denoised=self.clip_denoised,
            )
            if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
                intermediates.append(img)
        if return_intermediates:
            return img, intermediates
        return img

    @torch.no_grad()
    def sample(self, batch_size=16, return_intermediates=False):
        channels = self.channels
        shape = (batch_size, channels, self.latent_t_size, self.latent_f_size)
        return self.p_sample_loop(shape, return_intermediates=return_intermediates)

    def q_sample(self, x_start, t, noise=None):
        # Closed-form forward noising: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
        noise = default(noise, lambda: torch.randn_like(x_start))
        return (
            extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
            + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)
            * noise
        )

    def forward(self, x, *args, **kwargs):
        t = torch.randint(
            0, self.num_timesteps, (x.shape[0],), device=x.device
        ).long()
        return self.p_losses(x, t, *args, **kwargs)

    def get_input(self, batch, k):
        # fbank, log_magnitudes_stft, label_indices, fname, waveform, clip_label, text = batch
        fbank, log_magnitudes_stft, label_indices, fname, waveform, text = batch
        ret = {}
        ret["fbank"] = (
            fbank.unsqueeze(1).to(memory_format=torch.contiguous_format).float()
        )
        ret["stft"] = log_magnitudes_stft.to(
            memory_format=torch.contiguous_format
        ).float()
        # ret["clip_label"] = clip_label.to(memory_format=torch.contiguous_format).float()
        ret["waveform"] = waveform.to(memory_format=torch.contiguous_format).float()
        ret["text"] = list(text)
        ret["fname"] = fname
        return ret[k]
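

if __name__ == "__main__":
    # Minimal smoke-test sketch, not part of AudioLDM: "DummyUNet" and the tiny
    # schedule below are hypothetical stand-ins for the real UNet config that
    # instantiate_from_config would normally load.
    class DummyUNet(nn.Module):
        def __init__(self, channels=1):
            super().__init__()
            self.conv = nn.Conv2d(channels, channels, kernel_size=3, padding=1)

        def forward(self, x, t):
            # Predict "noise" from x_t; this stub ignores the timestep t.
            return self.conv(x)

    ddpm = DDPM(
        unet_config={"target": "__main__.DummyUNet", "params": {"channels": 1}},
        timesteps=10,
        channels=1,
        latent_t_size=8,
        latent_f_size=8,
        use_ema=False,
    )
    samples = ddpm.sample(batch_size=2)
    print(samples.shape)  # expected: torch.Size([2, 1, 8, 8])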