from functools import partial

import numpy as np
import torch
import torch.nn as nn

from .positional_embedding import offset_sequence_embedding
from .positional_embedding import position_sequence_embedding
from .positional_embedding import timestep_embedding


def modulate(x, shift, scale):
    """Apply adaLN shift/scale modulation, broadcasting over the sequence dimension."""
    return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)


class TimestepEmbedder(nn.Module):
    """
    Embeds scalar timesteps into vector representations.
    """

    def __init__(self, hidden_size, frequency_embedding_size=256):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(frequency_embedding_size, hidden_size, bias=True),
            nn.SiLU(),
            nn.Linear(hidden_size, hidden_size, bias=True),
        )
        self.frequency_embedding_size = frequency_embedding_size

    def forward(self, t):
        t_freq = timestep_embedding(t, self.frequency_embedding_size)
        t_emb = self.mlp(t_freq)
        return t_emb


class LabelEmbedder(nn.Module):
    """
    Embeds class labels into vector representations. Also handles label dropout
    for classifier-free guidance.
    """

    def __init__(self, num_classes, hidden_size, dropout_prob):
        super().__init__()
        use_cfg_embedding = dropout_prob > 0
        self.embedding_table = nn.Embedding(
            num_classes + use_cfg_embedding,
            hidden_size,
        )
        self.num_classes = num_classes
        self.dropout_prob = dropout_prob

    def token_drop(self, labels, force_drop_ids=None):
        """
        Drops labels to enable classifier-free guidance.
        """
        if force_drop_ids is None:
            drop_ids = (
                torch.rand(labels.shape[0], device=labels.device) < self.dropout_prob
            )
        else:
            drop_ids = force_drop_ids == 1
        labels = torch.where(drop_ids, self.num_classes, labels)
        return labels

    def forward(self, labels, train, force_drop_ids=None):
        use_dropout = self.dropout_prob > 0
        if (train and use_dropout) or (force_drop_ids is not None):
            labels = self.token_drop(labels, force_drop_ids)
        embeddings = self.embedding_table(labels)
        return embeddings


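# Illustrative note (not part of the original file): with dropout_prob > 0 the embedding
# table gets one extra row at index num_classes, which acts as the "null" label used for
# classifier-free guidance. A minimal sketch, assuming small example sizes:
#
#     embedder = LabelEmbedder(num_classes=10, hidden_size=384, dropout_prob=0.1)
#     y = torch.tensor([3, 7])
#     embedder(y, train=True)                                         # each label may be replaced by index 10 with prob 0.1
#     embedder(y, train=False, force_drop_ids=torch.tensor([1, 0]))   # drops only the first label

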
class Mlp(nn.Module):
    """MLP as used in Vision Transformer, MLP-Mixer and related networks."""

    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        act_layer=nn.GELU,
        norm_layer=None,
        bias=True,
        drop=0.0,
        use_conv=False,
    ):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        bias = (bias, bias)
        drop_probs = (drop, drop)
        linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear

        self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0])
        self.act = act_layer()
        self.drop1 = nn.Dropout(drop_probs[0])
        self.norm = (
            norm_layer(hidden_features) if norm_layer is not None else nn.Identity()
        )
        self.fc2 = linear_layer(hidden_features, out_features, bias=bias[1])
        self.drop2 = nn.Dropout(drop_probs[1])

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop1(x)
        x = self.norm(x)
        x = self.fc2(x)
        x = self.drop2(x)
        return x


class DiTBlock(nn.Module):
    """
    A DiT block with adaptive layer norm zero (adaLN-Zero) conditioning.
    """

    def __init__(self, hidden_size, num_heads, mlp_ratio=4.0, **block_kwargs):
        super().__init__()
        self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.attn = nn.MultiheadAttention(
            hidden_size,
            num_heads=num_heads,
            batch_first=True,
            **block_kwargs,
        )
        self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        mlp_hidden_dim = int(hidden_size * mlp_ratio)
        approx_gelu = lambda: nn.GELU(approximate="tanh")

        self.mlp = Mlp(
            in_features=hidden_size,
            hidden_features=mlp_hidden_dim,
            act_layer=approx_gelu,
            drop=0,
        )
        self.adaLN_modulation = nn.Sequential(
            nn.SiLU(),
            nn.Linear(hidden_size, 6 * hidden_size, bias=True),
        )

    def forward(self, x, c, attn_mask=None):
        (
            shift_msa,
            scale_msa,
            gate_msa,
            shift_mlp,
            scale_mlp,
            gate_mlp,
        ) = self.adaLN_modulation(c).chunk(6, dim=1)
        modulated = modulate(self.norm1(x), shift_msa, scale_msa)
        x = (
            x
            + gate_msa.unsqueeze(1)
            * self.attn(
                modulated,
                modulated,
                modulated,
                need_weights=False,
                attn_mask=attn_mask,
            )[0]
        )
        x = x + gate_mlp.unsqueeze(1) * self.mlp(
            modulate(self.norm2(x), shift_mlp, scale_mlp),
        )
        return x


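# A minimal usage sketch for DiTBlock (illustrative only; the sizes are assumptions
# based on the forward signatures above, not taken from the original file):
#
#     block = DiTBlock(hidden_size=384, num_heads=6)
#     x = torch.randn(4, 128, 384)   # (batch, sequence length, hidden_size)
#     c = torch.randn(4, 384)        # per-sample conditioning vector (t_emb + y_emb)
#     out = block(x, c)              # same shape as x
#
# When constructed inside DiT, the adaLN_modulation projections are zero-initialized,
# so every block starts out as the identity mapping (the adaLN-Zero idea).

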
class FinalLayer(nn.Module):
    """
    The final layer of DiT.
    """

    def __init__(self, hidden_size, out_channels):
        super().__init__()
        self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.linear = nn.Linear(hidden_size, out_channels, bias=True)
        self.adaLN_modulation = nn.Sequential(
            nn.SiLU(),
            nn.Linear(hidden_size, 2 * hidden_size, bias=True),
        )

    def forward(self, x, c):
        shift, scale = self.adaLN_modulation(c).chunk(2, dim=1)
        x = modulate(self.norm_final(x), shift, scale)
        x = self.linear(x)
        return x


class FirstLayer(nn.Module):
    """
    Embeds scalar positions into vector representations and concatenates context.
    """

    def __init__(
        self,
        hidden_size,
        context_size,
        in_channels,
        frequency_embedding_size=128,
    ):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(
                in_channels * frequency_embedding_size
                + frequency_embedding_size
                + context_size,
                hidden_size,
                bias=True,
            ),
        )
        self.frequency_embedding_size = frequency_embedding_size
        self.playfield_size = nn.Parameter(
            torch.tensor((512, 384), dtype=torch.float32),
            requires_grad=False,
        )

    def forward(self, x, o, c):
        # Scale positions by the playfield size before sinusoidal embedding.
        x_freq = position_sequence_embedding(
            x * self.playfield_size,
            self.frequency_embedding_size,
        )
        # Embed offsets (milliseconds) at a 10 ms resolution.
        o_freq = offset_sequence_embedding(o / 10, self.frequency_embedding_size)
        xoc = torch.concatenate((x_freq, o_freq, c), -1)
        xoc_emb = self.mlp(xoc)
        return xoc_emb


class DiT(nn.Module):
    """
    Diffusion model with a Transformer backbone.
    """

    def __init__(
        self,
        in_channels=2,
        context_size=142,
        hidden_size=1152,
        depth=28,
        num_heads=16,
        mlp_ratio=4.0,
        class_dropout_prob=0.1,
        num_classes=1000,
        learn_sigma=True,
    ):
        super().__init__()
        self.learn_sigma = learn_sigma
        self.in_channels = in_channels
        self.context_size = context_size
        self.out_channels = in_channels * 2 if learn_sigma else in_channels
        self.num_heads = num_heads

        self.xoc_embedder = FirstLayer(hidden_size, context_size, in_channels)
        self.t_embedder = TimestepEmbedder(hidden_size)
        self.y_embedder = LabelEmbedder(num_classes, hidden_size, class_dropout_prob)

        self.blocks = nn.ModuleList(
            [
                DiTBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio)
                for _ in range(depth)
            ],
        )
        self.final_layer = FinalLayer(hidden_size, self.out_channels)
        self.initialize_weights()

    def initialize_weights(self):
        # Initialize transformer layers:
        def _basic_init(module):
            if isinstance(module, nn.Linear):
                torch.nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

        self.apply(_basic_init)

        # Initialize the input embedding MLP:
        nn.init.normal_(self.xoc_embedder.mlp[0].weight, std=0.02)

        # Initialize label embedding table:
        nn.init.normal_(self.y_embedder.embedding_table.weight, std=0.02)

        # Initialize timestep embedding MLP:
        nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
        nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)

        # Zero-out adaLN modulation layers in DiT blocks:
        for block in self.blocks:
            nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
            nn.init.constant_(block.adaLN_modulation[-1].bias, 0)

        # Zero-out output layers:
        nn.init.constant_(self.final_layer.adaLN_modulation[-1].weight, 0)
        nn.init.constant_(self.final_layer.adaLN_modulation[-1].bias, 0)
        nn.init.constant_(self.final_layer.linear.weight, 0)
        nn.init.constant_(self.final_layer.linear.bias, 0)

    def forward(self, x, t, o, c, y, attn_mask=None):
        """
        Forward pass of DiT.
        x: (N, C, T) tensor of sequence inputs
        t: (N,) tensor of diffusion timesteps
        o: (N, T) tensor of sequence offsets in milliseconds
        c: (N, E, T) tensor of sequence context
        y: (N,) tensor of class labels
        """
        x = torch.swapaxes(x, 1, 2)
        c = torch.swapaxes(c, 1, 2)
        x = self.xoc_embedder(x, o, c)
        t = self.t_embedder(t)
        y = self.y_embedder(y, self.training)
        b = t + y
        for block in self.blocks:
            x = block(x, b, attn_mask)
        x = self.final_layer(x, b)
        x = torch.swapaxes(x, 1, 2)
        return x

    def forward_with_cfg(self, x, t, o, c, y, cfg_scale, attn_mask=None):
        """
        Forward pass of DiT, but also batches the unconditional forward pass
        for classifier-free guidance.
        """
        # The two halves of the batch are expected to carry identical inputs paired with
        # conditional labels (first half) and null labels (second half) in y.
        half = x[: len(x) // 2]
        combined = torch.cat([half, half], dim=0)
        model_out = self.forward(combined, t, o, c, y, attn_mask)

        # Guide only the predicted noise channels; pass the remaining channels
        # (e.g. learned sigma) through unchanged.
        eps, rest = model_out[:, : self.in_channels], model_out[:, self.in_channels :]

        cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
        half_eps = uncond_eps + cfg_scale * (cond_eps - uncond_eps)
        eps = torch.cat([half_eps, half_eps], dim=0)
        return torch.cat([eps, rest], dim=1)


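# A minimal sampling-time sketch for forward_with_cfg. The batch construction below is an
# assumption that follows the convention inside the method (duplicated inputs, with the
# second half of y set to the "null" class index num_classes); variable names are placeholders:
#
#     x = torch.cat([x_cond, x_cond], dim=0)
#     t = torch.cat([t_cond, t_cond], dim=0)
#     o = torch.cat([o_cond, o_cond], dim=0)
#     c = torch.cat([c_cond, c_cond], dim=0)
#     y_null = torch.full_like(y_cond, model.y_embedder.num_classes)
#     y = torch.cat([y_cond, y_null], dim=0)
#     out = model.forward_with_cfg(x, t, o, c, y, cfg_scale=4.0)
#
# The returned tensor has the same layout as forward(); only the first in_channels output
# channels are guided.

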
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False, extra_tokens=0):
    """
    grid_size: int of the grid height and width
    return:
    pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
    """
    grid_h = np.arange(grid_size, dtype=np.float32)
    grid_w = np.arange(grid_size, dtype=np.float32)
    grid = np.meshgrid(grid_w, grid_h)
    grid = np.stack(grid, axis=0)

    grid = grid.reshape([2, 1, grid_size, grid_size])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token and extra_tokens > 0:
        pos_embed = np.concatenate(
            [np.zeros([extra_tokens, embed_dim]), pos_embed],
            axis=0,
        )
    return pos_embed


def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    assert embed_dim % 2 == 0

    # Use half of the dimensions to encode each grid axis.
    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)

    emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
    return emb


def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (M,)
    out: (M, D)
    """
    assert embed_dim % 2 == 0
    omega = np.arange(embed_dim // 2, dtype=np.float64)
    omega /= embed_dim / 2.0
    omega = 1.0 / 10000**omega  # (D/2,)

    pos = pos.reshape(-1)  # (M,)
    out = np.einsum("m,d->md", pos, omega)  # (M, D/2), outer product

    emb_sin = np.sin(out)  # (M, D/2)
    emb_cos = np.cos(out)  # (M, D/2)

    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
    return emb


def DiT_XL(**kwargs: dict) -> DiT:
    return DiT(depth=28, hidden_size=1152, num_heads=16, **kwargs)


def DiT_L(**kwargs: dict) -> DiT:
    return DiT(depth=24, hidden_size=1024, num_heads=16, **kwargs)


def DiT_B(**kwargs: dict) -> DiT:
    return DiT(depth=12, hidden_size=768, num_heads=12, **kwargs)


def DiT_S(**kwargs: dict) -> DiT:
    return DiT(depth=12, hidden_size=384, num_heads=6, **kwargs)


DiT_models = {
    "DiT-XL": DiT_XL,
    "DiT-L": DiT_L,
    "DiT-B": DiT_B,
    "DiT-S": DiT_S,
}
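

# A minimal smoke test, intended to be run as a module (e.g. `python -m <package>.<module>`)
# so the relative import of positional_embedding resolves. The sizes below are arbitrary
# assumptions chosen only to exercise the tensor shapes documented in DiT.forward.
if __name__ == "__main__":
    model = DiT_models["DiT-S"](context_size=142, num_classes=10, learn_sigma=True)
    model.eval()

    n, t_len = 2, 32                               # batch size and sequence length
    x = torch.randn(n, model.in_channels, t_len)   # (N, C, T) noised positions
    o = torch.rand(n, t_len) * 1000                # (N, T) offsets in milliseconds
    c = torch.randn(n, model.context_size, t_len)  # (N, E, T) per-token context
    t = torch.randint(0, 1000, (n,))               # (N,) diffusion timesteps
    y = torch.randint(0, 10, (n,))                 # (N,) class labels

    with torch.no_grad():
        out = model(x, t, o, c, y)
    print(out.shape)  # expected: (N, out_channels, T), here (2, 4, 32)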