from typing import Optional, Tuple

import torch
import torch.nn.functional as F
from torch import nn

import xformers
import xformers.ops


class CrossAttention(nn.Module):
    r"""
    A cross attention layer.

    Parameters:
        query_dim (`int`): The number of channels in the query.
        cross_attention_dim (`int`, *optional*):
            The number of channels in the encoder_hidden_states. If not given, defaults to `query_dim`.
        heads (`int`, *optional*, defaults to 8): The number of heads to use for multi-head attention.
        dim_head (`int`, *optional*, defaults to 64): The number of channels in each head.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        bias (`bool`, *optional*, defaults to False):
            Set to `True` for the query, key, and value linear layers to contain a bias parameter.
        upcast_attention (`bool`, *optional*, defaults to False):
            Set to `True` to compute the attention scores in float32.
        upcast_softmax (`bool`, *optional*, defaults to False):
            Set to `True` to compute the softmax in float32.
        added_kv_proj_dim (`int`, *optional*):
            The number of input channels of the additional key/value projections. If given,
            `encoder_hidden_states` is projected through separate key/value layers and concatenated with the
            self-attention key/value states.
        norm_num_groups (`int`, *optional*):
            If given, the hidden states are normalized with `nn.GroupNorm` using this number of groups before the
            attention projections.
    """

    def __init__(
        self,
        query_dim: int,
        cross_attention_dim: Optional[int] = None,
        heads: int = 8,
        dim_head: int = 64,
        dropout: float = 0.0,
        bias: bool = False,
        upcast_attention: bool = False,
        upcast_softmax: bool = False,
        added_kv_proj_dim: Optional[int] = None,
        norm_num_groups: Optional[int] = None,
    ):
        super().__init__()
        inner_dim = dim_head * heads
        cross_attention_dim = cross_attention_dim if cross_attention_dim is not None else query_dim
        self.upcast_attention = upcast_attention
        self.upcast_softmax = upcast_softmax
        self.upcast_efficient_attention = False

        self.scale = dim_head**-0.5
        self.heads = heads

        # Sliced attention: only `_slice_size` rows of the batch * heads dimension are
        # processed per matmul, trading speed for lower peak memory. `None` disables slicing.
        self.sliceable_head_dim = heads
        self._slice_size = None
        self._use_memory_efficient_attention_xformers = False
        self.added_kv_proj_dim = added_kv_proj_dim

        if norm_num_groups is not None:
            self.group_norm = nn.GroupNorm(num_channels=inner_dim, num_groups=norm_num_groups, eps=1e-5, affine=True)
        else:
            self.group_norm = None

        self.to_q = nn.Linear(query_dim, inner_dim, bias=bias)
        self.to_k = nn.Linear(cross_attention_dim, inner_dim, bias=bias)
        self.to_v = nn.Linear(cross_attention_dim, inner_dim, bias=bias)

        if self.added_kv_proj_dim is not None:
            self.add_k_proj = nn.Linear(added_kv_proj_dim, cross_attention_dim)
            self.add_v_proj = nn.Linear(added_kv_proj_dim, cross_attention_dim)

        self.to_out = nn.ModuleList([])
        self.to_out.append(nn.Linear(inner_dim, query_dim))
        self.to_out.append(nn.Dropout(dropout))

    def reshape_heads_to_batch_dim(self, tensor):
        # (batch, seq_len, heads * head_dim) -> (batch * heads, seq_len, head_dim)
        batch_size, seq_len, dim = tensor.shape
        head_size = self.heads
        tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size).contiguous()
        tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size * head_size, seq_len, dim // head_size).contiguous()
        return tensor

    def reshape_heads_to_4d(self, tensor):
        # (batch, seq_len, heads * head_dim) -> (batch, seq_len, heads, head_dim)
        batch_size, seq_len, dim = tensor.shape
        head_size = self.heads
        tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size).contiguous()
        return tensor

    def reshape_batch_dim_to_heads(self, tensor):
        # (batch * heads, seq_len, head_dim) -> (batch, seq_len, heads * head_dim)
        batch_size, seq_len, dim = tensor.shape
        head_size = self.heads
        tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim).contiguous()
        tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size).contiguous()
        return tensor

    def reshape_4d_to_heads(self, tensor):
        # (batch, seq_len, heads, head_dim) -> (batch, seq_len, heads * head_dim)
        batch_size, seq_len, head_size, dim = tensor.shape
        tensor = tensor.reshape(batch_size, seq_len, dim * head_size).contiguous()
        return tensor

    def set_attention_slice(self, slice_size):
        if slice_size is not None and slice_size > self.sliceable_head_dim:
            raise ValueError(f"slice_size {slice_size} has to be smaller or equal to {self.sliceable_head_dim}.")

        self._slice_size = slice_size
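
    # Slicing sketch (values assumed for illustration): on an 8-head layer,
    # set_attention_slice(4) computes the attention scores for 4 of the
    # batch * heads rows at a time, lowering peak memory at the cost of extra
    # matmul calls:
    #
    #     attn.set_attention_slice(4)     # slice the softmax(QK^T)V computation
    #     attn.set_attention_slice(None)  # back to a single full-attention call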

    def forward(self, hidden_states, encoder_hidden_states=None, attention_mask=None):
        batch_size, sequence_length, _ = hidden_states.shape

        if self.group_norm is not None:
            hidden_states = self.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = self.to_q(hidden_states)
        dim = query.shape[-1]
        query = self.reshape_heads_to_batch_dim(query)

        if self.added_kv_proj_dim is not None:
            # Self-attention key/value states plus additional key/value states
            # projected from the encoder hidden states.
            key = self.to_k(hidden_states)
            value = self.to_v(hidden_states)
            encoder_hidden_states_key_proj = self.add_k_proj(encoder_hidden_states)
            encoder_hidden_states_value_proj = self.add_v_proj(encoder_hidden_states)

            key = self.reshape_heads_to_batch_dim(key)
            value = self.reshape_heads_to_batch_dim(value)
            encoder_hidden_states_key_proj = self.reshape_heads_to_batch_dim(encoder_hidden_states_key_proj)
            encoder_hidden_states_value_proj = self.reshape_heads_to_batch_dim(encoder_hidden_states_value_proj)

            key = torch.concat([encoder_hidden_states_key_proj, key], dim=1)
            value = torch.concat([encoder_hidden_states_value_proj, value], dim=1)
        else:
            encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states
            key = self.to_k(encoder_hidden_states)
            value = self.to_v(encoder_hidden_states)

            key = self.reshape_heads_to_batch_dim(key)
            value = self.reshape_heads_to_batch_dim(value)

        if attention_mask is not None:
            if attention_mask.shape[-1] != query.shape[1]:
                # Pad the mask out to the query length, then tile it per head.
                target_length = query.shape[1]
                attention_mask = F.pad(attention_mask, (0, target_length - attention_mask.shape[-1]), value=0.0)
                attention_mask = attention_mask.repeat_interleave(self.heads, dim=0)

        if self._use_memory_efficient_attention_xformers:
            hidden_states = self._memory_efficient_attention_xformers(query, key, value, attention_mask)
            hidden_states = hidden_states.to(query.dtype)
        else:
            if self._slice_size is None or query.shape[0] // self._slice_size == 1:
                hidden_states = self._attention(query, key, value, attention_mask)
            else:
                hidden_states = self._sliced_attention(query, key, value, sequence_length, dim, attention_mask)

        # Linear projection followed by dropout.
        hidden_states = self.to_out[0](hidden_states)
        hidden_states = self.to_out[1](hidden_states)
        return hidden_states

    def _attention(self, query, key, value, attention_mask=None):
        if self.upcast_attention:
            query = query.float()
            key = key.float()

        # baddbmm with beta=0 ignores the (uninitialized) input tensor and computes
        # alpha * (query @ key^T) in a single fused kernel.
        attention_scores = torch.baddbmm(
            torch.empty(query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device),
            query,
            key.transpose(-1, -2),
            beta=0,
            alpha=self.scale,
        )

        if attention_mask is not None:
            attention_scores = attention_scores + attention_mask

        if self.upcast_softmax:
            attention_scores = attention_scores.float()

        attention_probs = attention_scores.softmax(dim=-1)

        # Cast back to the value dtype in case the softmax was upcast to float32.
        attention_probs = attention_probs.to(value.dtype)

        hidden_states = torch.bmm(attention_probs, value)

        # (batch * heads, seq_len, head_dim) -> (batch, seq_len, heads * head_dim)
        hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
        return hidden_states

    def _sliced_attention(self, query, key, value, sequence_length, dim, attention_mask):
        batch_size_attention = query.shape[0]
        hidden_states = torch.zeros(
            (batch_size_attention, sequence_length, dim // self.heads), device=query.device, dtype=query.dtype
        )
        slice_size = self._slice_size if self._slice_size is not None else hidden_states.shape[0]
        # Note: slice_size is expected to divide batch_size_attention evenly; any
        # remainder rows beyond a whole number of slices would not be computed.
        for i in range(hidden_states.shape[0] // slice_size):
            start_idx = i * slice_size
            end_idx = (i + 1) * slice_size

            query_slice = query[start_idx:end_idx]
            key_slice = key[start_idx:end_idx]

            if self.upcast_attention:
                query_slice = query_slice.float()
                key_slice = key_slice.float()

            attn_slice = torch.baddbmm(
                torch.empty(slice_size, query.shape[1], key.shape[1], dtype=query_slice.dtype, device=query.device),
                query_slice,
                key_slice.transpose(-1, -2),
                beta=0,
                alpha=self.scale,
            )

            if attention_mask is not None:
                attn_slice = attn_slice + attention_mask[start_idx:end_idx]

            if self.upcast_softmax:
                attn_slice = attn_slice.float()

            attn_slice = attn_slice.softmax(dim=-1)

            attn_slice = attn_slice.to(value.dtype)
            attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx])

            hidden_states[start_idx:end_idx] = attn_slice

        hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
        return hidden_states

    def _memory_efficient_attention_xformers(self, query, key, value, attention_mask):
        if self.upcast_efficient_attention:
            org_dtype = query.dtype
            query = query.float()
            key = key.float()
            value = value.float()
            if attention_mask is not None:
                attention_mask = attention_mask.float()
        hidden_states = self._memory_efficient_attention_split(query, key, value, attention_mask)

        if self.upcast_efficient_attention:
            hidden_states = hidden_states.to(org_dtype)

        # query/key/value arrive here as (batch * heads, seq_len, head_dim), so the
        # heads are merged back with the batch-dim helper rather than the 4d helper.
        hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
        return hidden_states

    def _memory_efficient_attention_split(self, query, key, value, attention_mask):
        # Process very large batches in chunks to stay under the 65535 batch-size
        # limit assumed here for the xformers kernels.
        batch_size = query.shape[0]
        max_batch_size = 65535
        num_batches = (batch_size + max_batch_size - 1) // max_batch_size
        results = []
        for i in range(num_batches):
            start_idx = i * max_batch_size
            end_idx = min((i + 1) * max_batch_size, batch_size)
            query_batch = query[start_idx:end_idx]
            key_batch = key[start_idx:end_idx]
            value_batch = value[start_idx:end_idx]
            if attention_mask is not None:
                attention_mask_batch = attention_mask[start_idx:end_idx]
            else:
                attention_mask_batch = None
            result = xformers.ops.memory_efficient_attention(
                query_batch, key_batch, value_batch, attn_bias=attention_mask_batch
            )
            results.append(result)
        return torch.cat(results, dim=0)


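# Usage sketch for CrossAttention (sizes assumed for illustration, not taken from
# any particular model config; the xformers path additionally requires the
# _use_memory_efficient_attention_xformers flag to be set by the parent model):
#
#     attn = CrossAttention(query_dim=320, heads=8, dim_head=40)
#     hidden = torch.randn(2, 4096, 320)
#     out = attn(hidden)  # self-attention: out.shape == (2, 4096, 320)
#     ctx = torch.randn(2, 77, 320)  # cross_attention_dim defaults to query_dim
#     out = attn(hidden, encoder_hidden_states=ctx)  # cross-attention
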
|
class FeedForward(nn.Module):
    r"""
    A feed-forward layer.

    Parameters:
        dim (`int`): The number of channels in the input.
        dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
        mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
    """

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)
        else:
            raise ValueError(f"Unsupported activation function: {activation_fn}")

        self.net = nn.ModuleList([])
        self.net.append(act_fn)
        self.net.append(nn.Dropout(dropout))
        self.net.append(nn.Linear(inner_dim, dim_out))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states


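# Usage sketch (sizes assumed): the default "geglu" activation expands
# dim -> mult * dim inside the gated projection, then maps back to dim_out:
#
#     ff = FeedForward(dim=320)
#     y = ff(torch.randn(2, 64, 320))  # y.shape == (2, 64, 320)
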
|
class GELU(nn.Module):
    r"""
    GELU activation function with an input projection.
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16, so compute in float32.
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states

|
class GEGLU(nn.Module):
    r"""
    A variant of the gated linear unit activation function from https://arxiv.org/abs/2002.05202.

    Parameters:
        dim_in (`int`): The number of channels in the input.
        dim_out (`int`): The number of channels in the output.
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16, so compute in float32.
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        # Split the doubled projection into value and gate halves: x * GELU(gate).
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)


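# Shape sketch (sizes assumed): the projection doubles the channel count so one
# half can gate the other:
#
#     geglu = GEGLU(dim_in=320, dim_out=1280)  # linear layer maps 320 -> 2560
#     y = geglu(torch.randn(2, 64, 320))       # y.shape == (2, 64, 1280)
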
|
class ApproximateGELU(nn.Module):
    r"""
    The approximate form of the Gaussian Error Linear Unit (GELU).

    For more details, see section 2 of https://arxiv.org/abs/1606.08415.
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        # Sigmoid approximation of GELU: x * sigmoid(1.702 * x).
        return x * torch.sigmoid(1.702 * x)


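# Numerical sanity sketch: x * sigmoid(1.702 * x) tracks the exact GELU closely,
# with a maximum absolute error on the order of 1e-2:
#
#     x = torch.linspace(-4.0, 4.0, 1001)
#     err = (x * torch.sigmoid(1.702 * x) - F.gelu(x)).abs().max()
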
|
def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0):
    # One rotary frequency per channel pair, decreasing geometrically with channel index.
    freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
    t = torch.arange(end, device=freqs.device, dtype=torch.float32)
    freqs = torch.outer(t, freqs)
    # Unit-magnitude complex exponentials e^(i * t * freq), shape (end, dim // 2).
    freqs_cis = torch.polar(torch.ones_like(freqs), freqs)
    return freqs_cis

|
def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):
    # Reshape the (seq_len, dim) frequencies so they broadcast over x's other dimensions.
    ndim = x.ndim
    assert ndim > 1
    assert freqs_cis.shape == (x.shape[1], x.shape[-1])
    shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]
    return freqs_cis.view(*shape)

|
def apply_rotary_emb(
    xq: torch.Tensor,
    xk: torch.Tensor,
    freqs_cis: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    # View the last dimension as complex pairs, rotate by the precomputed
    # unit-magnitude frequencies, then flatten back to real values.
    xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2).contiguous())
    xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2).contiguous())
    freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
    xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(2)
    xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(2)
    return xq_out.type_as(xq), xk_out.type_as(xk)
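

# Minimal rotary-embedding sketch (shapes assumed for illustration): freqs_cis
# must have shape (seq_len, last_dim // 2) to match apply_rotary_emb's inputs.
if __name__ == "__main__":
    seq_len, head_dim = 16, 64
    freqs_cis = precompute_freqs_cis(head_dim, seq_len)  # complex, shape (16, 32)
    xq = torch.randn(2, seq_len, head_dim)
    xk = torch.randn(2, seq_len, head_dim)
    xq_rot, xk_rot = apply_rotary_emb(xq, xk, freqs_cis)
    assert xq_rot.shape == xq.shape and xk_rot.shape == xk.shape
    # The rotations have unit magnitude, so per-position norms are preserved.
    assert torch.allclose(xq_rot.norm(dim=-1), xq.norm(dim=-1), atol=1e-4)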