import logging

from torch import Tensor
from torch import nn

import comfy.ops
from comfy.ldm.modules.attention import optimized_attention

ops = comfy.ops.manual_cast

logger = logging.getLogger("dinov2")

try:
    from xformers.ops import memory_efficient_attention, unbind

    XFORMERS_AVAILABLE = True
except ImportError:
    logger.warning("xFormers not available")
    XFORMERS_AVAILABLE = False


class Attention(nn.Module):
    def __init__(
        self,
        dim: int,
        num_heads: int = 8,
        qkv_bias: bool = False,
        proj_bias: bool = True,
        attn_drop: float = 0.0,
        proj_drop: float = 0.0,
    ) -> None:
        super().__init__()
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim**-0.5

        self.qkv = ops.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = ops.Linear(dim, dim, bias=proj_bias)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x: Tensor) -> Tensor:
        B, N, C = x.shape
        # Project to qkv, then split into q, k, v of shape (B, num_heads, N, head_dim).
        q, k, v = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        out = optimized_attention(q, k, v, self.num_heads, skip_reshape=True)

        out = self.proj(out)
        out = self.proj_drop(out)
        return out


class MemEffAttention(Attention):
    def forward(self, x: Tensor, attn_bias=None) -> Tensor:
        if not XFORMERS_AVAILABLE:
            assert attn_bias is None, "xFormers is required for nested tensors usage"
            return super().forward(x)

        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)

        # Split into q, k, v of shape (B, N, num_heads, head_dim), the layout
        # expected by xFormers' memory_efficient_attention.
        q, k, v = unbind(qkv, 2)

        x = memory_efficient_attention(q, k, v, attn_bias=attn_bias)
        x = x.reshape([B, N, C])

        x = self.proj(x)
        x = self.proj_drop(x)
        return x
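

# --- Illustrative usage sketch (not part of the original module) ---
# A minimal shape check, assuming a ComfyUI environment where comfy.ops and
# optimized_attention import successfully. The dim/num_heads/sequence values
# below are arbitrary ViT-B-sized examples, not values taken from this file;
# comfy.ops layers may skip weight initialization, so only the output shape
# is meaningful here.
if __name__ == "__main__":
    import torch

    attn = Attention(dim=768, num_heads=12)
    x = torch.randn(1, 197, 768)  # (batch, tokens, width), e.g. 196 patches + CLS
    print(attn(x).shape)  # expected: torch.Size([1, 197, 768])

    # MemEffAttention exposes the same interface; its fast path additionally
    # requires xFormers kernels that support the current device/dtype, and it
    # falls back to Attention.forward when xFormers is not installed.
    mem_attn = MemEffAttention(dim=768, num_heads=12)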