LLM-foundry update #2
opened January 08, 2024 10:35:22 by niallturbitt

Files changed:
- attention.py +59 -33
- blocks.py +17 -4
- configuration_mpt.py +27 -12
- ffn.py +72 -14
- modeling_mpt.py +161 -40
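
For orientation only (not part of the diff): a checkpoint that bundles these five files as Hugging Face remote code would typically be loaded along these lines; the repo id below is a placeholder.

# Hedged sketch; 'some-org/mpt-checkpoint' is a placeholder, not a repo id taken from this PR.
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained('some-org/mpt-checkpoint', trust_remote_code=True)
config.attn_config['attn_impl'] = 'torch'  # use 'flash' only if flash-attn is installed
model = AutoModelForCausalLM.from_pretrained('some-org/mpt-checkpoint', config=config, trust_remote_code=True)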
attention.py
CHANGED
@@ -4,6 +4,7 @@ import warnings
 from typing import Any, Optional
 import torch
 import torch.nn as nn
+import transformers
 from einops import rearrange
 from packaging import version
 from torch import nn
@@ -24,6 +25,12 @@ def is_flash_v1_installed():
     except:
         return False
     return version.parse(flash_attn.__version__) < version.parse('2.0.0')
+
+def is_transformers_version_gte(hf_version: str) -> bool:
+    return version.parse(transformers.__version__) >= version.parse(hf_version)
+
+def check_alibi_support(attention_impl: str) -> bool:
+    return attention_impl != 'flash' or is_flash_v2_installed(v2_version='v2.4.2')
 if is_flash_v1_installed():
     import transformers
     transformers.utils.is_flash_attn_available = lambda : False
@@ -111,11 +118,11 @@ def check_valid_inputs(*tensors: torch.Tensor, valid_dtypes: Optional[list[torch
         if not tensor.is_cuda:
             raise TypeError(f'Inputs must be cuda tensors (tensor.is_cuda={tensor.is_cuda!r}).')
 
-def flash_attn_fn(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, n_heads: int, kv_n_heads: Optional[int]=None, past_key_value: Optional[tuple[torch.Tensor, torch.Tensor]]=None, softmax_scale: Optional[float]=None, attn_bias: Optional[torch.Tensor]=None, key_padding_mask: Optional[torch.Tensor]=None, is_causal: bool=False, dropout_p: float=0.0, training: bool=False, needs_weights: bool=False, multiquery: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor, torch.Tensor]]]:
+def flash_attn_fn(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, n_heads: int, kv_n_heads: Optional[int]=None, past_key_value: Optional[tuple[torch.Tensor, torch.Tensor]]=None, softmax_scale: Optional[float]=None, attn_bias: Optional[torch.Tensor]=None, key_padding_mask: Optional[torch.Tensor]=None, is_causal: bool=False, dropout_p: float=0.0, training: bool=False, needs_weights: bool=False, multiquery: bool=False, attention_mask_in_length: Optional[torch.Tensor]=None, should_repeat_kv_for_gqa: Optional[bool]=True, sliding_window_size: int=-1, alibi_slopes: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor, torch.Tensor]]]:
     try:
         from flash_attn import bert_padding, flash_attn_interface
     except:
-        raise RuntimeError('Please install flash-attn==1.0.9 or flash-attn==2.3.
+        raise RuntimeError('Please install flash-attn==1.0.9 or flash-attn==2.3.6')
     check_valid_inputs(query, key, value)
     if multiquery:
         warnings.warn(DeprecationWarning('The direct use of the multiquery arg is deprecated. Setting kv_n_heads=1 automatically. Please set kv_n_heads=1 explicitly to remove this warning.'))
@@ -128,36 +135,46 @@ def flash_attn_fn(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, n
             key = torch.cat([past_key_value[0], key], dim=1)
             value = torch.cat([past_key_value[1], value], dim=1)
         past_key_value = (key, value)
-    if attn_bias is not None:
-        _s_q = max(0, attn_bias.size(2) - query.size(1))
-        _s_k = max(0, attn_bias.size(3) - key.size(1))
-        attn_bias = attn_bias[:, :, _s_q:, _s_k:]
     if attn_bias is not None:
         raise NotImplementedError(f'attn_bias not implemented for flash attn.')
     (batch_size, seqlen) = query.shape[:2]
+    if attention_mask_in_length is None:
+        if key_padding_mask is None:
+            key_padding_mask = torch.ones_like(key[:, :, 0], dtype=torch.bool)
+        query_padding_mask = key_padding_mask[:, -query.size(1):]
+        unpadding_function = bert_padding.unpad_input
+    else:
+        key_padding_mask = attention_mask_in_length
+        query_padding_mask = attention_mask_in_length
+        unpadding_function = bert_padding.unpad_input_for_concatenated_sequences
+    (query_unpad, indices_q, cu_seqlens_q, max_seqlen_q) = unpadding_function(query, query_padding_mask)
     query_unpad = rearrange(query_unpad, 'nnz (h d) -> nnz h d', h=n_heads)
-    (key_unpad, _, cu_seqlens_k, max_seqlen_k) =
+    (key_unpad, _, cu_seqlens_k, max_seqlen_k) = unpadding_function(key, key_padding_mask)
     key_unpad = rearrange(key_unpad, 'nnz (h d) -> nnz h d', h=kv_n_heads)
-    (value_unpad, _, _, _) =
+    (value_unpad, _, _, _) = unpadding_function(value, key_padding_mask)
     value_unpad = rearrange(value_unpad, 'nnz (h d) -> nnz h d', h=kv_n_heads)
-    if kv_n_heads
+    if kv_n_heads < n_heads and (not is_flash_v2_installed()) and (not should_repeat_kv_for_gqa):
+        raise ValueError('For Grouped Query Attention or Multi Query Attention, should_repeat_kv_for_gqa should be set to True if not using Flash Attention v2.')
+    if should_repeat_kv_for_gqa:
+        if kv_n_heads == 1:
+            key_unpad = key_unpad.expand(key_unpad.size(0), n_heads, key_unpad.size(-1))
+            value_unpad = value_unpad.expand(value_unpad.size(0), n_heads, value_unpad.size(-1))
+        elif kv_n_heads < n_heads:
+            key_unpad = repeat_kv_for_gqa(key_unpad.view(1, key_unpad.size(0), kv_n_heads, -1), n_heads // kv_n_heads).view(key_unpad.size(0), n_heads, -1)
+            value_unpad = repeat_kv_for_gqa(value_unpad.view(1, value_unpad.size(0), kv_n_heads, -1), n_heads // kv_n_heads).view(value_unpad.size(0), n_heads, -1)
     dropout_p = dropout_p if training else 0.0
     reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
     if is_flash_v1_installed():
         output_unpad = flash_attn_interface.flash_attn_unpadded_func(q=query_unpad, k=key_unpad, v=value_unpad, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_q, max_seqlen_k=max_seqlen_k, dropout_p=dropout_p, softmax_scale=softmax_scale, causal=reset_is_causal, return_attn_probs=needs_weights)
     elif is_flash_v2_installed():
+        alibi_kwargs = {}
+        if check_alibi_support('flash'):
+            alibi_kwargs = {'alibi_slopes': alibi_slopes}
+        elif alibi_slopes is not None:
+            raise ValueError('alibi_slopes is only supported for flash-attn>=2.4.2')
+        output_unpad = flash_attn_interface.flash_attn_varlen_func(q=query_unpad, k=key_unpad, v=value_unpad, cu_seqlens_q=cu_seqlens_q, cu_seqlens_k=cu_seqlens_k, max_seqlen_q=max_seqlen_q, max_seqlen_k=max_seqlen_k, dropout_p=dropout_p, softmax_scale=softmax_scale, causal=reset_is_causal, return_attn_probs=needs_weights, window_size=(sliding_window_size, sliding_window_size), **alibi_kwargs)
     else:
-        raise RuntimeError('flash-attn==1.0.9 or flash-attn==2.
+        raise RuntimeError('flash-attn==1.0.9 or flash-attn==2.4.2 is required.')
     output = bert_padding.pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices_q, batch_size, seqlen)
     return (output, None, past_key_value)
@@ -225,7 +242,7 @@ class GroupedQueryAttention(nn.Module):
     implementation enables user to also use additive bias.
     """
 
-    def __init__(self, d_model: int, n_heads: int, kv_n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, norm_type: str='low_precision_layernorm', fc_type: str='torch', device: Optional[str]=None, bias: bool=True):
+    def __init__(self, d_model: int, n_heads: int, kv_n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, norm_type: str='low_precision_layernorm', fc_type: str='torch', device: Optional[str]=None, bias: bool=True, sliding_window_size: int=-1):
         super().__init__()
         self.attn_impl = attn_impl
         self.clip_qkv = clip_qkv
@@ -233,6 +250,7 @@ class GroupedQueryAttention(nn.Module):
         self.d_model = d_model
         self.n_heads = n_heads
         self.kv_n_heads = kv_n_heads
+        self.sliding_window_size = sliding_window_size
         self.head_dim = d_model // n_heads
         if self.kv_n_heads <= 0:
             raise ValueError('kv_n_heads should be greater than zero.')
@@ -265,7 +283,7 @@ class GroupedQueryAttention(nn.Module):
         self.out_proj = FC_CLASS_REGISTRY[fc_type](self.d_model, self.d_model, **fc_kwargs)
         self.out_proj._is_residual = True
 
-    def forward(self, x: torch.Tensor, past_key_value: Optional[tuple[torch.Tensor, torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, rotary_emb_w_meta_info: Optional[dict]=None, is_causal: bool=True, needs_weights: bool=False) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor, torch.Tensor]]]:
+    def forward(self, x: torch.Tensor, past_key_value: Optional[tuple[torch.Tensor, torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, rotary_emb_w_meta_info: Optional[dict]=None, is_causal: bool=True, needs_weights: bool=False, attention_mask_in_length: Optional[torch.Tensor]=None, alibi_slopes: Optional[torch.Tensor]=None) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor, torch.Tensor]]]:
         qkv = self.Wqkv(x)
         if self.clip_qkv:
             qkv = qkv.clamp(min=-self.clip_qkv, max=self.clip_qkv)
@@ -290,14 +308,20 @@ class GroupedQueryAttention(nn.Module):
                 value = value.view(bsz, seqlen, self.kv_n_heads * self.head_dim)
             elif rotary_emb_w_meta_info['impl'] == 'hf':
                 (cos, sin) = rotary_emb(value, seq_len)
+                if is_transformers_version_gte('4.36'):
+                    (query, key) = apply_rotary_pos_emb(query, key, cos, sin, offset_info, unsqueeze_dim=2)
+                else:
+                    query = query.transpose(1, 2)
+                    key = key.transpose(1, 2)
+                    (query, key) = apply_rotary_pos_emb(query, key, cos, sin, offset_info)
+                    query = query.transpose(1, 2)
+                    key = key.transpose(1, 2)
                 query = query.view(bsz, seqlen, self.d_model)
                 key = key.view(bsz, seqlen, self.kv_n_heads * self.head_dim)
+        extra_attn_kwargs = {}
+        if self.attn_impl == 'flash':
+            extra_attn_kwargs = {'attention_mask_in_length': attention_mask_in_length, 'should_repeat_kv_for_gqa': not is_flash_v2_installed(), 'sliding_window_size': self.sliding_window_size, 'alibi_slopes': alibi_slopes}
+        (context, attn_weights, past_key_value) = self.attn_fn(query, key, value, self.n_heads, self.kv_n_heads, past_key_value=past_key_value, softmax_scale=self.softmax_scale, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, dropout_p=self.attn_dropout_p, training=self.training, needs_weights=needs_weights, **extra_attn_kwargs)
         return (self.out_proj(context), attn_weights, past_key_value)
 
 class MultiheadAttention(GroupedQueryAttention):
@@ -307,8 +331,8 @@ class MultiheadAttention(GroupedQueryAttention):
     additive bias.
     """
 
-    def __init__(self, d_model: int, n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, norm_type: str='low_precision_layernorm', fc_type: str='torch', device: Optional[str]=None, bias: bool=True):
-        super().__init__(d_model=d_model, n_heads=n_heads, kv_n_heads=n_heads, attn_impl=attn_impl, clip_qkv=clip_qkv, qk_ln=qk_ln, softmax_scale=softmax_scale, attn_pdrop=attn_pdrop, norm_type=norm_type, fc_type=fc_type, device=device, bias=bias)
+    def __init__(self, d_model: int, n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, norm_type: str='low_precision_layernorm', fc_type: str='torch', device: Optional[str]=None, bias: bool=True, sliding_window_size: int=-1):
+        super().__init__(d_model=d_model, n_heads=n_heads, kv_n_heads=n_heads, attn_impl=attn_impl, clip_qkv=clip_qkv, qk_ln=qk_ln, softmax_scale=softmax_scale, attn_pdrop=attn_pdrop, norm_type=norm_type, fc_type=fc_type, device=device, bias=bias, sliding_window_size=sliding_window_size)
 
 class MultiQueryAttention(GroupedQueryAttention):
     """Multi-Query self attention.
@@ -317,8 +341,8 @@ class MultiQueryAttention(GroupedQueryAttention):
     additive bias.
     """
 
-    def __init__(self, d_model: int, n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, norm_type: str='low_precision_layernorm', fc_type: str='torch', device: Optional[str]=None, bias: bool=True):
-        super().__init__(d_model=d_model, n_heads=n_heads, kv_n_heads=1, attn_impl=attn_impl, clip_qkv=clip_qkv, qk_ln=qk_ln, softmax_scale=softmax_scale, attn_pdrop=attn_pdrop, norm_type=norm_type, fc_type=fc_type, device=device, bias=bias)
+    def __init__(self, d_model: int, n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, norm_type: str='low_precision_layernorm', fc_type: str='torch', device: Optional[str]=None, bias: bool=True, sliding_window_size: int=-1):
+        super().__init__(d_model=d_model, n_heads=n_heads, kv_n_heads=1, attn_impl=attn_impl, clip_qkv=clip_qkv, qk_ln=qk_ln, softmax_scale=softmax_scale, attn_pdrop=attn_pdrop, norm_type=norm_type, fc_type=fc_type, device=device, bias=bias, sliding_window_size=sliding_window_size)
 
 def attn_bias_shape(attn_impl: str, n_heads: int, seq_len: int, alibi: bool, prefix_lm: bool, causal: bool, use_sequence_id: bool) -> Optional[tuple[int, int, int, int]]:
     if attn_impl == 'flash':
@@ -345,13 +369,15 @@ def build_attn_bias(attn_impl: str, attn_bias: torch.Tensor, n_heads: int, seq_l
     else:
         raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')
 
-def gen_slopes(n_heads: int, alibi_bias_max: int=8, device: Optional[torch.device]=None) -> torch.Tensor:
+def gen_slopes(n_heads: int, alibi_bias_max: int=8, device: Optional[torch.device]=None, return_1d: bool=False) -> torch.Tensor:
     _n_heads = 2 ** math.ceil(math.log2(n_heads))
     m = torch.arange(1, _n_heads + 1, dtype=torch.float32, device=device)
     m = m.mul(alibi_bias_max / _n_heads)
     slopes = 1.0 / torch.pow(2, m)
     if _n_heads != n_heads:
         slopes = torch.concat([slopes[1::2], slopes[::2]])[:n_heads]
+    if return_1d:
+        return slopes
     return slopes.view(1, n_heads, 1, 1)
 
 def build_alibi_bias(n_heads: int, seq_len: int, full: bool=False, alibi_bias_max: int=8, device: Optional[torch.device]=None, dtype: Optional[torch.dtype]=None) -> torch.Tensor:
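
The attention changes thread three new knobs through flash_attn_fn: attention_mask_in_length (packed/concatenated sequences), sliding_window_size (local attention), and alibi_slopes (1-D slopes from gen_slopes(..., return_1d=True)). A hedged usage sketch, assuming flash-attn >= 2.4.2, a CUDA device, and an llm-foundry version matching this diff (the import path is illustrative):

import torch
from llmfoundry.models.layers.attention import flash_attn_fn, gen_slopes  # illustrative import path

n_heads, kv_n_heads, d_head = 8, 8, 64
q = torch.randn(2, 16, n_heads * d_head, device='cuda', dtype=torch.bfloat16)
k = torch.randn(2, 16, kv_n_heads * d_head, device='cuda', dtype=torch.bfloat16)
v = torch.randn_like(k)

slopes = gen_slopes(n_heads, alibi_bias_max=8, device=q.device, return_1d=True)  # shape (n_heads,)
out, _, _ = flash_attn_fn(
    q, k, v, n_heads, kv_n_heads,
    is_causal=True,
    sliding_window_size=4,   # each query also attends only to a local window of keys
    alibi_slopes=slopes,     # forwarded to flash_attn_varlen_func when supported
)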
blocks.py
CHANGED
@@ -5,11 +5,15 @@ import torch.nn as nn
 from .attention import ATTN_CLASS_REGISTRY
 from .ffn import FFN_CLASS_REGISTRY, build_ffn
 from .norm import NORM_CLASS_REGISTRY
+try:
+    from flash_attn.bert_padding import unpad_input, pad_input
+except:
+    (unpad_input, pad_input) = (None, None)
+attn_config_defaults: Dict = {'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'sliding_window_size': -1, 'alibi': False, 'alibi_bias_max': 8, 'rope': False, 'rope_theta': 10000, 'rope_impl': 'dail', 'rope_dail_config': {'type': 'original', 'pos_idx_in_fp32': True, 'xpos_scale_base': 512}, 'rope_hf_config': {'type': 'no_scaling', 'factor': 1.0}}
 
 class MPTBlock(nn.Module):
 
-    def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Optional[Dict]=None, ffn_config: Optional[Dict]=None, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', fc_type: str='torch', device: Optional[str]=None, no_bias: bool=False, **kwargs: Any):
+    def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Optional[Dict]=None, ffn_config: Optional[Dict]=None, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', fc_type: str='torch', device: Optional[str]=None, no_bias: bool=False, use_pad_tok_in_ffn: bool=True, **kwargs: Any):
         if attn_config is None:
             attn_config = attn_config_defaults
         if ffn_config is None:
@@ -29,14 +33,23 @@ class MPTBlock(nn.Module):
         self.ffn = build_ffn(d_model=d_model, expansion_ratio=expansion_ratio, device=device, bias=not no_bias, **ffn_config)
         self.resid_attn_dropout = nn.Dropout(resid_pdrop)
         self.resid_ffn_dropout = nn.Dropout(resid_pdrop)
+        self.use_pad_tok_in_ffn = use_pad_tok_in_ffn
 
-    def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, rotary_emb_w_meta_info: Optional[Dict]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True, output_attentions: bool=False) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor, torch.Tensor]]]:
+    def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, rotary_emb_w_meta_info: Optional[Dict]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True, output_attentions: bool=False, attention_mask_in_length: Optional[torch.Tensor]=None, alibi_slopes: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor, torch.Tensor]]]:
         a = self.norm_1(x)
-        (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, rotary_emb_w_meta_info=rotary_emb_w_meta_info, attention_mask=attention_mask, is_causal=is_causal, needs_weights=output_attentions)
+        (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, rotary_emb_w_meta_info=rotary_emb_w_meta_info, attention_mask=attention_mask, is_causal=is_causal, needs_weights=output_attentions, attention_mask_in_length=attention_mask_in_length, alibi_slopes=alibi_slopes)
         x = x + self.resid_attn_dropout(b)
         m = x
         if self.norm_2 is not None:
             m = self.norm_2(x)
+        (batch_size, seq_len) = m.size()[:2]
+        indices = None
+        if not self.use_pad_tok_in_ffn:
+            assert unpad_input is not None
+            (m, indices, _, _) = unpad_input(m, attention_mask)
         n = self.ffn(m)
+        if not self.use_pad_tok_in_ffn:
+            assert pad_input is not None
+            n = pad_input(n, indices, batch_size, seq_len)
         x = x + self.resid_ffn_dropout(n)
         return (x, attn_weights, past_key_value)
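
The new use_pad_tok_in_ffn flag drops padding tokens before the FFN and restores them afterwards. A hedged sketch of that round trip, assuming a flash-attn build whose unpad_input returns four values (as the block above expects):

import torch
from flash_attn.bert_padding import unpad_input, pad_input

batch, seqlen, d_model = 2, 8, 16
x = torch.randn(batch, seqlen, d_model, device='cuda', dtype=torch.bfloat16)
attention_mask = torch.tensor([[1] * 8, [1] * 5 + [0] * 3], device='cuda', dtype=torch.bool)
ffn = torch.nn.Linear(d_model, d_model, device='cuda', dtype=torch.bfloat16)  # stand-in for MPTBlock.ffn

x_unpad, indices, _, _ = unpad_input(x, attention_mask)  # (13, d_model): the 3 pad positions are dropped
y_unpad = ffn(x_unpad)                                   # the FFN only sees real tokens
y = pad_input(y_unpad, indices, batch, seqlen)           # back to (batch, seqlen, d_model); pad rows are zeros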
configuration_mpt.py
CHANGED
@@ -2,7 +2,7 @@
 import warnings
 from typing import Any, Dict, Optional, Union
 from transformers import PretrainedConfig
-from .attention import is_flash_v2_installed
+from .attention import check_alibi_support, is_flash_v2_installed
 from .blocks import attn_config_defaults
 from .fc import FC_CLASS_REGISTRY
 from .norm import LPLayerNorm
@@ -13,14 +13,14 @@ init_config_defaults: Dict = {'name': 'kaiming_normal_', 'fan_mode': 'fan_in', '
 class MPTConfig(PretrainedConfig):
     model_type = 'mpt'
 
-    def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: int=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, ffn_config: Dict=ffn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, fc_type: str='torch', verbose: Optional[int]=None, **kwargs: Any):
+    def __init__(self, d_model: int=2048, n_heads: int=16, n_layers: int=24, expansion_ratio: Union[int, float]=4, max_seq_len: int=2048, vocab_size: int=50368, resid_pdrop: float=0.0, emb_pdrop: float=0.0, learned_pos_emb: bool=True, attn_config: Dict=attn_config_defaults, ffn_config: Dict=ffn_config_defaults, init_device: str='cpu', logit_scale: Optional[Union[float, str]]=None, no_bias: bool=False, embedding_fraction: float=1.0, norm_type: str='low_precision_layernorm', use_cache: bool=False, init_config: Dict=init_config_defaults, fc_type: str='torch', tie_word_embeddings: bool=True, use_pad_tok_in_ffn: bool=True, verbose: Optional[int]=None, **kwargs: Any):
         """The MPT configuration class.
 
         Args:
             d_model (int): The size of the embedding dimension of the model.
             n_heads (int): The number of attention heads.
             n_layers (int): The number of layers in the model.
-            expansion_ratio (int): The ratio of the up/down scale in the ffn.
+            expansion_ratio (Union[int, float]): The ratio of the up/down scale in the ffn.
             max_seq_len (int): The maximum sequence length of the model.
             vocab_size (int): The size of the vocabulary.
             resid_pdrop (float): The dropout probability applied to the attention output before combining with residual.
@@ -42,6 +42,7 @@ class MPTConfig(PretrainedConfig):
                     When the model is in `train` mode, this requires passing an extra `sequence_id` argument which indicates
                     which sub-sequence each token belongs to.
                     Defaults to ``False`` meaning any provided `sequence_id` will be ignored.
+                sliding_window_size (int): Window size for sliding window local attention. Defaults to -1, which means no sliding window. Query at position i will only attend to keys between [i + seqlen_k - seqlen_q - window_size, i + seqlen_k - seqlen_q + window_size] inclusive. Only works for flash attention v2.3.0 or higher.
                 alibi (bool): Whether to use the alibi bias instead of position embeddings.
                 alibi_bias_max (int): The maximum value of the alibi bias.
                 rope (bool): Whether to use rotary positional embeddings.
@@ -56,11 +57,11 @@ class MPTConfig(PretrainedConfig):
                     factor (float): Scaling factor to use if using 'linear' or 'dynamic' as rope_scaling.type.
                 kv_n_heads (Optional[int]): For grouped_query_attention only, allow user to specify number of kv heads.
             ffn_config (Dict): A dictionary used to configure the model's ffn module:
-                ffn_type (str): type of ffn to use. Options: mptmlp, te_ln_mlp
+                ffn_type (str): type of ffn to use. Options: mptmlp, mptglu, te_ln_mlp
             init_device (str): The device to use for parameter initialization.
             logit_scale (Optional[Union[float, str]]): If not None, scale the logits by this value.
             no_bias (bool): Whether to use bias in all layers.
-            verbose (int):
+            verbose (int): Deprecated.
             embedding_fraction (float): The fraction to scale the gradients of the embedding layer by.
             norm_type (str): choose type of norm to use
             use_cache (bool): Whether or not the model should return the last key/values attentions
@@ -80,6 +81,8 @@ class MPTConfig(PretrainedConfig):
                 ---
                 See llmfoundry.models.utils.param_init_fns.py for info on other param init config options
             fc_type (str): choose fc layer implementation. Options: torch and te. te layers support fp8 when using H100 GPUs.
+            tie_word_embeddings (bool): Whether to tie the input embedding and output layers.
+            use_pad_tok_in_ffn (bool): Whether to forward the pad token in the feedforward networks.
         """
         self.d_model = d_model
         self.n_heads = n_heads
@@ -100,6 +103,7 @@ class MPTConfig(PretrainedConfig):
         self.use_cache = use_cache
         self.init_config = init_config
         self.fc_type = fc_type
+        self.use_pad_tok_in_ffn = use_pad_tok_in_ffn
         if verbose is not None:
             warnings.warn(DeprecationWarning('verbose argument for MPTConfig is now ignored and will be removed. Use python_log_level instead.'))
         if 'name' in kwargs:
@@ -109,7 +113,7 @@ class MPTConfig(PretrainedConfig):
         if self.attn_config.get('alibi', False) or self.attn_config.get('rope', False):
             self.learned_pos_emb = False
             warnings.warn(f'alibi or rope is turned on, setting `learned_pos_emb` to `False.`')
-        super().__init__(**kwargs)
+        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
         self._validate_config()
 
     def _set_config_defaults(self, config: Dict[str, Any], config_defaults: Dict[str, Any]) -> Dict[str, Any]:
@@ -132,10 +136,10 @@ class MPTConfig(PretrainedConfig):
             raise ValueError(f"Unknown attn_impl={self.attn_config['attn_impl']}")
         if self.attn_config['prefix_lm'] and self.attn_config['attn_impl'] not in ['torch', 'triton']:
             raise NotImplementedError('prefix_lm only implemented with torch and triton attention.')
-        if self.attn_config['alibi'] and self.attn_config['attn_impl']
-            raise NotImplementedError('alibi only implemented with torch and
-        if self.attn_config['attn_uses_sequence_id'] and self.attn_config['attn_impl']
-            raise NotImplementedError('attn_uses_sequence_id only implemented with torch and
+        if self.attn_config['alibi'] and (not check_alibi_support(self.attn_config['attn_impl'])):
+            raise NotImplementedError('alibi only implemented with torch, triton, and flash (v2.4.2 or higher) attention.')
+        if self.attn_config['attn_uses_sequence_id'] and (not (self.attn_config['attn_impl'] in ['torch', 'triton'] or (self.attn_config['attn_impl'] == 'flash' and is_flash_v2_installed(v2_version='v2.1.2')))):
+            raise NotImplementedError('attn_uses_sequence_id only implemented with torch, triton, and flash (v2.1.2 or higher) attention.')
         if self.attn_config['rope'] and self.attn_config['rope_impl'] not in ['dail', 'hf']:
             raise ValueError('If rope is being used then rope_impl should be either "dail", or "hf".')
         if self.attn_config['rope'] and self.attn_config['rope_impl'] == 'hf' and (self.attn_config['rope_hf_config']['type'] not in ['no_scaling', 'linear', 'dynamic']):
@@ -145,6 +149,8 @@ class MPTConfig(PretrainedConfig):
                 raise ValueError('If using the dail implementation of rope, the type should be one of "original" or "xpos".')
             if not is_flash_v2_installed(v2_version='2.0.1'):
                 raise ImportError('If using the dail implementation of rope, the flash_attn library v2.0.1 or higher must be installed. Please check the instructions at https://github.com/mosaicml/llm-foundry/blob/main/TUTORIAL.md#what-kinds-of-positional-embeddings-does-llm-foundry-support')
+        if self.attn_config['sliding_window_size'] != -1 and (not (self.attn_config['attn_impl'] == 'flash' and is_flash_v2_installed(v2_version='v2.3.0'))):
+            raise NotImplementedError('sliding window only implemented with flash attention v2.3.0 or higher.')
         if self.embedding_fraction > 1 or self.embedding_fraction <= 0:
             raise ValueError('model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!')
         if isinstance(self.logit_scale, str) and self.logit_scale != 'inv_sqrt_d_model':
@@ -159,7 +165,16 @@ class MPTConfig(PretrainedConfig):
             del te
         except:
             raise ImportError('TransformerEngine import fail. `fc_type: te` requires TransformerEngine be installed. ' + 'The required version of transformer_engine also requires FlashAttention v1.0.6 is installed:\n' + 'pip install flash-attn==1.0.6 --no-build-isolation \n' + 'pip install git+https://github.com/NVIDIA/TransformerEngine.git@144e4888b2cdd60bd52e706d5b7a79cb9c1a7156')
-        if self.ffn_config['ffn_type'] == '
+        if self.ffn_config['ffn_type'] == 'mptgeglu':
+            raise ValueError('API CHANGE: `ffn_type=="mptgeglu"` changed to `ffn_type=="mptglu"`. ' + 'See [#829](https://github.com/mosaicml/llm-foundry/pull/829) for details.')
+        elif self.ffn_config['ffn_type'] in ['mptmlp', 'mptglu']:
             self.ffn_config['fc_type'] = self.fc_type
         elif self.ffn_config['ffn_type'] == 'te_ln_mlp':
             self.ffn_config['bias'] = not self.no_bias
+            if 'ffn_act_fn' in self.ffn_config.keys():
+                raise ValueError(f'Transformer Engine block does not support custom activation functions.')
+        if not self.use_pad_tok_in_ffn:
+            try:
+                from flash_attn.bert_padding import unpad_input, pad_input
+            except:
+                raise ImportError('In order to set `use_pad_tok_in_ffn=False`, please install flash-attn==1.0.9 or flash-attn==2.3.6')
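
Putting the configuration changes together: tie_word_embeddings, use_pad_tok_in_ffn, a float expansion_ratio, and the new sliding-window/ALiBi validation. A hedged construction example; the values are illustrative, and the flash-specific options assume flash-attn >= 2.4.2 is installed:

from configuration_mpt import MPTConfig  # illustrative; shipped as remote code, or via llmfoundry.models.mpt.configuration_mpt

config = MPTConfig(
    d_model=2048,
    n_heads=16,
    n_layers=24,
    expansion_ratio=3.5,  # float ratios are now accepted; 2048 * 3.5 is still an integer hidden size
    attn_config={
        'attn_type': 'grouped_query_attention',
        'kv_n_heads': 8,
        'attn_impl': 'flash',
        'sliding_window_size': 2048,  # validated against flash-attn >= 2.3.0
        'alibi': True,                # validated via check_alibi_support (flash-attn >= 2.4.2)
    },
    ffn_config={'ffn_type': 'mptglu', 'ffn_act_fn': {'name': 'silu'}},
    tie_word_embeddings=False,  # untied output head is now permitted
    use_pad_tok_in_ffn=False,   # requires flash_attn.bert_padding to be importable
)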
ffn.py
CHANGED
@@ -1,5 +1,8 @@
-"""
+"""MPT Blocks used for the MPT Model."""
+import logging
+from copy import deepcopy
+from functools import partial
+from typing import Any, Callable, Optional, Union
 import torch
 import torch.nn as nn
 from .fc import FC_CLASS_REGISTRY
@@ -7,33 +10,88 @@ try:
     import transformer_engine.pytorch as te
 except:
     te = None
+log = logging.getLogger(__name__)
+_FFN_ACT_FN_DEFAULT = {'name': 'gelu', 'approximate': 'none'}
+
+def resolve_ffn_act_fn(config: Optional[dict]=None) -> Callable[[torch.Tensor], torch.Tensor]:
+    """Resolve the activation function for the feed-forward network.
+
+    Args:
+        config (Optional[dict]): The configuration dictionary for the activation function.
+            The dict config must specify the 'name' of a torch.nn.functional activation
+            function. All of other key values pairs are bound to the function as a partial.
+
+    Returns:
+        Callable[[torch.Tensor], torch.Tensor]: The activation function.
+    """
+    if config is None:
+        config = _FFN_ACT_FN_DEFAULT
+    config = deepcopy(config)
+    name = config.pop('name')
+    if not hasattr(torch.nn.functional, name):
+        raise ValueError(f'Unrecognised activation function name ({name}).')
+    act = getattr(torch.nn.functional, name)
+    return partial(act, **config)
+_DEFAULT_ACT_FN = resolve_ffn_act_fn(_FFN_ACT_FN_DEFAULT)
+
+def resolve_ffn_hidden_size(d_model: int, expansion_ratio: Union[int, float], ffn_hidden_size: Optional[int]=None) -> int:
+    """Resolve the hidden size of the feed-forward network.
+
+    Args:
+        d_model (int): The dimension of the input and output of the feed-forward network.
+        expansion_ratio (Union[int, float]): The expansion ratio of the feed-forward network.
+        ffn_hidden_size (Optional[int]): The hidden size of the feed-forward network.
+
+    Returns:
+        int: The hidden size of the feed-forward network.
+    """
+    if ffn_hidden_size is not None:
+        log.info(f'`expansion_ratio` (={expansion_ratio}) ignored when `ffn_hidden_size` (={ffn_hidden_size}) is specified.')
+    else:
+        ffn_hidden_size = int(d_model * expansion_ratio)
+        if ffn_hidden_size != d_model * expansion_ratio:
+            raise ValueError(f'`d_model * expansion_ratio` must be an integer (d_model={d_model!r}; expansion_ratio={expansion_ratio!r}; d_model * expansion_ratio={d_model * expansion_ratio!r}).')
+    return ffn_hidden_size
 
 class MPTMLP(nn.Module):
 
-    def __init__(self, d_model: int, expansion_ratio: int, fc_type: str='torch', device: Optional[str]=None, bias: bool=True):
+    def __init__(self, d_model: int, expansion_ratio: Union[int, float], fc_type: str='torch', ffn_hidden_size: Optional[int]=None, act_fn: Callable[[torch.Tensor], torch.Tensor]=_DEFAULT_ACT_FN, device: Optional[str]=None, bias: bool=True):
         super().__init__()
+        ffn_hidden_size = resolve_ffn_hidden_size(d_model, expansion_ratio, ffn_hidden_size)
+        self.fc_kwargs: dict[str, Any] = {'bias': bias}
         if fc_type != 'te':
-            fc_kwargs['device'] = device
-        self.up_proj = FC_CLASS_REGISTRY[fc_type](d_model,
-        self.act =
-        self.down_proj = FC_CLASS_REGISTRY[fc_type](
+            self.fc_kwargs['device'] = device
+        self.up_proj = FC_CLASS_REGISTRY[fc_type](d_model, ffn_hidden_size, **self.fc_kwargs)
+        self.act = act_fn
+        self.down_proj = FC_CLASS_REGISTRY[fc_type](ffn_hidden_size, d_model, **self.fc_kwargs)
         self.down_proj._is_residual = True
 
     def forward(self, x: torch.Tensor) -> torch.Tensor:
         return self.down_proj(self.act(self.up_proj(x)))
+
+class MPTGLU(MPTMLP):
+
+    def __init__(self, d_model: int, expansion_ratio: Union[int, float], fc_type: str='torch', ffn_hidden_size: Optional[int]=None, act_fn: Callable[[torch.Tensor], torch.Tensor]=_DEFAULT_ACT_FN, device: Optional[str]=None, bias: bool=True):
+        super().__init__(d_model=d_model, expansion_ratio=expansion_ratio, fc_type=fc_type, ffn_hidden_size=ffn_hidden_size, act_fn=act_fn, device=device, bias=bias)
+        self.gate_proj = FC_CLASS_REGISTRY[fc_type](d_model, self.up_proj.out_features, **self.fc_kwargs)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        return self.down_proj(self.act(self.gate_proj(x)) * self.up_proj(x))
+FFN_CLASS_REGISTRY = {'mptmlp': MPTMLP, 'mptglu': MPTGLU}
 if te is not None:
     te.LayerNormMLP._has_norm = True
     FFN_CLASS_REGISTRY['te_ln_mlp'] = te.LayerNormMLP
 
-def build_ffn(d_model: int, expansion_ratio: int, fc_type: str='torch', device: Optional[str]=None, bias: bool=True, **kwargs: Any) -> nn.Module:
+def build_ffn(d_model: int, expansion_ratio: Union[int, float], fc_type: str='torch', ffn_hidden_size: Optional[int]=None, ffn_act_fn: Optional[dict]=None, device: Optional[str]=None, bias: bool=True, **kwargs: Any) -> nn.Module:
     ffn_type = kwargs.pop('ffn_type')
-    if ffn_type
+    if ffn_type in ['mptmlp', 'mptglu']:
         if len(kwargs) > 0:
-            raise ValueError(f'MPTMLP got an unexpected keyword argument: {kwargs}')
-        return
+            raise ValueError(f'MPTMLP (or MPTGLU) got an unexpected keyword argument: {kwargs}')
+        return FFN_CLASS_REGISTRY[ffn_type](d_model=d_model, expansion_ratio=expansion_ratio, fc_type=fc_type, act_fn=resolve_ffn_act_fn(ffn_act_fn), ffn_hidden_size=ffn_hidden_size, device=device, bias=bias)
     elif ffn_type == 'te_ln_mlp':
         assert te is not None
+        ffn_hidden_size = resolve_ffn_hidden_size(d_model, expansion_ratio, ffn_hidden_size)
+        if ffn_act_fn is not None:
+            raise ValueError(f'Transformer Engine block does not support custom activation functions.')
+        return te.LayerNormMLP(hidden_size=d_model, ffn_hidden_size=ffn_hidden_size, bias=bias, **kwargs)
     raise ValueError(f'ffn_type={ffn_type!r} not recognized.')
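
ffn.py now resolves both the hidden size and the activation from config and adds a gated (GLU) variant. A hedged usage sketch (the import path is illustrative; the module ships as remote code next to the model):

import torch
from ffn import build_ffn  # or llmfoundry.models.layers.ffn, depending on packaging

ffn = build_ffn(
    d_model=256,
    expansion_ratio=3.5,          # 256 * 3.5 = 896 hidden units
    ffn_type='mptglu',            # popped from **kwargs inside build_ffn
    ffn_act_fn={'name': 'silu'},  # resolved to partial(torch.nn.functional.silu)
    bias=False,
)
x = torch.randn(2, 8, 256)
y = ffn(x)  # (2, 8, 256) = down_proj(silu(gate_proj(x)) * up_proj(x))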
modeling_mpt.py
CHANGED
@@ -19,7 +19,7 @@ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutpu
|
|
19 |
from transformers.models.llama.modeling_llama import LlamaDynamicNTKScalingRotaryEmbedding as HFDynamicNTKScalingRotaryEmbedding
|
20 |
from transformers.models.llama.modeling_llama import LlamaLinearScalingRotaryEmbedding as HFLinearScalingRotaryEmbedding
|
21 |
from transformers.models.llama.modeling_llama import LlamaRotaryEmbedding as HFRotaryEmbedding
|
22 |
-
from .attention import attn_bias_shape, build_attn_bias
|
23 |
from .blocks import MPTBlock
|
24 |
from .custom_embedding import SharedEmbedding
|
25 |
from .fc import FC_CLASS_REGISTRY as FC_CLASS_REGISTRY
|
@@ -51,6 +51,84 @@ def gen_rotary_embedding(rope_head_dim: int, rope_impl: str, rope_theta: int, ro
|
|
51 |
return HFDynamicNTKScalingRotaryEmbedding(rope_head_dim, max_position_embeddings=max_seq_len, base=rope_theta, scaling_factor=rope_hf_config['factor'], device='cpu')
|
52 |
raise ValueError('rope_impl needs to be either dail or hf')
|
53 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
54 |
class MPTPreTrainedModel(PreTrainedModel):
|
55 |
config_class = MPTConfig
|
56 |
base_model_prefix = 'model'
|
@@ -106,10 +184,10 @@ class MPTModel(MPTPreTrainedModel):
|
|
106 |
log.debug(self)
|
107 |
log.debug(f"Using {self.config.init_config['name']} initialization.")
|
108 |
|
109 |
-
def get_input_embeddings(self) -> nn.Embedding:
|
110 |
return self.wte
|
111 |
|
112 |
-
def set_input_embeddings(self, value: nn.Embedding) -> None:
|
113 |
self.wte = value
|
114 |
|
115 |
@torch.no_grad()
|
@@ -130,7 +208,7 @@ class MPTModel(MPTPreTrainedModel):
|
|
130 |
attn_bias = self._apply_prefix_mask(attn_bias, prefix_mask)
|
131 |
if self.attn_uses_sequence_id and sequence_id is not None:
|
132 |
assert isinstance(attn_bias, torch.Tensor)
|
133 |
-
attn_bias =
|
134 |
if attention_mask is not None:
|
135 |
s_k = attention_mask.shape[-1]
|
136 |
if attn_bias is None:
|
@@ -142,7 +220,7 @@ class MPTModel(MPTPreTrainedModel):
|
|
142 |
raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.')
|
143 |
min_val = torch.finfo(attn_bias.dtype).min
|
144 |
attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val)
|
145 |
-
return (attn_bias,
|
146 |
|
147 |
def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor) -> torch.Tensor:
|
148 |
(s_k, s_q) = attn_bias.shape[-2:]
|
@@ -159,17 +237,7 @@ class MPTModel(MPTPreTrainedModel):
|
|
159 |
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
|
160 |
return attn_bias
|
161 |
|
162 |
-
def
|
163 |
-
seq_len = sequence_id.shape[-1]
|
164 |
-
if seq_len > self.config.max_seq_len:
|
165 |
-
raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
|
166 |
-
attn_bias = attn_bias[..., :seq_len, :seq_len]
|
167 |
-
cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1)
|
168 |
-
min_val = torch.finfo(attn_bias.dtype).min
|
169 |
-
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
|
170 |
-
return attn_bias
|
171 |
-
|
172 |
-
def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.Tensor]=None) -> BaseModelOutputWithPast:
|
173 |
return_dict = return_dict if return_dict is not None else self.config.return_dict
|
174 |
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
175 |
if attention_mask is not None:
|
@@ -185,17 +253,25 @@ class MPTModel(MPTPreTrainedModel):
|
|
185 |
raise NotImplementedError('MPT does not support training with left padding.')
|
186 |
if self.prefix_lm and prefix_mask is None:
|
187 |
raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.')
|
188 |
-
if inputs_embeds is not None:
|
189 |
-
raise NotImplementedError('inputs_embeds is not implemented for MPT.')
|
190 |
if self.training:
|
191 |
if self.attn_uses_sequence_id and sequence_id is None:
|
192 |
raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.')
|
193 |
elif self.attn_uses_sequence_id is False and sequence_id is not None:
|
194 |
warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.')
|
195 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
196 |
assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}'
|
197 |
rotary_emb_w_meta_info = None
|
198 |
-
x = self.wte(input_ids)
|
199 |
if self.learned_pos_emb or self.rope:
|
200 |
past_position = 0
|
201 |
if past_key_values is not None:
|
@@ -207,7 +283,7 @@ class MPTModel(MPTPreTrainedModel):
|
|
207 |
if self.learned_pos_emb and S + past_position > self.config.max_seq_len:
|
208 |
raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length ' + f'{S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.')
|
209 |
if self.learned_pos_emb or (self.rope and self.rope_impl == 'hf'):
|
210 |
-
pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=
|
211 |
if attention_mask is not None:
|
212 |
pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0)
|
213 |
if self.learned_pos_emb:
|
@@ -223,6 +299,10 @@ class MPTModel(MPTPreTrainedModel):
|
|
223 |
assert isinstance(self.emb_drop, nn.Module)
|
224 |
x = self.emb_drop(x_shrunk)
|
225 |
(attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id)
|
|
|
|
|
|
|
|
|
226 |
presents = () if use_cache else None
|
227 |
if use_cache and past_key_values is None:
|
228 |
past_key_values = [() for _ in range(self.config.n_layers)]
|
@@ -233,7 +313,7 @@ class MPTModel(MPTPreTrainedModel):
|
|
233 |
assert all_hidden_states is not None
|
234 |
all_hidden_states = all_hidden_states + (x,)
|
235 |
past_key_value = past_key_values[b_idx] if past_key_values is not None else None
|
236 |
-
(x, attn_weights, present) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, rotary_emb_w_meta_info=rotary_emb_w_meta_info, attention_mask=attention_mask, is_causal=self.is_causal, output_attentions=bool(output_attentions))
|
237 |
if presents is not None:
|
238 |
presents += (present,)
|
239 |
if output_attentions:
|
@@ -259,10 +339,12 @@ class MPTForCausalLM(MPTPreTrainedModel):
|
|
259 |
|
260 |
def __init__(self, config: MPTConfig):
|
261 |
super().__init__(config)
|
262 |
-
if not config.tie_word_embeddings:
|
263 |
-
raise ValueError('MPTForCausalLM only supports tied word embeddings')
|
264 |
log.info(f'Instantiating an MPTForCausalLM model from {__file__}')
|
265 |
self.transformer: MPTModel = MPTModel(config)
|
|
|
|
|
|
|
|
|
266 |
for child in self.transformer.children():
|
267 |
if isinstance(child, torch.nn.ModuleList):
|
268 |
continue
|
@@ -278,17 +360,28 @@ class MPTForCausalLM(MPTPreTrainedModel):
|
|
278 |
raise ValueError(f"logit_scale={logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.")
|
279 |
self.logit_scale = logit_scale
|
280 |
|
281 |
-
def get_input_embeddings(self) -> nn.Embedding:
|
282 |
-
return self.transformer.
|
283 |
|
284 |
def set_input_embeddings(self, value: Union[SharedEmbedding, nn.Embedding]) -> None:
|
285 |
-
self.transformer.
|
286 |
|
287 |
-
def get_output_embeddings(self) -> nn.Embedding:
|
288 |
-
|
|
|
|
|
289 |
|
290 |
-
def set_output_embeddings(self, new_embeddings: Union[SharedEmbedding, nn.Embedding]) -> None:
|
291 |
-
self.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
292 |
|
293 |
def set_decoder(self, decoder: MPTModel) -> None:
|
294 |
self.transformer = decoder
|
@@ -296,13 +389,16 @@ class MPTForCausalLM(MPTPreTrainedModel):
|
|
296 |
def get_decoder(self) -> MPTModel:
|
297 |
return self.transformer
|
298 |
|
299 |
-
def forward(self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> CausalLMOutputWithPast:
|
300 |
return_dict = return_dict if return_dict is not None else self.config.return_dict
|
301 |
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
302 |
-
|
303 |
-
|
304 |
-
|
305 |
-
|
|
|
|
|
|
|
306 |
if self.logit_scale is not None:
|
307 |
if self.logit_scale == 0:
|
308 |
warnings.warn(f'Multiplying logits by self.logit_scale={self.logit_scale!r}. This will produce uniform (uninformative) outputs.')
|
@@ -322,11 +418,31 @@ class MPTForCausalLM(MPTPreTrainedModel):
|
|
322 |
return isinstance(module, MPTBlock)
|
323 |
|
324 |
def activation_checkpointing_fn(self, module: nn.Module) -> bool:
|
325 |
-
return isinstance(module, MPTBlock)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
326 |
|
327 |
def prepare_inputs_for_generation(self, input_ids: torch.Tensor, past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]]=None, inputs_embeds: Optional[torch.Tensor]=None, **kwargs: Any) -> Dict[str, Any]:
|
328 |
-
if inputs_embeds is not None:
|
329 |
-
raise NotImplementedError('inputs_embeds is not implemented for MPT yet')
|
330 |
attention_mask = kwargs['attention_mask'].bool()
|
331 |
if attention_mask[:, -1].sum() != attention_mask.shape[0]:
|
332 |
raise NotImplementedError('MPT does not support generation with right padding.')
|
@@ -342,7 +458,12 @@ class MPTForCausalLM(MPTPreTrainedModel):
|
|
342 |
raise NotImplementedError('MPT with prefix_lm=True does not support use_cache=False.')
|
343 |
else:
|
344 |
prefix_mask = None
|
345 |
-
|
|
|
|
|
|
|
|
|
|
|
346 |
|
347 |
@staticmethod
|
348 |
def _reorder_cache(past_key_values: List[Tuple[torch.Tensor, torch.Tensor]], beam_idx: torch.LongTensor) -> List[Tuple[torch.Tensor, ...]]:
|
|
|
19 |
from transformers.models.llama.modeling_llama import LlamaDynamicNTKScalingRotaryEmbedding as HFDynamicNTKScalingRotaryEmbedding
|
20 |
from transformers.models.llama.modeling_llama import LlamaLinearScalingRotaryEmbedding as HFLinearScalingRotaryEmbedding
|
21 |
from transformers.models.llama.modeling_llama import LlamaRotaryEmbedding as HFRotaryEmbedding
|
22 |
+
from .attention import ATTN_CLASS_REGISTRY, attn_bias_shape, build_attn_bias, gen_slopes
|
23 |
from .blocks import MPTBlock
|
24 |
from .custom_embedding import SharedEmbedding
|
25 |
from .fc import FC_CLASS_REGISTRY as FC_CLASS_REGISTRY
|
|
|
51 |
return HFDynamicNTKScalingRotaryEmbedding(rope_head_dim, max_position_embeddings=max_seq_len, base=rope_theta, scaling_factor=rope_hf_config['factor'], device='cpu')
|
52 |
raise ValueError('rope_impl needs to be either dail or hf')
|
53 |
|
54 |
+
def gen_attention_mask_in_length(sequence_id: Union[None, torch.Tensor], S: int, attn_uses_sequence_id: bool, attn_impl: str, attention_mask: Union[torch.Tensor, None]):
|
55 |
+
"""Generates the attention mask used for sequence masking in FA v2.
|
56 |
+
|
57 |
+
Only supports sequence id based sparse attention for no attention masking or attention masking with right padding.
|
58 |
+
In case of left padding:
|
59 |
+
1. Training with left padding is not supported in MPT (see https://github.com/mosaicml/llm-foundry/blob/1eecd4cb8e734499f77f6a35f657b8b20c0adfcb/llmfoundry/models/mpt/modeling_mpt.py#L407).
|
60 |
+
2. For generation with left padding, we only have a single sequence id per sample, so we don't need sequence id based sparse attention.
|
61 |
+
|
62 |
+
Args:
|
63 |
+
sequence_id (Union[None, torch.Tensor]): Tensor containing the sequence id for each token. Shape (batch_size, seq_len).
|
64 |
+
S (int): Sequence length
|
65 |
+
attn_uses_sequence_id (bool): Whether the attention uses sequence id based masking.
|
66 |
+
attn_impl (str): Attention implementation. This function only creates attention_mask_in_length for flash attention.
|
67 |
+
attention_mask (Union[torch.Tensor, None]): Attention mask tensor of shape (batch_size, seq_len).
|
68 |
+
|
69 |
+
Returns:
|
70 |
+
attention_mask_in_length: (batch, seqlen), int, a nonzero number (e.g., 1, 2, 3, etc.) means length of concatenated sequence in b-th batch, and 0 means none. For example, if batch = 3 and seqlen = 6, the attention_mask_in_length is:
|
71 |
+
```
|
72 |
+
[
|
73 |
+
[2, 3, 0, 0, 0, 0],
|
74 |
+
[3, 2, 0, 0, 0, 0],
|
75 |
+
[6, 0, 0, 0, 0, 0]
|
76 |
+
]
|
77 |
+
```
|
78 |
+
, which refers to the 3D-attention mask:
|
79 |
+
```
|
80 |
+
[
|
81 |
+
[
|
82 |
+
[1, 0, 0, 0, 0, 0],
|
83 |
+
[1, 1, 0, 0, 0, 0],
|
84 |
+
[0, 0, 1, 0, 0, 0],
|
85 |
+
[0, 0, 1, 1, 0, 0],
|
86 |
+
[0, 0, 1, 1, 1, 0],
|
87 |
+
[0, 0, 0, 0, 0, 1]
|
88 |
+
],
|
89 |
+
[
|
90 |
+
[1, 0, 0, 0, 0, 0],
|
91 |
+
[1, 1, 0, 0, 0, 0],
|
92 |
+
[1, 1, 1, 0, 0, 0],
|
93 |
+
[0, 0, 0, 1, 0, 0],
|
94 |
+
[0, 0, 0, 1, 1, 0],
|
95 |
+
[0, 0, 0, 0, 0, 1]
|
96 |
+
],
|
97 |
+
[
|
98 |
+
[1, 0, 0, 0, 0, 0],
|
99 |
+
[1, 1, 0, 0, 0, 0],
|
100 |
+
[1, 1, 1, 0, 0, 0],
|
101 |
+
[1, 1, 1, 1, 0, 0],
|
102 |
+
[1, 1, 1, 1, 1, 0],
|
103 |
+
[1, 1, 1, 1, 1, 1]
|
104 |
+
]
|
105 |
+
]
|
106 |
+
```.
|
107 |
+
(The description above is taken verbatim from https://github.com/Dao-AILab/flash-attention/blob/9356a1c0389660d7e231ff3163c1ac17d9e3824a/flash_attn/bert_padding.py#L125 .)
|
108 |
+
"""
|
109 |
+
attention_mask_in_length = None
|
110 |
+
if sequence_id is not None and attn_uses_sequence_id and (attn_impl == 'flash'):
|
111 |
+
if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0]:
|
112 |
+
raise NotImplementedError('Left padding is not supported with flash attention when attn_uses_sequence_id is set to True.')
|
113 |
+
if S != sequence_id.shape[-1]:
|
114 |
+
raise ValueError(f'Sequence length ({S}) does not match length of sequences in sequence_id ({sequence_id.shape[-1]}).')
|
115 |
+
attention_mask_in_length = torch.nn.functional.one_hot(sequence_id)
|
116 |
+
if attention_mask is not None:
|
117 |
+
attention_mask_in_length = attention_mask_in_length.masked_fill(~attention_mask.unsqueeze(-1), 0)
|
118 |
+
attention_mask_in_length = attention_mask_in_length.sum(dim=1)
|
119 |
+
attention_mask_in_length = torch.nn.functional.pad(attention_mask_in_length, (0, S - attention_mask_in_length.shape[-1]), mode='constant', value=0)
|
120 |
+
return attention_mask_in_length
|
121 |
+
|
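As a quick sanity check of the docstring above, the same one-hot/mask/sum/pad steps reproduce the first row of the documented example; the tensors below are illustrative and not part of this change:
```
import torch
import torch.nn.functional as F

# Illustrative packed batch: seqlen S=6, two sequences of lengths 2 and 3,
# followed by one right-padded token (hypothetical values, not from the diff).
S = 6
sequence_id = torch.tensor([[0, 0, 1, 1, 1, 1]])
attention_mask = torch.tensor([[1, 1, 1, 1, 1, 0]], dtype=torch.bool)

lengths = F.one_hot(sequence_id)                                 # (1, 6, num_sequences)
lengths = lengths.masked_fill(~attention_mask.unsqueeze(-1), 0)  # drop padded tokens
lengths = lengths.sum(dim=1)                                     # tokens per sequence
lengths = F.pad(lengths, (0, S - lengths.shape[-1]), value=0)    # pad out to seqlen
print(lengths)  # tensor([[2, 3, 0, 0, 0, 0]])
```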
122 |
+
def apply_sequence_id(attn_bias: torch.Tensor, sequence_id: torch.LongTensor, max_seq_len: int) -> torch.Tensor:
|
123 |
+
seq_len = sequence_id.shape[-1]
|
124 |
+
if seq_len > max_seq_len:
|
125 |
+
raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={max_seq_len}')
|
126 |
+
attn_bias = attn_bias[..., :seq_len, :seq_len]
|
127 |
+
cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1)
|
128 |
+
min_val = torch.finfo(attn_bias.dtype).min
|
129 |
+
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
|
130 |
+
return attn_bias
|
131 |
+
|
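A minimal sketch of what this newly factored-out helper does; the bias and sequence ids below are made up for illustration:
```
import torch

# Hypothetical inputs: seqlen 4 with two packed sequences ([0, 0] and [1, 1])
# and an all-zero additive bias of shape (batch, heads, seqlen, seqlen).
sequence_id = torch.tensor([[0, 0, 1, 1]])
attn_bias = torch.zeros(1, 1, 4, 4)

masked = apply_sequence_id(attn_bias, sequence_id, max_seq_len=4)
# Cross-sequence positions are filled with torch.finfo(dtype).min, so only
# within-sequence entries remain 0:
print((masked == 0).int().squeeze())
# tensor([[1, 1, 0, 0],
#         [1, 1, 0, 0],
#         [0, 0, 1, 1],
#         [0, 0, 1, 1]])
```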
132 |
class MPTPreTrainedModel(PreTrainedModel):
|
133 |
config_class = MPTConfig
|
134 |
base_model_prefix = 'model'
|
|
|
184 |
log.debug(self)
|
185 |
log.debug(f"Using {self.config.init_config['name']} initialization.")
|
186 |
|
187 |
+
def get_input_embeddings(self) -> Union[SharedEmbedding, nn.Embedding]:
|
188 |
return self.wte
|
189 |
|
190 |
+
def set_input_embeddings(self, value: Union[SharedEmbedding, nn.Embedding]) -> None:
|
191 |
self.wte = value
|
192 |
|
193 |
@torch.no_grad()
|
|
|
208 |
attn_bias = self._apply_prefix_mask(attn_bias, prefix_mask)
|
209 |
if self.attn_uses_sequence_id and sequence_id is not None:
|
210 |
assert isinstance(attn_bias, torch.Tensor)
|
211 |
+
attn_bias = apply_sequence_id(attn_bias, sequence_id, self.config.max_seq_len)
|
212 |
if attention_mask is not None:
|
213 |
s_k = attention_mask.shape[-1]
|
214 |
if attn_bias is None:
|
|
|
220 |
raise ValueError(f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.')
|
221 |
min_val = torch.finfo(attn_bias.dtype).min
|
222 |
attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val)
|
223 |
+
return (attn_bias, attention_mask)
|
224 |
|
225 |
def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor) -> torch.Tensor:
|
226 |
(s_k, s_q) = attn_bias.shape[-2:]
|
|
|
237 |
attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
|
238 |
return attn_bias
|
239 |
|
240 |
+
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.Tensor]=None) -> BaseModelOutputWithPast:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
241 |
return_dict = return_dict if return_dict is not None else self.config.return_dict
|
242 |
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
243 |
if attention_mask is not None:
|
|
|
253 |
raise NotImplementedError('MPT does not support training with left padding.')
|
254 |
if self.prefix_lm and prefix_mask is None:
|
255 |
raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.')
|
|
|
|
|
256 |
if self.training:
|
257 |
if self.attn_uses_sequence_id and sequence_id is None:
|
258 |
raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.')
|
259 |
elif self.attn_uses_sequence_id is False and sequence_id is not None:
|
260 |
warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.')
|
261 |
+
if input_ids is not None and inputs_embeds is not None:
|
262 |
+
raise ValueError('You cannot specify both input_ids and inputs_embeds.')
|
263 |
+
elif input_ids is not None:
|
264 |
+
S = input_ids.size(1)
|
265 |
+
x = self.wte(input_ids)
|
266 |
+
input_device = input_ids.device
|
267 |
+
elif inputs_embeds is not None:
|
268 |
+
S = inputs_embeds.size(1)
|
269 |
+
x = inputs_embeds
|
270 |
+
input_device = inputs_embeds.device
|
271 |
+
else:
|
272 |
+
raise ValueError('You must specify input_ids or inputs_embeds')
|
273 |
assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}'
|
274 |
rotary_emb_w_meta_info = None
|
|
|
275 |
if self.learned_pos_emb or self.rope:
|
276 |
past_position = 0
|
277 |
if past_key_values is not None:
|
|
|
283 |
if self.learned_pos_emb and S + past_position > self.config.max_seq_len:
|
284 |
raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length ' + f'{S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.')
|
285 |
if self.learned_pos_emb or (self.rope and self.rope_impl == 'hf'):
|
286 |
+
pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_device).unsqueeze(0)
|
287 |
if attention_mask is not None:
|
288 |
pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0)
|
289 |
if self.learned_pos_emb:
|
|
|
299 |
assert isinstance(self.emb_drop, nn.Module)
|
300 |
x = self.emb_drop(x_shrunk)
|
301 |
(attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id)
|
302 |
+
attention_mask_in_length = gen_attention_mask_in_length(sequence_id=sequence_id, S=S, attn_uses_sequence_id=self.attn_uses_sequence_id, attn_impl=self.attn_impl, attention_mask=attention_mask)
|
303 |
+
alibi_slopes = None
|
304 |
+
if self.alibi and self.attn_impl == 'flash':
|
305 |
+
alibi_slopes = gen_slopes(n_heads=self.config.n_heads, alibi_bias_max=self.alibi_bias_max, device=x.device, return_1d=True)
|
306 |
presents = () if use_cache else None
|
307 |
if use_cache and past_key_values is None:
|
308 |
past_key_values = [() for _ in range(self.config.n_layers)]
|
|
|
313 |
assert all_hidden_states is not None
|
314 |
all_hidden_states = all_hidden_states + (x,)
|
315 |
past_key_value = past_key_values[b_idx] if past_key_values is not None else None
|
316 |
+
(x, attn_weights, present) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, rotary_emb_w_meta_info=rotary_emb_w_meta_info, attention_mask=attention_mask, is_causal=self.is_causal, output_attentions=bool(output_attentions), attention_mask_in_length=attention_mask_in_length, alibi_slopes=alibi_slopes)
|
317 |
if presents is not None:
|
318 |
presents += (present,)
|
319 |
if output_attentions:
|
|
|
339 |
|
340 |
def __init__(self, config: MPTConfig):
|
341 |
super().__init__(config)
|
|
|
|
|
342 |
log.info(f'Instantiating an MPTForCausalLM model from {__file__}')
|
343 |
self.transformer: MPTModel = MPTModel(config)
|
344 |
+
self.lm_head = None
|
345 |
+
if not config.tie_word_embeddings:
|
346 |
+
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False, device=config.init_device)
|
347 |
+
self.lm_head._fsdp_wrap = True
|
348 |
for child in self.transformer.children():
|
349 |
if isinstance(child, torch.nn.ModuleList):
|
350 |
continue
|
|
|
360 |
raise ValueError(f"logit_scale={logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.")
|
361 |
self.logit_scale = logit_scale
|
362 |
|
363 |
+
def get_input_embeddings(self) -> Union[SharedEmbedding, nn.Embedding]:
|
364 |
+
return self.transformer.get_input_embeddings()
|
365 |
|
366 |
def set_input_embeddings(self, value: Union[SharedEmbedding, nn.Embedding]) -> None:
|
367 |
+
self.transformer.set_input_embeddings(value)
|
368 |
|
369 |
+
def get_output_embeddings(self) -> Union[SharedEmbedding, nn.Embedding, nn.Linear]:
|
370 |
+
if self.lm_head is not None:
|
371 |
+
return self.lm_head
|
372 |
+
return self.transformer.get_input_embeddings()
|
373 |
|
374 |
+
def set_output_embeddings(self, new_embeddings: Union[SharedEmbedding, nn.Embedding, nn.Linear]) -> None:
|
375 |
+
if self.lm_head is not None:
|
376 |
+
self.lm_head = new_embeddings
|
377 |
+
else:
|
378 |
+
if not isinstance(new_embeddings, (SharedEmbedding, nn.Embedding)):
|
379 |
+
raise ValueError('new_embeddings must be an instance of SharedEmbedding ' + f'or nn.Embedding, but got {type(new_embeddings)}.')
|
380 |
+
warnings.warn('Using `set_output_embeddings` to set the embedding layer of ' + 'MPTForCausalLM with tied weights. Given weights are tied, ' + 'using `set_input_embeddings` is recommended over using ' + '`set_output_embeddings`.')
|
381 |
+
self.transformer.set_input_embeddings(new_embeddings)
|
382 |
+
|
383 |
+
def tie_weights(self) -> None:
|
384 |
+
self.lm_head = None
|
385 |
|
386 |
def set_decoder(self, decoder: MPTModel) -> None:
|
387 |
self.transformer = decoder
|
|
|
389 |
def get_decoder(self) -> MPTModel:
|
390 |
return self.transformer
|
391 |
|
392 |
+
def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> CausalLMOutputWithPast:
|
393 |
return_dict = return_dict if return_dict is not None else self.config.return_dict
|
394 |
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
395 |
+
outputs = self.transformer(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id, return_dict=return_dict, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache, inputs_embeds=inputs_embeds)
|
396 |
+
if self.lm_head is not None:
|
397 |
+
logits = self.lm_head(outputs.last_hidden_state)
|
398 |
+
else:
|
399 |
+
out = outputs.last_hidden_state
|
400 |
+
out = out.to(self.transformer.wte.weight.device)
|
401 |
+
logits = self.transformer.wte(out, True)
|
402 |
if self.logit_scale is not None:
|
403 |
if self.logit_scale == 0:
|
404 |
warnings.warn(f'Multiplying logits by self.logit_scale={self.logit_scale!r}. This will produce uniform (uninformative) outputs.')
|
|
|
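The two logit paths introduced above can be summarized as follows; this is a hedged sketch with toy sizes and plain nn modules, not the model's actual classes:
```
import torch
import torch.nn as nn
import torch.nn.functional as F

# Toy stand-ins (hypothetical sizes) for the tied and untied output heads.
d_model, vocab_size = 8, 32
wte = nn.Embedding(vocab_size, d_model)               # stands in for SharedEmbedding
lm_head = nn.Linear(d_model, vocab_size, bias=False)  # built only when embeddings are untied

hidden = torch.randn(2, 5, d_model)                   # stand-in for outputs.last_hidden_state

tied_logits = F.linear(hidden, wte.weight)  # roughly what wte(out, True) does when weights are tied
untied_logits = lm_head(hidden)             # the new lm_head path added in this commit
assert tied_logits.shape == untied_logits.shape == (2, 5, vocab_size)
```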
418 |
return isinstance(module, MPTBlock)
|
419 |
|
420 |
def activation_checkpointing_fn(self, module: nn.Module) -> bool:
|
421 |
+
act_ckpt_list = getattr(self.config, 'activation_checkpointing_target', None) or ['MPTBlock']
|
422 |
+
if isinstance(act_ckpt_list, str):
|
423 |
+
act_ckpt_list = [act_ckpt_list]
|
424 |
+
elif not isinstance(act_ckpt_list, list):
|
425 |
+
raise ValueError(f'activation_checkpointing_target must be either a single string or a list, but got {type(act_ckpt_list)}')
|
426 |
+
if 'MPTBlock' in act_ckpt_list or 'mptblock' in act_ckpt_list:
|
427 |
+
if len(act_ckpt_list) > 1:
|
428 |
+
log.info('Activation checkpointing MPTBlock only (ignoring other sub-block modules specified in activation_checkpointing_target).')
|
429 |
+
return isinstance(module, MPTBlock)
|
430 |
+
mod_types = ()
|
431 |
+
for mod_name in act_ckpt_list:
|
432 |
+
if mod_name.lower() == 'mptblock':
|
433 |
+
mod_types += (MPTBlock,)
|
434 |
+
elif mod_name in ATTN_CLASS_REGISTRY:
|
435 |
+
mod_types += (ATTN_CLASS_REGISTRY[mod_name],)
|
436 |
+
elif mod_name in FFN_CLASS_REGISTRY:
|
437 |
+
mod_types += (FFN_CLASS_REGISTRY[mod_name],)
|
438 |
+
elif mod_name in NORM_CLASS_REGISTRY:
|
439 |
+
mod_types += (NORM_CLASS_REGISTRY[mod_name],)
|
440 |
+
else:
|
441 |
+
msg = ', '.join(list(ATTN_CLASS_REGISTRY.keys()) + list(FFN_CLASS_REGISTRY.keys()) + list(NORM_CLASS_REGISTRY.keys()) + ['MPTBlock'])
|
442 |
+
raise ValueError(f'{mod_name} (specified in activation_checkpointing_target) is not a recognized option out of available options {msg}.')
|
443 |
+
return isinstance(module, mod_types)
|
444 |
|
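The new targeting logic boils down to a registry lookup followed by an isinstance check; the sketch below uses a made-up registry and module names purely for illustration (the real lookup goes through ATTN_CLASS_REGISTRY, FFN_CLASS_REGISTRY, and NORM_CLASS_REGISTRY as above):
```
import torch.nn as nn

# Hypothetical registry; the keys 'toy_norm' and 'toy_ffn' are illustrative only.
TOY_REGISTRY = {'toy_norm': nn.LayerNorm, 'toy_ffn': nn.Linear}

def toy_activation_checkpointing_fn(module: nn.Module, targets) -> bool:
    # Accept a single name or a list of names, mirroring the logic above.
    if isinstance(targets, str):
        targets = [targets]
    mod_types = tuple(TOY_REGISTRY[name] for name in targets)
    return isinstance(module, mod_types)

assert toy_activation_checkpointing_fn(nn.LayerNorm(4), 'toy_norm')
assert not toy_activation_checkpointing_fn(nn.Linear(4, 4), ['toy_norm'])
```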
445 |
def prepare_inputs_for_generation(self, input_ids: torch.Tensor, past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]]=None, inputs_embeds: Optional[torch.Tensor]=None, **kwargs: Any) -> Dict[str, Any]:
|
|
|
|
|
446 |
attention_mask = kwargs['attention_mask'].bool()
|
447 |
if attention_mask[:, -1].sum() != attention_mask.shape[0]:
|
448 |
raise NotImplementedError('MPT does not support generation with right padding.')
|
|
|
458 |
raise NotImplementedError('MPT with prefix_lm=True does not support use_cache=False.')
|
459 |
else:
|
460 |
prefix_mask = None
|
461 |
+
if inputs_embeds is not None and past_key_values is None:
|
462 |
+
model_inputs = {'inputs_embeds': inputs_embeds}
|
463 |
+
else:
|
464 |
+
model_inputs = {'input_ids': input_ids}
|
465 |
+
model_inputs.update({'attention_mask': attention_mask, 'prefix_mask': prefix_mask, 'sequence_id': sequence_id, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache', True)})
|
466 |
+
return model_inputs
|
467 |
|
468 |
@staticmethod
|
469 |
def _reorder_cache(past_key_values: List[Tuple[torch.Tensor, torch.Tensor]], beam_idx: torch.LongTensor) -> List[Tuple[torch.Tensor, ...]]:
|