Create modeling_gpt2_mq.py
modeling_gpt2_mq.py  +346 -0
ADDED
@@ -0,0 +1,346 @@
"""PyTorch OpenAI GPT-2 model modified with MultiQuery attention"""


import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.cuda.amp import autocast
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from transformers.modeling_utils import PreTrainedModel, SequenceSummary
from transformers.pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer

from transformers.utils import (
    ModelOutput,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from transformers.utils.model_parallel_utils import assert_device_map, get_device_map
from transformers.models.gpt2.modeling_gpt2 import GPT2Model, GPT2Block, GPT2PreTrainedModel, GPT2LMHeadModel
from .configuration_gpt2_mq import GPT2CustomConfig, MULTI_QUERY, MULTI_HEAD


class GPT2MQAttention(nn.Module):
    def __init__(self, config, is_cross_attention=False, layer_idx=None):
        super().__init__()
        assert config.attention_head_type == MULTI_QUERY

        max_positions = config.max_position_embeddings
        self.register_buffer(
            "bias",
            torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
                1, 1, max_positions, max_positions
            ),
        )
        self.register_buffer("masked_bias", torch.tensor(-1e4))

        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        self.split_size = self.embed_dim
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )

        self.scale_attn_weights = config.scale_attn_weights
        if is_cross_attention:
            raise NotImplementedError("Cross-attention not implemented for MQA")
        self.is_cross_attention = is_cross_attention

        # Layer-wise attention scaling, reordering, and upcasting
        self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
        self.layer_idx = layer_idx
        self.reorder_and_upcast_attn = config.reorder_and_upcast_attn

        if self.is_cross_attention:
            self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
            self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
        else:
            # self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
            self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
            # Keys and values are shared across heads
            self.kv_attn = Conv1D(2 * self.head_dim, self.embed_dim)
        self.c_proj = Conv1D(self.embed_dim, self.embed_dim)

        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

        self.pruned_heads = set()

    def prune_heads(self, heads):
        # NOTE: inherited from the multi-head implementation; it references a fused
        # `c_attn` projection that does not exist in the multi-query layout, so head
        # pruning is effectively unsupported here.
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
        index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])

        # Prune conv1d layers
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)

        # Update hyper params
        self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads))
        self.num_heads = self.num_heads - len(heads)
        self.pruned_heads = self.pruned_heads.union(heads)

    def _attn(self, query, key, value, attention_mask=None, head_mask=None):
        # query: (b, num_heads * sq, head_dim)
        # key: (b, head_dim, sk)
        # value: (b, sk, head_dim)
        batch_size = query.size(0)
        query_length = query.size(1) // self.num_heads
        key_length = key.size(2)
        # (b, num_heads * sq, head_dim) x (b, head_dim, sk) -> (b, num_heads * sq, sk)
        attn_weights = torch.bmm(query, key)
        # -> (b, num_heads, sq, sk)
        attn_weights = attn_weights.view(batch_size, self.num_heads, query_length, key_length)

        if self.scale_attn_weights:
            attn_weights = attn_weights / torch.tensor(
                value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device
            )

        # Layer-wise attention scaling
        if self.scale_attn_by_inverse_layer_idx:
            attn_weights = attn_weights / float(self.layer_idx + 1)

        if not self.is_cross_attention:
            # if only "normal" attention layer implements causal mask
            causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].to(torch.bool)
            mask_value = torch.finfo(attn_weights.dtype).min
            # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
            # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
            mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
            attn_weights = torch.where(causal_mask, attn_weights, mask_value)

        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        # (b, num_heads, sq, sk) -> (b, num_heads * sq, sk)
        _attn_weights = attn_weights.view(batch_size, self.num_heads * query_length, key_length)
        # (b, num_heads * sq, sk) x (b, sk, head_dim) -> (b, num_heads * sq, head_dim)
        attn_output = torch.bmm(_attn_weights, value)
        attn_output = attn_output.view(batch_size, self.num_heads, query_length, self.head_dim)

        return attn_output, attn_weights

    def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None):
        # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
        bsz, num_heads, q_seq_len, dk = query.size()
        _, _, k_seq_len, _ = key.size()

        # Preallocate attn_weights for `baddbmm`
        attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)

        # Compute Scale Factor
        scale_factor = 1.0
        if self.scale_attn_weights:
            scale_factor /= float(value.size(-1)) ** 0.5

        if self.scale_attn_by_inverse_layer_idx:
            scale_factor /= float(self.layer_idx + 1)

        # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk))
        with autocast(enabled=False):
            q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
            attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
            attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)

        if not self.is_cross_attention:
            # if only "normal" attention layer implements causal mask
            query_length, key_length = query.size(-2), key.size(-2)
            causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length].bool()
            mask_value = torch.finfo(attn_weights.dtype).min
            # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
            # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
            mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
            attn_weights = torch.where(causal_mask, attn_weights, mask_value)

        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op if otherwise
        if attn_weights.dtype != torch.float32:
            raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32")
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        # Mask heads if we want to
        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        attn_output = torch.matmul(attn_weights, value)

        return attn_output, attn_weights

    def _split_heads(self, tensor, num_heads, attn_head_size):
        """
        Splits hidden_size dim into attn_head_size and num_heads
        """
        new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
        tensor = tensor.view(new_shape)
        return tensor.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)

    def _merge_heads(self, tensor, num_heads, attn_head_size):
        """
        Merges attn_head_size dim and num_attn_heads dim into hidden_size
        """
        tensor = tensor.permute(0, 2, 1, 3).contiguous()
        new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
        return tensor.view(new_shape)

    def forward(
        self,
        hidden_states: Optional[Tuple[torch.FloatTensor]],
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]:
        if encoder_hidden_states is not None:
            raise NotImplementedError("Cross-attention not implemented for MQA")
            # Unreachable: kept from the original GPT2Attention cross-attention path.
            if not hasattr(self, "q_attn"):
                raise ValueError(
                    "If class is used as cross attention, the weights `q_attn` have to be defined. "
                    "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`."
                )

            query = self.q_attn(hidden_states)
            key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
            attention_mask = encoder_attention_mask
        else:
            query = self.q_attn(hidden_states)
            # Keys and values are projected once to a single head of size head_dim
            key, value = self.kv_attn(hidden_states).split(self.head_dim, dim=2)

        batch_size, seq_length = query.shape[:2]
        # (batch, query_length, hidden_size) -> (batch, num_heads, query_length, head_dim)
        query = query.view(batch_size, seq_length, self.num_heads, self.head_dim).permute([0, 2, 1, 3])
        # -> (batch, num_heads * query_length, head_dim)
        query = query.reshape(batch_size, self.num_heads * seq_length, self.head_dim)

        key = key.permute(0, 2, 1)  # (batch_size, head_dim, seq_length)
        # value: (batch_size, seq_length, head_dim)

        if layer_past is not None:
            past_key, past_value = layer_past
            # Concatenate on the sequence dimension
            key = torch.cat((past_key, key), dim=-1)
            value = torch.cat((past_value, value), dim=-2)

        if use_cache is True:
            present = (key, value)
        else:
            present = None

        if self.reorder_and_upcast_attn:
            raise NotImplementedError("Reorder and upcast attention not implemented for MQA")
            attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask)
        else:
            attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)

        attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
        attn_output = self.c_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)

        outputs = (attn_output, present)
        if output_attentions:
            outputs += (attn_weights,)

        return outputs  # a, present, (attentions)


# Inherit from modeling_gpt2.py and override the `attn` module
class GPT2CustomBlock(GPT2Block):
    def __init__(self, config: GPT2CustomConfig, layer_idx=None):
        super().__init__(config, layer_idx)
        # Override the attention module if using multi-query attention
        if config.attention_head_type == MULTI_QUERY:
            self.attn = GPT2MQAttention(config, layer_idx=layer_idx)
            if config.add_cross_attention:
                raise NotImplementedError("Cross-attention not implemented for MQA")


# Inherit from modeling_gpt2.py and override the `__init__` method
class GPT2CustomModel(GPT2Model):
    config_class = GPT2CustomConfig

    def __init__(self, config):
        GPT2PreTrainedModel.__init__(self, config)

        self.embed_dim = config.hidden_size

        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)

        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([GPT2CustomBlock(config, layer_idx=i) for i in range(config.num_hidden_layers)])
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)

        # Model parallel
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()


class GPT2LMHeadCustomModel(GPT2LMHeadModel):
    config_class = GPT2CustomConfig

    def __init__(self, config):
        GPT2PreTrainedModel.__init__(self, config)
        self.transformer = GPT2CustomModel(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()
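
Usage sketch (not part of the committed file): a minimal way to exercise these classes, assuming configuration_gpt2_mq.py sits next to this file in an importable package (called gpt2_mq below as a placeholder) so the relative import resolves, that GPT2CustomConfig extends GPT2Config with an attention_head_type field, and that the installed transformers version is the one this file targets. All sizes are illustrative.

import torch

from gpt2_mq.configuration_gpt2_mq import GPT2CustomConfig, MULTI_QUERY  # placeholder package name
from gpt2_mq.modeling_gpt2_mq import GPT2LMHeadCustomModel

# Tiny illustrative config; attention_head_type selects the multi-query path.
config = GPT2CustomConfig(
    vocab_size=1024,
    n_positions=256,
    n_embd=256,
    n_layer=2,
    n_head=8,
    attention_head_type=MULTI_QUERY,
)
model = GPT2LMHeadCustomModel(config)
model.eval()

input_ids = torch.randint(0, config.vocab_size, (2, 16))
with torch.no_grad():
    out = model(input_ids, use_cache=True)

print(out.logits.shape)  # torch.Size([2, 16, 1024])

# With multi-query attention each layer caches a single shared key/value head:
# key is (batch, head_dim, seq_len), value is (batch, seq_len, head_dim).
past_key, past_value = out.past_key_values[0]
print(past_key.shape, past_value.shape)  # torch.Size([2, 32, 16]) torch.Size([2, 16, 32])

If the repository's config.json maps these classes under auto_map, the same model could instead be loaded from the Hub with AutoModelForCausalLM.from_pretrained(..., trust_remote_code=True).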