import math
import os
import random
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint
from einops import repeat
from torch.cuda.amp import autocast
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from transformers.modeling_utils import PreTrainedModel, SequenceSummary
from transformers.utils import ModelOutput, logging
from transformers.utils.model_parallel_utils import (
    assert_device_map,
    get_device_map,
)

from .configuration_shrink import ShrinkConfig

logger = logging.get_logger(__name__)

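# Fixed sinusoidal position embeddings in the style of "Attention Is All You
# Need": a (1, max_seq_length, embedding_dim) table is precomputed once and
# registered as a non-persistent buffer, so it moves with the module but is
# not stored in checkpoints.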
class SinusoidalPositional(torch.nn.Module):
    def __init__(self, embedding_dim, max_seq_length=5000):
        super().__init__()
        pe = torch.zeros(max_seq_length, embedding_dim)
        position = torch.arange(0, max_seq_length, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, embedding_dim, 2).float()
            * (-math.log(10000.0) / embedding_dim)
        )
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.register_buffer("pe", pe, persistent=False)

    def forward(self, input_ids):
        return self.pe[:, : input_ids.shape[1], :]

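# Same sinusoidal table, but multiplied by a learnable scalar initialized to
# 1/sqrt(embedding_dim).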
class ScaledSinusoidal(SinusoidalPositional):
    def __init__(self, embedding_dim, max_seq_length):
        super().__init__(embedding_dim, max_seq_length)
        self.scale_factor = torch.nn.Parameter(
            torch.tensor([1.0 / embedding_dim**0.5])
        )

    def forward(self, input_ids):
        return self.scale_factor * self.pe[:, : input_ids.shape[1], :]

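# Multi-head attention built on F.scaled_dot_product_attention. Depending on
# the config it either uses one fused qkv projection or separate
# query/key/value projections; the separate path also accepts key/value inputs
# of width config.qk_hidden_size (e.g. attending over a narrower state).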
class ShrinkAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.head_dim = config.hidden_size // config.num_attention_heads
        assert (
            self.head_dim * config.num_attention_heads == config.hidden_size
        ), "d_model must be divisible by n_head"
        self.use_bias = config.use_bias

        if not config.combined_qkv or config.qk_hidden_size is not None:
            self.query = nn.Linear(
                config.hidden_size, config.hidden_size, bias=self.use_bias
            )
            self.key = nn.Linear(
                config.hidden_size
                if not config.qk_hidden_size
                else config.qk_hidden_size,
                config.hidden_size,
                bias=self.use_bias,
            )
            self.value = nn.Linear(
                config.hidden_size
                if not config.qk_hidden_size
                else config.qk_hidden_size,
                config.hidden_size,
                bias=self.use_bias,
            )
        else:
            self.qkv = nn.Linear(
                config.hidden_size, config.hidden_size * 3, bias=self.use_bias
            )
        self.out = nn.Linear(
            config.hidden_size, config.hidden_size, bias=self.use_bias
        )

    def forward(self, x0, x1=None, causal=False, mask=None):
        batch_size = x0.size(0)

        def split_heads(x):
            return x.view(
                batch_size, -1, self.config.num_attention_heads, self.head_dim
            ).transpose(1, 2)

        # Mirror the projection layout chosen in __init__: separate q/k/v
        # projections are used unless a fused qkv projection was created.
        if not self.config.combined_qkv or self.config.qk_hidden_size is not None:
            q = split_heads(self.query(x0))
            k = split_heads(self.key(x1) if x1 is not None else self.key(x0))
            v = split_heads(self.value(x1 if x1 is not None else x0))
        else:
            q, k, v = self.qkv(x0).chunk(3, dim=-1)
            q = split_heads(q)
            k = split_heads(k)
            v = split_heads(v)

        # SDPA does not accept an explicit mask together with is_causal, so the
        # additive padding mask is only applied on the non-causal path.
        attn_output = F.scaled_dot_product_attention(
            q,
            k,
            v,
            attn_mask=None if causal else mask,
            dropout_p=0.0,
            is_causal=causal,
        )
        attn_output = (
            attn_output.transpose(1, 2)
            .contiguous()
            .view(batch_size, -1, self.config.hidden_size)
        )
        return self.out(attn_output)

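# Gated feed-forward network (SwiGLU/GeGLU-style): the up projection is
# modulated by an activated gate projection before being projected back down
# to the hidden size.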
class ShrinkGLU(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.gate_proj = nn.Linear(
            config.hidden_size, config.intermediate_size, bias=False
        )
        self.up_proj = nn.Linear(
            config.hidden_size, config.intermediate_size, bias=False
        )
        self.down_proj = nn.Linear(
            config.intermediate_size, config.hidden_size, bias=False
        )
        self.act_fn = ACT2FN[config.activation_function]

    def forward(self, x):
        return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))

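# Pre-norm transformer block: LayerNorm -> causal self-attention -> residual,
# then LayerNorm -> gated FFN -> residual.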
class ShrinkBlock(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.attn = ShrinkAttention(config)
        self.ffn = ShrinkGLU(config)
        self.ln1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
        self.ln2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)

    def forward(self, x, mask=None):
        x = x + self.attn(self.ln1(x), causal=True, mask=mask)
        x = x + self.ffn(self.ln2(x))
        return x

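# Shared base class: wires ShrinkConfig into the Hugging Face loading/saving
# machinery and provides standard weight initialization.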
class ShrinkPreTrainedModel(PreTrainedModel):
    config_class = ShrinkConfig
    base_model_prefix = "transformer"
    is_parallelizable = False
    supports_gradient_checkpointing = True
    _no_split_modules = ["ShrinkBlock"]
    _skip_keys_device_placement = "past_key_values"

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ShrinkModel):
            module.gradient_checkpointing = value

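# Backbone model. The token embedding is factorized (vocab_size ->
# hidden_size_0 -> hidden_size) to shrink the embedding matrix, positions use
# scaled sinusoidal embeddings, and blocks can be randomly skipped during
# training (LayerDrop-style) via config.layer_dropout_prob.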
class ShrinkModel(ShrinkPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.wte = nn.Sequential(
            nn.Embedding(config.vocab_size, config.hidden_size_0),
            nn.Linear(config.hidden_size_0, config.hidden_size),
        )
        self.wpe = ScaledSinusoidal(config.hidden_size, config.max_position_embeddings)
        self.wln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
        self.h = nn.ModuleList(
            [ShrinkBlock(config) for _ in range(config.num_hidden_layers)]
        )
        self.ln_f = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
        self.model_parallel = False
        self.device_map = None
        self.gradient_checkpointing = False
        self.post_init()

    def get_input_embeddings(self):
        return self.wte[0]

    def set_input_embeddings(self, new_embeddings):
        self.wte[0] = new_embeddings

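    # The forward pass follows the usual GPT-2-style contract: validate
    # input_ids/inputs_embeds, build position ids and an additive attention
    # mask, embed, run the blocks, and return either a tuple or a
    # BaseModelOutputWithPastAndCrossAttentions. No key/value cache is built,
    # so past_key_values is always returned as None.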
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        output_attentions = (
            output_attentions
            if output_attentions is not None
            else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError(
                "You cannot specify both input_ids and inputs_embeds at the same time"
            )
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1])

        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            past_length = past_key_values[0][0].size(-2)
        if position_ids is None:
            position_ids = torch.arange(
                past_length,
                input_shape[-1] + past_length,
                dtype=torch.long,
                device=device,
            )
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])

        if attention_mask is not None:
            if batch_size <= 0:
                raise ValueError("batch_size has to be defined and > 0")
            # Convert the 0/1 padding mask into an additive float mask of shape
            # (batch, 1, 1, seq): 0.0 where attention is allowed, a large
            # negative value where it is masked.
            attention_mask = attention_mask.view(batch_size, -1)
            attention_mask = attention_mask[:, None, None, :]
            attention_mask = attention_mask.to(dtype=self.dtype)
            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min

        if self.config.add_cross_attention and encoder_hidden_states is not None:
            (
                encoder_batch_size,
                encoder_sequence_length,
                _,
            ) = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_attention_mask = None

        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)
        # The positional module only reads the sequence length, so inputs_embeds
        # works for both the input_ids and the inputs_embeds path.
        position_embeds = self.wpe(inputs_embeds)
        hidden_states = inputs_embeds + position_embeds
        hidden_states = self.wln(hidden_states)

        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds

        output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = (
            () if output_attentions and self.config.add_cross_attention else None
        )
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
            # LayerDrop: during training each block is skipped with probability
            # config.layer_dropout_prob; at evaluation time every block runs.
            if self.training and random.uniform(0, 1) <= self.config.layer_dropout_prob:
                continue
            if self.model_parallel:
                torch.cuda.set_device(hidden_states.device)
                if layer_past is not None:
                    layer_past = tuple(
                        past_state.to(hidden_states.device)
                        for past_state in layer_past
                    )
                if attention_mask is not None:
                    attention_mask = attention_mask.to(hidden_states.device)
                if isinstance(head_mask, torch.Tensor):
                    head_mask = head_mask.to(hidden_states.device)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            hidden_states = block(hidden_states, mask=attention_mask)

        hidden_states = self.ln_f(hidden_states)
        hidden_states = hidden_states.view(output_shape)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, None, all_hidden_states, None, None]
                if v is not None
            )

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=None,
            hidden_states=all_hidden_states,
            attentions=None,
            cross_attentions=None,
        )

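# Causal language-modeling head on top of ShrinkModel. The LM head mirrors the
# factorized input embedding: hidden_size -> hidden_size_0 -> vocab_size, so
# the final vocabulary projection can be tied to the input embedding matrix.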
class ShrinkModelForCausalLM(ShrinkPreTrainedModel):
    # The vocabulary projection is the second layer of the factorized lm_head.
    _tied_weights_keys = ["lm_head.1.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.transformer = ShrinkModel(config)
        self.lm_head = nn.Sequential(
            nn.Linear(
                config.hidden_size, config.hidden_size_0, bias=config.projection_bias
            ),
            nn.Linear(
                config.hidden_size_0, config.vocab_size, bias=config.lm_head_bias
            ),
        )
        self.model_parallel = False
        self.device_map = None
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head[1]

    def set_output_embeddings(self, new_embeddings):
        self.lm_head[1] = new_embeddings

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs
    ):
        token_type_ids = kwargs.get("token_type_ids", None)

        if past_key_values:
            input_ids = input_ids[:, -1].unsqueeze(-1)
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)

        attention_mask = kwargs.get("attention_mask", None)
        position_ids = kwargs.get("position_ids", None)

        if attention_mask is not None and position_ids is None:
            # Create position_ids on the fly from the padding mask.
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -1].unsqueeze(-1)
        else:
            position_ids = None

        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "position_ids": position_ids,
                "attention_mask": attention_mask,
                "token_type_ids": token_type_ids,
            }
        )
        return model_inputs

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to
            `-100` are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]

        if self.model_parallel:
            torch.cuda.set_device(self.transformer.first_device)
            hidden_states = hidden_states.to(self.lm_head[0].weight.device)

        lm_logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # Move labels to the logits device to support model parallelism.
            labels = labels.to(lm_logits.device)
            # Shift so that tokens < n predict token n.
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(
                shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
            )

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
            cross_attentions=transformer_outputs.cross_attentions,
        )

    @staticmethod
    def _reorder_cache(
        past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
    ) -> Tuple[Tuple[torch.Tensor]]:
        """
        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.
        """
        return tuple(
            tuple(
                past_state.index_select(0, beam_idx.to(past_state.device))
                for past_state in layer_past
            )
            for layer_past in past_key_values
        )
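# Minimal usage sketch (not part of the library API). It assumes ShrinkConfig
# exposes the fields referenced in this file (vocab_size, hidden_size,
# hidden_size_0, num_hidden_layers, num_attention_heads, intermediate_size,
# combined_qkv, qk_hidden_size, use_bias, projection_bias, lm_head_bias,
# activation_function, layer_norm_epsilon, layer_dropout_prob,
# max_position_embeddings) with sensible defaults; the keyword arguments shown
# below are illustrative, not guaranteed by the actual configuration class.
#
#   config = ShrinkConfig(vocab_size=32000, hidden_size=512, hidden_size_0=128,
#                         num_hidden_layers=4, num_attention_heads=8)
#   model = ShrinkModelForCausalLM(config)
#   input_ids = torch.randint(0, config.vocab_size, (1, 16))
#   out = model(input_ids, labels=input_ids)
#   print(out.loss, out.logits.shape)  # scalar loss, (1, 16, vocab_size)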