import torch from torch._C import NoopLogger import torch.nn import torch.nn.functional as F from torch import Tensor from torch.nn import CrossEntropyLoss, MSELoss, BCEWithLogitsLoss from transformers import BertModel, BertPreTrainedModel from transformers import RobertaModel, RobertaPreTrainedModel from transformers.modeling_outputs import SequenceClassifierOutput, SequenceClassifierOutputWithPast, BaseModelOutput, Seq2SeqLMOutput from transformers import GPT2Model, GPT2PreTrainedModel, GPTNeoModel from model.prefix_encoder import PrefixEncoder from model.deberta import DebertaModel, DebertaPreTrainedModel, ContextPooler, StableDropout from model import utils import copy class BertForSequenceClassification(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.bert = BertModel(config) self.dropout = torch.nn.Dropout(config.hidden_dropout_prob) self.classifier = torch.nn.Linear(config.hidden_size, config.num_labels) self.init_weights() self.embedding = utils.get_embeddings(self, config) self.embeddings_gradient = utils.GradientStorage(self.embedding) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, use_base_grad=False, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ utils.use_grad(self.bert, use_base_grad) return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) elif self.config.problem_type == "em": predict_logp = F.log_softmax(pooled_output, dim=-1) target_logp = predict_logp.gather(-1, labels) target_logp = target_logp - 1e32 * labels.eq(0) # Apply mask loss = -torch.logsumexp(target_logp, dim=-1) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output loss.backward() return SequenceClassifierOutput( loss=loss, logits=pooled_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class BertPrefixForSequenceClassification(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.bert = BertModel(config) self.dropout = torch.nn.Dropout(config.hidden_dropout_prob) self.classifier = torch.nn.Linear(config.hidden_size, config.num_labels) for param in self.bert.parameters(): param.requires_grad = False self.pre_seq_len = config.pre_seq_len self.n_layer = config.num_hidden_layers self.n_head = config.num_attention_heads self.n_embd = config.hidden_size // config.num_attention_heads self.prefix_tokens = torch.arange(self.pre_seq_len).long() self.prefix_encoder = PrefixEncoder(config) bert_param = 0 for name, param in self.bert.named_parameters(): bert_param += param.numel() all_param = 0 for name, param in self.named_parameters(): all_param += param.numel() total_param = all_param - bert_param print('-> bert_param:{:0.2f}M P-tuning-V2 param is {}'.format(bert_param / 1000000, total_param)) self.embedding = utils.get_embeddings(self, config) self.embeddings_gradient = utils.GradientStorage(self.embedding) def get_prompt(self, batch_size): prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(self.bert.device) past_key_values = self.prefix_encoder(prefix_tokens) # bsz, seqlen, _ = past_key_values.shape past_key_values = past_key_values.view( batch_size, self.pre_seq_len, self.n_layer * 2, self.n_head, self.n_embd ) past_key_values = self.dropout(past_key_values) past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split(2) return past_key_values def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, use_base_grad=False, ): utils.use_grad(self.bert, use_base_grad) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size = input_ids.shape[0] past_key_values = self.get_prompt(batch_size=batch_size) prefix_attention_mask = torch.ones(batch_size, self.pre_seq_len).to(self.bert.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, past_key_values=past_key_values, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class BertPromptForSequenceClassification(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert = BertModel(config) self.embeddings = self.bert.embeddings self.dropout = torch.nn.Dropout(config.hidden_dropout_prob) self.classifier = torch.nn.Linear(config.hidden_size, config.num_labels) for param in self.bert.parameters(): param.requires_grad = False self.pre_seq_len = config.pre_seq_len self.n_layer = config.num_hidden_layers self.n_head = config.num_attention_heads self.n_embd = config.hidden_size // config.num_attention_heads self.prefix_tokens = torch.arange(self.pre_seq_len).long() self.prefix_encoder = torch.nn.Embedding(self.pre_seq_len, config.hidden_size) self.embedding = utils.get_embeddings(self, config) self.embeddings_gradient = utils.GradientStorage(self.embedding) def get_prompt(self, batch_size): prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(self.bert.device) prompts = self.prefix_encoder(prefix_tokens) return prompts def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, use_base_grad=False, ): utils.use_grad(self.bert, use_base_grad) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size = input_ids.shape[0] raw_embedding = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, ) prompts = self.get_prompt(batch_size=batch_size) inputs_embeds = torch.cat((prompts, raw_embedding), dim=1) prefix_attention_mask = torch.ones(batch_size, self.pre_seq_len).to(self.bert.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) outputs = self.bert( # input_ids, attention_mask=attention_mask, # token_type_ids=token_type_ids, # position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, # past_key_values=past_key_values, ) # pooled_output = outputs[1] sequence_output = outputs[0] sequence_output = sequence_output[:, self.pre_seq_len:, :].contiguous() first_token_tensor = sequence_output[:, 0] pooled_output = self.bert.pooler.dense(first_token_tensor) pooled_output = self.bert.pooler.activation(pooled_output) pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class RobertaPrefixForSequenceClassification(RobertaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.roberta = RobertaModel(config) self.dropout = torch.nn.Dropout(config.hidden_dropout_prob) self.classifier = torch.nn.Linear(config.hidden_size, config.num_labels) self.init_weights() for param in self.roberta.parameters(): param.requires_grad = False self.pre_seq_len = config.pre_seq_len self.n_layer = config.num_hidden_layers self.n_head = config.num_attention_heads self.n_embd = config.hidden_size // config.num_attention_heads self.prefix_tokens = torch.arange(self.pre_seq_len).long() self.prefix_encoder = PrefixEncoder(config) bert_param = 0 for name, param in self.roberta.named_parameters(): bert_param += param.numel() all_param = 0 for name, param in self.named_parameters(): all_param += param.numel() total_param = all_param - bert_param print('-> total param is {}'.format(total_param)) # 9860105 self.embedding = utils.get_embeddings(self, config) self.embeddings_gradient = utils.GradientStorage(self.embedding) def get_prompt(self, batch_size): prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(self.roberta.device) past_key_values = self.prefix_encoder(prefix_tokens) past_key_values = past_key_values.view( batch_size, self.pre_seq_len, self.n_layer * 2, self.n_head, self.n_embd ) past_key_values = self.dropout(past_key_values) past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split(2) return past_key_values def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, use_base_grad=False, ): utils.use_grad(self.roberta, use_base_grad) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size = input_ids.shape[0] past_key_values = self.get_prompt(batch_size=batch_size) prefix_attention_mask = torch.ones(batch_size, self.pre_seq_len).to(self.roberta.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, past_key_values=past_key_values, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class RobertaPromptForSequenceClassification(RobertaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.roberta = RobertaModel(config) self.embeddings = self.roberta.embeddings self.dropout = torch.nn.Dropout(config.hidden_dropout_prob) self.classifier = torch.nn.Linear(config.hidden_size, config.num_labels) for param in self.roberta.parameters(): param.requires_grad = False self.pre_seq_len = config.pre_seq_len self.n_layer = config.num_hidden_layers self.n_head = config.num_attention_heads self.n_embd = config.hidden_size // config.num_attention_heads self.prefix_tokens = torch.arange(self.pre_seq_len).long() self.prefix_encoder = torch.nn.Embedding(self.pre_seq_len, config.hidden_size) self.embedding = utils.get_embeddings(self, config) self.embeddings_gradient = utils.GradientStorage(self.embedding) def get_prompt(self, batch_size): prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(self.roberta.device) prompts = self.prefix_encoder(prefix_tokens) return prompts def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, use_base_grad=False ): utils.use_grad(self.roberta, use_base_grad) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size = input_ids.shape[0] raw_embedding = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, ) prompts = self.get_prompt(batch_size=batch_size) inputs_embeds = torch.cat((prompts, raw_embedding), dim=1) prefix_attention_mask = torch.ones(batch_size, self.pre_seq_len).to(self.roberta.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) outputs = self.roberta( # input_ids, attention_mask=attention_mask, # token_type_ids=token_type_ids, # position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, # past_key_values=past_key_values, ) # pooled_output = outputs[1] sequence_output = outputs[0] sequence_output = sequence_output[:, self.pre_seq_len:, :].contiguous() first_token_tensor = sequence_output[:, 0] pooled_output = self.roberta.pooler.dense(first_token_tensor) pooled_output = self.roberta.pooler.activation(pooled_output) pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class DebertaPrefixForSequenceClassification(DebertaPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.deberta = DebertaModel(config) self.pooler = ContextPooler(config) output_dim = self.pooler.output_dim self.classifier = torch.nn.Linear(output_dim, self.num_labels) self.dropout = StableDropout(config.hidden_dropout_prob) self.init_weights() for param in self.deberta.parameters(): param.requires_grad = False self.pre_seq_len = config.pre_seq_len self.n_layer = config.num_hidden_layers self.n_head = config.num_attention_heads self.n_embd = config.hidden_size // config.num_attention_heads self.prefix_tokens = torch.arange(self.pre_seq_len).long() self.prefix_encoder = PrefixEncoder(config) deberta_param = 0 for name, param in self.deberta.named_parameters(): deberta_param += param.numel() all_param = 0 for name, param in self.named_parameters(): all_param += param.numel() total_param = all_param - deberta_param print('total param is {}'.format(total_param)) # 9860105 self.embedding = utils.get_embeddings(self, config) self.embeddings_gradient = utils.GradientStorage(self.embedding) def get_prompt(self, batch_size): prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(self.deberta.device) past_key_values = self.prefix_encoder(prefix_tokens) # bsz, seqlen, _ = past_key_values.shape past_key_values = past_key_values.view( batch_size, self.pre_seq_len, self.n_layer * 2, self.n_head, self.n_embd ) past_key_values = self.dropout(past_key_values) past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split(2) return past_key_values def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, use_base_grad=False ): utils.use_grad(self.bert, use_base_grad) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size = input_ids.shape[0] past_key_values = self.get_prompt(batch_size=batch_size) prefix_attention_mask = torch.ones(batch_size, self.pre_seq_len).to(self.deberta.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) outputs = self.deberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, past_key_values=past_key_values, ) encoder_layer = outputs[0] pooled_output = self.pooler(encoder_layer) pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.num_labels == 1: # regression task loss_fn = torch.nn.MSELoss() logits = logits.view(-1).to(labels.dtype) loss = loss_fn(logits, labels.view(-1)) elif labels.dim() == 1 or labels.size(-1) == 1: label_index = (labels >= 0).nonzero() labels = labels.long() if label_index.size(0) > 0: labeled_logits = torch.gather(logits, 0, label_index.expand(label_index.size(0), logits.size(1))) labels = torch.gather(labels, 0, label_index.view(-1)) loss_fct = CrossEntropyLoss() loss = loss_fct(labeled_logits.view(-1, self.num_labels).float(), labels.view(-1)) else: loss = torch.tensor(0).to(logits) else: log_softmax = torch.nn.LogSoftmax(-1) loss = -((log_softmax(logits) * labels).sum(-1)).mean() if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output else: return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class GPT2PromptForSequenceClassification(GPT2PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.gpt2 = GPT2Model(config) self.dropout = StableDropout(config.embd_pdrop) self.classifier = torch.nn.Linear(config.n_embd, self.num_labels) for param in self.gpt2.parameters(): param.requires_grad = False self.pre_seq_len = config.pre_seq_len self.n_layer = config.num_hidden_layers self.n_head = config.num_attention_heads self.n_embd = config.hidden_size // config.num_attention_heads self.prefix_tokens = torch.arange(self.pre_seq_len).long() self.prefix_encoder = torch.nn.Embedding(self.pre_seq_len, config.hidden_size) # Model parallel self.model_parallel = False self.device_map = None gpt2_param = 0 for name, param in self.gpt2.named_parameters(): gpt2_param += param.numel() all_param = 0 for name, param in self.named_parameters(): all_param += param.numel() total_param = all_param - gpt2_param print('-> total param is {}'.format(total_param)) # 9860105 self.embedding = self.gpt2.wte self.embeddings_gradient = utils.GradientStorage(self.embedding) def get_prompt(self, batch_size): prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(self.gpt2.device) prompts = self.prefix_encoder(prefix_tokens) return prompts def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, use_base_grad=False ): utils.use_grad(self.gpt2, use_base_grad) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size = input_ids.shape[0] raw_embedding = self.embedding(input_ids) prompts = self.get_prompt(batch_size=batch_size) inputs_embeds = torch.cat((prompts, raw_embedding), dim=1) prefix_attention_mask = torch.ones(batch_size, self.pre_seq_len).to(self.gpt2.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) transformer_outputs = self.gpt2( # input_ids, attention_mask=attention_mask, # token_type_ids=token_type_ids, # position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, # past_key_values=past_key_values, ) hidden_states = transformer_outputs[0] logits = self.classifier(hidden_states) if input_ids is not None: batch_size, sequence_length = input_ids.shape[:2] else: batch_size, sequence_length = inputs_embeds.shape[:2] assert ( self.config.pad_token_id is not None or batch_size == 1 ), "Cannot handle batch sizes > 1 if no " \ "padding token is defined." if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device) else: sequence_lengths = -1 pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) class GPT2PrefixForSequenceClassification(GPT2PreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.gpt2 = GPT2Model(config) self.dropout = StableDropout(config.hidden_dropout_prob) self.classifier = torch.nn.Linear(config.n_embd, self.num_labels) for param in self.gpt2.parameters(): param.requires_grad = False self.pre_seq_len = config.pre_seq_len self.n_layer = config.num_hidden_layers self.n_head = config.num_attention_heads self.n_embd = config.hidden_size // config.num_attention_heads self.prefix_tokens = torch.arange(self.pre_seq_len).long() self.prefix_encoder = PrefixEncoder(config) # Model parallel self.model_parallel = False self.device_map = None gpt2_param = 0 for name, param in self.gpt2.named_parameters(): gpt2_param += param.numel() all_param = 0 for name, param in self.named_parameters(): all_param += param.numel() total_param = all_param - gpt2_param print('-> gpt2_param:{:0.2f}M P-tuning-V2 param is {}'.format(gpt2_param/1000000, total_param)) self.embedding = self.gpt2.wte self.embeddings_gradient = utils.GradientStorage(self.embedding) def get_prompt(self, batch_size): prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(self.gpt2.device) past_key_values = self.prefix_encoder(prefix_tokens) past_key_values = past_key_values.view( batch_size, self.pre_seq_len, self.n_layer * 2, self.n_head, self.n_embd ) past_key_values = self.dropout(past_key_values) past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split(2) return past_key_values def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, use_base_grad=False, use_cache=None ): r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ utils.use_grad(self.gpt2, use_base_grad) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size = input_ids.shape[0] past_key_values = self.get_prompt(batch_size=batch_size) prefix_attention_mask = torch.ones(batch_size, self.pre_seq_len).to(self.gpt2.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) transformer_outputs = self.gpt2( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.classifier(hidden_states) if input_ids is not None: batch_size, sequence_length = input_ids.shape[:2] else: batch_size, sequence_length = inputs_embeds.shape[:2] assert ( self.config.pad_token_id is not None or batch_size == 1 ), "Cannot handle batch sizes > 1 if no padding token is defined." if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device) else: sequence_lengths = -1 pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) if __name__ == "__main__": from transformers import AutoConfig config = AutoConfig.from_pretrained("gpt2-large") config.hidden_dropout_prob = 0.1 config.pre_seq_len = 128 config.prefix_projection = True config.num_labels = 2 config.prefix_hidden_size = 1024 model = GPT2PrefixForSequenceClassification(config) for name, param in model.named_parameters(): print(name, param.shape)