Pipeline tag: Sentence Similarity
Language: English
Libraries: Transformers, Safetensors
Tags: llama, feature-extraction, text-embedding, embeddings, information-retrieval, beir, text-classification, language-model, text-clustering, text-semantic-similarity, text-evaluation, text-reranking, mteb, custom_code, text-generation-inference, Inference Endpoints
Datasets: natural_questions, ms_marco, fever, hotpot_qa
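Because the repository ships custom modeling code (the `custom_code` tag above), the model is loaded through `AutoModel` with `trust_remote_code=True`. The snippet below is a minimal usage sketch, not taken from the model card: the model id is a placeholder, and mean pooling with L2 normalization is a common choice for sentence embeddings rather than something this file prescribes.

# Usage sketch (assumptions: the repo exposes the encoder below via trust_remote_code,
# "<model-id>" is a placeholder, and mean pooling is an illustrative choice).
import torch
import torch.nn.functional as F
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("<model-id>")
model = AutoModel.from_pretrained("<model-id>", trust_remote_code=True)

texts = ["what is a llama?", "The llama is a domesticated South American camelid."]
batch = tokenizer(texts, padding=True, return_tensors="pt")

with torch.no_grad():
    hidden = model(**batch).last_hidden_state          # (batch, seq_len, hidden_size)

# Mean-pool over non-padding tokens, then normalize for cosine similarity.
mask = batch["attention_mask"].unsqueeze(-1).to(hidden.dtype)
emb = (hidden * mask).sum(dim=1) / mask.sum(dim=1)
emb = F.normalize(emb, p=2, dim=-1)
print(emb @ emb.T)                                     # pairwise cosine similarities

The custom modeling code from the repository follows.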
from typing import List, Optional, Tuple, Union

import torch
from transformers import LlamaModel, LlamaPreTrainedModel, LlamaForCausalLM, AutoModel
from transformers.models.llama.modeling_llama import (
    LlamaDecoderLayer,
    LlamaRMSNorm,
    LlamaRotaryEmbedding,
    LlamaConfig,
    LlamaMLP,
    LlamaAttention,
    LlamaFlashAttention2,
    LlamaSdpaAttention,
)
from transformers.utils import logging
from torch import nn
from torch.nn import CrossEntropyLoss
import torch.nn.functional as F
from transformers.modeling_outputs import BaseModelOutputWithPast, MaskedLMOutput, CausalLMOutputWithPast, TokenClassifierOutput
from transformers.cache_utils import Cache, DynamicCache, StaticCache
from transformers.modeling_attn_mask_utils import AttentionMaskConverter

logger = logging.get_logger(__name__)
class ModifiedLlamaAttention(LlamaAttention):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.is_causal = False


class ModifiedLlamaFlashAttention2(LlamaFlashAttention2):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.is_causal = False


class ModifiedLlamaSdpaAttention(LlamaSdpaAttention):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.is_causal = False


LLAMA_ATTENTION_CLASSES = {
    "eager": ModifiedLlamaAttention,
    "flash_attention_2": ModifiedLlamaFlashAttention2,
    "sdpa": ModifiedLlamaSdpaAttention,
}
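# Note: each subclass above only flips `is_causal` to False, so no causal mask is
# applied inside the attention kernels and every token may attend to the full
# sequence (bidirectional attention). `config._attn_implementation` selects which
# of the three classes the decoder layer below instantiates.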
class ModifiedLlamaDecoderLayer(LlamaDecoderLayer):
    def __init__(self, config: LlamaConfig, layer_idx: int):
        nn.Module.__init__(self)
        self.hidden_size = config.hidden_size
        self.self_attn = LLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
        self.mlp = LlamaMLP(config)
        self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
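# The constructor mirrors the stock LlamaDecoderLayer but calls nn.Module.__init__
# directly, so the parent class never builds its own (causal) attention module;
# the self-attention is taken from the non-causal classes above and the inherited
# forward() is left unchanged.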
class LlamaEncoderModel(LlamaModel):
    def __init__(self, config):
        LlamaPreTrainedModel.__init__(self, config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [ModifiedLlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = LlamaRotaryEmbedding(config=config)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()
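    # LlamaEncoderModel reuses LlamaModel's forward pass but stacks the modified
    # (bidirectional) decoder layers and overrides _update_causal_mask below, so the
    # only masking left is the padding mask derived from `attention_mask`.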
    def _update_causal_mask(
        self,
        attention_mask: torch.Tensor,
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool,
    ):
        # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
        # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode step due to the dynamic shapes
        # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
        # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and 0.0 in attention_mask:
                return attention_mask
            return None

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)

        # When output_attentions is True, the SDPA implementation's forward method calls the eager implementation's forward.
        # if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
        #     if AttentionMaskConverter._ignore_causal_mask_sdpa(
        #         attention_mask,
        #         inputs_embeds=input_tensor,
        #         past_key_values_length=past_seen_tokens,
        #         is_training=self.training,
        #     ):
        #         return None

        dtype, device = input_tensor.dtype, input_tensor.device
        min_dtype = torch.finfo(dtype).min
        sequence_length = input_tensor.shape[1]
        if using_static_cache:
            target_length = past_key_values.get_max_length()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        if attention_mask is not None and attention_mask.dim() == 4:
            # in this case we assume that the mask comes already in inverted form and requires no inversion or slicing
            if attention_mask.max() != 0:
                raise ValueError("Custom 4D attention mask should be passed in inverted form with max==0")
            causal_mask = attention_mask
        else:
            causal_mask = torch.zeros(
                (sequence_length, target_length), dtype=dtype, device=device
            )
            causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )
        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type == "cuda"
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask
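To make the effect of the override concrete, here is a small illustration (my own sketch, not part of the modeling file) of the bias produced by the `else` branch above for a single left-padded sequence: the all-zeros base mask stays zero between real tokens, and only the padded key positions are pushed to the dtype minimum, so attention is bidirectional apart from padding.

# Illustration only; mirrors the `else` branch of _update_causal_mask with
# target_length == sequence_length and no KV cache.
import torch

dtype = torch.float32
min_dtype = torch.finfo(dtype).min

attention_mask = torch.tensor([[0, 1, 1, 1]])          # one sequence, first token is padding
seq_len = attention_mask.shape[-1]
cache_position = torch.arange(seq_len)

bias = torch.zeros((seq_len, seq_len), dtype=dtype)
bias *= torch.arange(seq_len) > cache_position.reshape(-1, 1)   # stays all zeros: no causal part
bias = bias[None, None, :, :].expand(1, 1, -1, -1).clone()
padding = (bias + attention_mask[:, None, None, :]) == 0
bias = bias.masked_fill(padding, min_dtype)

print(bias[0, 0])
# Only the padded key column is filled with min_dtype; every real token can attend
# to every other real token, including "future" ones.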