# Ahma-7B: EasyLM/models/llama/llama_model.py
# Commit a85f909 ("Add training codes") by aapot
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import json
import tempfile
from functools import partial
import numpy as np
import jax
import jax.numpy as jnp
from jax import lax
from jax.sharding import PartitionSpec as PS
import flax.linen as nn
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
from flax.linen import combine_masks, make_causal_mask
from flax.linen.attention import dot_product_attention_weights
from flax.traverse_util import flatten_dict, unflatten_dict
from flax.linen import partitioning as nn_partitioning
import einops
import sentencepiece as spm
from transformers import AutoTokenizer
from transformers.convert_slow_tokenizer import import_protobuf
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
from transformers.modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput
from transformers.modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring
from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward
from ml_collections import ConfigDict
from ml_collections.config_dict import config_dict
from mlxu import function_args_to_config, load_pickle, open_file
from EasyLM.bpt import blockwise_ffn, blockwise_attn
from EasyLM.jax_utils import (
with_sharding_constraint, get_jax_mesh, get_gradient_checkpoint_policy
)
LLAMA_STANDARD_CONFIGS = {
'small': {
'vocab_size': 64256,
'hidden_size': 768,
'intermediate_size': 3072,
'num_hidden_layers': 12,
'num_attention_heads': 12,
'max_sequence_length': 2048,
'initializer_range': 0.02,
'rms_norm_eps': 1e-6,
'use_cache': True,
'tie_word_embeddings': False,
},
'medium': {
'vocab_size': 64256,
'hidden_size': 1024,
'intermediate_size': 4096,
'num_hidden_layers': 24,
'num_attention_heads': 16,
'max_sequence_length': 2048,
'initializer_range': 0.02,
'rms_norm_eps': 1e-6,
'use_cache': True,
'tie_word_embeddings': False,
},
'large': {
'vocab_size': 64256,
'hidden_size': 1536,
'intermediate_size': 6144,
'num_hidden_layers': 24,
'num_attention_heads': 16,
'max_sequence_length': 2048,
'initializer_range': 0.02,
'rms_norm_eps': 1e-6,
'use_cache': True,
'tie_word_embeddings': False,
},
'xlarge': {
'vocab_size': 64256,
'hidden_size': 2048,
'intermediate_size': 8192,
'num_hidden_layers': 24,
'num_attention_heads': 32,
'max_sequence_length': 2048,
'initializer_range': 0.02,
'rms_norm_eps': 1e-6,
'use_cache': True,
'tie_word_embeddings': False,
},
'1b': {
'vocab_size': 64256,
'hidden_size': 2048,
'intermediate_size': 5504,
'num_hidden_layers': 22,
'num_attention_heads': 16,
'max_sequence_length': 2048,
'initializer_range': 0.02,
'rms_norm_eps': 1e-6,
'use_cache': True,
'tie_word_embeddings': False,
},
'3b': {
'vocab_size': 64256,
'hidden_size': 3200,
'intermediate_size': 8640,
'num_hidden_layers': 26,
'num_attention_heads': 32,
'max_sequence_length': 2048,
'initializer_range': 0.02,
'rms_norm_eps': 1e-6,
'use_cache': True,
'tie_word_embeddings': False,
},
'7b': {
'vocab_size': 64256,
'hidden_size': 4096,
'intermediate_size': 11008,
'num_hidden_layers': 32,
'num_attention_heads': 32,
'max_sequence_length': 2048,
'initializer_range': 0.02,
'rms_norm_eps': 1e-6,
'use_cache': True,
'tie_word_embeddings': False,
},
'13b': {
'vocab_size': 64256,
'hidden_size': 5120,
'intermediate_size': 13824,
'num_hidden_layers': 40,
'num_attention_heads': 40,
'max_sequence_length': 2048,
'initializer_range': 0.02,
'rms_norm_eps': 1e-6,
'use_cache': True,
'tie_word_embeddings': False,
},
'30b': {
'vocab_size': 64256,
'hidden_size': 6656,
'intermediate_size': 17920,
'num_hidden_layers': 60,
'num_attention_heads': 52,
'max_sequence_length': 2048,
'initializer_range': 0.02,
'rms_norm_eps': 1e-6,
'use_cache': True,
'tie_word_embeddings': False,
},
'65b': {
'vocab_size': 64256,
'hidden_size': 8192,
'intermediate_size': 22016,
'num_hidden_layers': 80,
'num_attention_heads': 64,
'max_sequence_length': 2048,
'initializer_range': 0.02,
'rms_norm_eps': 1e-5,
'use_cache': True,
'tie_word_embeddings': False,
},
'debug': { # A small model for debugging
'vocab_size': 64256,
'hidden_size': 128,
'intermediate_size': 256,
'num_hidden_layers': 2,
'num_attention_heads': 4,
'max_sequence_length': 2048,
'initializer_range': 0.02,
'rms_norm_eps': 1e-6,
'use_cache': True,
'tie_word_embeddings': False,
},
}
class LLaMAConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`~LLaMAModel`]. It is used to instantiate an LLaMA
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the LLaMA-7B.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`~LLaMAModel`] or [`~TFLLaMAModel`].
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_sequence_length (`int`, *optional*, defaults to 2048):
Max sequence length for model (for RoPE computation)
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-6):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
tie_word_embeddings(`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
Example:
```python
>>> from EasyLM.models.llama.llama_model import FlaxLLaMAModel, LLaMAConfig
>>> # Initializing a LLaMA llama-7b style configuration
>>> configuration = LLaMAConfig()
>>> # Initializing a model from the llama-7b style configuration
>>> model = FlaxLLaMAModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "llama"
def __init__(
self,
vocab_size=32000,
hidden_size=4096,
intermediate_size=11008,
num_hidden_layers=32,
num_attention_heads=32,
max_sequence_length=2048,
rms_norm_eps=1e-6,
initializer_range=0.02,
use_cache=True,
# pad_token_id=-1,
bos_token_id=0,
eos_token_id=1,
resid_pdrop=0.0,
embd_pdrop=0.0,
attn_pdrop=0.0,
tie_word_embeddings=False,
remat_block='nothing_saveable',
remat_attention='',
remat_mlp='',
scan_attention=False,
scan_mlp=False,
scan_query_chunk_size=1024,
scan_key_chunk_size=1024,
scan_mlp_chunk_size=1024,
fcm_min_ratio=0.0,
fcm_max_ratio=0.0,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.initializer_range = initializer_range
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.max_sequence_length = max_sequence_length
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.remat_block = remat_block
self.remat_attention = remat_attention
self.remat_mlp = remat_mlp
self.scan_attention = scan_attention
self.scan_mlp = scan_mlp
self.scan_query_chunk_size = scan_query_chunk_size
self.scan_key_chunk_size = scan_key_chunk_size
self.scan_mlp_chunk_size = scan_mlp_chunk_size
self.fcm_min_ratio = fcm_min_ratio
self.fcm_max_ratio = fcm_max_ratio
super().__init__(
# pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
@classmethod
def get_default_config(cls, updates=None):
config = function_args_to_config(cls.__init__)
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
@staticmethod
def get_jax_mesh(axis_dims):
return get_jax_mesh(axis_dims, ('dp', 'fsdp', 'mp'))
@staticmethod
def get_partition_rules():
""" Parition rules for GPTJ. Note that these rules are orderd, so that
the beginning rules match first. It is important to use
PartitionSpec() instead of None here because JAX does not treat
None as a pytree leaf.
"""
return (
# embeddings
("transformer/wte/embedding", PS("mp", "fsdp")),
# attention
("attention/(wq|wk|wv)/kernel", PS("fsdp", "mp")),
("attention/wo/kernel", PS("mp", "fsdp")),
# mlp
("feed_forward/w1/kernel", PS("fsdp", "mp")),
("feed_forward/w2/kernel", PS("mp", "fsdp")),
("feed_forward/w3/kernel", PS("fsdp", "mp")),
# layer norms
("attention_norm/kernel", PS(None)),
("ffn_norm/kernel", PS(None)),
# output head
("transformer/ln_f/kernel", PS(None)),
("lm_head/kernel", PS("fsdp", "mp")),
('.*', PS(None)),
)
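# Illustrative note (assumption, not part of the original file): each entry above pairs a regex with a
# PartitionSpec. A matcher elsewhere in EasyLM walks the flattened parameter tree, joins each key path with
# '/', and applies the first regex that matches. For example, a parameter path such as
# 'transformer/h/0/attention/wq/kernel' falls under 'attention/(wq|wk|wv)/kernel' and is sharded as
# PS("fsdp", "mp"), while anything unmatched is caught by the final ('.*', PS(None)) rule and replicated.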
@staticmethod
def get_weight_decay_exclusions():
return (
"attention_norm/kernel",
"ffn_norm/kernel",
"transformer/ln_f/kernel",
)
@staticmethod
def rng_keys():
return ('params', 'dropout', 'fcm')
@staticmethod
def get_tokenizer_config(updates=None):
config = ConfigDict()
config.vocab_file = ''
config.pretrained_model_name_or_path = ''
config.add_bos_token = False
config.add_eos_token = False
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
@classmethod
def get_tokenizer(cls, config, padding_side='left', truncation_side='right'):
config = cls.get_tokenizer_config(config)
if config.vocab_file == '':
assert config.pretrained_model_name_or_path != '', 'vocab_file or pretrained_model_name_or_path must be specified'
if config.pretrained_model_name_or_path != '':
tokenizer = AutoTokenizer.from_pretrained(
config.pretrained_model_name_or_path,
add_bos_token=config.add_bos_token,
add_eos_token=config.add_eos_token,
padding_side=padding_side,
truncation_side=truncation_side,
)
else:
tokenizer = LlamaTokenizer(
vocab_file=config.vocab_file,
add_bos_token=config.add_bos_token,
add_eos_token=config.add_eos_token,
padding_side=padding_side,
truncation_side=truncation_side,
)
return tokenizer
@classmethod
def load_config(cls, path):
if path in LLAMA_STANDARD_CONFIGS:
return cls.from_dict(LLAMA_STANDARD_CONFIGS[path])
load_type, load_path = path.split('::', 1)
if load_type == 'pickle':
return cls.from_dict(load_pickle(load_path)['llama_config'])
elif load_type == 'json':
with open_file(load_path, 'r') as fin:
raw_config = fin.read()
return cls.from_dict(json.loads(raw_config))
else:
raise ValueError(f'Unsupported load config type: {load_type}')
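# Usage sketch (added for clarity, not part of the original file; the file paths are placeholders):
# `load_config` accepts either a preset name from LLAMA_STANDARD_CONFIGS or a typed path of the
# form '<type>::<path>'.
#
#   config = LLaMAConfig.load_config('7b')                      # preset from the table above
#   config = LLaMAConfig.load_config('json::/tmp/llama.json')   # JSON file with config keys
#   config = LLaMAConfig.load_config('pickle::/tmp/ckpt.pkl')   # pickle containing a 'llama_config' entry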
remat = nn_partitioning.remat
logger = logging.get_logger(__name__)
class RMSNorm(nn.Module):
dim: int
eps: float=1e-6
dtype: jnp.dtype=jnp.float32
param_dtype: jnp.dtype=jnp.float32
def setup(self) -> None:
self.weight = self.param(
'kernel',
nn.initializers.ones,
(self.dim,),
self.param_dtype,
)
def _norm(self, x: jnp.ndarray) -> jnp.ndarray:
return x * jax.lax.rsqrt(jnp.square(x).mean(-1, keepdims=True) + self.eps)
def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
x = x.astype(jnp.promote_types(self.dtype, jnp.float32))
output = self._norm(x).astype(self.dtype)
weight = jnp.asarray(self.weight, self.dtype)
return output * weight
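# Note (added for clarity, not in the original file): RMSNorm normalizes by the root-mean-square of the
# features instead of subtracting a mean, i.e. y = x / sqrt(mean(x**2) + eps) * weight. The input is
# promoted to at least float32 before the reduction so the rsqrt stays numerically stable in low precision.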
def precompute_freqs_cis(dim: int, end: int, theta: float=10000.0, dtype: jnp.dtype=jnp.float32) -> jnp.ndarray:
freqs = 1.0 / (theta ** (np.arange(0, dim, 2)[: (dim // 2)].astype(dtype) / dim))
t = np.arange(end) # type: ignore
freqs = np.outer(t, freqs).astype(dtype) # type: ignore
sin, cos = np.sin(freqs), np.cos(freqs)
freqs_cis = np.complex64(cos + 1j * sin)
return jnp.asarray(freqs_cis)
def apply_rotary_emb(
xq: jnp.ndarray,
xk: jnp.ndarray,
freqs_cis: jnp.ndarray,
dtype: jnp.dtype=jnp.float32,
) -> Tuple[jnp.ndarray, jnp.ndarray]:
reshape_xq = xq.astype(jnp.float32).reshape(*xq.shape[:-1], -1, 2)
reshape_xk = xk.astype(jnp.float32).reshape(*xk.shape[:-1], -1, 2)
xq_ = jax.lax.complex(reshape_xq[..., 0], reshape_xq[..., 1])
xk_ = jax.lax.complex(reshape_xk[..., 0], reshape_xk[..., 1])
# add head dim
freqs_cis = jnp.reshape(freqs_cis, (*freqs_cis.shape[:2], 1, *freqs_cis.shape[2:]))
xq_out = xq_ * freqs_cis
xq_out = jnp.stack((jnp.real(xq_out), jnp.imag(xq_out)), axis=-1).reshape(*xq_out.shape[:-1], -1)
xk_out = xk_ * freqs_cis
xk_out = jnp.stack((jnp.real(xk_out), jnp.imag(xk_out)), axis=-1).reshape(*xk_out.shape[:-1], -1)
return xq_out.astype(dtype), xk_out.astype(dtype)
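# Note (added for clarity, not in the original file): rotary embeddings treat consecutive channel pairs of
# the query/key heads as complex numbers and multiply them by e^{i * m * theta_k}, where m is the position
# and theta_k = theta ** (-2k / dim) comes from precompute_freqs_cis above. The attention module selects the
# rows for the actual positions with jnp.take(self.freqs_cis, position_ids, axis=0) before calling
# apply_rotary_emb.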
class FlaxLLaMAAttention(nn.Module):
config: LLaMAConfig
dtype: jnp.dtype=jnp.float32
param_dtype: jnp.dtype=jnp.float32
precision: Optional[Union[jax.lax.Precision, str]]=None
def setup(self):
config = self.config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
self.wq = nn.Dense(
config.num_attention_heads*self.head_dim,
dtype=self.dtype,
param_dtype=self.param_dtype,
use_bias=False,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
precision=self.precision,
)
self.wk = nn.Dense(
config.num_attention_heads*self.head_dim,
dtype=self.dtype,
param_dtype=self.param_dtype,
use_bias=False,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
precision=self.precision,
)
self.wv = nn.Dense(
config.num_attention_heads*self.head_dim,
dtype=self.dtype,
param_dtype=self.param_dtype,
use_bias=False,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
precision=self.precision,
)
self.wo = nn.Dense(
config.hidden_size,
dtype=self.dtype,
param_dtype=self.param_dtype,
use_bias=False,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
precision=self.precision,
)
self.resid_dropout = nn.Dropout(rate=config.resid_pdrop)
self.causal_mask = make_causal_mask(jnp.ones((1, config.max_sequence_length), dtype="bool"), dtype="bool")
self.freqs_cis = precompute_freqs_cis(
self.head_dim,
config.max_sequence_length * 2,
dtype=self.dtype,
)
def _split_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
def _merge_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
@nn.compact
def _concatenate_to_cache(self, key, value, query, attention_mask):
"""
This function takes projected key, value states from a single input token and concatenates the states to cached
states from previous steps. This function is slightly adapted from the official Flax repository:
https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
"""
# detect if we're initializing by absence of existing cache data.
is_initialized = self.has_variable("cache", "cached_key")
cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
if is_initialized:
*batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
# update key, value caches with our new 1d spatial slices
cur_index = cache_index.value
indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
key = lax.dynamic_update_slice(cached_key.value, key, indices)
value = lax.dynamic_update_slice(cached_value.value, value, indices)
cached_key.value = key
cached_value.value = value
num_updated_cache_vectors = query.shape[1]
cache_index.value = cache_index.value + num_updated_cache_vectors
# causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
pad_mask = jnp.broadcast_to(
jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
)
attention_mask = combine_masks(pad_mask, attention_mask)
return key, value, attention_mask
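# Decoding sketch (added for clarity, not in the original file): during incremental decoding the cache holds
# (batch, max_length, num_heads, head_dim) buffers. Each call writes the new key/value slice at cache_index,
# advances cache_index by the query length, and masks out cache slots beyond the positions generated so far
# via the pad_mask above.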
def __call__(
self,
hidden_states,
attention_mask,
position_ids,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
fcm_mask=None,
):
xq, xk, xv = self.wq(hidden_states), self.wk(hidden_states), self.wv(hidden_states)
xq = with_sharding_constraint(xq, PS(("dp", "fsdp"), None, "mp"))
xk = with_sharding_constraint(xk, PS(("dp", "fsdp"), None, "mp"))
xv = with_sharding_constraint(xv, PS(("dp", "fsdp"), None, "mp"))
xq = self._split_heads(xq)
xk = self._split_heads(xk)
xv = self._split_heads(xv)
freqs_cis = jnp.take(self.freqs_cis, position_ids, axis=0)
xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis, dtype=self.dtype)
dropout_rng = None
if not deterministic and self.config.attn_pdrop > 0.0:
dropout_rng = self.make_rng("dropout")
if self.config.scan_attention and not (self.has_variable("cache", "cached_key") or init_cache):
# Blockwise attention is not needed during autoregressive decoding, since decoding has no quadratic memory cost.
# Expand the attention mask without materializing the full n x n matrix; blockwise_attn handles the rest.
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
# transform boolean mask into float mask
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
)
attn_weights = None
attn_output = blockwise_attn(
xq,
xk,
xv,
bias=attention_bias,
deterministic=deterministic,
dropout_rng=dropout_rng,
attn_pdrop=self.config.attn_pdrop,
causal=True,
query_chunk_size=self.config.scan_query_chunk_size,
key_chunk_size=self.config.scan_key_chunk_size,
dtype=self.dtype,
policy=get_gradient_checkpoint_policy('nothing_saveable'),
precision=self.precision,
float32_logits=True,
prevent_cse=True,
)
attn_output = with_sharding_constraint(attn_output, PS(("dp", "fsdp"), None, "mp", None))
else:
query_length, key_length = xq.shape[1], xk.shape[1]
if self.has_variable("cache", "cached_key"):
mask_shift = self.variables["cache"]["cache_index"]
max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
causal_mask = lax.dynamic_slice(
self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
)
else:
causal_mask = self.causal_mask[:, :, :query_length, :key_length]
batch_size = hidden_states.shape[0]
causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
attention_mask = combine_masks(attention_mask, causal_mask, fcm_mask)
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.has_variable("cache", "cached_key") or init_cache:
xk, xv, attention_mask = self._concatenate_to_cache(xk, xv, xq, attention_mask)
# transform boolean mask into float mask
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
)
attn_weights = dot_product_attention_weights(
xq,
xk,
bias=attention_bias,
dropout_rng=dropout_rng,
dropout_rate=self.config.attn_pdrop,
deterministic=deterministic,
dtype=jnp.promote_types(self.dtype, jnp.float32),
precision=self.precision,
)
attn_weights = with_sharding_constraint(attn_weights, PS(("dp", "fsdp"), "mp", None, None))
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, xv, precision=self.precision)
attn_output = self._merge_heads(attn_output)
attn_output = self.wo(attn_output)
attn_output = self.resid_dropout(attn_output, deterministic=deterministic)
outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
return outputs
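# Note (added for clarity, not in the original file): the attention module has two code paths. When
# config.scan_attention is enabled and no KV cache is in use, it calls blockwise_attn, which computes
# attention in query/key chunks so the full (seq, seq) score matrix is never materialized. Otherwise it
# falls back to standard dot_product_attention_weights with the causal mask and, during decoding, the
# cached keys/values from _concatenate_to_cache.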
class FlaxLLaMAMLP(nn.Module):
config: LLaMAConfig
dtype: jnp.dtype=jnp.float32
param_dtype: jnp.dtype=jnp.float32
precision: Optional[Union[jax.lax.Precision, str]]=None
def setup(self) -> None:
config = self.config
self.w1 = nn.Dense(
config.intermediate_size,
dtype=self.dtype,
param_dtype=self.param_dtype,
use_bias=False,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
precision=self.precision,
)
self.w2 = nn.Dense(
config.hidden_size,
dtype=self.dtype,
param_dtype=self.param_dtype,
use_bias=False,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
precision=self.precision,
)
self.w3 = nn.Dense(
config.intermediate_size,
dtype=self.dtype,
param_dtype=self.param_dtype,
use_bias=False,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
precision=self.precision,
)
self.dropout = nn.Dropout(rate=self.config.resid_pdrop)
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
x = self.w2(nn.silu(self.w1(x)) * self.w3(x))
x = self.dropout(x, deterministic=deterministic)
return x
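# Note (added for clarity, not in the original file): this is the gated SwiGLU-style MLP used by LLaMA:
# w2(silu(w1(x)) * w3(x)), where w1/w3 project hidden_size -> intermediate_size and w2 projects back.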
class FlaxLLaMABlock(nn.Module):
config: LLaMAConfig
dtype: jnp.dtype=jnp.float32
param_dtype: jnp.dtype=jnp.float32
precision: Optional[Union[jax.lax.Precision, str]]=None
def setup(self) -> None:
attention_module = FlaxLLaMAAttention
mlp_module = FlaxLLaMAMLP
if self.config.remat_attention != '':
attention_module = remat(
FlaxLLaMAAttention, static_argnums=(3, 4, 5),
policy=get_gradient_checkpoint_policy(self.config.remat_attention),
prevent_cse=True,
)
if self.config.remat_mlp != '':
mlp_module = remat(
FlaxLLaMAMLP, static_argnums=(1,),
policy=get_gradient_checkpoint_policy(self.config.remat_mlp),
prevent_cse=True,
)
self.attention = attention_module(
self.config,
dtype=self.dtype,
param_dtype=self.param_dtype,
precision=self.precision,
)
self.feed_forward = mlp_module(
self.config,
dtype=self.dtype,
param_dtype=self.param_dtype,
precision=self.precision,
)
self.attention_norm = RMSNorm(
self.config.hidden_size,
eps=self.config.rms_norm_eps,
dtype=self.dtype,
param_dtype=self.param_dtype,
)
self.ffn_norm = RMSNorm(
self.config.hidden_size,
eps=self.config.rms_norm_eps,
dtype=self.dtype,
param_dtype=self.param_dtype,
)
def __call__(
self,
hidden_states,
attention_mask=None,
position_ids=None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
fcm_mask: Optional[jnp.ndarray] = None,
):
attn_outputs = self.attention(
self.attention_norm(hidden_states),
attention_mask,
position_ids,
deterministic,
init_cache,
output_attentions,
fcm_mask,
)
attn_output = attn_outputs[0]
hidden_states = hidden_states + attn_output
feed_forward_input = self.ffn_norm(hidden_states)
if self.config.scan_mlp:
feed_forward_hidden_states = blockwise_ffn(
self.feed_forward,
feed_forward_input,
self.config.scan_mlp_chunk_size,
deterministic,
)
else:
feed_forward_hidden_states = self.feed_forward(
feed_forward_input,
deterministic,
)
feed_forward_hidden_states = with_sharding_constraint(feed_forward_hidden_states, PS(("dp", "fsdp"), None, "mp"))
hidden_states = hidden_states + feed_forward_hidden_states
return (hidden_states,) + attn_outputs[1:]
class FlaxLLaMAPreTrainedModel(FlaxPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LLaMAConfig
base_model_prefix = "transformer"
module_class: nn.Module = None
def __init__(
self,
config: LLaMAConfig,
input_shape: Tuple = (1, 1),
seed: int = 0,
dtype: jnp.dtype = jnp.float32,
_do_init: bool = True,
**kwargs,
):
module = self.module_class(config=config, dtype=dtype, **kwargs)
super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
# init input tensors
input_ids = jnp.zeros(input_shape, dtype="i4")
attention_mask = jnp.ones_like(input_ids)
position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
params_rng, dropout_rng = jax.random.split(rng)
rngs = {"params": params_rng, "dropout": dropout_rng}
if self.config.add_cross_attention:
encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,))
encoder_attention_mask = attention_mask
module_init_outputs = self.module.init(
rngs,
input_ids,
attention_mask,
position_ids,
encoder_hidden_states,
encoder_attention_mask,
return_dict=False,
)
else:
module_init_outputs = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)
random_params = module_init_outputs["params"]
if params is not None:
random_params = flatten_dict(unfreeze(random_params))
params = flatten_dict(unfreeze(params))
for missing_key in self._missing_keys:
params[missing_key] = random_params[missing_key]
self._missing_keys = set()
return freeze(unflatten_dict(params))
else:
return random_params
def init_cache(self, batch_size, max_length):
r"""
Args:
batch_size (`int`):
batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
max_length (`int`):
maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
cache.
"""
# init input variables to retrieve cache
input_ids = jnp.ones((batch_size, max_length))
attention_mask = jnp.ones_like(input_ids)
position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
init_variables = self.module.init(
jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
)
return init_variables["cache"]
@add_start_docstrings_to_model_forward("")
def __call__(
self,
input_ids,
attention_mask=None,
position_ids=None,
params: dict = None,
past_key_values: dict = None,
dropout_rng: jax.random.PRNGKey = None,
train: bool = False,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
batch_size, sequence_length = input_ids.shape
if position_ids is None:
if past_key_values is not None:
raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.")
position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
if attention_mask is None:
attention_mask = jnp.ones((batch_size, sequence_length))
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
inputs = {"params": params or self.params}
# If past_key_values are passed, the cache has already been initialized, so the private flag init_cache
# has to be passed down to ensure the cache is used. The cache must also be marked as mutable so that it
# can be updated by the FlaxLLaMAAttention module.
if past_key_values:
inputs["cache"] = past_key_values
mutable = ["cache"]
else:
mutable = False
outputs = self.module.apply(
inputs,
jnp.array(input_ids, dtype="i4"),
jnp.array(attention_mask, dtype="i4"),
jnp.array(position_ids, dtype="i4"),
not train,
False,
output_attentions,
output_hidden_states,
return_dict,
rngs=rngs,
mutable=mutable,
)
# add updated cache to model output
if past_key_values is not None and return_dict:
outputs, past_key_values = outputs
outputs["past_key_values"] = unfreeze(past_key_values["cache"])
return outputs
elif past_key_values is not None and not return_dict:
outputs, past_key_values = outputs
outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
return outputs
class FlaxLLaMABlockCollection(nn.Module):
config: LLaMAConfig
dtype: jnp.dtype = jnp.float32
param_dtype: jnp.dtype=jnp.float32
precision: Optional[Union[jax.lax.Precision, str]]=None
def setup(self):
block = FlaxLLaMABlock
if self.config.remat_block != '':
block = remat(
FlaxLLaMABlock, static_argnums=(3, 4, 5),
policy=get_gradient_checkpoint_policy(self.config.remat_block)
)
self.blocks = [
block(
self.config,
name=str(i),
dtype=self.dtype,
param_dtype=self.param_dtype,
precision=self.precision
) for i in range(self.config.num_hidden_layers)
]
def __call__(
self,
hidden_states,
attention_mask=None,
position_ids=None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
if not deterministic and self.config.fcm_max_ratio > 0:
# Apply forgetful causal mask
batch_size, seq_length = hidden_states.shape[0], hidden_states.shape[1]
fcm_ratio = jax.random.uniform(
self.make_rng('fcm'), shape=(batch_size, 1, 1, 1),
minval=self.config.fcm_min_ratio,
maxval=self.config.fcm_max_ratio
)
fcm_mask = jax.random.uniform(
self.make_rng('fcm'),
shape=(batch_size, 1, 1, seq_length)
) > fcm_ratio
fcm_mask = fcm_mask.at[:, :, :, 0].set(True)
fcm_mask = fcm_mask.astype('bool')
else:
fcm_mask = None
for block in self.blocks:
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = block(
hidden_states,
attention_mask,
position_ids,
deterministic,
init_cache,
output_attentions,
fcm_mask,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions += (layer_outputs[1],)
# this contains possible `None` values - `FlaxLLaMAModule` will filter them out
outputs = (hidden_states, all_hidden_states, all_attentions)
return outputs
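# Note (added for clarity, not in the original file): the fcm_mask above implements forgetful causal masking.
# During training (deterministic=False) a per-example drop ratio is sampled uniformly from
# [fcm_min_ratio, fcm_max_ratio] and each key position is kept with probability 1 - ratio, with position 0
# always kept; the mask is then combined with the causal mask inside each attention block.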
class FlaxLLaMAModule(nn.Module):
config: LLaMAConfig
dtype: jnp.dtype = jnp.float32
param_dtype: jnp.dtype=jnp.float32
precision: Optional[Union[jax.lax.Precision, str]]=None
def setup(self):
self.embed_dim = self.config.hidden_size
self.wte = nn.Embed(
self.config.vocab_size,
self.config.hidden_size,
embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
dtype=self.dtype,
param_dtype=self.param_dtype,
)
self.dropout = nn.Dropout(rate=self.config.embd_pdrop)
self.h = FlaxLLaMABlockCollection(self.config, dtype=self.dtype, param_dtype=self.param_dtype, precision=self.precision)
self.ln_f = RMSNorm(self.config.hidden_size, eps=self.config.rms_norm_eps, dtype=self.dtype, param_dtype=self.param_dtype)
def __call__(
self,
input_ids,
attention_mask,
position_ids,
deterministic=True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
input_embeds = self.wte(input_ids.astype("i4"))
hidden_states = self.dropout(input_embeds, deterministic=deterministic)
outputs = self.h(
hidden_states,
attention_mask,
position_ids=position_ids,
deterministic=deterministic,
init_cache=init_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
hidden_states = self.ln_f(hidden_states)
if output_hidden_states:
all_hidden_states = outputs[1] + (hidden_states,)
outputs = (hidden_states, all_hidden_states) + outputs[2:]
else:
outputs = (hidden_states,) + outputs[1:]
if not return_dict:
return tuple(v for v in outputs if v is not None)
return FlaxBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=outputs[1],
attentions=outputs[-1],
)
@add_start_docstrings("", "")
class FlaxLLaMAModel(FlaxLLaMAPreTrainedModel):
module_class = FlaxLLaMAModule
# append_call_sample_docstring(
# FlaxLLaMAModel,
# _TOKENIZER_FOR_DOC,
# _CHECKPOINT_FOR_DOC,
# FlaxCausalLMOutput,
# _CONFIG_FOR_DOC,
# )
class FlaxLLaMAForCausalLMModule(nn.Module):
config: LLaMAConfig
dtype: jnp.dtype = jnp.float32
param_dtype: jnp.dtype=jnp.float32
precision: Optional[Union[jax.lax.Precision, str]]=None
def setup(self):
self.transformer = FlaxLLaMAModule(self.config, dtype=self.dtype)
self.lm_head = nn.Dense(
self.config.vocab_size,
dtype=self.dtype,
param_dtype=self.param_dtype,
use_bias=False,
kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
precision=self.precision,
)
def __call__(
self,
input_ids,
attention_mask=None,
position_ids=None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
batch_size, seq_length = input_ids.shape
if attention_mask is None:
attention_mask = jnp.ones_like(input_ids)
if position_ids is None:
position_ids = jnp.broadcast_to(
jnp.clip(jnp.cumsum(attention_mask, axis=-1) - 1, a_min=0),
(batch_size, seq_length)
)
outputs = self.transformer(
input_ids,
attention_mask,
position_ids,
deterministic=deterministic,
init_cache=init_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
if self.config.tie_word_embeddings:
shared_kernel = self.transformer.variables["params"]["wte"]["embedding"].T
lm_logits = self.lm_head.apply({"params": {"kernel": shared_kernel}}, hidden_states)
else:
lm_logits = self.lm_head(hidden_states)
if not return_dict:
return (lm_logits,) + outputs[1:]
return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
@add_start_docstrings("", "")
class FlaxLLaMAForCausalLM(FlaxLLaMAPreTrainedModel):
module_class = FlaxLLaMAForCausalLMModule
def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
# initializing the cache
batch_size, seq_length = input_ids.shape
past_key_values = self.init_cache(batch_size, max_length)
# Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
# But since LLaMA uses a causal mask, those positions are masked anyway.
# Thus we can create a single static attention_mask here, which is more efficient for compilation.
extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
if attention_mask is not None:
position_ids = attention_mask.cumsum(axis=-1) - 1
extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
else:
position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
return {
"past_key_values": past_key_values,
"attention_mask": extended_attention_mask,
"position_ids": position_ids,
}
def update_inputs_for_generation(self, model_outputs, model_kwargs):
model_kwargs["past_key_values"] = model_outputs.past_key_values
model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
return model_kwargs
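# Usage sketch (assumption, not part of the original file; the vocab path and prompt are placeholders):
#
#   config = LLaMAConfig.load_config('7b')
#   model = FlaxLLaMAForCausalLM(config)  # random init; real weights would be loaded separately
#   tokenizer = LLaMAConfig.get_tokenizer({'vocab_file': '/path/to/tokenizer.model'})
#   inputs = tokenizer('Moi', return_tensors='np')
#   out = model.generate(inputs['input_ids'], attention_mask=inputs['attention_mask'], max_length=32)
#
# prepare_inputs_for_generation above builds the KV cache and a static attention mask once per generate()
# call, and update_inputs_for_generation advances position_ids by one step as tokens are produced.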
# append_call_sample_docstring(
# FlaxGPTJForCausalLM,
# _TOKENIZER_FOR_DOC,
# _CHECKPOINT_FOR_DOC,
# FlaxCausalLMOutput,
# _CONFIG_FOR_DOC,
# )
VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
PRETRAINED_VOCAB_FILES_MAP = {}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
SPIECE_UNDERLINE = "▁"
class LlamaTokenizer(PreTrainedTokenizer):
"""
Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is
no padding token in the original model.
Args:
vocab_file (`str`):
Path to the vocabulary file.
unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`):
The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`):
The end of sequence token.
pad_token (`str` or `tokenizers.AddedToken`, *optional*):
A special token used to make arrays of tokens the same size for batching purposes. Will then be ignored by
attention mechanisms or loss computation.
sp_model_kwargs (`Dict[str, Any]`, `Optional`, *optional*):
Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
to set:
- `enable_sampling`: Enable subword regularization.
- `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
- `nbest_size = {0,1}`: No sampling is performed.
- `nbest_size > 1`: samples from the nbest_size results.
- `nbest_size < 0`: assumes that nbest_size is infinite and samples from all hypotheses (lattice)
using forward-filtering-and-backward-sampling algorithm.
- `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
BPE-dropout.
add_bos_token (`bool`, *optional*, defaults to `True`):
Whether or not to add a `bos_token` at the start of sequences.
add_eos_token (`bool`, *optional*, defaults to `False`):
Whether or not to add an `eos_token` at the end of sequences.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to clean up spaces after decoding; cleanup consists of removing potential artifacts like
extra spaces.
use_default_system_prompt (`bool`, *optional*, defaults to `False`):
Whether or not the default system prompt for Llama should be used.
spaces_between_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to add spaces between special tokens.
legacy (`bool`, *optional*):
Whether or not the `legacy` behavior of the tokenizer should be used. Legacy refers to the behavior before the merge of #24622
and #25224, which included fixes to properly handle tokens that appear after special tokens. A simple
example:
- `legacy=True`:
```python
>>> from transformers import T5Tokenizer
>>> tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=True)
>>> tokenizer.encode("Hello <extra_id_0>.")
[8774, 32099, 3, 5, 1]
```
- `legacy=False`:
```python
>>> from transformers import T5Tokenizer
>>> tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=False)
>>> tokenizer.encode("Hello <extra_id_0>.") # the extra space `[3]` is no longer here
[8774, 32099, 5, 1]
```
Check out the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file,
unk_token="<unk>",
bos_token="<s>",
eos_token="</s>",
pad_token=None,
sp_model_kwargs: Optional[Dict[str, Any]] = None,
add_bos_token=False,
add_eos_token=False,
clean_up_tokenization_spaces=False,
use_default_system_prompt=False,
spaces_between_special_tokens=False,
legacy=None,
**kwargs,
):
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token
eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token
unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token
pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token
if legacy is None:
logger.warning_once(
f"You are using the default legacy behaviour of the {self.__class__}. This is"
" expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you."
" If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it"
" means, and thoroughly read the reason why this was added as explained in"
" https://github.com/huggingface/transformers/pull/24565"
)
legacy = True
self.legacy = legacy
self.vocab_file = vocab_file
self.add_bos_token = add_bos_token
self.add_eos_token = add_eos_token
self.use_default_system_prompt = use_default_system_prompt
self.sp_model = self.get_spm_processor(kwargs.pop("from_slow", False))
super().__init__(
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
pad_token=pad_token,
add_bos_token=add_bos_token,
add_eos_token=add_eos_token,
sp_model_kwargs=self.sp_model_kwargs,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
use_default_system_prompt=use_default_system_prompt,
spaces_between_special_tokens=spaces_between_special_tokens,
legacy=legacy,
**kwargs,
)
@property
def unk_token_length(self):
return len(self.sp_model.encode(str(self.unk_token)))
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_spm_processor
def get_spm_processor(self, from_slow=False):
tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
if self.legacy or from_slow: # no dependency on protobuf
tokenizer.Load(self.vocab_file)
return tokenizer
with open(self.vocab_file, "rb") as f:
sp_model = f.read()
model_pb2 = import_protobuf(f"The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)")
model = model_pb2.ModelProto.FromString(sp_model)
normalizer_spec = model_pb2.NormalizerSpec()
normalizer_spec.add_dummy_prefix = False
model.normalizer_spec.MergeFrom(normalizer_spec)
sp_model = model.SerializeToString()
tokenizer.LoadFromSerializedProto(sp_model)
return tokenizer
def __getstate__(self):
state = self.__dict__.copy()
state["sp_model"] = None
state["sp_model_proto"] = self.sp_model.serialized_model_proto()
return state
def __setstate__(self, d):
self.__dict__ = d
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
@property
def vocab_size(self):
"""Returns vocab size"""
return self.sp_model.get_piece_size()
def get_vocab(self):
"""Returns vocab as a dict"""
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize
def tokenize(self, text: "TextInput", add_special_tokens=False, **kwargs) -> List[str]:
"""
Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the
first token is special.
"""
if self.legacy or len(text) == 0:
return super().tokenize(text, **kwargs)
tokens = super().tokenize(SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " "), **kwargs)
if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens:
tokens = tokens[1:]
return tokens
# Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize
def _tokenize(self, text, **kwargs):
"""
Returns a tokenized string.
We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
`['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
`unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
`self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
"""
tokens = self.sp_model.encode(text, out_type=str)
if self.legacy or not text.startswith((SPIECE_UNDERLINE, " ")):
return tokens
# 1. Encode string + prefix ex: "<unk> Hey"
tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
# 2. Remove self.unk_token from ['<','unk','>', '▁Hey']
return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.sp_model.piece_to_id(token)
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
token = self.sp_model.IdToPiece(index)
return token
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
# since we manually add the prefix space, we have to remove it when decoding
if tokens[0].startswith(SPIECE_UNDERLINE):
tokens[0] = tokens[0][1:]
current_sub_tokens = []
out_string = ""
prev_is_special = False
for i, token in enumerate(tokens):
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special and i != 0 and self.legacy:
out_string += " "
out_string += self.sp_model.decode(current_sub_tokens) + token
prev_is_special = True
current_sub_tokens = []
else:
current_sub_tokens.append(token)
prev_is_special = False
out_string += self.sp_model.decode(current_sub_tokens)
return out_string
def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
"""
Save the vocabulary and special tokens file to a directory.
Args:
save_directory (`str`):
The directory in which to save the vocabulary.
Returns:
`Tuple(str)`: Paths to the files saved.
"""
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file, "wb") as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
bos_token_id = [self.bos_token_id] if self.add_bos_token else []
eos_token_id = [self.eos_token_id] if self.add_eos_token else []
output = bos_token_id + token_ids_0 + eos_token_id
if token_ids_1 is not None:
output = output + bos_token_id + token_ids_1 + eos_token_id
return output
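# Example (added for clarity, not in the original file): with add_bos_token=True and add_eos_token=False,
# build_inputs_with_special_tokens([10, 11]) returns [bos_token_id, 10, 11]; with both flags enabled and a
# second sequence, it returns [bos] + ids_0 + [eos] + [bos] + ids_1 + [eos].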
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` method.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
bos_token_id = [1] if self.add_bos_token else []
eos_token_id = [1] if self.add_eos_token else []
if token_ids_1 is None:
return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
return (
bos_token_id
+ ([0] * len(token_ids_0))
+ eos_token_id
+ bos_token_id
+ ([0] * len(token_ids_1))
+ eos_token_id
)
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Creates a mask from the two sequences passed to be used in a sequence-pair classification task. A sequence-pair
mask has the following format:
```
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
```
If `token_ids_1` is None, only the first portion of the mask (0s) is returned.
Args:
token_ids_0 (`List[int]`):
List of ids.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
"""
bos_token_id = [self.bos_token_id] if self.add_bos_token else []
eos_token_id = [self.eos_token_id] if self.add_eos_token else []
output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
if token_ids_1 is not None:
output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
return output