""" Moss model configuration"""

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging


logger = logging.get_logger(__name__)


class MossConfig(PretrainedConfig):
r""" |
|
This is the configuration class to store the configuration of a [`MossModel`]. It is used to instantiate a |
|
Moss model according to the specified arguments, defining the model architecture. Configuration objects |
|
inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from |
|
[`PretrainedConfig`] for more information. |
|
|
|
Args: |
|
vocab_size (`int`, *optional*, defaults to 92494): |
|
Vocabulary size of the Moss model. Defines the number of different tokens that can be represented by the |
|
`inputs_ids` passed when calling [`MossModel`] |
|
hidden_size (`int`, *optional*, defaults to 4096): |
|
Dimension of the hidden representations. |
|
intermediate_size (`int`, *optional*, defaults to 11008): |
|
Dimension of the MLP representations. |
|
num_hidden_layers (`int`, *optional*, defaults to 32): |
|
Number of hidden layers in the Transformer encoder. |
|
num_attention_heads (`int`, *optional*, defaults to 32): |
|
Number of attention heads for each attention layer in the Transformer encoder. |
|
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): |
|
The non-linear activation function (function or string) in the decoder. |
|
max_position_embeddings (`int`, *optional*, defaults to 2048): |
|
The maximum sequence length that this model might ever be used with. Typically set this to something large |
|
just in case (e.g., 512 or 1024 or 2048). |
|
initializer_range (`float`, *optional*, defaults to 0.02): |
|
The standard deviation of the truncated_normal_initializer for initializing all weight matrices. |
|
rms_norm_eps (`float`, *optional*, defaults to 1e-05): |
|
The epsilon used by the rms normalization layers. |
|
use_cache (`bool`, *optional*, defaults to `True`): |
|
Whether or not the model should return the last key/values attentions (not used by all models). Only |
|
relevant if `config.is_decoder=True`. |
|
tie_word_embeddings(`bool`, *optional*, defaults to `False`): |
|
Whether to tie weight embeddings |
|
Example: |
|
|
|
```python |
|
>>> from transformers import MossModel, MossConfig |
|
|
|
>>> # Initializing a Moss-7b style configuration |
|
>>> configuration = MossConfig() |
|
|
|
>>> # Initializing a model from the Moss-7b style configuration |
|
>>> model = MossModel(configuration) |
|
|
|
>>> # Accessing the model configuration |
|
>>> configuration = model.config |
|
```""" |
|
model_type = "moss" |
|
keys_to_ignore_at_inference = ["past_key_values"] |
|
|
|
    def __init__(
        self,
        vocab_size=92494,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-05,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # Token ids and embedding tying are handled by the PretrainedConfig base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
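

# A minimal usage sketch (illustrative, assuming the standard `PretrainedConfig`
# serialization API): round-trip the config through `save_pretrained` /
# `from_pretrained`, both inherited from `PretrainedConfig`.
if __name__ == "__main__":
    import tempfile

    # Override one field so the round trip is observable.
    config = MossConfig(num_hidden_layers=2)
    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(tmp_dir)  # writes config.json into tmp_dir
        reloaded = MossConfig.from_pretrained(tmp_dir)
    assert reloaded.num_hidden_layers == 2
    assert reloaded.model_type == "moss"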