InternLM-Math committed
Commit c5cf94d
1 Parent(s): 2b67ca8

Delete configuration_internlm.py

Files changed (1)
  1. configuration_internlm.py +0 -159
configuration_internlm.py DELETED
@@ -1,159 +0,0 @@
- # coding=utf-8
- # Copyright (c) InternLM. All rights reserved.
- #
- # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
- # and OPT implementations in this library. It has been modified from its
- # original forms to accommodate minor architectural differences compared
- # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """ InternLM model configuration"""
-
- from transformers.configuration_utils import PretrainedConfig
- from transformers.utils import logging
-
- logger = logging.get_logger(__name__)
-
- INTERNLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
-
-
- class InternLMConfig(PretrainedConfig):
-     r"""
-     This is the configuration class to store the configuration of a [`InternLMModel`]. It is used to instantiate
-     an InternLM model according to the specified arguments, defining the model architecture. Instantiating a
-     configuration with the defaults will yield a similar configuration to that of the InternLM-7B.
-
-     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
-     documentation from [`PretrainedConfig`] for more information.
-
-
-     Args:
-         vocab_size (`int`, *optional*, defaults to 103168):
-             Vocabulary size of the InternLM model. Defines the number of different tokens that can be represented by the
-             `input_ids` passed when calling [`InternLMModel`]
-         hidden_size (`int`, *optional*, defaults to 4096):
-             Dimension of the hidden representations.
-         intermediate_size (`int`, *optional*, defaults to 11008):
-             Dimension of the MLP representations.
-         num_hidden_layers (`int`, *optional*, defaults to 32):
-             Number of hidden layers in the Transformer decoder.
-         num_attention_heads (`int`, *optional*, defaults to 32):
-             Number of attention heads for each attention layer in the Transformer decoder.
-         num_key_value_heads (`int`, *optional*):
-             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
-             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
-             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
-             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
-             by meanpooling all the original heads within that group. For more details check out [this
-             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
-             `num_attention_heads`.
-         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
-             The non-linear activation function (function or string) in the decoder.
-         max_position_embeddings (`int`, *optional*, defaults to 2048):
-             The maximum sequence length that this model might ever be used with. Typically set this to something large
-             just in case (e.g., 512 or 1024 or 2048).
-         initializer_range (`float`, *optional*, defaults to 0.02):
-             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
-         rms_norm_eps (`float`, *optional*, defaults to 1e-6):
-             The epsilon used by the rms normalization layers.
-         use_cache (`bool`, *optional*, defaults to `True`):
-             Whether or not the model should return the last key/values attentions (not used by all models). Only
-             relevant if `config.is_decoder=True`.
-         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
-             Whether to tie weight embeddings.
-     Example:
-
-     ```python
-     >>> from transformers import InternLMModel, InternLMConfig
-
-     >>> # Initializing an InternLM internlm-7b style configuration
-     >>> configuration = InternLMConfig()
-
-     >>> # Initializing a model from the internlm-7b style configuration
-     >>> model = InternLMModel(configuration)
-
-     >>> # Accessing the model configuration
-     >>> configuration = model.config
-     ```"""
-     model_type = "internlm"
-     _auto_class = "AutoConfig"
-
-     def __init__(  # pylint: disable=W0102
-         self,
-         vocab_size=103168,
-         hidden_size=4096,
-         intermediate_size=11008,
-         num_hidden_layers=32,
-         num_attention_heads=32,
-         num_key_value_heads=None,
-         hidden_act="silu",
-         max_position_embeddings=2048,
-         initializer_range=0.02,
-         rms_norm_eps=1e-6,
-         use_cache=True,
-         pad_token_id=0,
-         bos_token_id=1,
-         eos_token_id=2,
-         tie_word_embeddings=False,
-         bias=True,
-         rope_theta=10000,
-         rope_scaling=None,
-         **kwargs,
-     ):
-         self.vocab_size = vocab_size
-         self.max_position_embeddings = max_position_embeddings
-         self.hidden_size = hidden_size
-         self.intermediate_size = intermediate_size
-         self.num_hidden_layers = num_hidden_layers
-         self.num_attention_heads = num_attention_heads
-         self.bias = bias
-
-         if num_key_value_heads is None:
-             num_key_value_heads = num_attention_heads
-         self.num_key_value_heads = num_key_value_heads
-
-         self.hidden_act = hidden_act
-         self.initializer_range = initializer_range
-         self.rms_norm_eps = rms_norm_eps
-         self.use_cache = use_cache
-         self.rope_theta = rope_theta
-         self.rope_scaling = rope_scaling
-         self._rope_scaling_validation()
-         super().__init__(
-             pad_token_id=pad_token_id,
-             bos_token_id=bos_token_id,
-             eos_token_id=eos_token_id,
-             tie_word_embeddings=tie_word_embeddings,
-             **kwargs,
-         )
-
-     def _rope_scaling_validation(self):
-         """
-         Validate the `rope_scaling` configuration.
-         """
-         if self.rope_scaling is None:
-             return
-
-         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
-             raise ValueError(
-                 "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
-                 f"got {self.rope_scaling}"
-             )
-         rope_scaling_type = self.rope_scaling.get("type", None)
-         rope_scaling_factor = self.rope_scaling.get("factor", None)
-         if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
-             raise ValueError(
-                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
-             )
-         if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor < 1.0:
-             raise ValueError(f"`rope_scaling`'s factor field must be a float >= 1, got {rope_scaling_factor}")
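For context on what this commit removes: the sketch below shows roughly how the deleted class was used, exercising the Grouped Query Attention parameterization and the `rope_scaling` validation described in its docstring. It is a minimal illustration, not part of the repository; the `configuration_internlm` import assumes a local copy of the removed file, and `transformers` must be installed to provide `PretrainedConfig`.

```python
# Minimal sketch, assuming a local copy of the deleted configuration_internlm.py.
from configuration_internlm import InternLMConfig

# GQA: 32 query heads sharing 8 key/value heads (4 query heads per KV head).
# MHA would be num_key_value_heads=32; MQA would be num_key_value_heads=1.
config = InternLMConfig(
    num_attention_heads=32,
    num_key_value_heads=8,
    # `rope_scaling` must be a dict with exactly two fields: `type`
    # ("linear" or "dynamic") and `factor` (a float >= 1.0).
    rope_scaling={"type": "dynamic", "factor": 2.0},
)
print(config.num_key_value_heads)  # 8

# Malformed values are rejected at construction time by
# `_rope_scaling_validation`; an integer factor fails the isinstance
# check, which requires a float:
try:
    InternLMConfig(rope_scaling={"type": "linear", "factor": 2})
except ValueError as err:
    print(err)  # "`rope_scaling`'s factor field must be a float >= 1, got 2"
```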