LeroyDyer committed on
Commit 0c33959
1 Parent(s): ac7dacc

Upload 3 files

Files changed (3)
  1. configuration_mistral.py +150 -8
  2. modeling_mistral.py +0 -0
  3. params.json +11 -0
configuration_mistral.py CHANGED
@@ -12,19 +12,14 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-""" Mistral model configuration"""
+"""Mistral model configuration"""
 
-from ...configuration_utils import PretrainedConfig
-from ...utils import logging
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
 
 
 logger = logging.get_logger(__name__)
 
-MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
-    "mistralai/Mistral-7B-v0.1": "https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/config.json",
-    "mistralai/Mistral-7B-Instruct-v0.1": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/config.json",
-}
-
 
 class MistralConfig(PretrainedConfig):
     r"""
@@ -101,6 +96,151 @@ class MistralConfig(PretrainedConfig):
     model_type = "mistral"
     keys_to_ignore_at_inference = ["past_key_values"]
 
+    def __init__(
+        self,
+        vocab_size=32000,
+        hidden_size=4096,
+        intermediate_size=14336,
+        num_hidden_layers=32,
+        num_attention_heads=32,
+        num_key_value_heads=8,
+        hidden_act="silu",
+        max_position_embeddings=4096 * 32,
+        initializer_range=0.02,
+        rms_norm_eps=1e-6,
+        use_cache=True,
+        pad_token_id=None,
+        bos_token_id=1,
+        eos_token_id=2,
+        tie_word_embeddings=False,
+        rope_theta=10000.0,
+        sliding_window=4096,
+        attention_dropout=0.0,
+        max_thoughts=16, thought_length=1024,
+        merged_talk_heads=True,
+        merged_lm_and_talk_heads=False,
+        merged_lm_and_think_heads=True,
+        use_concat_talk_head=True,
+        use_shallow_think=True,
+        use_shallow_talk=False,
+        use_complex_think_head=False,
+        use_complex_talk_head=True,
+        use_weighted_talk_head=True,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_size = hidden_size
+        self.intermediate_size = intermediate_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.sliding_window = sliding_window
+
+        # for backward compatibility
+        if num_key_value_heads is None:
+            num_key_value_heads = num_attention_heads
+
+        self.num_key_value_heads = num_key_value_heads
+        self.hidden_act = hidden_act
+        self.initializer_range = initializer_range
+        self.rms_norm_eps = rms_norm_eps
+        self.use_cache = use_cache
+        self.rope_theta = rope_theta
+        self.attention_dropout = attention_dropout
+        self.max_thoughts = max_thoughts
+        self.thought_length = thought_length
+        self.merged_talk_heads = merged_talk_heads
+        self.merged_lm_and_talk_heads = merged_lm_and_talk_heads
+        self.merged_lm_and_think_heads = merged_lm_and_think_heads
+        self.use_concat_talk_head = use_concat_talk_head
+        self.use_shallow_think = use_shallow_think
+        self.use_shallow_talk = use_shallow_talk
+        self.use_complex_think_head = use_complex_think_head
+        self.use_complex_talk_head = use_complex_talk_head
+        self.use_weighted_talk_head = use_weighted_talk_head
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
+class MistralStarConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`MistralModel`]. It is used to instantiate an
+    Mistral model according to the specified arguments, defining the model architecture. Instantiating a configuration
+    with the defaults will yield a similar configuration to that of the Mistral-7B-v0.1 or Mistral-7B-Instruct-v0.1.
+
+    [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1)
+    [mistralai/Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1)
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 32000):
+            Vocabulary size of the Mistral model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`MistralModel`]
+        hidden_size (`int`, *optional*, defaults to 4096):
+            Dimension of the hidden representations.
+        intermediate_size (`int`, *optional*, defaults to 14336):
+            Dimension of the MLP representations.
+        num_hidden_layers (`int`, *optional*, defaults to 32):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 32):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        num_key_value_heads (`int`, *optional*, defaults to 8):
+            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+            by meanpooling all the original heads within that group. For more details checkout [this
+            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`.
+        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+            The non-linear activation function (function or string) in the decoder.
+        max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
+            The maximum sequence length that this model might ever be used with. Mistral's sliding window attention
+            allows sequence of up to 4096*32 tokens.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+            The epsilon used by the rms normalization layers.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        pad_token_id (`int`, *optional*):
+            The id of the padding token.
+        bos_token_id (`int`, *optional*, defaults to 1):
+            The id of the "beginning-of-sequence" token.
+        eos_token_id (`int`, *optional*, defaults to 2):
+            The id of the "end-of-sequence" token.
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether the model's input and output word embeddings should be tied.
+        rope_theta (`float`, *optional*, defaults to 10000.0):
+            The base period of the RoPE embeddings.
+        sliding_window (`int`, *optional*, defaults to 4096):
+            Sliding window attention window size. If not specified, will default to `4096`.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout ratio for the attention probabilities.
+
+    ```python
+    >>> from transformers import MistralModel, MistralConfig
+
+    >>> # Initializing a Mistral 7B style configuration
+    >>> configuration = MistralConfig()
+
+    >>> # Initializing a model from the Mistral 7B style configuration
+    >>> model = MistralModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "mistralstar"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
     def __init__(
         self,
         vocab_size=32000,
@@ -122,6 +262,7 @@
         sliding_window=4096,
         attention_dropout=0.0,
         max_thoughts=16,
+        thought_length=1024,
         merged_talk_heads=True,
         merged_lm_and_talk_heads=False,
         merged_lm_and_think_heads=True,
@@ -153,6 +294,7 @@
         self.rope_theta = rope_theta
         self.attention_dropout = attention_dropout
         self.max_thoughts = max_thoughts
+        self.thought_length = thought_length
        self.merged_talk_heads = merged_talk_heads
        self.merged_lm_and_talk_heads = merged_lm_and_talk_heads
        self.merged_lm_and_think_heads = merged_lm_and_think_heads
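A minimal sketch of how the new `MistralStarConfig` might be instantiated with the thought-related parameters introduced in this commit. The local import path is an assumption (the class could equally be loaded through `trust_remote_code`), and the parameter values shown are just the defaults from the diff above.

```python
# Sketch only: assumes configuration_mistral.py from this repo is importable
# from the working directory; otherwise load the model repo with
# trust_remote_code=True and use the registered config class instead.
from configuration_mistral import MistralStarConfig

config = MistralStarConfig(
    max_thoughts=16,       # maximum number of thought steps (default from the diff)
    thought_length=1024,   # new parameter added in this commit
    merged_talk_heads=True,
    use_weighted_talk_head=True,
)

print(config.model_type)       # "mistralstar"
print(config.thought_length)   # 1024
```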
modeling_mistral.py CHANGED
The diff for this file is too large to render. See raw diff
 
params.json ADDED
@@ -0,0 +1,11 @@
+{
+    "dim": 4096,
+    "n_layers": 32,
+    "head_dim": 128,
+    "hidden_dim": 14336,
+    "n_heads": 32,
+    "n_kv_heads": 8,
+    "norm_eps": 1e-05,
+    "vocab_size": 32768,
+    "rope_theta": 1000000.0
+}
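The keys in `params.json` follow the reference-implementation naming (`dim`, `n_layers`, ...) rather than the `MistralConfig` argument names. Below is a hedged sketch of the mapping one might apply when translating this file into config kwargs; the mapping itself is an assumption on my part and is not defined anywhere in this commit.

```python
import json

# Assumed correspondence between params.json keys and MistralConfig arguments.
# head_dim has no direct config field here; it is implied by dim / n_heads
# (4096 / 32 = 128, consistent with the file above).
KEY_MAP = {
    "dim": "hidden_size",
    "n_layers": "num_hidden_layers",
    "hidden_dim": "intermediate_size",
    "n_heads": "num_attention_heads",
    "n_kv_heads": "num_key_value_heads",
    "norm_eps": "rms_norm_eps",
    "vocab_size": "vocab_size",
    "rope_theta": "rope_theta",
}

with open("params.json") as f:
    params = json.load(f)

# Keep only the keys we know how to map, renamed to config argument names.
config_kwargs = {KEY_MAP[k]: v for k, v in params.items() if k in KEY_MAP}
print(config_kwargs)
```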