mikkelyo committed on
Commit
5b6f661
1 Parent(s): db82011

Create confirguration_falcon.py

Files changed (1)
  1. confirguration_falcon.py +139 -0
confirguration_falcon.py ADDED
@@ -0,0 +1,139 @@
+# coding=utf-8
+# Copyright 2023 the Falcon authors and HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Falcon configuration"""
+from transformers.configuration_utils import PretrainedConfig
+from transformers.utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
+    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
+}
+
+
+class FalconConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`FalconModel`]. It is used to instantiate a Falcon
+    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+    defaults will yield a similar configuration to that of the
+    [tiiuae/falcon-7b](https://huggingface.co/tiiuae/falcon-7b) architecture.
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+    Args:
+        vocab_size (`int`, *optional*, defaults to 65024):
+            Vocabulary size of the Falcon model. Defines the number of different tokens that can be represented by the
+            `input_ids` passed when calling [`FalconModel`].
+        hidden_size (`int`, *optional*, defaults to 4544):
+            Dimension of the hidden representations.
+        num_hidden_layers (`int`, *optional*, defaults to 32):
+            Number of hidden layers in the Transformer decoder.
+        num_attention_heads (`int`, *optional*, defaults to 71):
+            Number of attention heads for each attention layer in the Transformer decoder.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether the model should return the last key/values attentions (not used by all models). Only relevant if
+            `config.is_decoder=True`.
+        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
+            The epsilon used by the layer normalization layers.
+        hidden_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout probability for MLP layers.
+        attention_dropout (`float`, *optional*, defaults to 0.0):
+            The dropout probability for attention layers.
+        num_kv_heads (`int`, *optional*):
+            Number of key-value heads to use per attention layer. If unset, defaults to the same value as
+            `num_attention_heads`.
+        alibi (`bool`, *optional*, defaults to `False`):
+            Whether to use ALiBi positional biases during self-attention.
+        new_decoder_architecture (`bool`, *optional*, defaults to `False`):
+            Whether to use the new (Falcon-40B) decoder architecture. If `True`, the `multi_query` and `parallel_attn`
+            arguments are ignored, as the new decoder always uses parallel attention.
+        multi_query (`bool`, *optional*, defaults to `True`):
+            Whether to use multi-query attention in the decoder. Ignored when `new_decoder_architecture` is `True`.
+        parallel_attn (`bool`, *optional*, defaults to `True`):
+            Whether to compute attention in parallel with the feedforward layer. If `False`, they are consecutive
+            instead, as in the original Transformer architecture. Ignored when `new_decoder_architecture` is `True`.
+        bias (`bool`, *optional*, defaults to `False`):
+            Whether to use bias on Linear layers.
+        bos_token_id (`int`, *optional*, defaults to 11):
+            The id of the "beginning-of-sequence" token.
+        eos_token_id (`int`, *optional*, defaults to 11):
+            The id of the "end-of-sequence" token.
+    Example:
+    ```python
+    >>> from transformers import FalconModel, FalconConfig
+    >>> # Initializing a small (2-layer) Falcon configuration
+    >>> configuration = FalconConfig(num_hidden_layers=2)
+    >>> # Initializing a model from the small configuration
+    >>> model = FalconModel(configuration)
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+    model_type = "falcon"
+    keys_to_ignore_at_inference = ["past_key_values"]
+
+    def __init__(
+        self,
+        vocab_size=65024,
+        hidden_size=4544,
+        num_hidden_layers=32,
+        num_attention_heads=71,
+        layer_norm_epsilon=1e-5,
+        initializer_range=0.02,
+        use_cache=True,
+        hidden_dropout=0.0,
+        attention_dropout=0.0,
+        num_kv_heads=None,
+        alibi=False,
+        new_decoder_architecture=False,
+        multi_query=True,
+        parallel_attn=True,
+        bias=False,
+        bos_token_id=11,
+        eos_token_id=11,
+        **kwargs,
+    ):
+        self.vocab_size = vocab_size
+        # Backward compatibility with n_embed kwarg
+        n_embed = kwargs.pop("n_embed", None)
+        self.hidden_size = hidden_size if n_embed is None else n_embed
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.layer_norm_epsilon = layer_norm_epsilon
+        self.initializer_range = initializer_range
+        self.use_cache = use_cache
+        self.hidden_dropout = hidden_dropout
+        self.attention_dropout = attention_dropout
+
+        self.bos_token_id = bos_token_id
+        self.eos_token_id = eos_token_id
+        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
+        self.alibi = alibi
+        self.new_decoder_architecture = new_decoder_architecture
+        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
+        self.parallel_attn = parallel_attn
+        self.bias = bias
+
+        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+    @property
+    def head_dim(self):
+        return self.hidden_size // self.num_attention_heads
+
+    @property
+    def rotary(self):
+        return not self.alibi
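
For reference, a minimal sketch of how the two convenience properties at the end behave, assuming the file above is importable as a local module (the import path below is hypothetical; in this commit the file is named `confirguration_falcon.py`):

```python
# Minimal sketch, not part of the commit. Adjust the import to the actual
# module name of the committed file.
from configuration_falcon import FalconConfig  # hypothetical import path

# Falcon-7B-style defaults: 71 attention heads over a 4544-dim hidden state.
config = FalconConfig()
print(config.head_dim)  # 4544 // 71 == 64
print(config.rotary)    # True, because `alibi` defaults to False

# Enabling ALiBi positional biases turns rotary embeddings off.
alibi_config = FalconConfig(alibi=True)
print(alibi_config.rotary)  # False
```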