# Refact-1_6-base / configuration_gpt_refact.py
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)

class GPTRefactConfig(PretrainedConfig):
    """Configuration class for the GPT-Refact (Refact-1_6) causal LM.

    Standard transformers attribute names (``hidden_size``, ``num_hidden_layers``,
    ...) are redirected to the GPT-style names below via ``attribute_map``.
    """

    model_type = "gpt_refact"
    keys_to_ignore_at_inference = ["past_key_values"]

    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=49216,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=-1,
        eos_token_id=0,
        max_position_embeddings: int = 4096,
        multi_query: bool = True,
        attention_softmax_in_fp32=False,
        scale_attention_softmax_in_fp32=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.multi_query = multi_query
        # "max_position_embeddings" is a key in attribute_map, so PretrainedConfig
        # redirects this assignment to n_positions.
        self.max_position_embeddings = max_position_embeddings
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
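

# Minimal usage sketch (assuming the standard PretrainedConfig attribute_map
# behavior): the mapped names below are aliases for the GPT-style fields above.
if __name__ == "__main__":
    config = GPTRefactConfig()
    print(config.hidden_size)          # alias of n_embd -> 768
    print(config.num_hidden_layers)    # alias of n_layer -> 12
    print(config.num_attention_heads)  # alias of n_head -> 12
    # max_position_embeddings is an alias of n_positions, so the default
    # max_position_embeddings=4096 ends up stored in n_positions.
    print(config.n_positions)          # expected 4096 with the defaults above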