# gemma-2-baku-2b / model_config.yaml
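# Attention logits are soft-capped to +/-50 via 50 * tanh(logits / 50) (Gemma 2).
# attention_scores_scalar is Gemma 2's query pre-attention scalar: attention
# scores are divided by sqrt(256) before the softmax.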
attention_logit_softcapping: 50.0
attention_scores_scalar: 256
bias: false
block_size: 8192
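# Final LM-head logits are soft-capped to +/-30 the same way.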
final_logit_softcapping: 30.0
gelu_approximate: tanh
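# 8 heads x 256 head_size give a 2048-dim attention output, projected back to n_embd = 2304.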
head_size: 256
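# Upstream Hugging Face checkpoint whose architecture this file mirrors.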
hf_config:
  name: gemma-2-2b
  org: google
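# GeGLU-style feed-forward (GemmaMLP) with tanh-approximate GELU.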
intermediate_size: 9216
lm_head_bias: false
mlp_class_name: GemmaMLP
n_embd: 2304
n_expert: 0
n_expert_per_token: 0
n_head: 8
n_layer: 26
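# Grouped-query attention: 8 query heads share 4 KV heads (2 queries per group).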
n_query_groups: 4
name: Gemma-2-2b
norm_class_name: RMSNorm
norm_eps: 1.0e-05
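# 256000 is already a multiple of padding_multiple (500 * 512), so no padding rows are added.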
padded_vocab_size: 256000
padding_multiple: 512
parallel_residual: false
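# Gemma 2 adds RMSNorms after the attention and MLP blocks on top of the usual pre-norms.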
post_attention_norm: true
post_mlp_norm: true
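# Full-width rotary embeddings (rotary_percentage 1.0), base theta 10000, no context condensing.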
rope_base: 10000
rope_condense_ratio: 1
rotary_percentage: 1.0
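# Token embeddings are multiplied by sqrt(n_embd) = sqrt(2304) = 48 before the first block.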
scale_embeddings: true
shared_attention_norm: false
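# Every second layer uses 4096-token sliding-window attention; the rest attend globally.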
sliding_window_layer_placing: 2
sliding_window_size: 4096
use_flash_attn: true
vocab_size: 256000