Bo1015 committed
Commit dc34acc · verified · 1 parent: 015d6dc

Upload 2 files

Files changed (2):
  1. config.json +5 -5
  2. configuration_proteinglm.py +7 -7
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "proteinglm-1b-mlm",
+  "_name_or_path": "proteinglm-10b-mlm",
   "add_bias_linear": true,
   "add_qkv_bias": true,
   "apply_query_key_layer_scaling": true,
@@ -20,15 +20,15 @@
   "bias_dropout_fusion": true,
   "deepnorm": true,
   "experts_per_token": 0,
-  "ffn_hidden_size": 5461,
+  "ffn_hidden_size": 11606,
   "fp32_residual_connection": false,
   "glu_activation": "geglu",
   "head_num": 1,
   "hidden_dropout": 0.0,
-  "hidden_size": 2048,
+  "hidden_size": 4352,
   "initializer_range": 0.02,
   "is_causal": false,
-  "kv_channels": 64,
+  "kv_channels": 136,
   "layernorm_epsilon": 1e-05,
   "model_type": "ProteinGLM",
   "moe": false,
@@ -36,7 +36,7 @@
   "multi_query_group_num": 1,
   "num_attention_heads": 32,
   "num_experts": 0,
-  "num_layers": 24,
+  "num_layers": 47,
   "padded_vocab_size": 128,
   "post_layer_norm": true,
   "quantization_bit": 0,
configuration_proteinglm.py CHANGED
@@ -5,16 +5,17 @@ class ProteinGLMConfig(PretrainedConfig):
     model_type = "ProteinGLM"
     def __init__(
         self,
-        num_layers=24,
+        num_layers=47,
         padded_vocab_size=128,
-        hidden_size=2048,
-        ffn_hidden_size=5461,
-        kv_channels=64,
+        hidden_size=4352,
+        ffn_hidden_size=11606,
+        kv_channels=136,
         num_attention_heads=32,
         seq_length=1024,
         hidden_dropout=0.0,
         attention_dropout=0.0,
         layernorm_epsilon=1e-5,
+        initializer_range=0.02,
         glu_activation='geglu',
         rmsnorm=False,
         deepnorm=True,
@@ -33,7 +34,6 @@ class ProteinGLMConfig(PretrainedConfig):
         use_pytorch_sdpa=True,
         is_causal=False,
         use_cache=True,
-        initializer_range=0.02,
         moe=False,
         num_experts=0,
         experts_per_token=0,
@@ -60,6 +60,7 @@ class ProteinGLMConfig(PretrainedConfig):
         self.attention_dropout = attention_dropout
         self.layernorm_epsilon = layernorm_epsilon
         self.glu_activation = glu_activation
+        self.initializer_range = initializer_range
         self.rmsnorm = rmsnorm
         self.deepnorm = deepnorm
         self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
@@ -75,8 +76,7 @@ class ProteinGLMConfig(PretrainedConfig):
         self.quantization_bit = quantization_bit
         self.rotary_embedding_2d = rotary_embedding_2d
         self.is_causal = is_causal
-        self.use_cache = use_cache
-        self.initializer_range = initializer_range
+        self.use_cache=use_cache
         self.use_pytorch_sdpa = use_pytorch_sdpa
         self.moe = moe
         self.num_experts = num_experts
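With the defaults in configuration_proteinglm.py now matching config.json, loading the repo picks up the 10B shapes. A minimal check (a sketch only; the repo id Bo1015/proteinglm-10b-mlm is assumed from the commit context, and trust_remote_code is needed because ProteinGLMConfig lives in this file rather than in transformers):

from transformers import AutoConfig

# Load the configuration updated by this commit; trust_remote_code pulls in
# configuration_proteinglm.py so the custom ProteinGLMConfig class is used.
config = AutoConfig.from_pretrained(
    "Bo1015/proteinglm-10b-mlm",  # assumed repo id
    trust_remote_code=True,
)

# These should reflect the values changed above: 47, 4352, 11606, 136.
print(config.num_layers, config.hidden_size,
      config.ffn_hidden_size, config.kv_channels)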