iwaitu committed
Commit 9a319e2
1 Parent(s): ad9fd7f

Upload folder using huggingface_hub

added_tokens.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "<eop>": 151334,
+   "<sop>": 151333,
+   "<|assistant|>": 151337,
+   "<|begin_of_image|>": 151339,
+   "<|begin_of_video|>": 151341,
+   "<|end_of_image|>": 151340,
+   "<|end_of_video|>": 151342,
+   "<|endoftext|>": 151329,
+   "<|observation|>": 151338,
+   "<|system|>": 151335,
+   "<|user|>": 151336,
+   "[MASK]": 151330,
+   "[gMASK]": 151331,
+   "[sMASK]": 151332
+ }
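
These sixteen entries extend the base vocabulary with the model's control tokens. A minimal sketch of resolving them (assuming a local checkout of this repo and its custom tokenizer code; the path is illustrative):

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("./glm-4-9b-chat-1m", trust_remote_code=True)
    assert tokenizer.convert_tokens_to_ids("<|user|>") == 151336
    assert tokenizer.convert_tokens_to_ids("<|endoftext|>") == 151329
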
config.json ADDED
@@ -0,0 +1,50 @@
+ {
+   "_name_or_path": "glm-4-9b-chat-1m",
+   "add_bias_linear": false,
+   "add_qkv_bias": true,
+   "apply_query_key_layer_scaling": true,
+   "apply_residual_connection_post_layernorm": false,
+   "architectures": [
+     "ChatGLMForConditionalGeneration"
+   ],
+   "attention_dropout": 0.0,
+   "attention_softmax_in_fp32": true,
+   "auto_map": {
+     "AutoConfig": "configuration_chatglm.ChatGLMConfig",
+     "AutoModel": "modeling_chatglm.ChatGLMForConditionalGeneration",
+     "AutoModelForCausalLM": "modeling_chatglm.ChatGLMForConditionalGeneration",
+     "AutoModelForSeq2SeqLM": "modeling_chatglm.ChatGLMForConditionalGeneration",
+     "AutoModelForSequenceClassification": "modeling_chatglm.ChatGLMForSequenceClassification"
+   },
+   "bias_dropout_fusion": true,
+   "classifier_dropout": null,
+   "eos_token_id": [
+     151329,
+     151336,
+     151338
+   ],
+   "ffn_hidden_size": 13696,
+   "fp32_residual_connection": false,
+   "hidden_dropout": 0.0,
+   "hidden_size": 4096,
+   "kv_channels": 128,
+   "layernorm_epsilon": 1.5625e-07,
+   "model_type": "chatglm",
+   "multi_query_attention": true,
+   "multi_query_group_num": 4,
+   "num_attention_heads": 32,
+   "num_hidden_layers": 40,
+   "num_layers": 40,
+   "original_rope": true,
+   "pad_token_id": 151329,
+   "padded_vocab_size": 151552,
+   "post_layer_norm": true,
+   "rmsnorm": true,
+   "rope_ratio": 10000,
+   "seq_length": 1048576,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.41.2",
+   "use_cache": true,
+   "vocab_size": 151552
+ }
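
The three eos_token_id values correspond to "<|endoftext|>", "<|user|>" and "<|observation|>" in added_tokens.json, so decoding stops when the model begins a new turn. A loading sketch (local path illustrative; auto_map routes AutoConfig to the custom class, hence trust_remote_code):

    from transformers import AutoConfig

    config = AutoConfig.from_pretrained("./glm-4-9b-chat-1m", trust_remote_code=True)
    print(config.seq_length)             # 1048576, the 1M-token context window
    print(config.multi_query_group_num)  # 4 KV-head groups shared by 32 query heads
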
configuration_chatglm.py ADDED
@@ -0,0 +1,58 @@
+ from transformers import PretrainedConfig
+
+
+ class ChatGLMConfig(PretrainedConfig):
+     model_type = "chatglm"
+
+     def __init__(
+         self,
+         num_layers=28,
+         padded_vocab_size=65024,
+         hidden_size=4096,
+         ffn_hidden_size=13696,
+         kv_channels=128,
+         num_attention_heads=32,
+         seq_length=2048,
+         hidden_dropout=0.0,
+         classifier_dropout=None,
+         attention_dropout=0.0,
+         layernorm_epsilon=1e-5,
+         rmsnorm=True,
+         apply_residual_connection_post_layernorm=False,
+         post_layer_norm=True,
+         add_bias_linear=False,
+         add_qkv_bias=False,
+         bias_dropout_fusion=True,
+         multi_query_attention=False,
+         multi_query_group_num=1,
+         rope_ratio=1,
+         apply_query_key_layer_scaling=True,
+         attention_softmax_in_fp32=True,
+         fp32_residual_connection=False,
+         **kwargs
+     ):
+         self.num_layers = num_layers
+         self.vocab_size = padded_vocab_size
+         self.padded_vocab_size = padded_vocab_size
+         self.hidden_size = hidden_size
+         self.ffn_hidden_size = ffn_hidden_size
+         self.kv_channels = kv_channels
+         self.num_attention_heads = num_attention_heads
+         self.seq_length = seq_length
+         self.hidden_dropout = hidden_dropout
+         self.classifier_dropout = classifier_dropout
+         self.attention_dropout = attention_dropout
+         self.layernorm_epsilon = layernorm_epsilon
+         self.rmsnorm = rmsnorm
+         self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
+         self.post_layer_norm = post_layer_norm
+         self.add_bias_linear = add_bias_linear
+         self.add_qkv_bias = add_qkv_bias
+         self.bias_dropout_fusion = bias_dropout_fusion
+         self.multi_query_attention = multi_query_attention
+         self.multi_query_group_num = multi_query_group_num
+         self.rope_ratio = rope_ratio
+         self.apply_query_key_layer_scaling = apply_query_key_layer_scaling
+         self.attention_softmax_in_fp32 = attention_softmax_in_fp32
+         self.fp32_residual_connection = fp32_residual_connection
+         super().__init__(**kwargs)
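
The defaults above are generic; config.json overrides them for this checkpoint. A direct instantiation sketch with the overridden values:

    config = ChatGLMConfig(
        num_layers=40, padded_vocab_size=151552, seq_length=1048576,
        add_qkv_bias=True, multi_query_attention=True, multi_query_group_num=4,
        rope_ratio=10000, layernorm_epsilon=1.5625e-07,
    )
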
generation_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "do_sample": true,
+   "eos_token_id": [
+     151329,
+     151336,
+     151338
+   ],
+   "max_length": 1024000,
+   "pad_token_id": 151329,
+   "temperature": 0.8,
+   "top_p": 0.8,
+   "transformers_version": "4.41.2"
+ }
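
model.generate() applies these defaults automatically; note that max_length (1024000) sits just under the architectural seq_length of 1048576. An equivalent explicit call, assuming model and inputs already exist:

    outputs = model.generate(
        **inputs,
        do_sample=True, temperature=0.8, top_p=0.8,
        max_length=1024000, pad_token_id=151329,
        eos_token_id=[151329, 151336, 151338],
    )
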
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c3c9a6e47557faf025ffef82fa6de622d3173fae5ce88c0c184717f38546a66
+ size 4992537608
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47602104aa56d745398d2cd4d19d3af967b69076e4e4d307110ea620adb48095
+ size 4978978184
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ddadd4eed0ac684ec04ac06df7c27307a6c9288eae689706e4cbef9ecd0f936e
+ size 4945415320
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87ded50946747a95324b32300fa5a21cc4d3c99344dfa03df3f759786a6210e3
+ size 4050823224
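
The four entries above are Git LFS pointer files: the repository tracks only a sha256 oid and a byte size, and the actual shard is fetched from LFS storage on download. Summing the pointer sizes:

    sizes = [4992537608, 4978978184, 4945415320, 4050823224]
    print(sum(sizes))  # 18967754336 bytes, roughly 19 GB across the four shards

This is consistent with total_size in the index below, which counts tensor bytes only (safetensors headers excluded).
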
model.safetensors.index.json ADDED
@@ -0,0 +1,291 @@
+ {
+   "metadata": {
+     "total_size": 18967715904
+   },
+   "weight_map": {
+     "transformer.embedding.word_embeddings.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.final_layernorm.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.0.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.0.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.0.self_attention.dense.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.0.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.0.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.1.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.1.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.1.self_attention.dense.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.1.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.1.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.10.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.10.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.10.self_attention.dense.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.10.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.10.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.11.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.11.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.11.self_attention.dense.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.11.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.11.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.12.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.12.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.12.self_attention.dense.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.12.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.12.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.13.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.13.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.13.self_attention.dense.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.13.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.13.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.14.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.14.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.14.self_attention.dense.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.14.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.14.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.15.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.15.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.15.self_attention.dense.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.15.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.15.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.16.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.16.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.16.self_attention.dense.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.16.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.16.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.17.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.17.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.17.self_attention.dense.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.17.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.17.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.18.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.18.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.18.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.18.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.18.self_attention.dense.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.18.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.18.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.19.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.19.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.19.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.19.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.19.self_attention.dense.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.19.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.19.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.2.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.2.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.2.self_attention.dense.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.2.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.2.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.20.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.20.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.20.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.20.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.20.self_attention.dense.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.20.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.20.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.21.input_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.21.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.21.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.21.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.21.self_attention.dense.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.21.self_attention.query_key_value.bias": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.21.self_attention.query_key_value.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.22.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.22.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.22.self_attention.dense.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.22.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.22.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.23.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.23.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.23.self_attention.dense.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.23.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.23.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.24.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.24.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.24.self_attention.dense.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.24.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.24.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.25.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.25.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.25.self_attention.dense.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.25.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.25.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.26.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.26.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.26.self_attention.dense.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.26.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.26.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.27.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.27.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.27.self_attention.dense.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.27.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.27.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.28.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.28.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.28.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.28.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.28.self_attention.dense.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.28.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.28.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.29.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.29.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.29.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.29.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.29.self_attention.dense.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.29.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.29.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.3.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.3.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.3.self_attention.dense.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.3.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.3.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.30.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.30.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.30.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.30.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.30.self_attention.dense.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.30.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.30.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.31.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.31.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.31.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.31.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.31.self_attention.dense.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.31.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.31.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.32.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.32.mlp.dense_4h_to_h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.32.mlp.dense_h_to_4h.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.32.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.32.self_attention.dense.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.32.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.32.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.33.input_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.33.mlp.dense_4h_to_h.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.33.mlp.dense_h_to_4h.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.33.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.33.self_attention.dense.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.33.self_attention.query_key_value.bias": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.33.self_attention.query_key_value.weight": "model-00003-of-00004.safetensors",
+     "transformer.encoder.layers.34.input_layernorm.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.34.mlp.dense_4h_to_h.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.34.mlp.dense_h_to_4h.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.34.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.34.self_attention.dense.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.34.self_attention.query_key_value.bias": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.34.self_attention.query_key_value.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.35.input_layernorm.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.35.mlp.dense_4h_to_h.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.35.mlp.dense_h_to_4h.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.35.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.35.self_attention.dense.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.35.self_attention.query_key_value.bias": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.35.self_attention.query_key_value.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.36.input_layernorm.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.36.mlp.dense_4h_to_h.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.36.mlp.dense_h_to_4h.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.36.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.36.self_attention.dense.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.36.self_attention.query_key_value.bias": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.36.self_attention.query_key_value.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.37.input_layernorm.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.37.mlp.dense_4h_to_h.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.37.mlp.dense_h_to_4h.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.37.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.37.self_attention.dense.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.37.self_attention.query_key_value.bias": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.37.self_attention.query_key_value.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.38.input_layernorm.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.38.mlp.dense_4h_to_h.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.38.mlp.dense_h_to_4h.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.38.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.38.self_attention.dense.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.38.self_attention.query_key_value.bias": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.38.self_attention.query_key_value.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.39.input_layernorm.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.39.mlp.dense_4h_to_h.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.39.mlp.dense_h_to_4h.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.39.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.39.self_attention.dense.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.39.self_attention.query_key_value.bias": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.39.self_attention.query_key_value.weight": "model-00004-of-00004.safetensors",
+     "transformer.encoder.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.4.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.4.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.4.self_attention.dense.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.4.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.4.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.5.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.5.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.5.self_attention.dense.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.5.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.5.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.6.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.6.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.6.self_attention.dense.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.6.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.6.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.7.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.7.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.7.self_attention.dense.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.7.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.7.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.8.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.8.mlp.dense_4h_to_h.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.8.mlp.dense_h_to_4h.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.8.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.8.self_attention.dense.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.8.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.8.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.9.input_layernorm.weight": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.9.mlp.dense_4h_to_h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.9.mlp.dense_h_to_4h.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.9.self_attention.dense.weight": "model-00002-of-00004.safetensors",
+     "transformer.encoder.layers.9.self_attention.query_key_value.bias": "model-00001-of-00004.safetensors",
+     "transformer.encoder.layers.9.self_attention.query_key_value.weight": "model-00001-of-00004.safetensors",
+     "transformer.output_layer.weight": "model-00004-of-00004.safetensors",
+     "transformer.rotary_pos_emb.inv_freq": "model-00001-of-00004.safetensors"
+   }
+ }
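
weight_map tells the loader which shard holds each tensor. A minimal sketch of resolving one tensor by hand (requires the safetensors package; paths relative to the repo root):

    import json
    from safetensors.torch import load_file

    with open("model.safetensors.index.json") as f:
        index = json.load(f)
    shard = index["weight_map"]["transformer.output_layer.weight"]  # model-00004-of-00004.safetensors
    tensor = load_file(shard)["transformer.output_layer.weight"]
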
modeling_chatglm.py ADDED
@@ -0,0 +1,1215 @@
+ """ PyTorch ChatGLM model. """
+ import json
+ import math
+ import copy
+ import warnings
+ import re
+ import sys
+
+ import torch
+ import torch.utils.checkpoint
+ import torch.nn.functional as F
+ from torch import nn
+ from torch.nn import CrossEntropyLoss, LayerNorm, MSELoss, BCEWithLogitsLoss
+ from torch.nn.utils import skip_init
+ from typing import Optional, Tuple, Union, List, Callable, Dict, Any
+ from copy import deepcopy
+
+ from transformers.modeling_outputs import (
+     BaseModelOutputWithPast,
+     CausalLMOutputWithPast,
+     SequenceClassifierOutputWithPast,
+ )
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.utils import logging
+ from transformers.generation.logits_process import LogitsProcessor
+ from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig, ModelOutput
+
+ from .configuration_chatglm import ChatGLMConfig
+
+ # flags required to enable jit fusion kernels
+
+ if sys.platform != 'darwin':
+     torch._C._jit_set_profiling_mode(False)
+     torch._C._jit_set_profiling_executor(False)
+     torch._C._jit_override_can_fuse_on_cpu(True)
+     torch._C._jit_override_can_fuse_on_gpu(True)
+
+ logger = logging.get_logger(__name__)
+
+ _CHECKPOINT_FOR_DOC = "THUDM/ChatGLM"
+ _CONFIG_FOR_DOC = "ChatGLMConfig"
+
+ def default_init(cls, *args, **kwargs):
+     return cls(*args, **kwargs)
+
+
+ class InvalidScoreLogitsProcessor(LogitsProcessor):
+     def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+         if torch.isnan(scores).any() or torch.isinf(scores).any():
+             scores.zero_()
+             scores[..., 198] = 5e4
+         return scores
+
+
+ def split_tensor_along_last_dim(
+         tensor: torch.Tensor,
+         num_partitions: int,
+         contiguous_split_chunks: bool = False,
+ ) -> List[torch.Tensor]:
+     """Split a tensor along its last dimension.
+
+     Arguments:
+         tensor: input tensor.
+         num_partitions: number of partitions to split the tensor
+         contiguous_split_chunks: If True, make each chunk contiguous
+             in memory.
+
+     Returns:
+         A list of Tensors
+     """
+     # Get the size and dimension.
+     last_dim = tensor.dim() - 1
+     last_dim_size = tensor.size()[last_dim] // num_partitions
+     # Split.
+     tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
+     # Note: torch.split does not create contiguous tensors by default.
+     if contiguous_split_chunks:
+         return tuple(chunk.contiguous() for chunk in tensor_list)
+
+     return tensor_list
+
+
+ class RotaryEmbedding(nn.Module):
+     def __init__(self, dim, rope_ratio=1, original_impl=False, device=None, dtype=None):
+         super().__init__()
+         inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, device=device).to(dtype=dtype) / dim))
+         self.register_buffer("inv_freq", inv_freq)
+         self.dim = dim
+         self.original_impl = original_impl
+         self.rope_ratio = rope_ratio
+
+     def forward_impl(
+             self, seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, base: int = 10000
+     ):
+         """Enhanced Transformer with Rotary Position Embedding.
+
+         Derived from: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/
+         transformers/rope/__init__.py. MIT License:
+         https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/license.
+         """
+         # $\Theta = {\theta_i = 10000^{\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$
+         base = base * self.rope_ratio
+         theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, dtype=torch.float, device=device) / n_elem))
+
+         # Create position indexes `[0, 1, ..., seq_len - 1]`
+         seq_idx = torch.arange(seq_len, dtype=torch.float, device=device)
+
+         # Calculate the product of position index and $\theta_i$
+         idx_theta = torch.outer(seq_idx, theta).float()
+
+         cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1)
+
+         # this is to mimic the behaviour of complex32, else we will get different results
+         if dtype in (torch.float16, torch.bfloat16, torch.int8):
+             cache = cache.bfloat16() if dtype == torch.bfloat16 else cache.half()
+         return cache
+
+     def forward(self, max_seq_len, offset=0):
+         return self.forward_impl(
+             max_seq_len, self.dim, dtype=self.inv_freq.dtype, device=self.inv_freq.device
+         )
+
+
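# A minimal sketch of what the class above produces (annotation, not part of the file):
# with rope_ratio=10000 from config.json, forward_impl scales the base to
# 10000 * 10000 = 1e8, stretching the rotary period for the 1M-token context.
#     rope = RotaryEmbedding(64, rope_ratio=10000)
#     cache = rope(8192)  # cos/sin cache of shape [8192, 32, 2] for a 64-dim rotary half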
+ @torch.jit.script
+ def apply_rotary_pos_emb(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor:
+     # x: [b, np, sq, hn]
+     b, np, sq, hn = x.size(0), x.size(1), x.size(2), x.size(3)
+     rot_dim = rope_cache.shape[-2] * 2
+     x, x_pass = x[..., :rot_dim], x[..., rot_dim:]
+     # truncate to support variable sizes
+     rope_cache = rope_cache[:, :sq]
+     xshaped = x.reshape(b, np, sq, rot_dim // 2, 2)
+     rope_cache = rope_cache.view(-1, 1, sq, xshaped.size(3), 2)
+     x_out2 = torch.stack(
+         [
+             xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1],
+             xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1],
+         ],
+         -1,
+     )
+     x_out2 = x_out2.flatten(3)
+     return torch.cat((x_out2, x_pass), dim=-1)
+
+
+ class RMSNorm(torch.nn.Module):
+     def __init__(self, normalized_shape, eps=1e-5, device=None, dtype=None, **kwargs):
+         super().__init__()
+         self.weight = torch.nn.Parameter(torch.empty(normalized_shape, device=device, dtype=dtype))
+         self.eps = eps
+
+     def forward(self, hidden_states: torch.Tensor):
+         input_dtype = hidden_states.dtype
+         variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
+
+         return (self.weight * hidden_states).to(input_dtype)
+
+
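# A quick numeric sketch of RMSNorm above (annotation, not part of the file): it
# rescales by the root mean square without subtracting the mean, unlike LayerNorm.
#     norm = RMSNorm(4096, eps=1.5625e-07, dtype=torch.float32)  # weight comes from the checkpoint in practice
#     y = norm(torch.randn(2, 4096))  # per-row RMS of y / norm.weight is ~1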
+ class CoreAttention(torch.nn.Module):
+     def __init__(self, config: ChatGLMConfig, layer_number):
+         super(CoreAttention, self).__init__()
+
+         self.apply_query_key_layer_scaling = config.apply_query_key_layer_scaling
+         self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32
+         if self.apply_query_key_layer_scaling:
+             self.attention_softmax_in_fp32 = True
+         self.layer_number = max(1, layer_number)
+
+         projection_size = config.kv_channels * config.num_attention_heads
+
+         # Per attention head and per partition values.
+         self.hidden_size_per_partition = projection_size
+         self.hidden_size_per_attention_head = projection_size // config.num_attention_heads
+         self.num_attention_heads_per_partition = config.num_attention_heads
+
+         coeff = None
+         self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
+         if self.apply_query_key_layer_scaling:
+             coeff = self.layer_number
+             self.norm_factor *= coeff
+         self.coeff = coeff
+
+         self.attention_dropout = torch.nn.Dropout(config.attention_dropout)
+
+     def forward(self, query_layer, key_layer, value_layer, attention_mask):
+         pytorch_major_version = int(torch.__version__.split('.')[0])
+         if pytorch_major_version >= 2:
+             if attention_mask is None and query_layer.shape[2] == key_layer.shape[2]:
+                 context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer,
+                                                                                  is_causal=True)
+             else:
+                 if attention_mask is not None:
+                     attention_mask = ~attention_mask
+                 context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer,
+                                                                                  attention_mask)
+             context_layer = context_layer.transpose(1, 2).contiguous()
+             new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
+             context_layer = context_layer.reshape(*new_context_layer_shape)
+         else:
+             # Raw attention scores
+
+             # [b, np, sq, sk]
+             output_size = (query_layer.size(0), query_layer.size(1), query_layer.size(2), key_layer.size(2))
+
+             # [b, np, sq, hn] -> [b * np, sq, hn]
+             query_layer = query_layer.view(output_size[0] * output_size[1], output_size[2], -1)
+             # [b, np, sk, hn] -> [b * np, sk, hn]
+             key_layer = key_layer.view(output_size[0] * output_size[1], output_size[3], -1)
+
+             # preallocating input tensor: [b * np, sq, sk]
+             matmul_input_buffer = torch.empty(
+                 output_size[0] * output_size[1], output_size[2], output_size[3], dtype=query_layer.dtype,
+                 device=query_layer.device
+             )
+
+             # Raw attention scores. [b * np, sq, sk]
+             matmul_result = torch.baddbmm(
+                 matmul_input_buffer,
+                 query_layer,  # [b * np, sq, hn]
+                 key_layer.transpose(1, 2),  # [b * np, hn, sk]
+                 beta=0.0,
+                 alpha=(1.0 / self.norm_factor),
+             )
+
+             # change view to [b, np, sq, sk]
+             attention_scores = matmul_result.view(*output_size)
+
+             # ===========================
+             # Attention probs and dropout
+             # ===========================
+
+             # attention scores and attention mask [b, np, sq, sk]
+             if self.attention_softmax_in_fp32:
+                 attention_scores = attention_scores.float()
+             if self.coeff is not None:
+                 attention_scores = attention_scores * self.coeff
+             if attention_mask is None and attention_scores.shape[2] == attention_scores.shape[3]:
+                 attention_mask = torch.ones(output_size[0], 1, output_size[2], output_size[3],
+                                             device=attention_scores.device, dtype=torch.bool)
+                 attention_mask.tril_()
+                 attention_mask = ~attention_mask
+             if attention_mask is not None:
+                 attention_scores = attention_scores.masked_fill(attention_mask, float("-inf"))
+             attention_probs = F.softmax(attention_scores, dim=-1)
+             attention_probs = attention_probs.type_as(value_layer)
+
+             # This is actually dropping out entire tokens to attend to, which might
+             # seem a bit unusual, but is taken from the original Transformer paper.
+             attention_probs = self.attention_dropout(attention_probs)
+
+             # query layer shape: [b * np, sq, hn]
+             # value layer shape: [b, np, sk, hn]
+             # attention shape: [b, np, sq, sk]
+             # context layer shape: [b, np, sq, hn]
+             output_size = (value_layer.size(0), value_layer.size(1), query_layer.size(1), value_layer.size(3))
+             # change view [b * np, sk, hn]
+             value_layer = value_layer.view(output_size[0] * output_size[1], value_layer.size(2), -1)
+             # change view [b * np, sq, sk]
+             attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)
+             # matmul: [b * np, sq, hn]
+             context_layer = torch.bmm(attention_probs, value_layer)
+             # change view [b, np, sq, hn]
+             context_layer = context_layer.view(*output_size)
+             # [b, np, sq, hn] --> [b, sq, np, hn]
+             context_layer = context_layer.transpose(1, 2).contiguous()
+             # [b, sq, np, hn] --> [b, sq, hp]
+             new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
+             context_layer = context_layer.reshape(*new_context_layer_shape)
+
+         return context_layer
+
+
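# Dispatch note for the forward above (annotation, not part of the file): on
# PyTorch >= 2.0 attention runs through F.scaled_dot_product_attention, and the
# baddbmm/softmax branch is the numerically equivalent fallback. Causal prefill case:
#     ctx = F.scaled_dot_product_attention(q, k, v, is_causal=True)  # q, k, v: [b, np, s, hn]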
+ class SelfAttention(torch.nn.Module):
+     """Parallel self-attention layer abstract class.
+
+     Self-attention layer takes input with size [s, b, h]
+     and returns output of the same size.
+     """
+
+     def __init__(self, config: ChatGLMConfig, layer_number, device=None):
+         super(SelfAttention, self).__init__()
+         self.layer_number = max(1, layer_number)
+
+         self.projection_size = config.kv_channels * config.num_attention_heads
+
+         # Per attention head and per partition values.
+         self.hidden_size_per_attention_head = self.projection_size // config.num_attention_heads
+         self.num_attention_heads_per_partition = config.num_attention_heads
+
+         self.multi_query_attention = config.multi_query_attention
+         self.qkv_hidden_size = 3 * self.projection_size
+         if self.multi_query_attention:
+             self.num_multi_query_groups_per_partition = config.multi_query_group_num
+             self.qkv_hidden_size = (
+                 self.projection_size + 2 * self.hidden_size_per_attention_head * config.multi_query_group_num
+             )
+         self.query_key_value = nn.Linear(config.hidden_size, self.qkv_hidden_size,
+                                          bias=config.add_bias_linear or config.add_qkv_bias,
+                                          device=device, **_config_to_kwargs(config)
+                                          )
+
+         self.core_attention = CoreAttention(config, self.layer_number)
+
+         # Output.
+         self.dense = nn.Linear(self.projection_size, config.hidden_size, bias=config.add_bias_linear,
+                                device=device, **_config_to_kwargs(config)
+                                )
+
+     def _allocate_memory(self, inference_max_sequence_len, batch_size, device=None, dtype=None):
+         if self.multi_query_attention:
+             num_attention_heads = self.num_multi_query_groups_per_partition
+         else:
+             num_attention_heads = self.num_attention_heads_per_partition
+         return torch.empty(
+             inference_max_sequence_len,
+             batch_size,
+             num_attention_heads,
+             self.hidden_size_per_attention_head,
+             dtype=dtype,
+             device=device,
+         )
+
+     def forward(
+             self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True
+     ):
+         # hidden_states: [b, sq, h]
+
+         # =================================================
+         # Pre-allocate memory for key-values for inference.
+         # =================================================
+         # =====================
+         # Query, Key, and Value
+         # =====================
+
+         # Attention heads [b, sq, h] --> [b, sq, (np * 3 * hn)]
+         mixed_x_layer = self.query_key_value(hidden_states)
+
+         if self.multi_query_attention:
+             (query_layer, key_layer, value_layer) = mixed_x_layer.split(
+                 [
+                     self.num_attention_heads_per_partition * self.hidden_size_per_attention_head,
+                     self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,
+                     self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,
+                 ],
+                 dim=-1,
+             )
+             query_layer = query_layer.view(
+                 query_layer.size()[:-1] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)
+             )
+             key_layer = key_layer.view(
+                 key_layer.size()[:-1] + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)
+             )
+             value_layer = value_layer.view(
+                 value_layer.size()[:-1]
+                 + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)
+             )
+         else:
+             new_tensor_shape = mixed_x_layer.size()[:-1] + \
+                                (self.num_attention_heads_per_partition,
+                                 3 * self.hidden_size_per_attention_head)
+             mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
+
+             # [b, sq, np, 3 * hn] --> 3 [b, sq, np, hn]
+             (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3)
+
+         # [b, sq, np, hn] -> [b, np, sq, hn]
+         query_layer, key_layer, value_layer = [k.transpose(1, 2) for k in [query_layer, key_layer, value_layer]]
+
+         # apply relative positional encoding (rotary embedding)
+         if rotary_pos_emb is not None:
+             query_layer = apply_rotary_pos_emb(query_layer, rotary_pos_emb)
+             key_layer = apply_rotary_pos_emb(key_layer, rotary_pos_emb)
+
+         # adjust key and value for inference
+         if kv_cache is not None:
+             cache_k, cache_v = kv_cache
+             key_layer = torch.cat((cache_k, key_layer), dim=2)
+             value_layer = torch.cat((cache_v, value_layer), dim=2)
+         if use_cache:
+             if kv_cache is None:
+                 kv_cache = torch.cat((key_layer.unsqueeze(0).unsqueeze(0), value_layer.unsqueeze(0).unsqueeze(0)), dim=1)
+             else:
+                 kv_cache = (key_layer, value_layer)
+         else:
+             kv_cache = None
+
+         if self.multi_query_attention:
+             key_layer = key_layer.unsqueeze(2)
+             key_layer = key_layer.expand(
+                 -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1, -1
+             )
+             key_layer = key_layer.contiguous().view(
+                 key_layer.size()[:1] + (self.num_attention_heads_per_partition,) + key_layer.size()[3:]
+             )
+             value_layer = value_layer.unsqueeze(2)
+             value_layer = value_layer.expand(
+                 -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1, -1
+             )
+             value_layer = value_layer.contiguous().view(
+                 value_layer.size()[:1] + (self.num_attention_heads_per_partition,) + value_layer.size()[3:]
+             )
+
+         # ==================================
+         # core attention computation
+         # ==================================
+
+         context_layer = self.core_attention(query_layer, key_layer, value_layer, attention_mask)
+
+         # =================
+         # Output. [sq, b, h]
+         # =================
+
+         output = self.dense(context_layer)
+
+         return output, kv_cache
+
+
+ def _config_to_kwargs(args):
+     common_kwargs = {
+         "dtype": args.torch_dtype,
+     }
+     return common_kwargs
+
+
425
+ class MLP(torch.nn.Module):
426
+ """MLP.
427
+
428
+ MLP will take the input with h hidden state, project it to 4*h
429
+ hidden dimension, perform nonlinear transformation, and project the
430
+ state back into h hidden dimension.
431
+ """
432
+
433
+ def __init__(self, config: ChatGLMConfig, device=None):
434
+ super(MLP, self).__init__()
435
+
436
+ self.add_bias = config.add_bias_linear
437
+
438
+ # Project to 4h. If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf
439
+ self.dense_h_to_4h = nn.Linear(
440
+ config.hidden_size,
441
+ config.ffn_hidden_size * 2,
442
+ bias=self.add_bias,
443
+ device=device,
444
+ **_config_to_kwargs(config)
445
+ )
446
+
447
+ def swiglu(x):
448
+ x = torch.chunk(x, 2, dim=-1)
449
+ return F.silu(x[0]) * x[1]
450
+
451
+ self.activation_func = swiglu
452
+
453
+ # Project back to h.
454
+ self.dense_4h_to_h = nn.Linear(
455
+ config.ffn_hidden_size,
456
+ config.hidden_size,
457
+ bias=self.add_bias,
458
+ device=device,
459
+ **_config_to_kwargs(config)
460
+ )
461
+
462
+ def forward(self, hidden_states):
463
+ # [s, b, 4hp]
464
+ intermediate_parallel = self.dense_h_to_4h(hidden_states)
465
+ intermediate_parallel = self.activation_func(intermediate_parallel)
466
+ # [s, b, h]
467
+ output = self.dense_4h_to_h(intermediate_parallel)
468
+ return output
469
+
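The doubled `dense_h_to_4h` output feeds the `swiglu` closure above: half of the features gate the other half. A minimal numeric sketch of the same activation, with hypothetical sizes:

    import torch
    import torch.nn.functional as F

    x = torch.randn(3, 2 * 6)                  # a token batch already projected to 2 * ffn width
    gate, value = torch.chunk(x, 2, dim=-1)    # split the doubled projection
    h = F.silu(gate) * value                   # SwiGLU: silu(gate) * value, width 6
    print(h.shape)                             # torch.Size([3, 6])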
470
+
471
+ class GLMBlock(torch.nn.Module):
472
+ """A single transformer layer.
473
+
474
+ Transformer layer takes input with size [b, s, h] and returns an
475
+ output of the same size.
476
+ """
477
+
478
+ def __init__(self, config: ChatGLMConfig, layer_number, device=None):
479
+ super(GLMBlock, self).__init__()
480
+ self.layer_number = layer_number
481
+
482
+ self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm
483
+
484
+ self.fp32_residual_connection = config.fp32_residual_connection
485
+
486
+ LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm
487
+ # Layernorm on the input data.
488
+ self.input_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,
489
+ dtype=config.torch_dtype)
490
+
491
+ # Self attention.
492
+ self.self_attention = SelfAttention(config, layer_number, device=device)
493
+ self.hidden_dropout = config.hidden_dropout
494
+
495
+ # Layernorm on the attention output
496
+ self.post_attention_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,
497
+ dtype=config.torch_dtype)
498
+
499
+ # MLP
500
+ self.mlp = MLP(config, device=device)
501
+
502
+ def forward(
503
+ self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True,
504
+ ):
505
+ # hidden_states: [b, s, h]
506
+
507
+ # Layer norm at the beginning of the transformer layer.
508
+ layernorm_output = self.input_layernorm(hidden_states)
509
+ # Self attention.
510
+ attention_output, kv_cache = self.self_attention(
511
+ layernorm_output,
512
+ attention_mask,
513
+ rotary_pos_emb,
514
+ kv_cache=kv_cache,
515
+ use_cache=use_cache
516
+ )
517
+
518
+ # Residual connection.
519
+ if self.apply_residual_connection_post_layernorm:
520
+ residual = layernorm_output
521
+ else:
522
+ residual = hidden_states
523
+
524
+ layernorm_input = torch.nn.functional.dropout(attention_output, p=self.hidden_dropout, training=self.training)
525
+ layernorm_input = residual + layernorm_input
526
+
527
+ # Layer norm post the self attention.
528
+ layernorm_output = self.post_attention_layernorm(layernorm_input)
529
+
530
+ # MLP.
531
+ mlp_output = self.mlp(layernorm_output)
532
+
533
+ # Second residual connection.
534
+ if self.apply_residual_connection_post_layernorm:
535
+ residual = layernorm_output
536
+ else:
537
+ residual = layernorm_input
538
+
539
+ output = torch.nn.functional.dropout(mlp_output, p=self.hidden_dropout, training=self.training)
540
+ output = residual + output
541
+
542
+ return output, kv_cache
543
+
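With the default `apply_residual_connection_post_layernorm=false` (see config.json above), both residual branches carry the un-normalized input, i.e. the block is pre-norm. A schematic sketch of one sublayer under that assumption, with dropout omitted:

    import torch

    def sublayer(x, norm, f, post_ln_residual=False):
        y = norm(x)
        residual = y if post_ln_residual else x   # pre-norm keeps the raw input on the skip path
        return residual + f(y)

    out = sublayer(torch.randn(2, 3, 8), torch.nn.LayerNorm(8), lambda t: t * 0.5)
    print(out.shape)  # torch.Size([2, 3, 8])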
544
+
545
+ class GLMTransformer(torch.nn.Module):
546
+ """Transformer class."""
547
+
548
+ def __init__(self, config: ChatGLMConfig, device=None):
549
+ super(GLMTransformer, self).__init__()
550
+
551
+ self.fp32_residual_connection = config.fp32_residual_connection
552
+ self.post_layer_norm = config.post_layer_norm
553
+
554
+ # Number of layers.
555
+ self.num_layers = config.num_layers
556
+
557
+ # Transformer layers.
558
+ def build_layer(layer_number):
559
+ return GLMBlock(config, layer_number, device=device)
560
+
561
+ self.layers = torch.nn.ModuleList([build_layer(i + 1) for i in range(self.num_layers)])
562
+
563
+ if self.post_layer_norm:
564
+ LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm
565
+ # Final layer norm before output.
566
+ self.final_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,
567
+ dtype=config.torch_dtype)
568
+
569
+ self.gradient_checkpointing = False
570
+
571
+ def _get_layer(self, layer_number):
572
+ return self.layers[layer_number]
573
+
574
+ def forward(
575
+ self, hidden_states, attention_mask, rotary_pos_emb, kv_caches=None,
576
+ use_cache: Optional[bool] = True,
577
+ output_hidden_states: Optional[bool] = False,
578
+ ):
579
+ if not kv_caches:
580
+ kv_caches = [None for _ in range(self.num_layers)]
581
+ presents = () if use_cache else None
582
+ if self.gradient_checkpointing and self.training:
583
+ if use_cache:
584
+ logger.warning_once(
585
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
586
+ )
587
+ use_cache = False
588
+
589
+ all_self_attentions = None
590
+ all_hidden_states = () if output_hidden_states else None
591
+ for index in range(self.num_layers):
592
+ if output_hidden_states:
593
+ all_hidden_states = all_hidden_states + (hidden_states,)
594
+
595
+ layer = self._get_layer(index)
596
+ if self.gradient_checkpointing and self.training:
597
+ layer_ret = torch.utils.checkpoint.checkpoint(
598
+ layer,
599
+ hidden_states,
600
+ attention_mask,
601
+ rotary_pos_emb,
602
+ kv_caches[index],
603
+ use_cache,
604
+ use_reentrant=False
605
+ )
606
+ else:
607
+ layer_ret = layer(
608
+ hidden_states,
609
+ attention_mask,
610
+ rotary_pos_emb,
611
+ kv_cache=kv_caches[index],
612
+ use_cache=use_cache
613
+ )
614
+ hidden_states, kv_cache = layer_ret
615
+ if use_cache:
616
+ # token by token decoding, use tuple format
617
+ if kv_caches[0] is not None:
618
+ presents = presents + (kv_cache,)
619
+ # prefilling in decoding, use tensor format to save cuda memory
620
+ else:
621
+ if len(presents) == 0:
622
+ presents = kv_cache
623
+ else:
624
+ presents = torch.cat((presents, kv_cache.to(presents.device)), dim=0)
625
+
626
+ if output_hidden_states:
627
+ all_hidden_states = all_hidden_states + (hidden_states,)
628
+
629
+ # Final layer norm.
630
+ if self.post_layer_norm:
631
+ hidden_states = self.final_layernorm(hidden_states)
632
+
633
+ return hidden_states, presents, all_hidden_states, all_self_attentions
634
+
635
+
636
+ class ChatGLMPreTrainedModel(PreTrainedModel):
637
+ """
638
+ An abstract class to handle weights initialization and
639
+ a simple interface for downloading and loading pretrained models.
640
+ """
641
+
642
+ is_parallelizable = False
643
+ supports_gradient_checkpointing = True
644
+ config_class = ChatGLMConfig
645
+ base_model_prefix = "transformer"
646
+ _no_split_modules = ["GLMBlock"]
647
+
648
+ def _init_weights(self, module: nn.Module):
649
+ """Initialize the weights."""
650
+ return
651
+
652
+ def get_masks(self, input_ids, past_key_values, padding_mask=None):
653
+ batch_size, seq_length = input_ids.shape
654
+ full_attention_mask = torch.ones(batch_size, seq_length, seq_length, device=input_ids.device)
655
+ full_attention_mask.tril_()
656
+ past_length = 0
657
+ if past_key_values:
658
+ past_length = past_key_values[0][0].shape[2]
659
+ if past_length:
660
+ full_attention_mask = torch.cat((torch.ones(batch_size, seq_length, past_length,
661
+ device=input_ids.device), full_attention_mask), dim=-1)
662
+ if padding_mask is not None:
663
+ full_attention_mask = full_attention_mask * padding_mask.unsqueeze(1)
664
+ if not past_length and padding_mask is not None:
665
+ full_attention_mask -= padding_mask.unsqueeze(-1) - 1
666
+ full_attention_mask = (full_attention_mask < 0.5).bool()
667
+ full_attention_mask.unsqueeze_(1)
668
+ return full_attention_mask
669
+
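`get_masks` produces a [b, 1, sq, past+sq] boolean mask in which `True` marks positions the new tokens must *not* attend to: causal over the new tokens, fully visible over the cache. A small sketch of the geometry, assuming 2 cached tokens and 3 new ones:

    import torch

    batch, past, sq = 1, 2, 3
    mask = torch.ones(batch, sq, sq).tril_()                       # causal over new tokens
    mask = torch.cat((torch.ones(batch, sq, past), mask), dim=-1)  # cached tokens always visible
    mask = (mask < 0.5).unsqueeze(1)                               # True == masked out
    print(mask[0, 0].int())
    # tensor([[0, 0, 0, 1, 1],
    #         [0, 0, 0, 0, 1],
    #         [0, 0, 0, 0, 0]], dtype=torch.int32)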
670
+ def get_position_ids(self, input_ids, device):
671
+ batch_size, seq_length = input_ids.shape
672
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1)
673
+ return position_ids
674
+
675
+ def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None):
676
+ if not self.supports_gradient_checkpointing:
677
+ raise ValueError(f"{self.__class__.__name__} does not support gradient checkpointing.")
678
+
679
+
680
+ class Embedding(torch.nn.Module):
681
+ """Language model embeddings."""
682
+
683
+ def __init__(self, config: ChatGLMConfig, device=None):
684
+ super(Embedding, self).__init__()
685
+
686
+ self.hidden_size = config.hidden_size
687
+ # Word embeddings (parallel).
688
+ self.word_embeddings = nn.Embedding(
689
+ config.padded_vocab_size,
690
+ self.hidden_size,
691
+ dtype=config.torch_dtype,
692
+ device=device
693
+ )
694
+ self.fp32_residual_connection = config.fp32_residual_connection
695
+
696
+ def forward(self, input_ids):
697
+ # Embeddings.
698
+ words_embeddings = self.word_embeddings(input_ids)
699
+ embeddings = words_embeddings
700
+ # If the input flag for fp32 residual connection is set, convert for float.
701
+ if self.fp32_residual_connection:
702
+ embeddings = embeddings.float()
703
+ return embeddings
704
+
705
+
706
+ class ChatGLMModel(ChatGLMPreTrainedModel):
707
+ def __init__(self, config: ChatGLMConfig, device=None, empty_init=True):
708
+ super().__init__(config)
709
+ if empty_init:
710
+ init_method = skip_init
711
+ else:
712
+ init_method = default_init
713
+ init_kwargs = {}
714
+ if device is not None:
715
+ init_kwargs["device"] = device
716
+ self.embedding = init_method(Embedding, config, **init_kwargs)
717
+ self.num_layers = config.num_layers
718
+ self.multi_query_group_num = config.multi_query_group_num
719
+ self.kv_channels = config.kv_channels
720
+
721
+ # Rotary positional embeddings
722
+ self.seq_length = config.seq_length
723
+ rotary_dim = (
724
+ config.hidden_size // config.num_attention_heads if config.kv_channels is None else config.kv_channels
725
+ )
726
+
727
+ self.rotary_pos_emb = RotaryEmbedding(rotary_dim // 2, rope_ratio=config.rope_ratio, original_impl=config.original_rope,
728
+ device=device, dtype=config.torch_dtype)
729
+ self.encoder = init_method(GLMTransformer, config, **init_kwargs)
730
+ self.output_layer = init_method(nn.Linear, config.hidden_size, config.padded_vocab_size, bias=False,
731
+ dtype=config.torch_dtype, **init_kwargs)
732
+
733
+ def get_input_embeddings(self):
734
+ return self.embedding.word_embeddings
735
+
736
+ def set_input_embeddings(self, value):
737
+ self.embedding.word_embeddings = value
738
+
739
+ def forward(
740
+ self,
741
+ input_ids,
742
+ position_ids: Optional[torch.Tensor] = None,
743
+ attention_mask: Optional[torch.BoolTensor] = None,
744
+ full_attention_mask: Optional[torch.BoolTensor] = None,
745
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
746
+ inputs_embeds: Optional[torch.Tensor] = None,
747
+ use_cache: Optional[bool] = None,
748
+ output_hidden_states: Optional[bool] = None,
749
+ return_dict: Optional[bool] = None,
750
+ ):
751
+ output_hidden_states = (
752
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
753
+ )
754
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
755
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
756
+
757
+ batch_size, seq_length = input_ids.shape
758
+
759
+ if inputs_embeds is None:
760
+ inputs_embeds = self.embedding(input_ids)
761
+
762
+ if full_attention_mask is None:
763
+ if (attention_mask is not None and not attention_mask.all()) or (past_key_values and seq_length != 1):
764
+ full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask)
765
+
766
+ # Rotary positional embeddings
767
+ rotary_pos_emb = self.rotary_pos_emb(self.seq_length)
768
+ if position_ids is not None:
769
+ rotary_pos_emb = rotary_pos_emb[position_ids]
770
+ else:
771
+ rotary_pos_emb = rotary_pos_emb[None, :seq_length]
772
+
773
+ # Run encoder.
774
+ hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder(
775
+ inputs_embeds, full_attention_mask, rotary_pos_emb=rotary_pos_emb,
776
+ kv_caches=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states
777
+ )
778
+ if presents is not None and type(presents) is torch.Tensor:
779
+ presents = presents.split(1, dim=0)
780
+ presents = list(presents)
781
+ presents = [list(x.squeeze(0).split(1, dim=0)) for x in presents]
782
+ presents = [tuple([x.squeeze(0) for x in y]) for y in presents]
783
+ presents = tuple(presents)
784
+
785
+ if not return_dict:
786
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
787
+
788
+ return BaseModelOutputWithPast(
789
+ last_hidden_state=hidden_states,
790
+ past_key_values=presents,
791
+ hidden_states=all_hidden_states,
792
+ attentions=all_self_attentions,
793
+ )
794
+
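During prefill the encoder hands back the whole cache as one stacked tensor (built by the `unsqueeze(0).unsqueeze(0)` concatenation in the attention layer above), and the forward above splits it back into per-layer `(key, value)` tuples. A shape-only sketch of that split, with hypothetical sizes:

    import torch

    # Hypothetical: 2 layers, batch 1, 2 KV groups, 4 cached tokens, head dim 8.
    stacked = torch.randn(2, 2, 1, 2, 4, 8)   # [layers, k/v, b, groups, seq, hn]
    per_layer = tuple(
        tuple(kv.squeeze(0) for kv in layer.squeeze(0).split(1, dim=0))
        for layer in stacked.split(1, dim=0)
    )
    print(len(per_layer), per_layer[0][0].shape)  # 2 torch.Size([1, 2, 4, 8])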
795
+
796
+ class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
797
+ def __init__(self, config: ChatGLMConfig, empty_init=True, device=None):
798
+ super().__init__(config)
799
+
800
+ self.max_sequence_length = config.max_length
801
+ self.transformer = ChatGLMModel(config, empty_init=empty_init, device=device)
802
+ self.config = config
803
+
804
+ def _update_model_kwargs_for_generation(
805
+ self,
806
+ outputs: ModelOutput,
807
+ model_kwargs: Dict[str, Any],
808
+ is_encoder_decoder: bool = False,
809
+ standardize_cache_format: bool = False,
810
+ ) -> Dict[str, Any]:
811
+ # update past_key_values
812
+ model_kwargs["past_key_values"] = self._extract_past_from_model_output(
813
+ outputs, standardize_cache_format=standardize_cache_format
814
+ )
815
+
816
+ # update attention mask
817
+ if "attention_mask" in model_kwargs:
818
+ attention_mask = model_kwargs["attention_mask"]
819
+ model_kwargs["attention_mask"] = torch.cat(
820
+ [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
821
+ )
822
+
823
+ # update position ids
824
+ if "position_ids" in model_kwargs:
825
+ position_ids = model_kwargs["position_ids"]
826
+ new_position_id = position_ids[..., -1:].clone()
827
+ new_position_id += 1
828
+ model_kwargs["position_ids"] = torch.cat(
829
+ [position_ids, new_position_id], dim=-1
830
+ )
831
+
832
+ model_kwargs["is_first_forward"] = False
833
+ return model_kwargs
834
+
835
+ def prepare_inputs_for_generation(
836
+ self,
837
+ input_ids: torch.LongTensor,
838
+ past_key_values: Optional[torch.Tensor] = None,
839
+ attention_mask: Optional[torch.Tensor] = None,
840
+ position_ids: Optional[torch.Tensor] = None,
841
+ use_cache: Optional[bool] = None,
842
+ is_first_forward: bool = True,
843
+ **kwargs
844
+ ) -> dict:
845
+ # only last token for input_ids if past is not None
846
+ if position_ids is None:
847
+ position_ids = self.get_position_ids(input_ids, device=input_ids.device)
848
+ if not is_first_forward:
849
+ if past_key_values is not None:
850
+ position_ids = position_ids[..., -1:]
851
+ input_ids = input_ids[:, -1:]
852
+ return {
853
+ "input_ids": input_ids,
854
+ "past_key_values": past_key_values,
855
+ "position_ids": position_ids,
856
+ "attention_mask": attention_mask,
857
+ "return_last_logit": True,
858
+ "use_cache": use_cache
859
+ }
860
+
861
+ def forward(
862
+ self,
863
+ input_ids: Optional[torch.Tensor] = None,
864
+ position_ids: Optional[torch.Tensor] = None,
865
+ attention_mask: Optional[torch.Tensor] = None,
866
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
867
+ inputs_embeds: Optional[torch.Tensor] = None,
868
+ labels: Optional[torch.Tensor] = None,
869
+ use_cache: Optional[bool] = None,
870
+ output_attentions: Optional[bool] = None,
871
+ output_hidden_states: Optional[bool] = None,
872
+ return_dict: Optional[bool] = None,
873
+ return_last_logit: Optional[bool] = False,
874
+ ):
875
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
876
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
877
+
878
+ transformer_outputs = self.transformer(
879
+ input_ids=input_ids,
880
+ position_ids=position_ids,
881
+ attention_mask=attention_mask,
882
+ past_key_values=past_key_values,
883
+ inputs_embeds=inputs_embeds,
884
+ use_cache=use_cache,
885
+ output_hidden_states=output_hidden_states,
886
+ return_dict=return_dict,
887
+ )
888
+
889
+ hidden_states = transformer_outputs[0]
890
+ if return_last_logit:
891
+ hidden_states = hidden_states[:, -1:]
892
+ lm_logits = self.transformer.output_layer(hidden_states)
893
+
894
+ loss = None
895
+ if labels is not None:
896
+ lm_logits = lm_logits.to(torch.float32)
897
+
898
+ # Shift so that tokens < n predict n
899
+ shift_logits = lm_logits[..., :-1, :].contiguous()
900
+ shift_labels = labels[..., 1:].contiguous()
901
+ # Flatten the tokens
902
+ loss_fct = CrossEntropyLoss(ignore_index=-100)
903
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
904
+
905
+ lm_logits = lm_logits.to(hidden_states.dtype)
906
+ loss = loss.to(hidden_states.dtype)
907
+
908
+ if not return_dict:
909
+ output = (lm_logits,) + transformer_outputs[1:]
910
+ return ((loss,) + output) if loss is not None else output
911
+
912
+ return CausalLMOutputWithPast(
913
+ loss=loss,
914
+ logits=lm_logits,
915
+ past_key_values=transformer_outputs.past_key_values,
916
+ hidden_states=transformer_outputs.hidden_states,
917
+ attentions=transformer_outputs.attentions,
918
+ )
919
+
920
+ @staticmethod
921
+ def _reorder_cache(
922
+ past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
923
+ ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
924
+ """
925
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
926
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
927
+ beam_idx at every generation step.
928
+
929
+ Output shares the same memory storage as `past`.
930
+ """
931
+ return tuple(
932
+ (
933
+ layer_past[0].index_select(0, beam_idx.to(layer_past[0].device)),
934
+ layer_past[1].index_select(0, beam_idx.to(layer_past[1].device)),
935
+ )
936
+ for layer_past in past
937
+ )
938
+
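`_reorder_cache` only gathers the batch/beam dimension (dim 0) of each cached key and value. A tiny sketch, with the beam id baked into the values so the gather is visible:

    import torch

    k = torch.arange(3.0).view(3, 1, 1, 1)    # 3 beams, beam id as the value
    beam_idx = torch.tensor([2, 2, 0])        # beams 0 and 1 continue from beam 2
    print(k.index_select(0, beam_idx).flatten().tolist())  # [2.0, 2.0, 0.0]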
939
+ def process_response(self, output, history):
940
+ content = ""
941
+ history = deepcopy(history)
942
+ for response in output.split("<|assistant|>"):
943
+ if "\n" in response:
944
+ metadata, content = response.split("\n", maxsplit=1)
945
+ else:
946
+ metadata, content = "", response
947
+ if not metadata.strip():
948
+ content = content.strip()
949
+ history.append({"role": "assistant", "metadata": metadata, "content": content})
950
+ content = content.replace("[[训练时间]]", "2023年")
951
+ else:
952
+ history.append({"role": "assistant", "metadata": metadata, "content": content})
953
+ if history[0]["role"] == "system" and "tools" in history[0]:
954
+ parameters = json.loads(content)
955
+ content = {"name": metadata.strip(), "parameters": parameters}
956
+ else:
957
+ content = {"name": metadata.strip(), "content": content}
958
+ return content, history
959
+
960
+ @torch.inference_mode()
961
+ def chat(self, tokenizer, query: str, history: List[Dict] = None, role: str = "user",
962
+ max_length: int = 8192, num_beams=1, do_sample=True, top_p=0.8, temperature=0.8, logits_processor=None,
963
+ **kwargs):
964
+ if history is None:
965
+ history = []
966
+ if logits_processor is None:
967
+ logits_processor = LogitsProcessorList()
968
+ logits_processor.append(InvalidScoreLogitsProcessor())
969
+ gen_kwargs = {"max_length": max_length, "num_beams": num_beams, "do_sample": do_sample, "top_p": top_p,
970
+ "temperature": temperature, "logits_processor": logits_processor, **kwargs}
971
+ history.append({"role": role, "content": query})
972
+ inputs = tokenizer.apply_chat_template(history, add_generation_prompt=True, tokenize=True,
973
+ return_tensors="pt", return_dict=True)
974
+ inputs = inputs.to(self.device)
975
+ eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<|user|>"),
976
+ tokenizer.convert_tokens_to_ids("<|observation|>")]
977
+ outputs = self.generate(**inputs, **gen_kwargs, eos_token_id=eos_token_id)
978
+ outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):-1]
979
+ response = tokenizer.decode(outputs)
980
+ response, history = self.process_response(response, history)
981
+ return response, history
982
+
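A minimal usage sketch for `chat`, assuming the repo has been downloaded to a hypothetical local path `model_dir` and loaded with `trust_remote_code=True` so these remote-code classes are used:

    from transformers import AutoModelForCausalLM, AutoTokenizer

    model_dir = "./glm-4-9b-chat-1m"  # hypothetical local checkout of this repo
    tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_dir, trust_remote_code=True, device_map="auto"
    ).eval()

    response, history = model.chat(tokenizer, "What is a rotary embedding?", history=[])
    print(response)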
983
+ @torch.inference_mode()
984
+ def stream_chat(self, tokenizer, query: str, history: List[Dict] = None, role: str = "user",
985
+ past_key_values=None, max_length: int = 8192, do_sample=True, top_p=0.8, temperature=0.8,
986
+ logits_processor=None, return_past_key_values=False, **kwargs):
987
+ if history is None:
988
+ history = []
989
+ if logits_processor is None:
990
+ logits_processor = LogitsProcessorList()
991
+ logits_processor.append(InvalidScoreLogitsProcessor())
992
+ eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids("<|user|>"),
993
+ tokenizer.convert_tokens_to_ids("<|observation|>")]
994
+ gen_kwargs = {"max_length": max_length, "do_sample": do_sample, "top_p": top_p,
995
+ "temperature": temperature, "logits_processor": logits_processor, **kwargs}
996
+ if past_key_values is None:
997
+ inputs = tokenizer.apply_chat_template(history + [{"role": role, "content": query}],
998
+ add_generation_prompt=True, tokenize=True, return_tensors="pt",
999
+ return_dict=True)
1000
+ else:
1001
+ inputs = tokenizer.apply_chat_template([{"role": role, "content": query}], add_special_tokens=False,
1002
+ add_generation_prompt=True, tokenize=True, return_tensors="pt",
1003
+ return_dict=True)
1004
+ inputs = inputs.to(self.device)
1005
+ if past_key_values is not None:
1006
+ past_length = past_key_values[0][0].shape[2]
1007
+ inputs.position_ids += past_length
1008
+ attention_mask = inputs.attention_mask
1009
+ attention_mask = torch.cat((attention_mask.new_ones(1, past_length), attention_mask), dim=1)
1010
+ inputs['attention_mask'] = attention_mask
1011
+ history.append({"role": role, "content": query})
1012
+ for outputs in self.stream_generate(**inputs, past_key_values=past_key_values,
1013
+ eos_token_id=eos_token_id, return_past_key_values=return_past_key_values,
1014
+ **gen_kwargs):
1015
+ if return_past_key_values:
1016
+ outputs, past_key_values = outputs
1017
+ outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):-1]
1018
+ response = tokenizer.decode(outputs)
1019
+ if response and response[-1] != "�":
1020
+ response, new_history = self.process_response(response, history)
1021
+ if return_past_key_values:
1022
+ yield response, new_history, past_key_values
1023
+ else:
1024
+ yield response, new_history
1025
+
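`stream_chat` yields the *full* decoded response on every step, so a caller prints only the new suffix. A usage sketch for a plain-text turn, continuing the `model`/`tokenizer` sketch above:

    printed = 0
    for response, history in model.stream_chat(tokenizer, "Tell me a short story.", history=[]):
        print(response[printed:], end="", flush=True)
        printed = len(response)
    print()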
1026
+ @torch.inference_mode()
1027
+ def stream_generate(
1028
+ self,
1029
+ input_ids,
1030
+ generation_config: Optional[GenerationConfig] = None,
1031
+ logits_processor: Optional[LogitsProcessorList] = None,
1032
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
1033
+ prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
1034
+ return_past_key_values=False,
1035
+ **kwargs,
1036
+ ):
1037
+ batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]
1038
+
1039
+ if generation_config is None:
1040
+ generation_config = self.generation_config
1041
+ generation_config = copy.deepcopy(generation_config)
1042
+ model_kwargs = generation_config.update(**kwargs)
1043
+ model_kwargs["use_cache"] = generation_config.use_cache
1044
+ bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id
1045
+
1046
+ if isinstance(eos_token_id, int):
1047
+ eos_token_id = [eos_token_id]
1048
+ eos_token_id_tensor = torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None
1049
+
1050
+ has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
1051
+ if has_default_max_length and generation_config.max_new_tokens is None:
1052
+ warnings.warn(
1053
+ f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
1054
+ "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
1055
+ " recommend using `max_new_tokens` to control the maximum length of the generation.",
1056
+ UserWarning,
1057
+ )
1058
+ elif generation_config.max_new_tokens is not None:
1059
+ generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
1060
+ if not has_default_max_length:
1061
+ logger.warning(
1062
+ f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
1063
+ f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
1064
+ "Please refer to the documentation for more information. "
1065
+ "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)",
1067
+ )
1068
+
1069
+ if input_ids_seq_length >= generation_config.max_length:
1070
+ input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
1071
+ logger.warning(
1072
+ f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
1073
+ f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
1074
+ " increasing `max_new_tokens`."
1075
+ )
1076
+
1077
+ # 2. Set generation parameters if not already defined
1078
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
1079
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
1080
+
1081
+ logits_processor = self._get_logits_processor(
1082
+ generation_config=generation_config,
1083
+ input_ids_seq_length=input_ids_seq_length,
1084
+ encoder_input_ids=input_ids,
1085
+ prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
1086
+ logits_processor=logits_processor,
1087
+ )
1088
+
1089
+ stopping_criteria = self._get_stopping_criteria(
1090
+ generation_config=generation_config, stopping_criteria=stopping_criteria
1091
+ )
1092
+ logits_warper = self._get_logits_warper(generation_config)
1093
+
1094
+ unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
1095
+ scores = None
1096
+ while True:
1097
+ model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
1098
+ # forward pass to get next token
1099
+ outputs = self(
1100
+ **model_inputs,
1101
+ return_dict=True,
1102
+ output_attentions=False,
1103
+ output_hidden_states=False,
1104
+ )
1105
+
1106
+ next_token_logits = outputs.logits[:, -1, :]
1107
+
1108
+ # pre-process distribution
1109
+ next_token_scores = logits_processor(input_ids, next_token_logits)
1110
+ next_token_scores = logits_warper(input_ids, next_token_scores)
1111
+
1112
+ # sample
1113
+ probs = nn.functional.softmax(next_token_scores, dim=-1)
1114
+ if generation_config.do_sample:
1115
+ next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
1116
+ else:
1117
+ next_tokens = torch.argmax(probs, dim=-1)
1118
+ # update generated ids, model inputs, and length for next step
1119
+ input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
1120
+ model_kwargs = self._update_model_kwargs_for_generation(
1121
+ outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
1122
+ )
1123
+ unfinished_sequences = unfinished_sequences.mul(
1124
+ next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0)
1125
+ )
1126
+ if return_past_key_values:
1127
+ yield input_ids, outputs.past_key_values
1128
+ else:
1129
+ yield input_ids
1130
+ # stop when each sentence is finished, or if we exceed the maximum length
1131
+ if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
1132
+ break
1133
+
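The decode loop above warps the logits and then samples (or takes an argmax over the softmaxed scores, which selects the same token as an argmax over the raw logits). A standalone sketch of one sampling step, with hypothetical scores:

    import torch

    scores = torch.tensor([[2.0, 1.0, 0.5]])            # warped next-token scores
    probs = torch.softmax(scores, dim=-1)
    sampled = torch.multinomial(probs, num_samples=1)   # stochastic path (do_sample=True)
    greedy = torch.argmax(probs, dim=-1)                # deterministic fallback
    print(sampled.shape, greedy.item())                 # torch.Size([1, 1]) 0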
1134
+
1135
+ class ChatGLMForSequenceClassification(ChatGLMPreTrainedModel):
1136
+ def __init__(self, config: ChatGLMConfig, empty_init=True, device=None):
1137
+ super().__init__(config)
1138
+
1139
+ self.num_labels = config.num_labels
1140
+ self.transformer = ChatGLMModel(config, empty_init=empty_init, device=device)
1141
+
1142
+ self.classifier_head = nn.Linear(config.hidden_size, config.num_labels, bias=True, dtype=torch.half)
1143
+ if config.classifier_dropout is not None:
1144
+ self.dropout = nn.Dropout(config.classifier_dropout)
1145
+ else:
1146
+ self.dropout = None
1147
+ self.config = config
1148
+
1149
+ def forward(
1150
+ self,
1151
+ input_ids: Optional[torch.LongTensor] = None,
1152
+ position_ids: Optional[torch.LongTensor] = None,
1153
+ attention_mask: Optional[torch.Tensor] = None,
1154
+ full_attention_mask: Optional[torch.Tensor] = None,
1155
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
1156
+ inputs_embeds: Optional[torch.LongTensor] = None,
1157
+ labels: Optional[torch.LongTensor] = None,
1158
+ use_cache: Optional[bool] = None,
1159
+ output_hidden_states: Optional[bool] = None,
1160
+ return_dict: Optional[bool] = None,
1161
+ ) -> Union[Tuple[torch.Tensor, ...], SequenceClassifierOutputWithPast]:
1162
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1163
+
1164
+ transformer_outputs = self.transformer(
1165
+ input_ids=input_ids,
1166
+ position_ids=position_ids,
1167
+ attention_mask=attention_mask,
1168
+ full_attention_mask=full_attention_mask,
1169
+ past_key_values=past_key_values,
1170
+ inputs_embeds=inputs_embeds,
1171
+ use_cache=use_cache,
1172
+ output_hidden_states=output_hidden_states,
1173
+ return_dict=return_dict,
1174
+ )
1175
+
1176
+ hidden_states = transformer_outputs[0]
1177
+ pooled_hidden_states = hidden_states[:, -1]  # hidden states are [b, sq, h]; pool the last token, not the last batch row
1178
+ if self.dropout is not None:
1179
+ pooled_hidden_states = self.dropout(pooled_hidden_states)
1180
+ logits = self.classifier_head(pooled_hidden_states)
1181
+
1182
+ loss = None
1183
+ if labels is not None:
1184
+ if self.config.problem_type is None:
1185
+ if self.num_labels == 1:
1186
+ self.config.problem_type = "regression"
1187
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1188
+ self.config.problem_type = "single_label_classification"
1189
+ else:
1190
+ self.config.problem_type = "multi_label_classification"
1191
+
1192
+ if self.config.problem_type == "regression":
1193
+ loss_fct = MSELoss()
1194
+ if self.num_labels == 1:
1195
+ loss = loss_fct(logits.squeeze().float(), labels.squeeze())
1196
+ else:
1197
+ loss = loss_fct(logits.float(), labels)
1198
+ elif self.config.problem_type == "single_label_classification":
1199
+ loss_fct = CrossEntropyLoss()
1200
+ loss = loss_fct(logits.view(-1, self.num_labels).float(), labels.view(-1))
1201
+ elif self.config.problem_type == "multi_label_classification":
1202
+ loss_fct = BCEWithLogitsLoss()
1203
+ loss = loss_fct(logits.float(), labels.view(-1, self.num_labels))
1204
+
1205
+ if not return_dict:
1206
+ output = (logits,) + transformer_outputs[1:]
1207
+ return ((loss,) + output) if loss is not None else output
1208
+
1209
+ return SequenceClassifierOutputWithPast(
1210
+ loss=loss,
1211
+ logits=logits,
1212
+ past_key_values=transformer_outputs.past_key_values,
1213
+ hidden_states=transformer_outputs.hidden_states,
1214
+ attentions=transformer_outputs.attentions,
1215
+ )
special_tokens_map.json ADDED
@@ -0,0 +1,32 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|endoftext|>",
4
+ "[MASK]",
5
+ "[gMASK]",
6
+ "[sMASK]",
7
+ "<sop>",
8
+ "<eop>",
9
+ "<|system|>",
10
+ "<|user|>",
11
+ "<|assistant|>",
12
+ "<|observation|>",
13
+ "<|begin_of_image|>",
14
+ "<|end_of_image|>",
15
+ "<|begin_of_video|>",
16
+ "<|end_of_video|>"
17
+ ],
18
+ "eos_token": {
19
+ "content": "<|endoftext|>",
20
+ "lstrip": false,
21
+ "normalized": false,
22
+ "rstrip": false,
23
+ "single_word": false
24
+ },
25
+ "pad_token": {
26
+ "content": "<|endoftext|>",
27
+ "lstrip": false,
28
+ "normalized": false,
29
+ "rstrip": false,
30
+ "single_word": false
31
+ }
32
+ }
tokenization_chatglm.py ADDED
@@ -0,0 +1,323 @@
1
+ import regex as re
2
+ import base64
3
+ import os
4
+ import json
5
+ import tiktoken
6
+ from torch import TensorType
7
+ from typing import List, Optional, Union, Dict, Any
8
+ from transformers import PreTrainedTokenizer
9
+ from transformers.utils import logging, PaddingStrategy
10
+ from transformers.tokenization_utils_base import EncodedInput, BatchEncoding
11
+
12
+
13
+ class ChatGLM4Tokenizer(PreTrainedTokenizer):
14
+ vocab_files_names = {"vocab_file": "tokenizer.model"}
15
+ model_input_names = ["input_ids", "attention_mask", "position_ids"]
16
+
17
+ def __init__(
18
+ self,
19
+ vocab_file,
20
+ padding_side="left",
21
+ clean_up_tokenization_spaces=False,
22
+ encode_special_tokens=False,
23
+ **kwargs
24
+ ):
25
+ self.name = "GLM4Tokenizer"
26
+ self.vocab_file = vocab_file
27
+ pat_str = "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+"
28
+ self.pat_str = re.compile(pat_str)
29
+ self.encode_special_tokens = encode_special_tokens
30
+
31
+ mergeable_ranks = {}
32
+ with open(vocab_file) as f:
33
+ for line in f:
34
+ token, rank = line.strip().split()
35
+ rank = int(rank)
36
+ token = base64.b64decode(token)
37
+ mergeable_ranks[token] = rank
38
+
39
+ self.mergeable_ranks = mergeable_ranks
40
+
41
+ self.tokenizer = tiktoken.Encoding(
42
+ name="my_tokenizer",
43
+ pat_str=pat_str,
44
+ mergeable_ranks=mergeable_ranks,
45
+ special_tokens={}
46
+ )
47
+ self.decoder = {rank: token for token, rank in mergeable_ranks.items()}
48
+ self.n_words = len(self.decoder)
49
+
50
+ super().__init__(
51
+ padding_side=padding_side,
52
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
53
+ **kwargs
54
+ )
55
+
56
+ @property
57
+ def vocab_size(self):
58
+ return self.n_words
59
+
60
+ def get_vocab(self):
61
+ """ Returns vocab as a dict """
62
+ vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
63
+ vocab.update(self.added_tokens_encoder)
64
+ return vocab
65
+
66
+ def convert_tokens_to_string(self, tokens: List[Union[bytes, str]]) -> str:
67
+ """
68
+ Converts a sequence of tokens into a single string.
69
+ """
70
+ text = ""
71
+ temp = b""
72
+ for t in tokens:
73
+ if isinstance(t, str):
74
+ if temp:
75
+ text += temp.decode("utf-8", errors="replace")
76
+ temp = b""
77
+ text += t
78
+ elif isinstance(t, bytes):
79
+ temp += t
80
+ else:
81
+ raise TypeError("token should only be of type types or str")
82
+ if temp:
83
+ text += temp.decode("utf-8", errors="replace")
84
+ return text
85
+
86
+ def _tokenize(self, text, **kwargs):
87
+ tokens = []
88
+ ids = self.tokenizer.encode(text)
89
+ for t in ids:
90
+ tokens.append(self.decoder[t])
91
+ return tokens
92
+
93
+ def _convert_token_to_id(self, token):
94
+ """ Converts a token (str) in an id using the vocab. """
95
+ return self.mergeable_ranks[token]
96
+
97
+ def _convert_id_to_token(self, index):
98
+ """Converts an index (integer) in a token (str) using the vocab."""
99
+ return self.decoder.get(index, "")
100
+
101
+ def save_vocabulary(self, save_directory, filename_prefix=None):
102
+ """
103
+ Save the vocabulary and special tokens file to a directory.
104
+
105
+ Args:
106
+ save_directory (`str`):
107
+ The directory in which to save the vocabulary.
108
+ filename_prefix (`str`, *optional*):
109
+ An optional prefix to add to the names of the saved files.
110
+
111
+ Returns:
112
+ `Tuple(str)`: Paths to the files saved.
113
+ """
114
+ if os.path.isdir(save_directory):
115
+ vocab_file = os.path.join(
116
+ save_directory, self.vocab_files_names["vocab_file"]
117
+ )
118
+ else:
119
+ vocab_file = save_directory
120
+
121
+ with open(self.vocab_file, 'rb') as fin:
122
+ proto_str = fin.read()
123
+
124
+ with open(vocab_file, "wb") as writer:
125
+ writer.write(proto_str)
126
+
127
+ return (vocab_file,)
128
+
129
+ def get_prefix_tokens(self):
130
+ prefix_tokens = [self.convert_tokens_to_ids("[gMASK]"), self.convert_tokens_to_ids("<sop>")]
131
+ return prefix_tokens
132
+
133
+ def build_single_message(self, role, metadata, message, tokenize=True):
134
+ assert role in ["system", "user", "assistant", "observation"], role
135
+ if tokenize:
136
+ role_tokens = [self.convert_tokens_to_ids(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n",
137
+ disallowed_special=())
138
+ message_tokens = self.tokenizer.encode(message, disallowed_special=())
139
+ tokens = role_tokens + message_tokens
140
+ return tokens
141
+ else:
142
+ return str(f"<|{role}|>{metadata}\n{message}")
143
+
144
+ def apply_chat_template(
145
+ self,
146
+ conversation: Union[List[Dict[str, str]], List[List[Dict[str, str]]], "Conversation"],
147
+ add_generation_prompt: bool = False,
148
+ tokenize: bool = True,
149
+ padding: bool = False,
150
+ truncation: bool = False,
151
+ max_length: Optional[int] = None,
152
+ return_tensors: Optional[Union[str, TensorType]] = None,
153
+ return_dict: bool = False,
154
+ tokenizer_kwargs: Optional[Dict[str, Any]] = None,
155
+ add_special_tokens: bool = True,
156
+ **kwargs,
157
+ ) -> Union[str, List[int], List[str], List[List[int]], BatchEncoding]:
158
+
159
+ if return_dict and not tokenize:
160
+ raise ValueError(
161
+ "`return_dict=True` is incompatible with `tokenize=False`, because there is no dict "
162
+ "of tokenizer outputs to return."
163
+ )
164
+
165
+ def handle_single_conversation(conversation):
166
+ input_ids = self.get_prefix_tokens() if add_special_tokens else []
167
+ input_message = "[gMASK]<sop>" if add_special_tokens else ""
168
+ for item in conversation:
169
+ if item.get("tools"):
170
+ tools = item["tools"]
171
+ content = "你是一个名为 GLM-4 的人工智能助手。你是基于智谱AI训练的语言模型 GLM-4 模型开发的,你的任务是针对用户的问题和要求提供适当的答复和支持。"
172
+ for tool in tools:
173
+ if tool["type"] == "function":
174
+ function = tool["function"]
175
+ content += f"\n\n## {function['name']}\n\n{json.dumps(function, ensure_ascii=False, indent=4)}"
176
+ content += "\n在调用上述函数时,请使用 Json 格式表示调用的参数。"
177
+ elif tool["type"] == "python":
178
+ content += "\n\n## python\n\n当你向 `python` 发送包含 Python 代码的消息时,该代码将会在一个有状态的 Jupyter notebook 环境中执行。\n`python` 返回代码执行的输出,或在执行 60 秒后返回超时。\n`/mnt/data` 将会持久化存储你的文件。在此会话中,`python` 无法访问互联网。不要使用 `python` 进行任何网络请求或者在线 API 调用,这些在线内容的访问将不会成功。"
179
+ elif tool["type"] == "simple_browser":
180
+ content += "\n\n## simple_browser\n\n你可以使用 `simple_browser` 工具。该工具支持以下函数:\n`search(query: str, recency_days: int)`:使用搜索引擎进行查询并显示结果,可以使用 `recency_days` 参数控制搜索内容的时效性。\n`mclick(ids: list[int])`:获取一系列指定 id 的页面内容。每次调用时,须选择3-10个页面。选择多个角度的页面,同时尽可能选择可信任的信息来源。考虑到部分页面是无法加载的,你也可以多打开一些可能有用的页面而不用担心内容过多。\n`open_url(url: str)`:打开指定的 URL。\n\n使用 `【{引用 id}†{引用文本}】` 来引用内容。\n\n操作步骤:1. 使用 `search` 来获得信息列表; 2. 使用 `mclick` 来获取指定 ID 页面的内容; 3. 根据获得的内容进行回复。在回复中应当引用信息来源。\n 如果用户提供了 URL,也可以用 `open_url` 直接打开页面。\n如果初次搜索结果没有找到合适的信息,也可以再次使用 `search` 进行搜索。"
181
+ elif tool["type"] == "cogview":
182
+ content += "\n\n## cogview\n\n如果用户的请求中包含了对图像的描述,你可以使用 `cogview` 来生成图像并展示给用户。你需要向 `cogview` 发送图像描述,规则:\n- 发送给 `cogview` 的消息必须使用英语。用户的中文描述必须完全翻译为英语。\n- 应当尽可能详细地描述图像生成的需求,需求描述约 100 英文单词。\n- 保持用户原始描述的意图。不要虚构内容或者没见过的人物。\n- 如无特殊说明,所在地为中国,持有中国立场并遵循中国社会主义价值观。"
183
+ else:
184
+ raise NotImplementedError(f"Unknown tool type {tool['type']}")
185
+ input = self.build_single_message("system", "", content, tokenize=tokenize)
186
+ if tokenize:
187
+ input_ids.extend(input)
188
+ else:
189
+ input_message += input
190
+ if item["content"]:
191
+ input = self.build_single_message(
192
+ item["role"],
193
+ item.get("metadata", ""),
194
+ item["content"],
195
+ tokenize=tokenize
196
+ )
197
+ if tokenize:
198
+ input_ids.extend(input)
199
+ else:
200
+ input_message += input
201
+ if add_generation_prompt:
202
+ if tokenize:
203
+ input_ids.extend([self.convert_tokens_to_ids("<|assistant|>")])
204
+ else:
205
+ input_message += "<|assistant|>"
206
+
207
+ return input_ids if tokenize else input_message
208
+
209
+ # Main logic to handle different conversation formats
210
+ if isinstance(conversation, list) and all(isinstance(i, dict) for i in conversation):
211
+ result = handle_single_conversation(conversation)
212
+ elif isinstance(conversation, list) and all(isinstance(i, list) for i in conversation):
213
+ result = [handle_single_conversation(c) for c in conversation]
214
+ elif hasattr(conversation, "messages"):
215
+ result = handle_single_conversation(conversation.messages)
216
+ else:
217
+ raise ValueError("Invalid conversation format")
218
+
219
+ if tokenize:
220
+ output = self.batch_encode_plus(
221
+ [result] if isinstance(result[0], int) else result,
222
+ padding=padding,
223
+ truncation=truncation,
224
+ max_length=max_length,
225
+ return_tensors=return_tensors,
226
+ is_split_into_words=True,
227
+ add_special_tokens=False
228
+ )
229
+ if return_dict:
230
+ return output
231
+ else:
232
+ return output["input_ids"]
233
+ else:
234
+ return result
235
+
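A usage sketch for `apply_chat_template`, continuing the `tokenizer` sketch above; with `return_dict=True` it returns `input_ids`, `attention_mask` and `position_ids` ready for `generate`:

    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hi!"},
    ]
    inputs = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, tokenize=True,
        return_tensors="pt", return_dict=True,
    )
    print(inputs["input_ids"].shape, inputs.keys())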
236
+
237
+ def build_inputs_with_special_tokens(
238
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
239
+ ) -> List[int]:
240
+ """
241
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
242
+ adding special tokens. A GLM-4 sequence has the following format:
243
+
244
+ - single sequence: `[gMASK] <sop> X`
245
+ - pair of sequences: `[gMASK] <sop> A B <|endoftext|>`
246
+
247
+ Args:
248
+ token_ids_0 (`List[int]`):
249
+ List of IDs to which the special tokens will be added.
250
+ token_ids_1 (`List[int]`, *optional*):
251
+ Optional second list of IDs for sequence pairs.
252
+
253
+ Returns:
254
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
255
+ """
256
+ prefix_tokens = self.get_prefix_tokens()
257
+ token_ids_0 = prefix_tokens + token_ids_0
258
+ if token_ids_1 is not None:
259
+ token_ids_0 = token_ids_0 + token_ids_1 + [self.convert_tokens_to_ids("<eos>")]
260
+ return token_ids_0
261
+
262
+ def _pad(
263
+ self,
264
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
265
+ max_length: Optional[int] = None,
266
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
267
+ pad_to_multiple_of: Optional[int] = None,
268
+ return_attention_mask: Optional[bool] = None,
269
+ ) -> dict:
270
+ """
271
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
272
+
273
+ Args:
274
+ encoded_inputs:
275
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
276
+ max_length: maximum length of the returned list and optionally padding length (see below).
277
+ Will truncate by taking into account the special tokens.
278
+ padding_strategy: PaddingStrategy to use for padding.
279
+
280
+ - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
281
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
282
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
283
+ The tokenizer padding sides are defined in self.padding_side:
284
+
285
+ - 'left': pads on the left of the sequences
286
+ - 'right': pads on the right of the sequences
287
+ pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
288
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
289
+ `>= 7.5` (Volta).
290
+ return_attention_mask:
291
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
292
+ """
293
+ # Load from model defaults
294
+ assert self.padding_side == "left"
295
+
296
+ required_input = encoded_inputs[self.model_input_names[0]]
297
+ seq_length = len(required_input)
298
+
299
+ if padding_strategy == PaddingStrategy.LONGEST:
300
+ max_length = len(required_input)
301
+
302
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
303
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
304
+
305
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
306
+
307
+ # Initialize attention mask if not present.
308
+ if "attention_mask" not in encoded_inputs:
309
+ encoded_inputs["attention_mask"] = [1] * seq_length
310
+
311
+ if "position_ids" not in encoded_inputs:
312
+ encoded_inputs["position_ids"] = list(range(seq_length))
313
+
314
+ if needs_to_be_padded:
315
+ difference = max_length - len(required_input)
316
+
317
+ if "attention_mask" in encoded_inputs:
318
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
319
+ if "position_ids" in encoded_inputs:
320
+ encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"]
321
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
322
+
323
+ return encoded_inputs
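`_pad` enforces left padding and extends `attention_mask` and `position_ids` with zeros on the left. A hand-worked sketch of a 3-token input padded to length 5, assuming pad id 151329:

    ids = [101, 102, 103]                                    # hypothetical token ids
    difference = 5 - len(ids)
    attention_mask = [0] * difference + [1] * len(ids)       # [0, 0, 1, 1, 1]
    position_ids = [0] * difference + list(range(len(ids)))  # [0, 0, 0, 1, 2]
    input_ids = [151329] * difference + ids                  # pads go on the left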
tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5a493598071550244b2ee7f26118f3edec2150b9dfa967929a99052ac83fe716
3
+ size 2623634
tokenizer_config.json ADDED
@@ -0,0 +1,148 @@
1
+ {
2
+ "added_tokens_decoder": {
3
+ "151329": {
4
+ "content": "<|endoftext|>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "151330": {
12
+ "content": "[MASK]",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "151331": {
20
+ "content": "[gMASK]",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "151332": {
28
+ "content": "[sMASK]",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "151333": {
36
+ "content": "<sop>",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ },
43
+ "151334": {
44
+ "content": "<eop>",
45
+ "lstrip": false,
46
+ "normalized": false,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": true
50
+ },
51
+ "151335": {
52
+ "content": "<|system|>",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": true
58
+ },
59
+ "151336": {
60
+ "content": "<|user|>",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": true
66
+ },
67
+ "151337": {
68
+ "content": "<|assistant|>",
69
+ "lstrip": false,
70
+ "normalized": false,
71
+ "rstrip": false,
72
+ "single_word": false,
73
+ "special": true
74
+ },
75
+ "151338": {
76
+ "content": "<|observation|>",
77
+ "lstrip": false,
78
+ "normalized": false,
79
+ "rstrip": false,
80
+ "single_word": false,
81
+ "special": true
82
+ },
83
+ "151339": {
84
+ "content": "<|begin_of_image|>",
85
+ "lstrip": false,
86
+ "normalized": false,
87
+ "rstrip": false,
88
+ "single_word": false,
89
+ "special": true
90
+ },
91
+ "151340": {
92
+ "content": "<|end_of_image|>",
93
+ "lstrip": false,
94
+ "normalized": false,
95
+ "rstrip": false,
96
+ "single_word": false,
97
+ "special": true
98
+ },
99
+ "151341": {
100
+ "content": "<|begin_of_video|>",
101
+ "lstrip": false,
102
+ "normalized": false,
103
+ "rstrip": false,
104
+ "single_word": false,
105
+ "special": true
106
+ },
107
+ "151342": {
108
+ "content": "<|end_of_video|>",
109
+ "lstrip": false,
110
+ "normalized": false,
111
+ "rstrip": false,
112
+ "single_word": false,
113
+ "special": true
114
+ }
115
+ },
116
+ "additional_special_tokens": [
117
+ "<|endoftext|>",
118
+ "[MASK]",
119
+ "[gMASK]",
120
+ "[sMASK]",
121
+ "<sop>",
122
+ "<eop>",
123
+ "<|system|>",
124
+ "<|user|>",
125
+ "<|assistant|>",
126
+ "<|observation|>",
127
+ "<|begin_of_image|>",
128
+ "<|end_of_image|>",
129
+ "<|begin_of_video|>",
130
+ "<|end_of_video|>"
131
+ ],
132
+ "auto_map": {
133
+ "AutoTokenizer": [
134
+ "tokenization_chatglm.ChatGLM4Tokenizer",
135
+ null
136
+ ]
137
+ },
138
+ "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message + '\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ 'Human: ' + content + '\nAssistant: ' }}{% elif message['role'] == 'assistant' %}{{ content + '<|endoftext|>' + '\n' }}{% endif %}{% endfor %}",
139
+ "clean_up_tokenization_spaces": false,
140
+ "do_lower_case": false,
141
+ "eos_token": "<|endoftext|>",
142
+ "model_max_length": 1024000,
143
+ "pad_token": "<|endoftext|>",
144
+ "padding_side": "left",
145
+ "remove_space": false,
146
+ "split_special_tokens": false,
147
+ "tokenizer_class": "ChatGLM4Tokenizer"
148
+ }