Merge branch 'main' of https://huggingface.co/THUDM/glm-4-9b-chat
- LICENSE +1 -1
- README.md +7 -7
- modeling_chatglm.py +2 -2
- tokenization_chatglm.py +4 -4
LICENSE
CHANGED
@@ -45,7 +45,7 @@ The glm-4-9b License
 
 2. License
 
-
+Under the terms and conditions of this license, the Licensor hereby grants you a non-exclusive, worldwide, non-transferable, non-sublicensable, revocable, royalty-free copyright license.
 This license allows you to use all open source models in this repository for free for academic research. For users who wish to use the models for commercial purposes, please do so [here](https://open.bigmodel.cn/mla/form)
 Complete registration. Registered users are free to use this model for commercial activities, but must comply with all terms and conditions of this license.
 The copyright notice and this license notice shall be included in all copies or substantial portions of the Software.
README.md
CHANGED
@@ -2,15 +2,15 @@
 license: other
 license_name: glm-4
 license_link: https://huggingface.co/THUDM/glm-4-9b-chat/blob/main/LICENSE
-
 language:
-
-
+- zh
+- en
 tags:
-
-
-
+- glm
+- chatglm
+- thudm
 inference: false
+pipeline_tag: text-generation
 ---
 
 # GLM-4-9B-Chat
@@ -168,4 +168,4 @@ GLM-4 模型的权重的使用则需要遵循 [LICENSE](LICENSE)。
 pages={320--335},
 year={2022}
 }
-```
\ No newline at end of file
+```
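The merged front matter can be read back through the Hub; a minimal sketch using the `huggingface_hub` ModelCard API, where the expected values in the comments simply restate the YAML fields added above:

from huggingface_hub import ModelCard

# Load the model card for this repo and inspect the new front-matter fields.
card = ModelCard.load("THUDM/glm-4-9b-chat")
print(card.data.pipeline_tag)  # "text-generation"
print(card.data.language)      # ["zh", "en"]
print(card.data.tags)          # ["glm", "chatglm", "thudm"]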
modeling_chatglm.py
CHANGED
@@ -21,7 +21,7 @@ from transformers.modeling_outputs import (
     SequenceClassifierOutputWithPast,
 )
 from transformers.modeling_utils import PreTrainedModel
-from transformers.utils import logging
+from transformers.utils import logging, is_torch_npu_available
 from transformers.generation.logits_process import LogitsProcessor
 from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig, ModelOutput
 
@@ -29,7 +29,7 @@ from .configuration_chatglm import ChatGLMConfig
 
 # flags required to enable jit fusion kernels
 
-if sys.platform != 'darwin':
+if sys.platform != 'darwin' and not is_torch_npu_available():
     torch._C._jit_set_profiling_mode(False)
     torch._C._jit_set_profiling_executor(False)
     torch._C._jit_override_can_fuse_on_cpu(True)
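The new guard can be probed in isolation; a small sketch, assuming a current `transformers` install (which, as the import above shows, exports `is_torch_npu_available` from `transformers.utils`):

import sys

from transformers.utils import is_torch_npu_available

# Report which branch the guard above would take on this machine: the JIT
# fusion flags are only set off macOS and off Ascend NPU builds.
print("platform:", sys.platform)
print("torch NPU available:", is_torch_npu_available())
print("sets JIT fusion flags:", sys.platform != 'darwin' and not is_torch_npu_available())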
tokenization_chatglm.py
CHANGED
@@ -63,22 +63,22 @@ class ChatGLM4Tokenizer(PreTrainedTokenizer):
         vocab.update(self.added_tokens_encoder)
         return vocab
 
-    def convert_tokens_to_string(self, tokens: List[Union[bytes, str]]) -> str:
+    def convert_tokens_to_string(self, tokens: List[Union[bytes, str, int]]) -> str:
         """
         Converts a sequence of tokens in a single string.
         """
         text = ""
         temp = b""
         for t in tokens:
+            if isinstance(t, int):
+                t = chr(t)
             if isinstance(t, str):
                 if temp:
                     text += temp.decode("utf-8", errors="replace")
-                    temp = b""
-                text += t
             elif isinstance(t, bytes):
                 temp += t
             else:
-                raise TypeError("token should only be of type types or str")
+                raise TypeError("token should only be of type int, bytes or str")
         if temp:
             text += temp.decode("utf-8", errors="replace")
         return text
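For intuition, here is a standalone sketch of the byte-fallback decoding pattern this method follows; decode_mixed_tokens is a hypothetical helper, not part of the repo, and it keeps the temp reset and text += t append from the pre-change version of the loop so that plain str tokens are emitted:

from typing import List, Union

def decode_mixed_tokens(tokens: List[Union[bytes, str, int]]) -> str:
    # Illustrative only: accumulate raw bytes until a str token forces a
    # flush; int tokens are mapped through chr() first, as in the new branch.
    text = ""
    temp = b""
    for t in tokens:
        if isinstance(t, int):
            t = chr(t)  # an int code point becomes a one-character str
        if isinstance(t, str):
            if temp:
                text += temp.decode("utf-8", errors="replace")
                temp = b""
            text += t
        elif isinstance(t, bytes):
            temp += t
        else:
            raise TypeError("token should only be of type int, bytes or str")
    if temp:
        text += temp.decode("utf-8", errors="replace")
    return text

# A multi-byte UTF-8 character split across byte tokens decodes once complete:
print(decode_mixed_tokens(["GLM", b"\xe4\xb8", b"\xad", "文"]))  # -> "GLM中文"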