Upload model

Files changed:
- README.md +199 -0
- config.json +42 -0
- configuration_qwen.py +71 -0
- cpp_kernels.py +55 -0
- generation_config.json +15 -0
- model-00001-of-00002.safetensors +3 -0
- model-00002-of-00002.safetensors +3 -0
- model.safetensors.index.json +138 -0
- modeling_qwen.py +1363 -0
- qwen_generation_utils.py +416 -0
README.md
ADDED
@@ -0,0 +1,199 @@
---
library_name: transformers
tags: []
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->

## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->

This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.

- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]

#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary

## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]
config.json
ADDED
@@ -0,0 +1,42 @@
{
  "_name_or_path": "/data/dhildebrand/Master_LLMCompression/Pruning/other/SLEB/test/qwen_7B",
  "architectures": [
    "QWenLMHeadModel"
  ],
  "attn_dropout_prob": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_qwen.QWenConfig",
    "AutoModelForCausalLM": "modeling_qwen.QWenLMHeadModel"
  },
  "bf16": true,
  "emb_dropout_prob": 0.0,
  "fp16": false,
  "fp32": false,
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 22016,
  "kv_channels": 128,
  "layer_norm_epsilon": 1e-06,
  "max_position_embeddings": 32768,
  "model_type": "qwen",
  "no_bias": true,
  "num_attention_heads": 32,
  "num_hidden_layers": 16,
  "onnx_safe": null,
  "rotary_emb_base": 10000,
  "rotary_pct": 1.0,
  "scale_attn_weights": true,
  "seq_length": 8192,
  "softmax_in_fp32": false,
  "tie_word_embeddings": false,
  "tokenizer_class": "QWenTokenizer",
  "torch_dtype": "bfloat16",
  "transformers_version": "4.43.1",
  "use_cache": true,
  "use_cache_kernel": false,
  "use_cache_quantization": false,
  "use_dynamic_ntk": true,
  "use_flash_attn": true,
  "use_logn_attn": true,
  "vocab_size": 151936
}
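Because `auto_map` above routes `AutoConfig`/`AutoModelForCausalLM` to the custom `configuration_qwen.py` and `modeling_qwen.py` files in this upload, the checkpoint must be loaded with `trust_remote_code=True`. Below is a minimal loading sketch, not part of the upload: the repo id is a placeholder, and it assumes the Qwen tokenizer files (which are not in this diff) are also available in the repository.

```python
# Loading sketch (assumptions: placeholder repo id, tokenizer files present alongside the model).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "<this-repo-id>"  # placeholder

# trust_remote_code=True is required because of the auto_map entries in config.json.
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" / "bf16": true
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(repo_id, trust_remote_code=True)
```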
configuration_qwen.py
ADDED
@@ -0,0 +1,71 @@
# Copyright (c) Alibaba Cloud.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from transformers import PretrainedConfig


class QWenConfig(PretrainedConfig):
    model_type = "qwen"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=151936,
        hidden_size=4096,
        num_hidden_layers=32,
        num_attention_heads=32,
        emb_dropout_prob=0.0,
        attn_dropout_prob=0.0,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        max_position_embeddings=8192,
        scale_attn_weights=True,
        use_cache=True,
        bf16=False,
        fp16=False,
        fp32=False,
        kv_channels=128,
        rotary_pct=1.0,
        rotary_emb_base=10000,
        use_dynamic_ntk=True,
        use_logn_attn=True,
        use_flash_attn="auto",
        intermediate_size=22016,
        no_bias=True,
        tie_word_embeddings=False,
        use_cache_quantization=False,
        use_cache_kernel=False,
        softmax_in_fp32=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.emb_dropout_prob = emb_dropout_prob
        self.attn_dropout_prob = attn_dropout_prob
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.bf16 = bf16
        self.fp16 = fp16
        self.fp32 = fp32
        self.kv_channels = kv_channels
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.use_dynamic_ntk = use_dynamic_ntk
        self.use_logn_attn = use_logn_attn
        self.use_flash_attn = use_flash_attn
        self.no_bias = no_bias
        self.use_cache_quantization = use_cache_quantization
        self.use_cache_kernel = use_cache_kernel
        self.softmax_in_fp32 = softmax_in_fp32
        super().__init__(
            tie_word_embeddings=tie_word_embeddings,
            **kwargs
        )
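The defaults above describe the original 32-layer Qwen-7B; the pruned checkpoint in this upload overrides `num_hidden_layers` to 16 (and `max_position_embeddings` to 32768) through config.json. A hedged sketch of building the equivalent configuration programmatically; the local import path is an assumption:

```python
# Sketch only: reproduces the key overrides that config.json applies on top of the defaults above.
from configuration_qwen import QWenConfig  # assumes the file is importable from the working directory

config = QWenConfig(
    num_hidden_layers=16,            # pruned from the 32-layer default
    max_position_embeddings=32768,
    bf16=True,
    use_flash_attn=True,
    seq_length=8192,                 # extra field; stored via **kwargs and read by modeling_qwen.py
)
print(config.num_hidden_layers, config.vocab_size)  # 16 151936
```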
cpp_kernels.py
ADDED
@@ -0,0 +1,55 @@
from torch.utils import cpp_extension
import pathlib
import os
import subprocess

def _get_cuda_bare_metal_version(cuda_dir):
    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"],
                                         universal_newlines=True)
    output = raw_output.split()
    release_idx = output.index("release") + 1
    release = output[release_idx].split(".")
    bare_metal_major = release[0]
    bare_metal_minor = release[1][0]

    return raw_output, bare_metal_major, bare_metal_minor

def _create_build_dir(buildpath):
    try:
        os.mkdir(buildpath)
    except OSError:
        if not os.path.isdir(buildpath):
            print(f"Creation of the build directory {buildpath} failed")

# Check if cuda 11 is installed for compute capability 8.0
cc_flag = []
_, bare_metal_major, bare_metal_minor = _get_cuda_bare_metal_version(cpp_extension.CUDA_HOME)
if int(bare_metal_major) >= 11:
    cc_flag.append('-gencode')
    cc_flag.append('arch=compute_80,code=sm_80')
    if int(bare_metal_minor) >= 7:
        cc_flag.append('-gencode')
        cc_flag.append('arch=compute_90,code=sm_90')

# Build path
srcpath = pathlib.Path(__file__).parent.absolute()
buildpath = srcpath / 'build'
_create_build_dir(buildpath)

def _cpp_extention_load_helper(name, sources, extra_cuda_flags):
    return cpp_extension.load(
        name=name,
        sources=sources,
        build_directory=buildpath,
        extra_cflags=['-O3', ],
        extra_cuda_cflags=['-O3',
                           '-gencode', 'arch=compute_70,code=sm_70',
                           '--use_fast_math'] + extra_cuda_flags + cc_flag,
        verbose=1
    )

extra_flags = []

cache_autogptq_cuda_256_sources = ["./cache_autogptq_cuda_256.cpp",
                                   "./cache_autogptq_cuda_kernel_256.cu"]
cache_autogptq_cuda_256 = _cpp_extention_load_helper("cache_autogptq_cuda_256", cache_autogptq_cuda_256_sources, extra_flags)
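Note that the extension is JIT-compiled at import time via `torch.utils.cpp_extension.load`, so importing `cpp_kernels` needs a working CUDA toolkit (`nvcc`) and the two kernel source files, which are not part of this upload. A small defensive-import sketch, broader than the `ImportError`-only guard used later in `modeling_qwen.py`, since compilation failures can raise other exceptions:

```python
# Sketch: import the JIT-compiled KV-cache kernels defensively and fall back to the
# pure-PyTorch dequantize path when nvcc or the .cpp/.cu sources are missing.
try:
    from cpp_kernels import cache_autogptq_cuda_256  # triggers nvcc compilation on first import
except Exception:
    cache_autogptq_cuda_256 = None
```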
generation_config.json
ADDED
@@ -0,0 +1,15 @@
{
  "chat_format": "raw",
  "do_sample": true,
  "eos_token_id": 151643,
  "max_new_tokens": 512,
  "pad_token_id": 151643,
  "stop_words_ids": [
    [
      151643
    ]
  ],
  "top_k": 0,
  "top_p": 0.8,
  "transformers_version": "4.43.1"
}
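These defaults (nucleus sampling with `top_p=0.8`, `top_k=0` disabled, up to 512 new tokens, and token 151643 as EOS, pad, and stop word) are picked up automatically by `model.generate()`. Note that `"chat_format": "raw"` marks this as a base model, so `model.chat()` would trip the `_ERROR_BAD_CHAT_FORMAT` guard in `modeling_qwen.py`. A rough usage sketch, assuming `model` and `tokenizer` were loaded as in the earlier example:

```python
# Sketch, assuming `model` and `tokenizer` from the loading example above.
inputs = tokenizer("Once upon a time", return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs)  # defaults from generation_config.json: do_sample, top_p=0.8, max_new_tokens=512
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```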
model-00001-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5320e5c8d6ff70eaa65440af6b1c86cc9243803a621823965deeba8893dc083d
size 4988485656
model-00002-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f2eb1c2736ffd9b8d79f1d8e061733396a8d833ce1cfba7ac38c1101f19e40c2
size 3977516936
model.safetensors.index.json
ADDED
@@ -0,0 +1,138 @@
{
  "metadata": {
    "total_size": 8965988352
  },
  "weight_map": {
    "lm_head.weight": "model-00002-of-00002.safetensors",
    "transformer.h.0.attn.c_attn.bias": "model-00001-of-00002.safetensors",
    "transformer.h.0.attn.c_attn.weight": "model-00001-of-00002.safetensors",
    "transformer.h.0.attn.c_proj.weight": "model-00001-of-00002.safetensors",
    "transformer.h.0.ln_1.weight": "model-00001-of-00002.safetensors",
    "transformer.h.0.ln_2.weight": "model-00001-of-00002.safetensors",
    "transformer.h.0.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
    "transformer.h.0.mlp.w1.weight": "model-00001-of-00002.safetensors",
    "transformer.h.0.mlp.w2.weight": "model-00001-of-00002.safetensors",
    "transformer.h.1.attn.c_attn.bias": "model-00001-of-00002.safetensors",
    "transformer.h.1.attn.c_attn.weight": "model-00001-of-00002.safetensors",
    "transformer.h.1.attn.c_proj.weight": "model-00001-of-00002.safetensors",
    "transformer.h.1.ln_1.weight": "model-00001-of-00002.safetensors",
    "transformer.h.1.ln_2.weight": "model-00001-of-00002.safetensors",
    "transformer.h.1.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
    "transformer.h.1.mlp.w1.weight": "model-00001-of-00002.safetensors",
    "transformer.h.1.mlp.w2.weight": "model-00001-of-00002.safetensors",
    "transformer.h.10.attn.c_attn.bias": "model-00002-of-00002.safetensors",
    "transformer.h.10.attn.c_attn.weight": "model-00002-of-00002.safetensors",
    "transformer.h.10.attn.c_proj.weight": "model-00002-of-00002.safetensors",
    "transformer.h.10.ln_1.weight": "model-00002-of-00002.safetensors",
    "transformer.h.10.ln_2.weight": "model-00002-of-00002.safetensors",
    "transformer.h.10.mlp.c_proj.weight": "model-00002-of-00002.safetensors",
    "transformer.h.10.mlp.w1.weight": "model-00002-of-00002.safetensors",
    "transformer.h.10.mlp.w2.weight": "model-00002-of-00002.safetensors",
    "transformer.h.11.attn.c_attn.bias": "model-00002-of-00002.safetensors",
    "transformer.h.11.attn.c_attn.weight": "model-00002-of-00002.safetensors",
    "transformer.h.11.attn.c_proj.weight": "model-00002-of-00002.safetensors",
    "transformer.h.11.ln_1.weight": "model-00002-of-00002.safetensors",
    "transformer.h.11.ln_2.weight": "model-00002-of-00002.safetensors",
    "transformer.h.11.mlp.c_proj.weight": "model-00002-of-00002.safetensors",
    "transformer.h.11.mlp.w1.weight": "model-00002-of-00002.safetensors",
    "transformer.h.11.mlp.w2.weight": "model-00002-of-00002.safetensors",
    "transformer.h.12.attn.c_attn.bias": "model-00002-of-00002.safetensors",
    "transformer.h.12.attn.c_attn.weight": "model-00002-of-00002.safetensors",
    "transformer.h.12.attn.c_proj.weight": "model-00002-of-00002.safetensors",
    "transformer.h.12.ln_1.weight": "model-00002-of-00002.safetensors",
    "transformer.h.12.ln_2.weight": "model-00002-of-00002.safetensors",
    "transformer.h.12.mlp.c_proj.weight": "model-00002-of-00002.safetensors",
    "transformer.h.12.mlp.w1.weight": "model-00002-of-00002.safetensors",
    "transformer.h.12.mlp.w2.weight": "model-00002-of-00002.safetensors",
    "transformer.h.13.attn.c_attn.bias": "model-00002-of-00002.safetensors",
    "transformer.h.13.attn.c_attn.weight": "model-00002-of-00002.safetensors",
    "transformer.h.13.attn.c_proj.weight": "model-00002-of-00002.safetensors",
    "transformer.h.13.ln_1.weight": "model-00002-of-00002.safetensors",
    "transformer.h.13.ln_2.weight": "model-00002-of-00002.safetensors",
    "transformer.h.13.mlp.c_proj.weight": "model-00002-of-00002.safetensors",
    "transformer.h.13.mlp.w1.weight": "model-00002-of-00002.safetensors",
    "transformer.h.13.mlp.w2.weight": "model-00002-of-00002.safetensors",
    "transformer.h.14.attn.c_attn.bias": "model-00002-of-00002.safetensors",
    "transformer.h.14.attn.c_attn.weight": "model-00002-of-00002.safetensors",
    "transformer.h.14.attn.c_proj.weight": "model-00002-of-00002.safetensors",
    "transformer.h.14.ln_1.weight": "model-00002-of-00002.safetensors",
    "transformer.h.14.ln_2.weight": "model-00002-of-00002.safetensors",
    "transformer.h.14.mlp.c_proj.weight": "model-00002-of-00002.safetensors",
    "transformer.h.14.mlp.w1.weight": "model-00002-of-00002.safetensors",
    "transformer.h.14.mlp.w2.weight": "model-00002-of-00002.safetensors",
    "transformer.h.15.attn.c_attn.bias": "model-00002-of-00002.safetensors",
    "transformer.h.15.attn.c_attn.weight": "model-00002-of-00002.safetensors",
    "transformer.h.15.attn.c_proj.weight": "model-00002-of-00002.safetensors",
    "transformer.h.15.ln_1.weight": "model-00002-of-00002.safetensors",
    "transformer.h.15.ln_2.weight": "model-00002-of-00002.safetensors",
    "transformer.h.15.mlp.c_proj.weight": "model-00002-of-00002.safetensors",
    "transformer.h.15.mlp.w1.weight": "model-00002-of-00002.safetensors",
    "transformer.h.15.mlp.w2.weight": "model-00002-of-00002.safetensors",
    "transformer.h.2.attn.c_attn.bias": "model-00001-of-00002.safetensors",
    "transformer.h.2.attn.c_attn.weight": "model-00001-of-00002.safetensors",
    "transformer.h.2.attn.c_proj.weight": "model-00001-of-00002.safetensors",
    "transformer.h.2.ln_1.weight": "model-00001-of-00002.safetensors",
    "transformer.h.2.ln_2.weight": "model-00001-of-00002.safetensors",
    "transformer.h.2.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
    "transformer.h.2.mlp.w1.weight": "model-00001-of-00002.safetensors",
    "transformer.h.2.mlp.w2.weight": "model-00001-of-00002.safetensors",
    "transformer.h.3.attn.c_attn.bias": "model-00001-of-00002.safetensors",
    "transformer.h.3.attn.c_attn.weight": "model-00001-of-00002.safetensors",
    "transformer.h.3.attn.c_proj.weight": "model-00001-of-00002.safetensors",
    "transformer.h.3.ln_1.weight": "model-00001-of-00002.safetensors",
    "transformer.h.3.ln_2.weight": "model-00001-of-00002.safetensors",
    "transformer.h.3.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
    "transformer.h.3.mlp.w1.weight": "model-00001-of-00002.safetensors",
    "transformer.h.3.mlp.w2.weight": "model-00001-of-00002.safetensors",
    "transformer.h.4.attn.c_attn.bias": "model-00001-of-00002.safetensors",
    "transformer.h.4.attn.c_attn.weight": "model-00001-of-00002.safetensors",
    "transformer.h.4.attn.c_proj.weight": "model-00001-of-00002.safetensors",
    "transformer.h.4.ln_1.weight": "model-00001-of-00002.safetensors",
    "transformer.h.4.ln_2.weight": "model-00001-of-00002.safetensors",
    "transformer.h.4.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
    "transformer.h.4.mlp.w1.weight": "model-00001-of-00002.safetensors",
    "transformer.h.4.mlp.w2.weight": "model-00001-of-00002.safetensors",
    "transformer.h.5.attn.c_attn.bias": "model-00001-of-00002.safetensors",
    "transformer.h.5.attn.c_attn.weight": "model-00001-of-00002.safetensors",
    "transformer.h.5.attn.c_proj.weight": "model-00001-of-00002.safetensors",
    "transformer.h.5.ln_1.weight": "model-00001-of-00002.safetensors",
    "transformer.h.5.ln_2.weight": "model-00001-of-00002.safetensors",
    "transformer.h.5.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
    "transformer.h.5.mlp.w1.weight": "model-00001-of-00002.safetensors",
    "transformer.h.5.mlp.w2.weight": "model-00001-of-00002.safetensors",
    "transformer.h.6.attn.c_attn.bias": "model-00001-of-00002.safetensors",
    "transformer.h.6.attn.c_attn.weight": "model-00001-of-00002.safetensors",
    "transformer.h.6.attn.c_proj.weight": "model-00001-of-00002.safetensors",
    "transformer.h.6.ln_1.weight": "model-00001-of-00002.safetensors",
    "transformer.h.6.ln_2.weight": "model-00001-of-00002.safetensors",
    "transformer.h.6.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
    "transformer.h.6.mlp.w1.weight": "model-00001-of-00002.safetensors",
    "transformer.h.6.mlp.w2.weight": "model-00001-of-00002.safetensors",
    "transformer.h.7.attn.c_attn.bias": "model-00001-of-00002.safetensors",
    "transformer.h.7.attn.c_attn.weight": "model-00001-of-00002.safetensors",
    "transformer.h.7.attn.c_proj.weight": "model-00001-of-00002.safetensors",
    "transformer.h.7.ln_1.weight": "model-00001-of-00002.safetensors",
    "transformer.h.7.ln_2.weight": "model-00001-of-00002.safetensors",
    "transformer.h.7.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
    "transformer.h.7.mlp.w1.weight": "model-00001-of-00002.safetensors",
    "transformer.h.7.mlp.w2.weight": "model-00001-of-00002.safetensors",
    "transformer.h.8.attn.c_attn.bias": "model-00001-of-00002.safetensors",
    "transformer.h.8.attn.c_attn.weight": "model-00001-of-00002.safetensors",
    "transformer.h.8.attn.c_proj.weight": "model-00001-of-00002.safetensors",
    "transformer.h.8.ln_1.weight": "model-00001-of-00002.safetensors",
    "transformer.h.8.ln_2.weight": "model-00001-of-00002.safetensors",
    "transformer.h.8.mlp.c_proj.weight": "model-00001-of-00002.safetensors",
    "transformer.h.8.mlp.w1.weight": "model-00001-of-00002.safetensors",
    "transformer.h.8.mlp.w2.weight": "model-00001-of-00002.safetensors",
    "transformer.h.9.attn.c_attn.bias": "model-00001-of-00002.safetensors",
    "transformer.h.9.attn.c_attn.weight": "model-00001-of-00002.safetensors",
    "transformer.h.9.attn.c_proj.weight": "model-00002-of-00002.safetensors",
    "transformer.h.9.ln_1.weight": "model-00001-of-00002.safetensors",
    "transformer.h.9.ln_2.weight": "model-00002-of-00002.safetensors",
    "transformer.h.9.mlp.c_proj.weight": "model-00002-of-00002.safetensors",
    "transformer.h.9.mlp.w1.weight": "model-00002-of-00002.safetensors",
    "transformer.h.9.mlp.w2.weight": "model-00002-of-00002.safetensors",
    "transformer.ln_f.weight": "model-00002-of-00002.safetensors",
    "transformer.wte.weight": "model-00001-of-00002.safetensors"
  }
}
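The index file simply maps each parameter name to the shard that stores it (about 8.97 GB in total across the two shards). As an illustration, not part of the upload, a single tensor can be read straight from its shard with the `safetensors` library:

```python
# Illustration only: look up a parameter's shard in the index and read it directly.
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    weight_map = json.load(f)["weight_map"]

name = "transformer.h.0.attn.c_attn.weight"
shard = weight_map[name]  # -> "model-00001-of-00002.safetensors"
with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)
print(name, tuple(tensor.shape), tensor.dtype)
```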
modeling_qwen.py
ADDED
@@ -0,0 +1,1363 @@
# Copyright (c) Alibaba Cloud.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import copy
import importlib
import math
import pathlib
from typing import TYPE_CHECKING, Optional, Tuple, Union, Callable, List, Any, Generator

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import warnings

from torch.nn import CrossEntropyLoss
from transformers import PreTrainedTokenizer, GenerationConfig, StoppingCriteriaList
from transformers.generation.logits_process import LogitsProcessorList

if TYPE_CHECKING:
    from transformers.generation.streamers import BaseStreamer
from transformers.generation.utils import GenerateOutput
from transformers.modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

try:
    from einops import rearrange
except ImportError:
    rearrange = None
from torch import nn

SUPPORT_CUDA = torch.cuda.is_available()
SUPPORT_BF16 = SUPPORT_CUDA and torch.cuda.is_bf16_supported()
SUPPORT_FP16 = SUPPORT_CUDA and torch.cuda.get_device_capability(0)[0] >= 7
SUPPORT_TORCH2 = hasattr(torch, '__version__') and int(torch.__version__.split(".")[0]) >= 2


from .configuration_qwen import QWenConfig
from .qwen_generation_utils import (
    HistoryType,
    make_context,
    decode_tokens,
    get_stop_words_ids,
    StopWordsLogitsProcessor,
)


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "qwen"
_CONFIG_FOR_DOC = "QWenConfig"

QWen_PRETRAINED_MODEL_ARCHIVE_LIST = ["qwen-7b"]

_ERROR_BAD_CHAT_FORMAT = """\
We detect you are probably using the pretrained model (rather than chat model) for chatting, since the chat_format in generation_config is not "chatml".
If you are directly using the model downloaded from Huggingface, please make sure you are using our "Qwen/Qwen-7B-Chat" Huggingface model (rather than "Qwen/Qwen-7B") when you call model.chat().
我们检测到您可能在使用预训练模型(而非chat模型)进行多轮chat,因为您当前在generation_config指定的chat_format,并未设置为我们在对话中所支持的"chatml"格式。
如果您在直接使用我们从Huggingface提供的模型,请确保您在调用model.chat()时,使用的是"Qwen/Qwen-7B-Chat"模型(而非"Qwen/Qwen-7B"预训练模型)。
"""

_SENTINEL = object()
_ERROR_STREAM_IN_CHAT = """\
Pass argument `stream` to model.chat() is buggy, deprecated, and marked for removal. Please use model.chat_stream(...) instead of model.chat(..., stream=True).
向model.chat()传入参数stream的用法可能存在Bug,该用法已被废弃,将在未来被移除。请使用model.chat_stream(...)代替model.chat(..., stream=True)。
"""

_ERROR_INPUT_CPU_QUERY_WITH_FLASH_ATTN_ACTIVATED = """\
We detect you have activated flash attention support, but running model computation on CPU. Please make sure that your input data has been placed on GPU. If you actually want to run CPU computation, please following the readme and set device_map="cpu" to disable flash attention when loading the model (calling AutoModelForCausalLM.from_pretrained).
检测到您的模型已激活了flash attention支持,但正在执行CPU运算任务。如使用flash attention,请您确认模型输入已经传到GPU上。如果您确认要执行CPU运算,请您在载入模型(调用AutoModelForCausalLM.from_pretrained)时,按照readme说法,指定device_map="cpu"以禁用flash attention。
"""

apply_rotary_emb_func = None
rms_norm = None
flash_attn_unpadded_func = None
flash_attn_func = None

def _import_flash_attn():
    global apply_rotary_emb_func, rms_norm, flash_attn_unpadded_func, flash_attn_func
    try:
        from flash_attn.layers.rotary import apply_rotary_emb_func as __apply_rotary_emb_func
        apply_rotary_emb_func = __apply_rotary_emb_func
    except ImportError:
        logger.warn(
            "Warning: import flash_attn rotary fail, please install FlashAttention rotary to get higher efficiency "
            "https://github.com/Dao-AILab/flash-attention/tree/main/csrc/rotary"
        )

    try:
        from flash_attn.ops.rms_norm import rms_norm as __rms_norm
        rms_norm = __rms_norm
    except ImportError:
        logger.warn(
            "Warning: import flash_attn rms_norm fail, please install FlashAttention layer_norm to get higher efficiency "
            "https://github.com/Dao-AILab/flash-attention/tree/main/csrc/layer_norm"
        )

    try:
        import flash_attn
        _flash_attn_func = None
        if not hasattr(flash_attn, '__version__'):
            from flash_attn.flash_attn_interface import flash_attn_unpadded_func as __flash_attn_unpadded_func
        else:
            if int(flash_attn.__version__.split(".")[0]) >= 2:
                if int(flash_attn.__version__.split(".")[1]) >= 1:
                    from flash_attn.flash_attn_interface import flash_attn_func as _flash_attn_func
                from flash_attn.flash_attn_interface import flash_attn_varlen_func as __flash_attn_unpadded_func
            else:
                from flash_attn.flash_attn_interface import flash_attn_unpadded_func as __flash_attn_unpadded_func
        flash_attn_unpadded_func = __flash_attn_unpadded_func
        flash_attn_func = _flash_attn_func
    except ImportError:
        logger.warn(
            "Warning: import flash_attn fail, please install FlashAttention to get higher efficiency "
            "https://github.com/Dao-AILab/flash-attention"
        )

def quantize_cache_v(fdata, bits, qmax, qmin):
    # b, s, head, h-dim->b, head, s, h-dim
    qtype = torch.uint8
    device = fdata.device
    shape = fdata.shape

    fdata_cal = torch.flatten(fdata, 2)
    fmax = torch.amax(fdata_cal, dim=-1, keepdim=True)
    fmin = torch.amin(fdata_cal, dim=-1, keepdim=True)
    # Compute params
    if qmax.device != fmax.device:
        qmax = qmax.to(device)
        qmin = qmin.to(device)
    scale = (fmax - fmin) / (qmax - qmin)
    zero = qmin - fmin / scale
    scale = scale.unsqueeze(-1).repeat(1,1,shape[2],1).contiguous()
    zero = zero.unsqueeze(-1).repeat(1,1,shape[2],1).contiguous()
    # Quantize
    res_data = fdata / scale + zero
    qdata = torch.clamp(res_data, qmin, qmax).to(qtype)
    return qdata.contiguous(), scale, zero

def dequantize_cache_torch(qdata, scale, zero):
    data = scale * (qdata - zero)
    return data

class FlashSelfAttention(torch.nn.Module):
    def __init__(
        self,
        causal=False,
        softmax_scale=None,
        attention_dropout=0.0,
    ):
        super().__init__()
        assert flash_attn_unpadded_func is not None, (
            "Please install FlashAttention first, " "e.g., with pip install flash-attn"
        )
        assert (
            rearrange is not None
        ), "Please install einops first, e.g., with pip install einops"
        self.causal = causal
        self.softmax_scale = softmax_scale
        self.dropout_p = attention_dropout

    def unpad_input(self, hidden_states, attention_mask):
        valid_mask = attention_mask.squeeze(1).squeeze(1).eq(0)
        seqlens_in_batch = valid_mask.sum(dim=-1, dtype=torch.int32)
        indices = torch.nonzero(valid_mask.flatten(), as_tuple=False).flatten()
        max_seqlen_in_batch = seqlens_in_batch.max().item()
        cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0))
        hidden_states = hidden_states[indices]
        return hidden_states, indices, cu_seqlens, max_seqlen_in_batch

    def pad_input(self, hidden_states, indices, batch, seqlen):
        output = torch.zeros(batch * seqlen, *hidden_states.shape[1:], device=hidden_states.device,
                             dtype=hidden_states.dtype)
        output[indices] = hidden_states
        return rearrange(output, '(b s) ... -> b s ...', b=batch)

    def forward(self, q, k, v, attention_mask=None):
        assert all((i.dtype in [torch.float16, torch.bfloat16] for i in (q, k, v)))
        assert all((i.is_cuda for i in (q, k, v)))
        batch_size, seqlen_q = q.shape[0], q.shape[1]
        seqlen_k = k.shape[1]
        seqlen_out = seqlen_q

        if flash_attn_func is not None and batch_size == 1:
            dropout_p = self.dropout_p if self.training else 0
            output = flash_attn_func(q, k, v, dropout_p, softmax_scale=self.softmax_scale, causal=self.causal)
            return output

        q, k, v = [rearrange(x, "b s ... -> (b s) ...") for x in [q, k, v]]
        cu_seqlens_q = torch.arange(
            0,
            (batch_size + 1) * seqlen_q,
            step=seqlen_q,
            dtype=torch.int32,
            device=q.device,
        )

        if batch_size > 1 and attention_mask is not None:
            k, indices_k, cu_seqlens_k, seqlen_k = self.unpad_input(k, attention_mask)
            if q.size(0) == v.size(0):
                q = q[indices_k]
                cu_seqlens_q = cu_seqlens_k
                seqlen_q = seqlen_k
            v = v[indices_k]
        else:
            cu_seqlens_k = torch.arange(
                0,
                (batch_size + 1) * seqlen_k,
                step=seqlen_k,
                dtype=torch.int32,
                device=q.device,
            )

        if self.training:
            assert seqlen_k == seqlen_q
            is_causal = self.causal
            dropout_p = self.dropout_p
        else:
            is_causal = seqlen_q == seqlen_k
            dropout_p = 0

        output = flash_attn_unpadded_func(
            q,
            k,
            v,
            cu_seqlens_q,
            cu_seqlens_k,
            seqlen_q,
            seqlen_k,
            dropout_p,
            softmax_scale=self.softmax_scale,
            causal=is_causal,
        )
        if batch_size > 1 and attention_mask is not None and seqlen_q == seqlen_k:
            output = self.pad_input(output, indices_k, batch_size, seqlen_out)
        else:
            new_shape = (batch_size, output.shape[0] // batch_size) + output.shape[1:]
            output = output.view(new_shape)
        return output


class QWenAttention(nn.Module):
    def __init__(self, config):
        super().__init__()

        self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False)
        self.seq_length = config.seq_length

        self.hidden_size = config.hidden_size
        self.split_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads

        self.use_flash_attn = config.use_flash_attn
        self.scale_attn_weights = True

        self.projection_size = config.kv_channels * config.num_attention_heads

        assert self.projection_size % config.num_attention_heads == 0
        self.hidden_size_per_attention_head = (
            self.projection_size // config.num_attention_heads
        )

        self.c_attn = nn.Linear(config.hidden_size, 3 * self.projection_size)

        self.c_proj = nn.Linear(
            config.hidden_size, self.projection_size, bias=not config.no_bias
        )

        self.is_fp32 = not (config.bf16 or config.fp16)
        if (
            self.use_flash_attn
            and flash_attn_unpadded_func is not None
            and not self.is_fp32
        ):
            self.core_attention_flash = FlashSelfAttention(
                causal=True, attention_dropout=config.attn_dropout_prob
            )
        self.bf16 = config.bf16

        self.use_dynamic_ntk = config.use_dynamic_ntk
        self.use_logn_attn = config.use_logn_attn

        logn_list = [
            math.log(i, self.seq_length) if i > self.seq_length else 1
            for i in range(1, 32768)
        ]
        logn_tensor = torch.tensor(logn_list)[None, :, None, None]
        self.register_buffer("logn_tensor", logn_tensor, persistent=False)

        self.attn_dropout = nn.Dropout(config.attn_dropout_prob)
        self.softmax_in_fp32 = config.softmax_in_fp32 if hasattr(config, 'softmax_in_fp32') else False
        self.use_cache_quantization = config.use_cache_quantization if hasattr(config, 'use_cache_quantization') else False
        self.use_cache_kernel = config.use_cache_kernel if hasattr(config,'use_cache_kernel') else False
        cache_dtype = torch.float
        if self.bf16:
            cache_dtype=torch.bfloat16
        elif config.fp16:
            cache_dtype = torch.float16
        self.cache_qmax = torch.tensor(torch.iinfo(torch.uint8).max, dtype=cache_dtype)
        self.cache_qmin = torch.tensor(torch.iinfo(torch.uint8).min, dtype=cache_dtype)

        if config.use_cache_quantization and config.use_cache_kernel:
            # pre check if the support files existing
            module_root = pathlib.Path(__file__).parent
            src_files = ("cache_autogptq_cuda_256.cpp", "cache_autogptq_cuda_kernel_256.cu")
            if any(not (module_root/src).is_file() for src in src_files):
                warnings.warn("KV cache kernel source files (.cpp and .cu) not found.")
                self.cache_kernels = None
            else:
                try:
                    from .cpp_kernels import cache_autogptq_cuda_256
                    self.cache_kernels = cache_autogptq_cuda_256
                except ImportError:
                    warnings.warn("Failed to import KV cache kernels.")
                    self.cache_kernels = None

    def _attn(self, query, key, value, causal_mask=None, attention_mask=None, head_mask=None):
        device = query.device
        if self.use_cache_quantization:
            qk, qk_scale, qk_zero = key
            if self.use_cache_kernel and self.cache_kernels is not None:
                shape = query.shape[:-1] + (qk.shape[-2],)
                attn_weights = torch.zeros(shape, dtype=torch.float16, device=device)
                self.cache_kernels.vecquant8matmul_batched_faster_old(
                    query.contiguous() if query.dtype == torch.float16 else query.to(torch.float16).contiguous(),
                    qk.transpose(-1, -2).contiguous(),
                    attn_weights,
                    qk_scale.contiguous() if qk_scale.dtype == torch.float16 else qk_scale.to(torch.float16).contiguous(),
                    qk_zero.contiguous()if qk_zero.dtype == torch.float16 else qk_zero.to(torch.float16).contiguous())
                # attn_weights = attn_weights.to(query.dtype).contiguous()
            else:
                key = dequantize_cache_torch(qk, qk_scale, qk_zero)
                attn_weights = torch.matmul(query, key.transpose(-1, -2))
        else:
            attn_weights = torch.matmul(query, key.transpose(-1, -2))

        if self.scale_attn_weights:
            if self.use_cache_quantization:
                size_temp = value[0].size(-1)
            else:
                size_temp = value.size(-1)
            attn_weights = attn_weights / (size_temp ** 0.5)

        mask_value = torch.finfo(attn_weights.dtype).min
        if causal_mask is not None:
            attn_weights = torch.where(
                causal_mask, attn_weights.to(attn_weights.dtype), mask_value
            )

        if attention_mask is not None:
            attn_weights = attn_weights + attention_mask

        if self.softmax_in_fp32:
            attn_weights = nn.functional.softmax(attn_weights.float(), dim=-1)
        else:
            attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        attn_weights = attn_weights.type(query.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        if head_mask is not None:
            attn_weights = attn_weights * head_mask

        if self.use_cache_quantization:
            qv, qv_scale, qv_zero = value
            if self.use_cache_kernel and self.cache_kernels is not None:
                shape = attn_weights.shape[:-1] + (query.shape[-1],)
                attn_output = torch.zeros(shape, dtype=torch.float16, device=device)
                self.cache_kernels.vecquant8matmul_batched_column_compression_faster_old(
                    attn_weights.contiguous() if attn_weights.dtype == torch.float16 else attn_weights.to(torch.float16).contiguous(),
                    qv.contiguous(),  # dtype: int32
                    attn_output,
                    qv_scale.contiguous() if qv_scale.dtype == torch.float16 else qv_scale.to(torch.float16).contiguous(),
                    qv_zero.contiguous() if qv_zero.dtype == torch.float16 else qv_zero.to(torch.float16).contiguous())
                if attn_output.dtype != query.dtype:
                    attn_output = attn_output.to(query.dtype)
                    attn_weights = attn_weights.to(query.dtype)
            else:
                value = dequantize_cache_torch(qv, qv_scale, qv_zero)
                attn_output = torch.matmul(attn_weights, value)
        else:
            attn_output = torch.matmul(attn_weights, value)

        attn_output = attn_output.transpose(1, 2)

        return attn_output, attn_weights

    def _split_heads(self, tensor, num_heads, attn_head_size):
        new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
        tensor = tensor.view(new_shape)
        return tensor

    def _merge_heads(self, tensor, num_heads, attn_head_size):
        tensor = tensor.contiguous()
        new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
        return tensor.view(new_shape)

    def forward(
        self,
        hidden_states: Optional[Tuple[torch.FloatTensor]],
        rotary_pos_emb_list: Optional[List[List[torch.Tensor]]] = None,
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
    ):
        mixed_x_layer = self.c_attn(hidden_states)

        query, key, value = mixed_x_layer.split(self.split_size, dim=2)

        query = self._split_heads(query, self.num_heads, self.head_dim)
        key = self._split_heads(key, self.num_heads, self.head_dim)
        value = self._split_heads(value, self.num_heads, self.head_dim)

        if rotary_pos_emb_list is not None:
            cur_len = query.shape[1]
            if len(rotary_pos_emb_list) == 1:
                rotary_pos_emb = rotary_pos_emb_list[0]
                rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb]
                rotary_pos_emb = (rotary_pos_emb,) * 2
                q_pos_emb, k_pos_emb = rotary_pos_emb
                # Slice the pos emb for current inference
                query = apply_rotary_pos_emb(query, q_pos_emb)
                key = apply_rotary_pos_emb(key, k_pos_emb)
            else:
                query_list = []
                key_list = []
                for i, rotary_pos_emb in enumerate(rotary_pos_emb_list):
                    rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb]
                    rotary_pos_emb = (rotary_pos_emb,) * 2
                    q_pos_emb, k_pos_emb = rotary_pos_emb
                    # Slice the pos emb for current inference
                    query_list += [apply_rotary_pos_emb(query[i:i+1, :, :], q_pos_emb)]
                    key_list += [apply_rotary_pos_emb(key[i:i+1, :, :], k_pos_emb)]
                query = torch.cat(query_list, dim=0)
                key = torch.cat(key_list, dim=0)

        if self.use_cache_quantization:
            key = quantize_cache_v(key.permute(0, 2, 1, 3),
                                   bits=8,
                                   qmin=self.cache_qmin,
                                   qmax=self.cache_qmax)
            value = quantize_cache_v(value.permute(0, 2, 1, 3),
                                     bits=8,
                                     qmin=self.cache_qmin,
                                     qmax=self.cache_qmax)


        if layer_past is not None:
            past_key, past_value = layer_past[0], layer_past[1]
            if self.use_cache_quantization:
                # use_cache_quantization:
                #   present=((q_key,key_scale,key_zero_point),
                #            (q_value,value_scale,value_zero_point))
                key = (torch.cat((past_key[0], key[0]), dim=2),
                       torch.cat((past_key[1], key[1]), dim=2),
                       torch.cat((past_key[2], key[2]), dim=2))
                value = (torch.cat((past_value[0], value[0]), dim=2),
                         torch.cat((past_value[1], value[1]), dim=2),
                         torch.cat((past_value[2], value[2]), dim=2))
            else:
                # not use_cache_quantization:
                #   present=(key,value)
                key = torch.cat((past_key, key), dim=1)
                value = torch.cat((past_value, value), dim=1)

        if use_cache:
            present = (key, value)
        else:
            present = None

        key_size = key[0].size(2) if self.use_cache_quantization else key.size(1)
        if key_size > self.seq_length and self.use_logn_attn and not self.training:
            if self.use_cache_quantization:
                seq_start = key[0].size(2) - query.size(1)
                seq_end = key[0].size(2)
            else:
                seq_start = key.size(1) - query.size(1)
                seq_end = key.size(1)
            logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :].type_as(query)
            query = query * logn_tensor.expand_as(query)

        if (
            self.use_flash_attn
            and flash_attn_unpadded_func is not None
            and not self.is_fp32
            and query.is_cuda
        ):
            q, k, v = query, key, value
            attn_output = self.core_attention_flash(q, k, v, attention_mask=attention_mask)
        else:
            key_size = key[0].size(2) if self.use_cache_quantization else key.size(1)
            if query.size(1) == key_size:
                causal_mask = torch.tril(
                    torch.ones((key_size, key_size), dtype=torch.bool, device=query.device)
                ).view(1, 1, key_size, key_size)
            else:
                causal_mask = None
            query = query.permute(0, 2, 1, 3)
            if not self.use_cache_quantization:
                key = key.permute(0, 2, 1, 3)
                value = value.permute(0, 2, 1, 3)
            if (
                causal_mask is None
                and self.use_flash_attn
                and flash_attn_unpadded_func is not None
                and not self.is_fp32
                and not query.is_cuda
            ):
                raise Exception(_ERROR_INPUT_CPU_QUERY_WITH_FLASH_ATTN_ACTIVATED)

            if not self.use_cache_quantization and SUPPORT_TORCH2:
                if attention_mask is not None:
                    attention_mask = attention_mask.expand(-1, -1, query.size(2), -1)
                    if causal_mask is not None:
                        attention_mask = attention_mask.masked_fill(~causal_mask, torch.finfo(query.dtype).min)
                else:
                    attention_mask = causal_mask
                attn_output = F.scaled_dot_product_attention(
                    query, key, value, attn_mask=attention_mask
                ).transpose(1, 2)
                attn_weight = None
            else:
                attn_output, attn_weight = self._attn(
                    query, key, value, causal_mask, attention_mask, head_mask
                )
        context_layer = self._merge_heads(
            attn_output, self.num_heads, self.head_dim
        )

        attn_output = self.c_proj(context_layer)

        outputs = (attn_output, present)
        if output_attentions:
            if (
                self.use_flash_attn
                and flash_attn_unpadded_func is not None
                and not self.is_fp32
            ):
                raise ValueError("Cannot output attentions while using flash-attn")
            elif not self.use_cache_quantization and SUPPORT_TORCH2:
                raise ValueError("Cannot output attentions while using scaled_dot_product_attention")
            else:
                outputs += (attn_weight,)

        return outputs


class QWenMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.w1 = nn.Linear(
            config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias
        )
        self.w2 = nn.Linear(
            config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias
        )
        ff_dim_in = config.intermediate_size // 2
        self.c_proj = nn.Linear(ff_dim_in, config.hidden_size, bias=not config.no_bias)

    def forward(self, hidden_states):
        a1 = self.w1(hidden_states)
        a2 = self.w2(hidden_states)
        intermediate_parallel = a1 * F.silu(a2)
        output = self.c_proj(intermediate_parallel)
        return output


class QWenBlock(nn.Module):
    def __init__(self, config):
        super().__init__()
        hidden_size = config.hidden_size
        self.bf16 = config.bf16

        self.ln_1 = RMSNorm(
            hidden_size,
            eps=config.layer_norm_epsilon,
        )
        self.attn = QWenAttention(config)
        self.ln_2 = RMSNorm(
            hidden_size,
            eps=config.layer_norm_epsilon,
        )

        self.mlp = QWenMLP(config)

    def forward(
        self,
        hidden_states: Optional[Tuple[torch.FloatTensor]],
        rotary_pos_emb_list: Optional[List[List[torch.Tensor]]] = None,
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ):
        layernorm_output = self.ln_1(hidden_states)

        attn_outputs = self.attn(
            layernorm_output,
            rotary_pos_emb_list,
            layer_past=layer_past,
            attention_mask=attention_mask,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        attn_output = attn_outputs[0]

        outputs = attn_outputs[1:]

        residual = hidden_states
        layernorm_input = attn_output + residual

        layernorm_output = self.ln_2(layernorm_input)

        residual = layernorm_input
        mlp_output = self.mlp(layernorm_output)
        hidden_states = residual + mlp_output

        if use_cache:
            outputs = (hidden_states,) + outputs
        else:
            outputs = (hidden_states,) + outputs[1:]

        return outputs


class QWenPreTrainedModel(PreTrainedModel):
    config_class = QWenConfig
    base_model_prefix = "transformer"
    is_parallelizable = False
    supports_gradient_checkpointing = True
    _no_split_modules = ["QWenBlock"]
    _skip_keys_device_placement = "past_key_values"

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """Initialize the weights."""
|
653 |
+
if isinstance(module, nn.Linear):
|
654 |
+
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
|
655 |
+
if module.bias is not None:
|
656 |
+
module.bias.data.zero_()
|
657 |
+
elif isinstance(module, nn.Embedding):
|
658 |
+
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
|
659 |
+
if module.padding_idx is not None:
|
660 |
+
module.weight.data[module.padding_idx].zero_()
|
661 |
+
elif isinstance(module, RMSNorm):
|
662 |
+
module.weight.data.fill_(1.0)
|
663 |
+
|
664 |
+
for name, p in module.named_parameters():
|
665 |
+
if name == "c_proj.weight":
|
666 |
+
p.data.normal_(
|
667 |
+
mean=0.0,
|
668 |
+
std=(
|
669 |
+
self.config.initializer_range
|
670 |
+
/ math.sqrt(2 * self.config.num_hidden_layers)
|
671 |
+
),
|
672 |
+
)
|
673 |
+
|
674 |
+
def _set_gradient_checkpointing(self, module, value=False):
|
675 |
+
if isinstance(module, QWenModel):
|
676 |
+
module.gradient_checkpointing = value
|
677 |
+
|
678 |
+
|
679 |
+
class QWenModel(QWenPreTrainedModel):
|
680 |
+
_keys_to_ignore_on_load_missing = ["attn.masked_bias"]
|
681 |
+
|
682 |
+
def __init__(self, config):
|
683 |
+
super().__init__(config)
|
684 |
+
self.vocab_size = config.vocab_size
|
685 |
+
self.num_hidden_layers = config.num_hidden_layers
|
686 |
+
self.embed_dim = config.hidden_size
|
687 |
+
self.use_cache_quantization = self.config.use_cache_quantization if hasattr(self.config, 'use_cache_quantization') else False
|
688 |
+
|
689 |
+
self.gradient_checkpointing = False
|
690 |
+
self.use_dynamic_ntk = config.use_dynamic_ntk
|
691 |
+
self.seq_length = config.seq_length
|
692 |
+
|
693 |
+
self.wte = nn.Embedding(self.vocab_size, self.embed_dim)
|
694 |
+
|
695 |
+
self.drop = nn.Dropout(config.emb_dropout_prob)
|
696 |
+
|
697 |
+
if config.rotary_pct == 1.0:
|
698 |
+
self.rotary_ndims = None
|
699 |
+
else:
|
700 |
+
assert config.rotary_pct < 1
|
701 |
+
self.rotary_ndims = int(
|
702 |
+
config.kv_channels * config.rotary_pct
|
703 |
+
)
|
704 |
+
dim = (
|
705 |
+
self.rotary_ndims
|
706 |
+
if self.rotary_ndims is not None
|
707 |
+
else config.kv_channels
|
708 |
+
)
|
709 |
+
self.rotary_emb = RotaryEmbedding(dim, base=config.rotary_emb_base)
|
710 |
+
|
711 |
+
self.use_flash_attn = config.use_flash_attn
|
712 |
+
self.is_fp32 = not (config.bf16 or config.fp16)
|
713 |
+
|
714 |
+
self.h = nn.ModuleList(
|
715 |
+
[
|
716 |
+
QWenBlock(
|
717 |
+
config
|
718 |
+
)
|
719 |
+
for i in range(config.num_hidden_layers)
|
720 |
+
]
|
721 |
+
)
|
722 |
+
self.ln_f = RMSNorm(
|
723 |
+
self.embed_dim,
|
724 |
+
eps=config.layer_norm_epsilon,
|
725 |
+
)
|
726 |
+
|
727 |
+
self.post_init()
|
728 |
+
|
729 |
+
def get_input_embeddings(self):
|
730 |
+
return self.wte
|
731 |
+
|
732 |
+
def set_input_embeddings(self, new_embeddings):
|
733 |
+
self.wte = new_embeddings
|
734 |
+
|
735 |
+
def get_ntk_alpha(self, true_seq_len):
|
736 |
+
context_value = math.log(true_seq_len / self.seq_length, 2) + 1
|
737 |
+
ntk_alpha = 2 ** math.ceil(context_value) - 1
|
738 |
+
ntk_alpha = max(ntk_alpha, 1)
|
739 |
+
return ntk_alpha
|
740 |
+
|
741 |
+
def forward(
|
742 |
+
self,
|
743 |
+
input_ids: Optional[torch.LongTensor] = None,
|
744 |
+
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
|
745 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
746 |
+
token_type_ids: Optional[torch.LongTensor] = None,
|
747 |
+
position_ids: Optional[torch.LongTensor] = None,
|
748 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
749 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
750 |
+
encoder_hidden_states: Optional[torch.Tensor] = None,
|
751 |
+
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
752 |
+
use_cache: Optional[bool] = None,
|
753 |
+
output_attentions: Optional[bool] = None,
|
754 |
+
output_hidden_states: Optional[bool] = None,
|
755 |
+
return_dict: Optional[bool] = None,
|
756 |
+
):
|
757 |
+
output_attentions = (
|
758 |
+
output_attentions
|
759 |
+
if output_attentions is not None
|
760 |
+
else self.config.output_attentions
|
761 |
+
)
|
762 |
+
output_hidden_states = (
|
763 |
+
output_hidden_states
|
764 |
+
if output_hidden_states is not None
|
765 |
+
else self.config.output_hidden_states
|
766 |
+
)
|
767 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
768 |
+
return_dict = (
|
769 |
+
return_dict if return_dict is not None else self.config.use_return_dict
|
770 |
+
)
|
771 |
+
|
772 |
+
if input_ids is not None and inputs_embeds is not None:
|
773 |
+
raise ValueError(
|
774 |
+
"You cannot specify both input_ids and inputs_embeds at the same time"
|
775 |
+
)
|
776 |
+
elif input_ids is not None:
|
777 |
+
input_shape = input_ids.size()
|
778 |
+
input_ids = input_ids.view(-1, input_shape[-1])
|
779 |
+
batch_size = input_ids.shape[0]
|
780 |
+
elif inputs_embeds is not None:
|
781 |
+
input_shape = inputs_embeds.size()[:-1]
|
782 |
+
batch_size = inputs_embeds.shape[0]
|
783 |
+
else:
|
784 |
+
raise ValueError("You have to specify either input_ids or inputs_embeds")
|
785 |
+
|
786 |
+
device = input_ids.device if input_ids is not None else inputs_embeds.device
|
787 |
+
|
788 |
+
if token_type_ids is not None:
|
789 |
+
token_type_ids = token_type_ids.view(-1, input_shape[-1])
|
790 |
+
if position_ids is not None:
|
791 |
+
position_ids = position_ids.view(-1, input_shape[-1])
|
792 |
+
|
793 |
+
if past_key_values is None:
|
794 |
+
past_length = 0
|
795 |
+
past_key_values = tuple([None] * len(self.h))
|
796 |
+
else:
|
797 |
+
if self.use_cache_quantization:
|
798 |
+
past_length = past_key_values[0][0][0].size(2)
|
799 |
+
else:
|
800 |
+
past_length = past_key_values[0][0].size(-2)
|
801 |
+
if position_ids is None:
|
802 |
+
position_ids = torch.arange(
|
803 |
+
past_length,
|
804 |
+
input_shape[-1] + past_length,
|
805 |
+
dtype=torch.long,
|
806 |
+
device=device,
|
807 |
+
)
|
808 |
+
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
|
809 |
+
|
810 |
+
if attention_mask is not None:
|
811 |
+
if batch_size <= 0:
|
812 |
+
raise ValueError("batch_size has to be defined and > 0")
|
813 |
+
attention_mask = attention_mask.view(batch_size, -1)
|
814 |
+
attention_mask = attention_mask[:, None, None, :]
|
815 |
+
attention_mask = attention_mask.to(dtype=self.dtype)
|
816 |
+
attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
|
817 |
+
|
818 |
+
encoder_attention_mask = None
|
819 |
+
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
|
820 |
+
|
821 |
+
if inputs_embeds is None:
|
822 |
+
inputs_embeds = self.wte(input_ids)
|
823 |
+
hidden_states = inputs_embeds
|
824 |
+
|
825 |
+
kv_seq_len = hidden_states.size()[1]
|
826 |
+
if past_key_values[0] is not None:
|
827 |
+
# past key values[0][0] shape: bs * seq_len * head_num * dim
|
828 |
+
if self.use_cache_quantization:
|
829 |
+
kv_seq_len += past_key_values[0][0][0].shape[2]
|
830 |
+
else:
|
831 |
+
kv_seq_len += past_key_values[0][0].shape[1]
|
832 |
+
|
833 |
+
if self.training or not self.use_dynamic_ntk:
|
834 |
+
ntk_alpha_list = [1.0]
|
835 |
+
elif kv_seq_len != hidden_states.size()[1]:
|
836 |
+
ntk_alpha_list = self.rotary_emb._ntk_alpha_cached_list
|
837 |
+
else:
|
838 |
+
ntk_alpha_list = []
|
839 |
+
if attention_mask is not None and kv_seq_len > self.seq_length:
|
840 |
+
true_seq_lens = attention_mask.squeeze(1).squeeze(1).eq(0).sum(dim=-1, dtype=torch.int32)
|
841 |
+
for i in range(hidden_states.size()[0]):
|
842 |
+
true_seq_len = true_seq_lens[i].item()
|
843 |
+
ntk_alpha = self.get_ntk_alpha(true_seq_len)
|
844 |
+
ntk_alpha_list.append(ntk_alpha)
|
845 |
+
else:
|
846 |
+
ntk_alpha = self.get_ntk_alpha(kv_seq_len)
|
847 |
+
ntk_alpha_list.append(ntk_alpha)
|
848 |
+
self.rotary_emb._ntk_alpha_cached_list = ntk_alpha_list
|
849 |
+
rotary_pos_emb_list = [
|
850 |
+
self.rotary_emb(kv_seq_len, ntk_alpha=ntk_alpha) for ntk_alpha in ntk_alpha_list
|
851 |
+
]
|
852 |
+
|
853 |
+
hidden_states = self.drop(hidden_states)
|
854 |
+
output_shape = input_shape + (hidden_states.size(-1),)
|
855 |
+
|
856 |
+
if self.gradient_checkpointing and self.training:
|
857 |
+
if use_cache:
|
858 |
+
logger.warning_once(
|
859 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
860 |
+
)
|
861 |
+
use_cache = False
|
862 |
+
|
863 |
+
presents = () if use_cache else None
|
864 |
+
all_self_attentions = () if output_attentions else None
|
865 |
+
all_hidden_states = () if output_hidden_states else None
|
866 |
+
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
|
867 |
+
|
868 |
+
if output_hidden_states:
|
869 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
870 |
+
|
871 |
+
if self.gradient_checkpointing and self.training:
|
872 |
+
|
873 |
+
def create_custom_forward(module):
|
874 |
+
def custom_forward(*inputs):
|
875 |
+
# None for past_key_value
|
876 |
+
return module(*inputs, use_cache, output_attentions)
|
877 |
+
|
878 |
+
return custom_forward
|
879 |
+
|
880 |
+
outputs = torch.utils.checkpoint.checkpoint(
|
881 |
+
create_custom_forward(block),
|
882 |
+
hidden_states,
|
883 |
+
rotary_pos_emb_list,
|
884 |
+
None,
|
885 |
+
attention_mask,
|
886 |
+
head_mask[i],
|
887 |
+
encoder_hidden_states,
|
888 |
+
encoder_attention_mask,
|
889 |
+
)
|
890 |
+
else:
|
891 |
+
outputs = block(
|
892 |
+
hidden_states,
|
893 |
+
layer_past=layer_past,
|
894 |
+
rotary_pos_emb_list=rotary_pos_emb_list,
|
895 |
+
attention_mask=attention_mask,
|
896 |
+
head_mask=head_mask[i],
|
897 |
+
encoder_hidden_states=encoder_hidden_states,
|
898 |
+
encoder_attention_mask=encoder_attention_mask,
|
899 |
+
use_cache=use_cache,
|
900 |
+
output_attentions=output_attentions,
|
901 |
+
)
|
902 |
+
|
903 |
+
hidden_states = outputs[0]
|
904 |
+
if use_cache is True:
|
905 |
+
presents = presents + (outputs[1],)
|
906 |
+
|
907 |
+
if output_attentions:
|
908 |
+
all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
|
909 |
+
|
910 |
+
hidden_states = self.ln_f(hidden_states)
|
911 |
+
hidden_states = hidden_states.view(output_shape)
|
912 |
+
# Add last hidden state
|
913 |
+
if output_hidden_states:
|
914 |
+
all_hidden_states = all_hidden_states + (hidden_states,)
|
915 |
+
|
916 |
+
if not return_dict:
|
917 |
+
return tuple(
|
918 |
+
v for v in [hidden_states, presents, all_hidden_states] if v is not None
|
919 |
+
)
|
920 |
+
|
921 |
+
return BaseModelOutputWithPast(
|
922 |
+
last_hidden_state=hidden_states,
|
923 |
+
past_key_values=presents,
|
924 |
+
hidden_states=all_hidden_states,
|
925 |
+
attentions=all_self_attentions,
|
926 |
+
)
|
927 |
+
|
928 |
+
|
929 |
+
class QWenLMHeadModel(QWenPreTrainedModel):
|
930 |
+
_keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.rotary_emb\.inv_freq"]
|
931 |
+
_keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.masked_bias"]
|
932 |
+
|
933 |
+
def __init__(self, config):
|
934 |
+
super().__init__(config)
|
935 |
+
assert (
|
936 |
+
config.bf16 + config.fp16 + config.fp32 <= 1
|
937 |
+
), "Only one of \"bf16\", \"fp16\", \"fp32\" can be true"
|
938 |
+
|
939 |
+
autoset_precision = config.bf16 + config.fp16 + config.fp32 == 0
|
940 |
+
|
941 |
+
if autoset_precision:
|
942 |
+
if SUPPORT_BF16:
|
943 |
+
logger.warn(
|
944 |
+
"The model is automatically converting to bf16 for faster inference. "
|
945 |
+
"If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \"AutoModelForCausalLM.from_pretrained\"."
|
946 |
+
)
|
947 |
+
config.bf16 = True
|
948 |
+
elif SUPPORT_FP16:
|
949 |
+
logger.warn(
|
950 |
+
"The model is automatically converting to fp16 for faster inference. "
|
951 |
+
"If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \"AutoModelForCausalLM.from_pretrained\"."
|
952 |
+
)
|
953 |
+
config.fp16 = True
|
954 |
+
else:
|
955 |
+
config.fp32 = True
|
956 |
+
|
957 |
+
if config.bf16 and SUPPORT_CUDA and not SUPPORT_BF16:
|
958 |
+
logger.warn("Your device does NOT seem to support bf16, you can switch to fp16 or fp32 by by passing fp16/fp32=True in \"AutoModelForCausalLM.from_pretrained\".")
|
959 |
+
if config.fp16 and SUPPORT_CUDA and not SUPPORT_FP16:
|
960 |
+
logger.warn("Your device does NOT support faster inference with fp16, please switch to fp32 which is likely to be faster")
|
961 |
+
if config.fp32:
|
962 |
+
if SUPPORT_BF16:
|
963 |
+
logger.warn("Your device support faster inference by passing bf16=True in \"AutoModelForCausalLM.from_pretrained\".")
|
964 |
+
elif SUPPORT_FP16:
|
965 |
+
logger.warn("Your device support faster inference by passing fp16=True in \"AutoModelForCausalLM.from_pretrained\".")
|
966 |
+
|
967 |
+
if config.use_flash_attn == "auto":
|
968 |
+
if config.bf16 or config.fp16:
|
969 |
+
logger.warn("Try importing flash-attention for faster inference...")
|
970 |
+
config.use_flash_attn = True
|
971 |
+
else:
|
972 |
+
config.use_flash_attn = False
|
973 |
+
if config.use_flash_attn and config.fp32:
|
974 |
+
logger.warn("Flash attention will be disabled because it does NOT support fp32.")
|
975 |
+
|
976 |
+
if config.use_flash_attn:
|
977 |
+
_import_flash_attn()
|
978 |
+
|
979 |
+
self.transformer = QWenModel(config)
|
980 |
+
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
981 |
+
|
982 |
+
if config.bf16:
|
983 |
+
self.transformer.bfloat16()
|
984 |
+
self.lm_head.bfloat16()
|
985 |
+
if config.fp16:
|
986 |
+
self.transformer.half()
|
987 |
+
self.lm_head.half()
|
988 |
+
self.post_init()
|
989 |
+
|
990 |
+
def get_output_embeddings(self):
|
991 |
+
return self.lm_head
|
992 |
+
|
993 |
+
def set_output_embeddings(self, new_embeddings):
|
994 |
+
self.lm_head = new_embeddings
|
995 |
+
|
996 |
+
def prepare_inputs_for_generation(
|
997 |
+
self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs
|
998 |
+
):
|
999 |
+
if past_key_values:
|
1000 |
+
input_ids = input_ids[:, -1].unsqueeze(-1)
|
1001 |
+
|
1002 |
+
if input_ids.size(0) == 1:
|
1003 |
+
attention_mask = None
|
1004 |
+
else:
|
1005 |
+
attention_mask = kwargs.get("attention_mask", None)
|
1006 |
+
|
1007 |
+
if inputs_embeds is not None and past_key_values is None:
|
1008 |
+
model_inputs = {"inputs_embeds": inputs_embeds}
|
1009 |
+
else:
|
1010 |
+
model_inputs = {"input_ids": input_ids}
|
1011 |
+
|
1012 |
+
model_inputs.update(
|
1013 |
+
{
|
1014 |
+
"past_key_values": past_key_values,
|
1015 |
+
"use_cache": kwargs.get("use_cache"),
|
1016 |
+
"attention_mask": attention_mask,
|
1017 |
+
}
|
1018 |
+
)
|
1019 |
+
return model_inputs
|
1020 |
+
|
1021 |
+
def forward(
|
1022 |
+
self,
|
1023 |
+
input_ids: Optional[torch.LongTensor] = None,
|
1024 |
+
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
|
1025 |
+
attention_mask: Optional[torch.FloatTensor] = None,
|
1026 |
+
token_type_ids: Optional[torch.LongTensor] = None,
|
1027 |
+
position_ids: Optional[torch.LongTensor] = None,
|
1028 |
+
head_mask: Optional[torch.FloatTensor] = None,
|
1029 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
1030 |
+
encoder_hidden_states: Optional[torch.Tensor] = None,
|
1031 |
+
encoder_attention_mask: Optional[torch.FloatTensor] = None,
|
1032 |
+
labels: Optional[torch.LongTensor] = None,
|
1033 |
+
use_cache: Optional[bool] = None,
|
1034 |
+
output_attentions: Optional[bool] = None,
|
1035 |
+
output_hidden_states: Optional[bool] = None,
|
1036 |
+
return_dict: Optional[bool] = None,
|
1037 |
+
) -> Union[Tuple, CausalLMOutputWithPast]:
|
1038 |
+
|
1039 |
+
return_dict = (
|
1040 |
+
return_dict if return_dict is not None else self.config.use_return_dict
|
1041 |
+
)
|
1042 |
+
|
1043 |
+
transformer_outputs = self.transformer(
|
1044 |
+
input_ids,
|
1045 |
+
past_key_values=past_key_values,
|
1046 |
+
attention_mask=attention_mask,
|
1047 |
+
token_type_ids=token_type_ids,
|
1048 |
+
position_ids=position_ids,
|
1049 |
+
head_mask=head_mask,
|
1050 |
+
inputs_embeds=inputs_embeds,
|
1051 |
+
encoder_hidden_states=encoder_hidden_states,
|
1052 |
+
encoder_attention_mask=encoder_attention_mask,
|
1053 |
+
use_cache=use_cache,
|
1054 |
+
output_attentions=output_attentions,
|
1055 |
+
output_hidden_states=output_hidden_states,
|
1056 |
+
return_dict=return_dict,
|
1057 |
+
)
|
1058 |
+
hidden_states = transformer_outputs[0]
|
1059 |
+
|
1060 |
+
lm_logits = self.lm_head(hidden_states)
|
1061 |
+
|
1062 |
+
loss = None
|
1063 |
+
if labels is not None:
|
1064 |
+
labels = labels.to(lm_logits.device)
|
1065 |
+
shift_logits = lm_logits[..., :-1, :].contiguous()
|
1066 |
+
shift_labels = labels[..., 1:].contiguous()
|
1067 |
+
loss_fct = CrossEntropyLoss()
|
1068 |
+
loss = loss_fct(
|
1069 |
+
shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
|
1070 |
+
)
|
1071 |
+
|
1072 |
+
if not return_dict:
|
1073 |
+
output = (lm_logits,) + transformer_outputs[1:]
|
1074 |
+
return ((loss,) + output) if loss is not None else output
|
1075 |
+
|
1076 |
+
return CausalLMOutputWithPast(
|
1077 |
+
loss=loss,
|
1078 |
+
logits=lm_logits,
|
1079 |
+
past_key_values=transformer_outputs.past_key_values,
|
1080 |
+
hidden_states=transformer_outputs.hidden_states,
|
1081 |
+
attentions=transformer_outputs.attentions,
|
1082 |
+
)
|
1083 |
+
|
1084 |
+
@staticmethod
|
1085 |
+
def _reorder_cache(
|
1086 |
+
past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
|
1087 |
+
) -> Tuple[Tuple[torch.Tensor]]:
|
1088 |
+
|
1089 |
+
return tuple(
|
1090 |
+
tuple(
|
1091 |
+
past_state.index_select(0, beam_idx.to(past_state.device))
|
1092 |
+
for past_state in layer_past
|
1093 |
+
)
|
1094 |
+
for layer_past in past_key_values
|
1095 |
+
)
|
1096 |
+
|
1097 |
+
def chat(
|
1098 |
+
self,
|
1099 |
+
tokenizer: PreTrainedTokenizer,
|
1100 |
+
query: str,
|
1101 |
+
history: Optional[HistoryType],
|
1102 |
+
system: str = "You are a helpful assistant.",
|
1103 |
+
stream: Optional[bool] = _SENTINEL,
|
1104 |
+
stop_words_ids: Optional[List[List[int]]] = None,
|
1105 |
+
generation_config: Optional[GenerationConfig] = None,
|
1106 |
+
**kwargs,
|
1107 |
+
) -> Tuple[str, HistoryType]:
|
1108 |
+
generation_config = generation_config if generation_config is not None else self.generation_config
|
1109 |
+
|
1110 |
+
assert stream is _SENTINEL, _ERROR_STREAM_IN_CHAT
|
1111 |
+
assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT
|
1112 |
+
if history is None:
|
1113 |
+
history = []
|
1114 |
+
else:
|
1115 |
+
# make a copy of the user's input such that it is left untouched
|
1116 |
+
history = copy.deepcopy(history)
|
1117 |
+
|
1118 |
+
if stop_words_ids is None:
|
1119 |
+
stop_words_ids = []
|
1120 |
+
|
1121 |
+
max_window_size = kwargs.get('max_window_size', None)
|
1122 |
+
if max_window_size is None:
|
1123 |
+
max_window_size = generation_config.max_window_size
|
1124 |
+
raw_text, context_tokens = make_context(
|
1125 |
+
tokenizer,
|
1126 |
+
query,
|
1127 |
+
history=history,
|
1128 |
+
system=system,
|
1129 |
+
max_window_size=max_window_size,
|
1130 |
+
chat_format=generation_config.chat_format,
|
1131 |
+
)
|
1132 |
+
|
1133 |
+
stop_words_ids.extend(get_stop_words_ids(
|
1134 |
+
generation_config.chat_format, tokenizer
|
1135 |
+
))
|
1136 |
+
input_ids = torch.tensor([context_tokens]).to(self.device)
|
1137 |
+
outputs = self.generate(
|
1138 |
+
input_ids,
|
1139 |
+
stop_words_ids=stop_words_ids,
|
1140 |
+
return_dict_in_generate=False,
|
1141 |
+
generation_config=generation_config,
|
1142 |
+
**kwargs,
|
1143 |
+
)
|
1144 |
+
|
1145 |
+
response = decode_tokens(
|
1146 |
+
outputs[0],
|
1147 |
+
tokenizer,
|
1148 |
+
raw_text_len=len(raw_text),
|
1149 |
+
context_length=len(context_tokens),
|
1150 |
+
chat_format=generation_config.chat_format,
|
1151 |
+
verbose=False,
|
1152 |
+
errors='replace'
|
1153 |
+
)
|
1154 |
+
|
1155 |
+
# as history is a copy of the user inputs,
|
1156 |
+
# we can always return the new turn to the user.
|
1157 |
+
# separating input history and output history also enables the user
|
1158 |
+
# to implement more complex history management
|
1159 |
+
history.append((query, response))
|
1160 |
+
|
1161 |
+
return response, history
|
1162 |
+
|
1163 |
+
def chat_stream(
|
1164 |
+
self,
|
1165 |
+
tokenizer: PreTrainedTokenizer,
|
1166 |
+
query: str,
|
1167 |
+
history: Optional[HistoryType],
|
1168 |
+
system: str = "You are a helpful assistant.",
|
1169 |
+
stop_words_ids: Optional[List[List[int]]] = None,
|
1170 |
+
logits_processor: Optional[LogitsProcessorList] = None,
|
1171 |
+
generation_config: Optional[GenerationConfig] = None,
|
1172 |
+
**kwargs,
|
1173 |
+
) -> Generator[str, Any, None]:
|
1174 |
+
generation_config = generation_config if generation_config is not None else self.generation_config
|
1175 |
+
assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT
|
1176 |
+
if history is None:
|
1177 |
+
history = []
|
1178 |
+
if stop_words_ids is None:
|
1179 |
+
stop_words_ids = []
|
1180 |
+
|
1181 |
+
max_window_size = kwargs.get('max_window_size', None)
|
1182 |
+
if max_window_size is None:
|
1183 |
+
max_window_size = generation_config.max_window_size
|
1184 |
+
raw_text, context_tokens = make_context(
|
1185 |
+
tokenizer,
|
1186 |
+
query,
|
1187 |
+
history=history,
|
1188 |
+
system=system,
|
1189 |
+
max_window_size=max_window_size,
|
1190 |
+
chat_format=generation_config.chat_format,
|
1191 |
+
)
|
1192 |
+
|
1193 |
+
stop_words_ids.extend(get_stop_words_ids(
|
1194 |
+
generation_config.chat_format, tokenizer
|
1195 |
+
))
|
1196 |
+
if stop_words_ids is not None:
|
1197 |
+
stop_words_logits_processor = StopWordsLogitsProcessor(
|
1198 |
+
stop_words_ids=stop_words_ids,
|
1199 |
+
eos_token_id=generation_config.eos_token_id,
|
1200 |
+
)
|
1201 |
+
if logits_processor is None:
|
1202 |
+
logits_processor = LogitsProcessorList([stop_words_logits_processor])
|
1203 |
+
else:
|
1204 |
+
logits_processor.append(stop_words_logits_processor)
|
1205 |
+
input_ids = torch.tensor([context_tokens]).to(self.device)
|
1206 |
+
|
1207 |
+
from transformers_stream_generator.main import NewGenerationMixin, StreamGenerationConfig
|
1208 |
+
self.__class__.generate_stream = NewGenerationMixin.generate
|
1209 |
+
self.__class__.sample_stream = NewGenerationMixin.sample_stream
|
1210 |
+
stream_config = StreamGenerationConfig(**generation_config.to_dict(), do_stream=True)
|
1211 |
+
|
1212 |
+
def stream_generator():
|
1213 |
+
outputs = []
|
1214 |
+
for token in self.generate_stream(
|
1215 |
+
input_ids,
|
1216 |
+
return_dict_in_generate=False,
|
1217 |
+
generation_config=stream_config,
|
1218 |
+
logits_processor=logits_processor,
|
1219 |
+
seed=-1,
|
1220 |
+
**kwargs):
|
1221 |
+
outputs.append(token.item())
|
1222 |
+
yield tokenizer.decode(outputs, skip_special_tokens=True, errors='ignore')
|
1223 |
+
|
1224 |
+
return stream_generator()
|
1225 |
+
|
1226 |
+
def generate(
|
1227 |
+
self,
|
1228 |
+
inputs: Optional[torch.Tensor] = None,
|
1229 |
+
generation_config: Optional[GenerationConfig] = None,
|
1230 |
+
logits_processor: Optional[LogitsProcessorList] = None,
|
1231 |
+
stopping_criteria: Optional[StoppingCriteriaList] = None,
|
1232 |
+
prefix_allowed_tokens_fn: Optional[
|
1233 |
+
Callable[[int, torch.Tensor], List[int]]
|
1234 |
+
] = None,
|
1235 |
+
synced_gpus: Optional[bool] = None,
|
1236 |
+
assistant_model: Optional["PreTrainedModel"] = None,
|
1237 |
+
streamer: Optional["BaseStreamer"] = None,
|
1238 |
+
**kwargs,
|
1239 |
+
) -> Union[GenerateOutput, torch.LongTensor]:
|
1240 |
+
generation_config = generation_config if generation_config is not None else self.generation_config
|
1241 |
+
|
1242 |
+
# Process stop_words_ids.
|
1243 |
+
stop_words_ids = kwargs.pop("stop_words_ids", None)
|
1244 |
+
if stop_words_ids is None and generation_config is not None:
|
1245 |
+
stop_words_ids = getattr(generation_config, "stop_words_ids", None)
|
1246 |
+
if stop_words_ids is None:
|
1247 |
+
stop_words_ids = getattr(generation_config, "stop_words_ids", None)
|
1248 |
+
|
1249 |
+
if stop_words_ids is not None:
|
1250 |
+
stop_words_logits_processor = StopWordsLogitsProcessor(
|
1251 |
+
stop_words_ids=stop_words_ids,
|
1252 |
+
eos_token_id=generation_config.eos_token_id,
|
1253 |
+
)
|
1254 |
+
if logits_processor is None:
|
1255 |
+
logits_processor = LogitsProcessorList([stop_words_logits_processor])
|
1256 |
+
else:
|
1257 |
+
logits_processor.append(stop_words_logits_processor)
|
1258 |
+
|
1259 |
+
return super().generate(
|
1260 |
+
inputs,
|
1261 |
+
generation_config=generation_config,
|
1262 |
+
logits_processor=logits_processor,
|
1263 |
+
stopping_criteria=stopping_criteria,
|
1264 |
+
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
|
1265 |
+
synced_gpus=synced_gpus,
|
1266 |
+
assistant_model=assistant_model,
|
1267 |
+
streamer=streamer,
|
1268 |
+
**kwargs,
|
1269 |
+
)
|
1270 |
+
|
1271 |
+
|
1272 |
+
class RotaryEmbedding(torch.nn.Module):
|
1273 |
+
def __init__(self, dim, base=10000):
|
1274 |
+
super().__init__()
|
1275 |
+
self.dim = dim
|
1276 |
+
self.base = base
|
1277 |
+
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
|
1278 |
+
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
1279 |
+
if importlib.util.find_spec("einops") is None:
|
1280 |
+
raise RuntimeError("einops is required for Rotary Embedding")
|
1281 |
+
|
1282 |
+
self._rotary_pos_emb_cache = None
|
1283 |
+
self._seq_len_cached = 0
|
1284 |
+
self._ntk_alpha_cached = 1.0
|
1285 |
+
self._ntk_alpha_cached_list = [1.0]
|
1286 |
+
|
1287 |
+
def update_rotary_pos_emb_cache(self, seqlen, ntk_alpha=1.0):
|
1288 |
+
if seqlen > self._seq_len_cached or ntk_alpha != self._ntk_alpha_cached:
|
1289 |
+
base = self.base * ntk_alpha ** (self.dim / (self.dim - 2))
|
1290 |
+
self.inv_freq = 1.0 / (
|
1291 |
+
base
|
1292 |
+
** (
|
1293 |
+
torch.arange(0, self.dim, 2, device=self.inv_freq.device).float()
|
1294 |
+
/ self.dim
|
1295 |
+
)
|
1296 |
+
)
|
1297 |
+
self._seq_len_cached = max(2 * seqlen, 16)
|
1298 |
+
self._ntk_alpha_cached = ntk_alpha
|
1299 |
+
seq = torch.arange(self._seq_len_cached, device=self.inv_freq.device)
|
1300 |
+
freqs = torch.outer(seq.type_as(self.inv_freq), self.inv_freq)
|
1301 |
+
|
1302 |
+
emb = torch.cat((freqs, freqs), dim=-1)
|
1303 |
+
from einops import rearrange
|
1304 |
+
|
1305 |
+
emb = rearrange(emb, "n d -> 1 n 1 d")
|
1306 |
+
|
1307 |
+
cos, sin = emb.cos(), emb.sin()
|
1308 |
+
self._rotary_pos_emb_cache = [cos, sin]
|
1309 |
+
|
1310 |
+
def forward(self, max_seq_len, ntk_alpha=1.0):
|
1311 |
+
self.update_rotary_pos_emb_cache(max_seq_len, ntk_alpha)
|
1312 |
+
cos, sin = self._rotary_pos_emb_cache
|
1313 |
+
return [cos[:, :max_seq_len], sin[:, :max_seq_len]]
|
1314 |
+
|
1315 |
+
|
1316 |
+
def _rotate_half(x):
|
1317 |
+
from einops import rearrange
|
1318 |
+
|
1319 |
+
x = rearrange(x, "... (j d) -> ... j d", j=2)
|
1320 |
+
x1, x2 = x.unbind(dim=-2)
|
1321 |
+
return torch.cat((-x2, x1), dim=-1)
|
1322 |
+
|
1323 |
+
|
1324 |
+
def apply_rotary_pos_emb(t, freqs):
|
1325 |
+
""" Apply rotary embedding to the first rotary_dim of the iput
|
1326 |
+
|
1327 |
+
Arguments:
|
1328 |
+
t (tensor(batch_size, seq_len, n_head, head_dim)):
|
1329 |
+
the input embedding/hidden states
|
1330 |
+
freqs (list[tensor(1, seq_len, 1, rotary_dim), tensor(1, seq_len, 1, rotary_dim)]):
|
1331 |
+
the cached cos/sin position embeddings
|
1332 |
+
"""
|
1333 |
+
rot_dim = freqs[0].shape[-1]
|
1334 |
+
cos, sin = freqs
|
1335 |
+
t_float = t.float()
|
1336 |
+
if apply_rotary_emb_func is not None and t.is_cuda:
|
1337 |
+
# apply_rotary_emb in flash_attn requires cos/sin to be of
|
1338 |
+
# shape (seqlen, rotary_dim / 2) and applies rotary embedding
|
1339 |
+
# to the first rotary_dim of the input
|
1340 |
+
cos = cos.squeeze(0).squeeze(1)[:, : rot_dim // 2]
|
1341 |
+
sin = sin.squeeze(0).squeeze(1)[:, : rot_dim // 2]
|
1342 |
+
return apply_rotary_emb_func(t_float, cos, sin).type_as(t)
|
1343 |
+
else:
|
1344 |
+
t_rot, t_pass = t_float[..., :rot_dim], t_float[..., rot_dim:]
|
1345 |
+
t_rot = (t_rot * cos) + (_rotate_half(t_rot) * sin)
|
1346 |
+
return torch.cat((t_rot, t_pass), dim=-1).type_as(t)
|
1347 |
+
|
1348 |
+
|
1349 |
+
class RMSNorm(torch.nn.Module):
|
1350 |
+
def __init__(self, dim: int, eps: float = 1e-6):
|
1351 |
+
super().__init__()
|
1352 |
+
self.eps = eps
|
1353 |
+
self.weight = nn.Parameter(torch.ones(dim))
|
1354 |
+
|
1355 |
+
def _norm(self, x):
|
1356 |
+
return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
|
1357 |
+
|
1358 |
+
def forward(self, x):
|
1359 |
+
if rms_norm is not None and x.is_cuda:
|
1360 |
+
return rms_norm(x, self.weight, self.eps)
|
1361 |
+
else:
|
1362 |
+
output = self._norm(x.float()).type_as(x)
|
1363 |
+
return output * self.weight
|
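A minimal usage sketch for the modeling code above: since it ships with the checkpoint, it is normally loaded through trust_remote_code. The repository id below is a placeholder, and the optional dependencies (accelerate for device_map, transformers_stream_generator for chat_stream) are assumptions about the reader's environment, not requirements stated in this commit.

from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical repository id -- replace with the actual id of this model on the Hub.
model_id = "your-org/your-qwen-model"

tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",        # needs `accelerate`; alternatively move the model yourself
    trust_remote_code=True,   # picks up QWenLMHeadModel from modeling_qwen.py above
).eval()

# chat() builds a ChatML prompt via make_context() and returns (response, updated_history).
response, history = model.chat(tokenizer, "Hello, who are you?", history=None)
print(response)

# chat_stream() yields progressively longer decodes of the current turn
# (it requires the optional `transformers_stream_generator` package).
for partial in model.chat_stream(tokenizer, "Summarize that in one sentence.", history=history):
    print(partial)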
qwen_generation_utils.py
ADDED
@@ -0,0 +1,416 @@
1 |
+
# Copyright (c) Alibaba Cloud.
|
2 |
+
#
|
3 |
+
# This source code is licensed under the license found in the
|
4 |
+
# LICENSE file in the root directory of this source tree.
|
5 |
+
|
6 |
+
"""Generation support."""
|
7 |
+
|
8 |
+
from typing import Tuple, List, Union, Iterable
|
9 |
+
|
10 |
+
import numpy as np
|
11 |
+
import torch
|
12 |
+
import torch.nn.functional as F
|
13 |
+
from transformers import PreTrainedTokenizer
|
14 |
+
from transformers import logging
|
15 |
+
from transformers.generation import LogitsProcessor
|
16 |
+
|
17 |
+
logger = logging.get_logger(__name__)
|
18 |
+
|
19 |
+
# Types.
|
20 |
+
HistoryType = List[Tuple[str, str]]
|
21 |
+
TokensType = List[int]
|
22 |
+
BatchTokensType = List[List[int]]
|
23 |
+
|
24 |
+
|
25 |
+
def pad_batch(batch: BatchTokensType, pad_id: int, seq_length: int) -> BatchTokensType:
|
26 |
+
for tokens in batch:
|
27 |
+
context_length = len(tokens)
|
28 |
+
if context_length < seq_length:
|
29 |
+
tokens.extend([pad_id] * (seq_length - context_length))
|
30 |
+
return batch
|
31 |
+
|
32 |
+
|
33 |
+
def get_ltor_masks_and_position_ids(
|
34 |
+
data,
|
35 |
+
eod_token,
|
36 |
+
reset_position_ids,
|
37 |
+
reset_attention_mask,
|
38 |
+
eod_mask_loss,
|
39 |
+
):
|
40 |
+
"""Build masks and position id for left to right model."""
|
41 |
+
|
42 |
+
# Extract batch size and sequence length.
|
43 |
+
micro_batch_size, seq_length = data.size()
|
44 |
+
|
45 |
+
# Attention mask (lower triangular).
|
46 |
+
if reset_attention_mask:
|
47 |
+
att_mask_batch = micro_batch_size
|
48 |
+
else:
|
49 |
+
att_mask_batch = 1
|
50 |
+
attention_mask = torch.tril(
|
51 |
+
torch.ones((att_mask_batch, seq_length, seq_length), device=data.device)
|
52 |
+
).view(att_mask_batch, 1, seq_length, seq_length)
|
53 |
+
|
54 |
+
# Loss mask.
|
55 |
+
loss_mask = torch.ones(data.size(), dtype=torch.float, device=data.device)
|
56 |
+
if eod_mask_loss:
|
57 |
+
loss_mask[data == eod_token] = 0.0
|
58 |
+
|
59 |
+
# Position ids.
|
60 |
+
position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)
|
61 |
+
position_ids = position_ids.unsqueeze(0).expand_as(data)
|
62 |
+
# We need to clone as the ids will be modified based on batch index.
|
63 |
+
if reset_position_ids:
|
64 |
+
position_ids = position_ids.clone()
|
65 |
+
|
66 |
+
if reset_position_ids or reset_attention_mask:
|
67 |
+
# Loop through the batches:
|
68 |
+
for b in range(micro_batch_size):
|
69 |
+
|
70 |
+
# Find indices where the EOD token is.
|
71 |
+
eod_index = position_ids[b, data[b] == eod_token]
|
72 |
+
# Detach indices from positions if going to modify positions.
|
73 |
+
if reset_position_ids:
|
74 |
+
eod_index = eod_index.clone()
|
75 |
+
|
76 |
+
# Loop through EOD indecies:
|
77 |
+
prev_index = 0
|
78 |
+
for j in range(eod_index.size()[0]):
|
79 |
+
i = eod_index[j]
|
80 |
+
# Mask attention loss.
|
81 |
+
if reset_attention_mask:
|
82 |
+
attention_mask[b, 0, (i + 1) :, : (i + 1)] = 0
|
83 |
+
# Reset positions.
|
84 |
+
if reset_position_ids:
|
85 |
+
position_ids[b, (i + 1) :] -= i + 1 - prev_index
|
86 |
+
prev_index = i + 1
|
87 |
+
|
88 |
+
# Convert attention mask to binary:
|
89 |
+
attention_mask = attention_mask < 0.5
|
90 |
+
|
91 |
+
return attention_mask, loss_mask, position_ids
|
92 |
+
|
93 |
+
|
94 |
+
def get_batch(context_tokens: torch.LongTensor, eod_id: int):
|
95 |
+
"""Generate batch from context tokens."""
|
96 |
+
# Ensure tokens are contiguous on their current device.
|
97 |
+
tokens = context_tokens.contiguous().to(context_tokens.device)
|
98 |
+
# Get the attention mask and position ids.
|
99 |
+
attention_mask, _, position_ids = get_ltor_masks_and_position_ids(
|
100 |
+
tokens,
|
101 |
+
eod_id,
|
102 |
+
reset_position_ids=False,
|
103 |
+
reset_attention_mask=False,
|
104 |
+
eod_mask_loss=False,
|
105 |
+
)
|
106 |
+
return tokens, attention_mask, position_ids
|
107 |
+
|
108 |
+
|
109 |
+
def get_stop_words_ids(chat_format, tokenizer):
|
110 |
+
if chat_format == "raw":
|
111 |
+
stop_words_ids = [tokenizer.encode("Human:"), [tokenizer.eod_id]]
|
112 |
+
elif chat_format == "chatml":
|
113 |
+
stop_words_ids = [[tokenizer.im_end_id], [tokenizer.im_start_id]]
|
114 |
+
else:
|
115 |
+
raise NotImplementedError(f"Unknown chat format {chat_format!r}")
|
116 |
+
return stop_words_ids
|
117 |
+
|
118 |
+
|
119 |
+
def make_context(
|
120 |
+
tokenizer: PreTrainedTokenizer,
|
121 |
+
query: str,
|
122 |
+
history: List[Tuple[str, str]] = None,
|
123 |
+
system: str = "",
|
124 |
+
max_window_size: int = 6144,
|
125 |
+
chat_format: str = "chatml",
|
126 |
+
):
|
127 |
+
if history is None:
|
128 |
+
history = []
|
129 |
+
|
130 |
+
if chat_format == "chatml":
|
131 |
+
im_start, im_end = "<|im_start|>", "<|im_end|>"
|
132 |
+
im_start_tokens = [tokenizer.im_start_id]
|
133 |
+
im_end_tokens = [tokenizer.im_end_id]
|
134 |
+
nl_tokens = tokenizer.encode("\n")
|
135 |
+
|
136 |
+
def _tokenize_str(role, content):
|
137 |
+
return f"{role}\n{content}", tokenizer.encode(
|
138 |
+
role, allowed_special=set()
|
139 |
+
) + nl_tokens + tokenizer.encode(content, allowed_special=set())
|
140 |
+
|
141 |
+
system_text, system_tokens_part = _tokenize_str("system", system)
|
142 |
+
system_tokens = im_start_tokens + system_tokens_part + im_end_tokens
|
143 |
+
|
144 |
+
raw_text = ""
|
145 |
+
context_tokens = []
|
146 |
+
|
147 |
+
for turn_query, turn_response in reversed(history):
|
148 |
+
query_text, query_tokens_part = _tokenize_str("user", turn_query)
|
149 |
+
query_tokens = im_start_tokens + query_tokens_part + im_end_tokens
|
150 |
+
response_text, response_tokens_part = _tokenize_str(
|
151 |
+
"assistant", turn_response
|
152 |
+
)
|
153 |
+
response_tokens = im_start_tokens + response_tokens_part + im_end_tokens
|
154 |
+
|
155 |
+
next_context_tokens = nl_tokens + query_tokens + nl_tokens + response_tokens
|
156 |
+
prev_chat = (
|
157 |
+
f"\n{im_start}{query_text}{im_end}\n{im_start}{response_text}{im_end}"
|
158 |
+
)
|
159 |
+
|
160 |
+
current_context_size = (
|
161 |
+
len(system_tokens) + len(next_context_tokens) + len(context_tokens)
|
162 |
+
)
|
163 |
+
if current_context_size < max_window_size:
|
164 |
+
context_tokens = next_context_tokens + context_tokens
|
165 |
+
raw_text = prev_chat + raw_text
|
166 |
+
else:
|
167 |
+
break
|
168 |
+
|
169 |
+
context_tokens = system_tokens + context_tokens
|
170 |
+
raw_text = f"{im_start}{system_text}{im_end}" + raw_text
|
171 |
+
context_tokens += (
|
172 |
+
nl_tokens
|
173 |
+
+ im_start_tokens
|
174 |
+
+ _tokenize_str("user", query)[1]
|
175 |
+
+ im_end_tokens
|
176 |
+
+ nl_tokens
|
177 |
+
+ im_start_tokens
|
178 |
+
+ tokenizer.encode("assistant")
|
179 |
+
+ nl_tokens
|
180 |
+
)
|
181 |
+
raw_text += f"\n{im_start}user\n{query}{im_end}\n{im_start}assistant\n"
|
182 |
+
|
183 |
+
elif chat_format == "raw":
|
184 |
+
raw_text = query
|
185 |
+
context_tokens = tokenizer.encode(raw_text)
|
186 |
+
else:
|
187 |
+
raise NotImplementedError(f"Unknown chat format {chat_format!r}")
|
188 |
+
|
189 |
+
return raw_text, context_tokens
|
190 |
+
|
191 |
+
|
192 |
+
def _decode_default(
|
193 |
+
tokens: List[int],
|
194 |
+
*,
|
195 |
+
stop_words: List[str],
|
196 |
+
eod_words: List[str],
|
197 |
+
tokenizer: PreTrainedTokenizer,
|
198 |
+
raw_text_len: int,
|
199 |
+
verbose: bool = False,
|
200 |
+
return_end_reason: bool = False,
|
201 |
+
errors: str='replace',
|
202 |
+
):
|
203 |
+
trim_decode_tokens = tokenizer.decode(tokens, errors=errors)[raw_text_len:]
|
204 |
+
if verbose:
|
205 |
+
print("\nRaw Generate: ", trim_decode_tokens)
|
206 |
+
|
207 |
+
end_reason = f"Gen length {len(tokens)}"
|
208 |
+
for stop_word in stop_words:
|
209 |
+
trim_decode_tokens = trim_decode_tokens.replace(stop_word, "").strip()
|
210 |
+
for eod_word in eod_words:
|
211 |
+
if eod_word in trim_decode_tokens:
|
212 |
+
end_reason = f"Gen {eod_word!r}"
|
213 |
+
trim_decode_tokens = trim_decode_tokens.split(eod_word)[0]
|
214 |
+
trim_decode_tokens = trim_decode_tokens.strip()
|
215 |
+
if verbose:
|
216 |
+
print("\nEnd Reason:", end_reason)
|
217 |
+
print("\nGenerate: ", trim_decode_tokens)
|
218 |
+
|
219 |
+
if return_end_reason:
|
220 |
+
return trim_decode_tokens, end_reason
|
221 |
+
else:
|
222 |
+
return trim_decode_tokens
|
223 |
+
|
224 |
+
|
225 |
+
def _decode_chatml(
|
226 |
+
tokens: List[int],
|
227 |
+
*,
|
228 |
+
stop_words: List[str],
|
229 |
+
eod_token_ids: List[int],
|
230 |
+
tokenizer: PreTrainedTokenizer,
|
231 |
+
raw_text_len: int,
|
232 |
+
context_length: int,
|
233 |
+
verbose: bool = False,
|
234 |
+
return_end_reason: bool = False,
|
235 |
+
errors: str='replace'
|
236 |
+
):
|
237 |
+
end_reason = f"Gen length {len(tokens)}"
|
238 |
+
eod_token_idx = context_length
|
239 |
+
for eod_token_idx in range(context_length, len(tokens)):
|
240 |
+
if tokens[eod_token_idx] in eod_token_ids:
|
241 |
+
end_reason = f"Gen {tokenizer.decode([tokens[eod_token_idx]])!r}"
|
242 |
+
break
|
243 |
+
|
244 |
+
trim_decode_tokens = tokenizer.decode(tokens[:eod_token_idx], errors=errors)[raw_text_len:]
|
245 |
+
if verbose:
|
246 |
+
print("\nRaw Generate w/o EOD:", tokenizer.decode(tokens, errors=errors)[raw_text_len:])
|
247 |
+
print("\nRaw Generate:", trim_decode_tokens)
|
248 |
+
print("\nEnd Reason:", end_reason)
|
249 |
+
for stop_word in stop_words:
|
250 |
+
trim_decode_tokens = trim_decode_tokens.replace(stop_word, "").strip()
|
251 |
+
trim_decode_tokens = trim_decode_tokens.strip()
|
252 |
+
if verbose:
|
253 |
+
print("\nGenerate:", trim_decode_tokens)
|
254 |
+
|
255 |
+
if return_end_reason:
|
256 |
+
return trim_decode_tokens, end_reason
|
257 |
+
else:
|
258 |
+
return trim_decode_tokens
|
259 |
+
|
260 |
+
|
261 |
+
def decode_tokens(
|
262 |
+
tokens: Union[torch.LongTensor, TokensType],
|
263 |
+
tokenizer: PreTrainedTokenizer,
|
264 |
+
raw_text_len: int,
|
265 |
+
context_length: int,
|
266 |
+
chat_format: str,
|
267 |
+
verbose: bool = False,
|
268 |
+
return_end_reason: bool = False,
|
269 |
+
errors: str="replace",
|
270 |
+
) -> str:
|
271 |
+
if torch.is_tensor(tokens):
|
272 |
+
tokens = tokens.cpu().numpy().tolist()
|
273 |
+
|
274 |
+
if chat_format == "chatml":
|
275 |
+
return _decode_chatml(
|
276 |
+
tokens,
|
277 |
+
stop_words=[],
|
278 |
+
eod_token_ids=[tokenizer.im_start_id, tokenizer.im_end_id],
|
279 |
+
tokenizer=tokenizer,
|
280 |
+
raw_text_len=raw_text_len,
|
281 |
+
context_length=context_length,
|
282 |
+
verbose=verbose,
|
283 |
+
return_end_reason=return_end_reason,
|
284 |
+
errors=errors,
|
285 |
+
)
|
286 |
+
elif chat_format == "raw":
|
287 |
+
return _decode_default(
|
288 |
+
tokens,
|
289 |
+
stop_words=["<|endoftext|>"],
|
290 |
+
eod_words=["<|endoftext|>"],
|
291 |
+
tokenizer=tokenizer,
|
292 |
+
raw_text_len=raw_text_len,
|
293 |
+
verbose=verbose,
|
294 |
+
return_end_reason=return_end_reason,
|
295 |
+
errors=errors,
|
296 |
+
)
|
297 |
+
else:
|
298 |
+
raise NotImplementedError(f"Unknown chat format {chat_format!r}")
|
299 |
+
|
300 |
+
|
301 |
+
class StopWordsLogitsProcessor(LogitsProcessor):
|
302 |
+
"""
|
303 |
+
:class:`transformers.LogitsProcessor` that enforces that generation stops when the specified sequences appear.
|
304 |
+
|
305 |
+
Args:
|
306 |
+
stop_words_ids (:obj:`List[List[int]]`):
|
307 |
+
List of list of token ids of stop ids. In order to get the tokens of the words
|
308 |
+
that should not appear in the generated text, use :obj:`tokenizer(bad_word,
|
309 |
+
add_prefix_space=True).input_ids`.
|
310 |
+
eos_token_id (:obj:`int`):
|
311 |
+
The id of the `end-of-sequence` token.
|
312 |
+
"""
|
313 |
+
|
314 |
+
def __init__(self, stop_words_ids: Iterable[Iterable[int]], eos_token_id: int):
|
315 |
+
|
316 |
+
if not isinstance(stop_words_ids, List) or len(stop_words_ids) == 0:
|
317 |
+
raise ValueError(
|
318 |
+
f"`stop_words_ids` has to be a non-emtpy list, but is {stop_words_ids}."
|
319 |
+
)
|
320 |
+
if any(not isinstance(bad_word_ids, list) for bad_word_ids in stop_words_ids):
|
321 |
+
raise ValueError(
|
322 |
+
f"`stop_words_ids` has to be a list of lists, but is {stop_words_ids}."
|
323 |
+
)
|
324 |
+
if any(
|
325 |
+
any(
|
326 |
+
(not isinstance(token_id, (int, np.integer)) or token_id < 0)
|
327 |
+
for token_id in stop_word_ids
|
328 |
+
)
|
329 |
+
for stop_word_ids in stop_words_ids
|
330 |
+
):
|
331 |
+
raise ValueError(
|
332 |
+
f"Each list in `stop_words_ids` has to be a list of positive integers, but is {stop_words_ids}."
|
333 |
+
)
|
334 |
+
|
335 |
+
self.stop_words_ids = list(
|
336 |
+
filter(
|
337 |
+
lambda bad_token_seq: bad_token_seq != [eos_token_id], stop_words_ids
|
338 |
+
)
|
339 |
+
)
|
340 |
+
self.eos_token_id = eos_token_id
|
341 |
+
for stop_token_seq in self.stop_words_ids:
|
342 |
+
assert (
|
343 |
+
len(stop_token_seq) > 0
|
344 |
+
), "Stop words token sequences {} cannot have an empty list".format(
|
345 |
+
stop_words_ids
|
346 |
+
)
|
347 |
+
|
348 |
+
def __call__(
|
349 |
+
self, input_ids: torch.LongTensor, scores: torch.FloatTensor
|
350 |
+
) -> torch.FloatTensor:
|
351 |
+
stopped_samples = self._calc_stopped_samples(input_ids)
|
352 |
+
for i, should_stop in enumerate(stopped_samples):
|
353 |
+
if should_stop:
|
354 |
+
scores[i, self.eos_token_id] = float(2**15)
|
355 |
+
return scores
|
356 |
+
|
357 |
+
def _tokens_match(self, prev_tokens: torch.LongTensor, tokens: List[int]) -> bool:
|
358 |
+
if len(tokens) == 0:
|
359 |
+
# an empty stop-word sequence always counts as a match
|
360 |
+
return True
|
361 |
+
elif len(tokens) > len(prev_tokens):
|
362 |
+
# if bad word tokens are longer than prev input_ids they can't be equal
|
363 |
+
return False
|
364 |
+
elif prev_tokens[-len(tokens) :].tolist() == tokens:
|
365 |
+
# if tokens match
|
366 |
+
return True
|
367 |
+
else:
|
368 |
+
return False
|
369 |
+
|
370 |
+
def _calc_stopped_samples(self, prev_input_ids: Iterable[int]) -> Iterable[int]:
|
371 |
+
stopped_samples = []
|
372 |
+
for prev_input_ids_slice in prev_input_ids:
|
373 |
+
match = False
|
374 |
+
for stop_token_seq in self.stop_words_ids:
|
375 |
+
if self._tokens_match(prev_input_ids_slice, stop_token_seq):
|
376 |
+
# a stop-word sequence matched, so this sample should stop
|
377 |
+
match = True
|
378 |
+
break
|
379 |
+
stopped_samples.append(match)
|
380 |
+
|
381 |
+
return stopped_samples
|
382 |
+
|
383 |
+
|
384 |
+
def top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-float("Inf")):
|
385 |
+
"""This function has been mostly taken from huggingface conversational
|
386 |
+
ai code at
|
387 |
+
https://medium.com/huggingface/how-to-build-a-state-of-the-art-
|
388 |
+
conversational-ai-with-transfer-learning-2d818ac26313"""
|
389 |
+
|
390 |
+
if top_k > 0:
|
391 |
+
# Remove all tokens with a probability less than the
|
392 |
+
# last token of the top-k
|
393 |
+
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
|
394 |
+
logits[indices_to_remove] = filter_value
|
395 |
+
|
396 |
+
if top_p > 0.0:
|
397 |
+
# Sort logits in descending order to compute cumulative probabilities
|
398 |
+
sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
|
399 |
+
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
|
400 |
+
|
401 |
+
# Remove tokens with cumulative probability above the threshold
|
402 |
+
sorted_indices_to_remove = cumulative_probs > top_p
|
403 |
+
# Shift the indices to the right to keep also the first token
|
404 |
+
# above the threshold
|
405 |
+
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
|
406 |
+
sorted_indices_to_remove[..., 0] = 0
|
407 |
+
for i in range(sorted_indices.size(0)):
|
408 |
+
indices_to_remove = sorted_indices[i][sorted_indices_to_remove[i]]
|
409 |
+
logits[i][indices_to_remove] = filter_value
|
410 |
+
|
411 |
+
return logits
|
412 |
+
|
413 |
+
|
414 |
+
def switch(val1, val2, boolean):
|
415 |
+
boolean = boolean.type_as(val1)
|
416 |
+
return (1 - boolean) * val1 + boolean * val2
|
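The helpers in qwen_generation_utils.py are what QWenLMHeadModel.chat() drives internally. A rough sketch of calling them directly is below; it assumes model and tokenizer were loaded with trust_remote_code=True as in the earlier example, that qwen_generation_utils.py is importable (e.g. from a local clone of this repo), and it uses tokenizer.im_end_id as the EOS id, which approximates but is not necessarily identical to the generation_config.eos_token_id the model itself uses.

import torch
from transformers import LogitsProcessorList
# Assumes a local copy of this repository is on the import path.
from qwen_generation_utils import (
    make_context, get_stop_words_ids, decode_tokens, StopWordsLogitsProcessor
)

# Build the ChatML prompt text and its token ids for a single-turn query.
raw_text, context_tokens = make_context(
    tokenizer,
    "What does rotary position embedding do?",
    history=[],
    system="You are a helpful assistant.",
    chat_format="chatml",
)

# Stop on <|im_end|>/<|im_start|>: the processor boosts the EOS logit once a stop sequence appears.
stop_words_ids = get_stop_words_ids("chatml", tokenizer)
logits_processor = LogitsProcessorList(
    [StopWordsLogitsProcessor(stop_words_ids=stop_words_ids, eos_token_id=tokenizer.im_end_id)]
)

input_ids = torch.tensor([context_tokens]).to(model.device)
output_ids = model.generate(input_ids, logits_processor=logits_processor, max_new_tokens=256)

# Strip the prompt and anything after the first EOD token from the decoded text.
response = decode_tokens(
    output_ids[0],
    tokenizer,
    raw_text_len=len(raw_text),
    context_length=len(context_tokens),
    chat_format="chatml",
)
print(response)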