Upload Calf

- LMConfig.py +62 -0
- README.md +199 -0
- config.json +84 -0
- generation_config.json +4 -0
- model.py +482 -0
- pytorch_model.bin +3 -0
LMConfig.py
ADDED
@@ -0,0 +1,62 @@
```python
from transformers import PretrainedConfig
from typing import List


class LMConfig(PretrainedConfig):
    model_type = "minimind"

    def __init__(
            self,
            dim: int = 768,
            n_layers: int = 16,
            n_heads: int = 16,
            n_kv_heads: int = 8,
            vocab_size: int = 6400,
            hidden_dim: int = None,
            multiple_of: int = 64,
            norm_eps: float = 1e-5,
            max_seq_len: int = 200,
            dropout: float = 0.0,
            flash_attn: bool = True,
            image_special_token: str = '<' * 25 + '>' * 25,
            image_ids=[30] * 25 + [32] * 25,
            ####################################################
            # Specific configurations for MoE.
            # When use_moe is False, the settings below are ignored.
            ####################################################
            use_moe: bool = False,
            num_experts_per_tok=2,
            n_routed_experts=4,
            n_shared_experts: bool = True,
            scoring_func='softmax',
            aux_loss_alpha=0.01,
            seq_aux=True,
            norm_topk_prob=True,
            **kwargs,
    ):
        self.dim = dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.n_kv_heads = n_kv_heads
        self.vocab_size = vocab_size
        self.hidden_dim = hidden_dim
        self.multiple_of = multiple_of
        self.norm_eps = norm_eps
        self.max_seq_len = max_seq_len
        self.dropout = dropout
        self.flash_attn = flash_attn
        self.image_special_token = image_special_token
        self.image_ids = image_ids
        ####################################################
        # Specific configurations for MoE.
        # When use_moe is False, the settings below are ignored.
        ####################################################
        self.use_moe = use_moe
        self.num_experts_per_tok = num_experts_per_tok  # number of experts selected per token
        self.n_routed_experts = n_routed_experts  # total number of routed experts
        self.n_shared_experts = n_shared_experts  # whether to add shared experts
        self.scoring_func = scoring_func  # scoring function, 'softmax' by default
        self.aux_loss_alpha = aux_loss_alpha  # alpha weight of the auxiliary load-balancing loss
        self.seq_aux = seq_aux  # whether to compute the auxiliary loss at the sequence level
        self.norm_topk_prob = norm_topk_prob  # whether to normalize the top-k probabilities
        super().__init__(**kwargs)
```
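For orientation, here is a minimal usage sketch (not part of the commit itself) assuming LMConfig.py is importable from the working directory. The defaults describe the dense 768-dim, 16-layer model; MoE fields only take effect when use_moe is set:

```python
from LMConfig import LMConfig  # assumes the file above is on the import path

cfg = LMConfig()                  # dense defaults: use_moe=False
print(cfg.dim, cfg.n_layers)      # 768 16
print(cfg.dim // cfg.n_heads)     # 48, the per-head dimension
print(len(cfg.image_ids))         # 50 placeholder ids per image

# Extra kwargs flow through PretrainedConfig as usual:
moe_cfg = LMConfig(use_moe=True, n_routed_experts=8, num_experts_per_tok=2)
```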
README.md
ADDED
@@ -0,0 +1,199 @@
```markdown
---
library_name: transformers
tags: []
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->



## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->

This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.

- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]


#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary



## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]
```
config.json
ADDED
@@ -0,0 +1,84 @@
```json
{
  "architectures": [
    "Calf"
  ],
  "auto_map": {
    "AutoConfig": "LMConfig.LMConfig",
    "AutoModel": "model.Calf"
  },
  "aux_loss_alpha": 0.01,
  "dim": 768,
  "dropout": 0.0,
  "flash_attn": true,
  "hidden_dim": null,
  "image_ids": [
    30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
    30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,
    32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32,
    32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32
  ],
  "image_special_token": "<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>",
  "max_seq_len": 200,
  "model_type": "minimind",
  "multiple_of": 64,
  "n_heads": 16,
  "n_kv_heads": 8,
  "n_layers": 16,
  "n_routed_experts": 4,
  "n_shared_experts": true,
  "norm_eps": 1e-05,
  "norm_topk_prob": true,
  "num_experts_per_tok": 2,
  "scoring_func": "softmax",
  "seq_aux": true,
  "torch_dtype": "float32",
  "transformers_version": "4.44.2",
  "use_moe": false,
  "vocab_size": 6400
}
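```

Because auto_map points AutoConfig at LMConfig.LMConfig and AutoModel at model.Calf, the repo can be loaded with trust_remote_code. A minimal sketch; the repo id below is a placeholder, since this commit does not show it:

```python
from transformers import AutoConfig, AutoModel

repo_id = "<namespace>/<repo>"  # placeholder; not shown in this commit

config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)  # resolves to LMConfig
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)    # resolves to Calf
```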
generation_config.json
ADDED
@@ -0,0 +1,4 @@
```json
{
  "_from_model_config": true,
  "transformers_version": "4.44.2"
}
```
model.py
ADDED
@@ -0,0 +1,482 @@
```python
import math
import struct
import inspect
import time

from .LMConfig import LMConfig
from typing import Any, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from transformers import PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithPast


class RMSNorm(torch.nn.Module):
    def __init__(self, dim: int, eps: float):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(dim))

    def _norm(self, x):
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x):
        output = self._norm(x.float()).type_as(x)
        return output * self.weight


def precompute_pos_cis(dim: int, end: int, theta: float = 10000.0):
    freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
    t = torch.arange(end, device=freqs.device)  # type: ignore
    freqs = torch.outer(t, freqs).float()  # type: ignore
    pos_cis = torch.polar(torch.ones_like(freqs), freqs)  # complex64
    return pos_cis


def apply_rotary_emb(xq, xk, pos_cis):
    def unite_shape(pos_cis, x):
        ndim = x.ndim
        assert 0 <= 1 < ndim
        assert pos_cis.shape == (x.shape[1], x.shape[-1])
        shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]
        return pos_cis.view(*shape)

    xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
    xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
    pos_cis = unite_shape(pos_cis, xq_)
    xq_out = torch.view_as_real(xq_ * pos_cis).flatten(3)
    xk_out = torch.view_as_real(xk_ * pos_cis).flatten(3)
    return xq_out.type_as(xq), xk_out.type_as(xk)


def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
    """Equivalent to torch.repeat_interleave(x, dim=2, repeats=n_rep)."""
    bs, slen, n_kv_heads, head_dim = x.shape
    if n_rep == 1:
        return x
    return (
        x[:, :, :, None, :]
        .expand(bs, slen, n_kv_heads, n_rep, head_dim)
        .reshape(bs, slen, n_kv_heads * n_rep, head_dim)
    )


class Attention(nn.Module):
    def __init__(self, args: LMConfig):
        super().__init__()
        self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
        assert args.n_heads % self.n_kv_heads == 0
        self.n_local_heads = args.n_heads
        self.n_local_kv_heads = self.n_kv_heads
        self.n_rep = self.n_local_heads // self.n_local_kv_heads
        self.head_dim = args.dim // args.n_heads
        self.wq = nn.Linear(args.dim, args.n_heads * self.head_dim, bias=False)
        self.wk = nn.Linear(args.dim, self.n_kv_heads * self.head_dim, bias=False)
        self.wv = nn.Linear(args.dim, self.n_kv_heads * self.head_dim, bias=False)
        self.wo = nn.Linear(args.n_heads * self.head_dim, args.dim, bias=False)
        self.k_cache, self.v_cache = None, None
        self.attn_dropout = nn.Dropout(args.dropout)
        self.resid_dropout = nn.Dropout(args.dropout)
        self.dropout = args.dropout
        self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention') and args.flash_attn

        # print("WARNING: using slow attention. Flash Attention requires PyTorch >= 2.0")
        mask = torch.full((1, 1, args.max_seq_len, args.max_seq_len), float("-inf"))
        mask = torch.triu(mask, diagonal=1)
        self.register_buffer("mask", mask, persistent=False)

    def forward(self, x: torch.Tensor, pos_cis: torch.Tensor, kv_cache=False):
        bsz, seqlen, _ = x.shape

        xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)

        xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
        xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
        xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)

        xq, xk = apply_rotary_emb(xq, xk, pos_cis)

        # A more efficient kv_cache implementation.
        # Note: the original condition was `kv_cache and self.eval()`, which is
        # always truthy and flips the module into eval mode as a side effect;
        # `not self.training` is the intended check.
        if kv_cache and not self.training:
            if seqlen == 1 and all(cache is not None for cache in (self.k_cache, self.v_cache)):
                xk = torch.cat((self.k_cache, xk), dim=1)
                xv = torch.cat((self.v_cache, xv), dim=1)
            self.k_cache, self.v_cache = xk, xv

        xk = repeat_kv(xk, self.n_rep)  # (bs, seqlen, n_local_heads, head_dim)
        xv = repeat_kv(xv, self.n_rep)  # (bs, seqlen, n_local_heads, head_dim)

        xq = xq.transpose(1, 2)
        xk = xk.transpose(1, 2)
        xv = xv.transpose(1, 2)

        if self.flash and seqlen != 1:
            output = torch.nn.functional.scaled_dot_product_attention(
                xq, xk, xv, attn_mask=None,
                dropout_p=self.dropout if self.training else 0.0,
                is_causal=True)
        else:
            scores = torch.matmul(xq, xk.transpose(2, 3)) / math.sqrt(self.head_dim)
            scores = scores + self.mask[:, :, :seqlen, :seqlen]  # (bs, n_local_heads, seqlen, cache_len + seqlen)
            scores = F.softmax(scores.float(), dim=-1).type_as(xq)
            scores = self.attn_dropout(scores)
            output = torch.matmul(scores, xv)  # (bs, n_local_heads, seqlen, head_dim)

        output = output.transpose(1, 2).contiguous().view(bsz, seqlen, -1)

        output = self.wo(output)
        output = self.resid_dropout(output)
        return output


class FeedForward(nn.Module):
    def __init__(self, dim: int, hidden_dim: int, multiple_of: int, dropout: float):
        super().__init__()
        if hidden_dim is None:
            hidden_dim = 4 * dim
            hidden_dim = int(2 * hidden_dim / 3)
            hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
        self.w1 = nn.Linear(dim, hidden_dim, bias=False)
        self.w2 = nn.Linear(hidden_dim, dim, bias=False)
        self.w3 = nn.Linear(dim, hidden_dim, bias=False)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        return self.dropout(self.w2(F.silu(self.w1(x)) * self.w3(x)))


class MoEGate(nn.Module):
    def __init__(self, config: LMConfig):
        super().__init__()
        self.config = config
        self.top_k = config.num_experts_per_tok
        self.n_routed_experts = config.n_routed_experts

        self.scoring_func = config.scoring_func
        self.alpha = config.aux_loss_alpha
        self.seq_aux = config.seq_aux

        self.norm_topk_prob = config.norm_topk_prob
        self.gating_dim = config.dim
        self.weight = nn.Parameter(torch.empty((self.n_routed_experts, self.gating_dim)))
        self.reset_parameters()

    def reset_parameters(self) -> None:
        import torch.nn.init as init
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))

    def forward(self, hidden_states):
        bsz, seq_len, h = hidden_states.shape

        hidden_states = hidden_states.view(-1, h)
        logits = F.linear(hidden_states, self.weight, None)
        if self.scoring_func == 'softmax':
            scores = logits.softmax(dim=-1)
        else:
            raise NotImplementedError(f'unsupported scoring function for MoE gating: {self.scoring_func}')

        topk_weight, topk_idx = torch.topk(scores, k=self.top_k, dim=-1, sorted=False)

        if self.top_k > 1 and self.norm_topk_prob:
            denominator = topk_weight.sum(dim=-1, keepdim=True) + 1e-20
            topk_weight = topk_weight / denominator

        if self.training and self.alpha > 0.0:
            scores_for_aux = scores
            aux_topk = self.top_k
            topk_idx_for_aux_loss = topk_idx.view(bsz, -1)
            if self.seq_aux:
                scores_for_seq_aux = scores_for_aux.view(bsz, seq_len, -1)
                ce = torch.zeros(bsz, self.n_routed_experts, device=hidden_states.device)
                ce.scatter_add_(1, topk_idx_for_aux_loss,
                                torch.ones(bsz, seq_len * aux_topk, device=hidden_states.device)).div_(
                    seq_len * aux_topk / self.n_routed_experts)
                aux_loss = (ce * scores_for_seq_aux.mean(dim=1)).sum(dim=1).mean() * self.alpha
            else:
                mask_ce = F.one_hot(topk_idx_for_aux_loss.view(-1), num_classes=self.n_routed_experts)
                ce = mask_ce.float().mean(0)
                Pi = scores_for_aux.mean(0)
                fi = ce * self.n_routed_experts
                aux_loss = (Pi * fi).sum() * self.alpha
        else:
            aux_loss = None
        return topk_idx, topk_weight, aux_loss


class MOEFeedForward(nn.Module):
    def __init__(self, config: LMConfig):
        super().__init__()
        self.config = config
        self.experts = nn.ModuleList([
            FeedForward(
                dim=config.dim,
                hidden_dim=config.hidden_dim,
                multiple_of=config.multiple_of,
                dropout=config.dropout,
            )
            for _ in range(config.n_routed_experts)
        ])

        self.gate = MoEGate(config)
        if config.n_shared_experts is not None:
            self.shared_experts = FeedForward(
                dim=config.dim,
                hidden_dim=config.hidden_dim,
                multiple_of=config.multiple_of,
                dropout=config.dropout,
            )

    def forward(self, x):
        identity = x
        orig_shape = x.shape
        bsz, seq_len, _ = x.shape

        # Use the gating network to select experts for each token.
        topk_idx, topk_weight, aux_loss = self.gate(x)

        x = x.view(-1, x.shape[-1])
        flat_topk_idx = topk_idx.view(-1)

        if self.training:
            # In training mode, replicate each token once per selected expert.
            x = x.repeat_interleave(self.config.num_experts_per_tok, dim=0)
            y = torch.empty_like(x, dtype=torch.float16)
            for i, expert in enumerate(self.experts):
                y[flat_topk_idx == i] = expert(x[flat_topk_idx == i])
            y = (y.view(*topk_weight.shape, -1) * topk_weight.unsqueeze(-1)).sum(dim=1)
            y = y.view(*orig_shape)
        else:
            # In inference mode, dispatch each token only to its selected experts.
            y = self.moe_infer(x, flat_topk_idx, topk_weight.view(-1, 1)).view(*orig_shape)

        if self.config.n_shared_experts is not None:
            y = y + self.shared_experts(identity)

        return y

    @torch.no_grad()
    def moe_infer(self, x, flat_expert_indices, flat_expert_weights):
        expert_cache = torch.zeros_like(x)
        idxs = flat_expert_indices.argsort()
        tokens_per_expert = flat_expert_indices.bincount().cpu().numpy().cumsum(0)
        token_idxs = idxs // self.config.num_experts_per_tok
        # For example, with tokens_per_expert = [6, 15, 20, 26, 33, 38, 46, 52]
        # and token_idxs = [3, 7, 19, 21, 24, 25, 4, 5, 6, 10, 11, 12, ...],
        # the tokens at token_idxs[:6] -> [3, 7, 19, 21, 24, 25] are handled by
        # expert 0, those at token_idxs[6:15] by expert 1, and so on.
        for i, end_idx in enumerate(tokens_per_expert):
            start_idx = 0 if i == 0 else tokens_per_expert[i - 1]
            if start_idx == end_idx:
                continue
            expert = self.experts[i]
            exp_token_idx = token_idxs[start_idx:end_idx]
            expert_tokens = x[exp_token_idx]
            expert_out = expert(expert_tokens)
            expert_out.mul_(flat_expert_weights[idxs[start_idx:end_idx]])
            # Accumulate the weighted expert outputs with scatter_add_.
            expert_cache.scatter_add_(0, exp_token_idx.view(-1, 1).repeat(1, x.shape[-1]), expert_out)

        return expert_cache


class TransformerBlock(nn.Module):
    def __init__(self, layer_id: int, args: LMConfig):
        super().__init__()
        self.n_heads = args.n_heads
        self.dim = args.dim
        self.head_dim = args.dim // args.n_heads
        self.attention = Attention(args)

        self.layer_id = layer_id
        self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps)
        self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps)

        if args.use_moe:
            self.feed_forward = MOEFeedForward(args)
        else:
            self.feed_forward = FeedForward(
                dim=args.dim,
                hidden_dim=args.hidden_dim,
                multiple_of=args.multiple_of,
                dropout=args.dropout,
            )

    def forward(self, x, pos_cis, kv_cache=False):
        h = x + self.attention(self.attention_norm(x), pos_cis, kv_cache)
        out = h + self.feed_forward(self.ffn_norm(h))
        return out


class VisionProj(nn.Module):
    def __init__(self, vision_out_dim=768, lm_dim=512, image_ids=[1, 2, 3, 4]):
        super().__init__()
        self.vision_out_dim = vision_out_dim
        self.lm_dim = lm_dim
        self.image_ids = image_ids
        self.vision_proj = nn.Sequential(
            nn.Linear(self.vision_out_dim, self.lm_dim),
        )

    def forward(self, image_encoders):
        vision_proj = self.vision_proj(image_encoders)
        return vision_proj


class Calf(PreTrainedModel):
    config_class = LMConfig
    last_loss: Optional[torch.Tensor]

    def __init__(self, params: LMConfig = None):
        super().__init__(params)
        if not params:
            params = LMConfig()
        self.params = params
        self.vocab_size = params.vocab_size
        self.n_layers = params.n_layers
        # Special placeholder ids for images: each image is split into M tokens,
        # matching the count used in get_img_process.
        self.image_ids = params.image_ids

        self.tok_embeddings = nn.Embedding(params.vocab_size, params.dim)
        self.dropout = nn.Dropout(params.dropout)
        self.layers = torch.nn.ModuleList()
        for layer_id in range(self.n_layers):
            self.layers.append(TransformerBlock(layer_id, params))
        self.norm = RMSNorm(params.dim, eps=params.norm_eps)
        self.output = nn.Linear(params.dim, params.vocab_size, bias=False)
        self.tok_embeddings.weight = self.output.weight
        pos_cis = precompute_pos_cis(self.params.dim // self.params.n_heads, self.params.max_seq_len)
        self.register_buffer("pos_cis", pos_cis, persistent=False)

        self.apply(self._init_weights)

        for pn, p in self.named_parameters():
            if pn.endswith('w3.weight') or pn.endswith('wo.weight'):
                torch.nn.init.normal_(p, mean=0.0, std=0.02 / math.sqrt(2 * params.n_layers))

        self.last_loss = None
        self.OUT = CausalLMOutputWithPast()
        self._no_split_modules = [name for name, _ in self.named_modules()]

        self.vision_proj = VisionProj(768, params.dim, self.image_ids)

    def _init_weights(self, module):
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    # VLM
    def count_vision_proj(self, tokens, h, image_encoders=None, seqlen=200):
        # Find the index ranges of the <image> placeholder spans in the tokens,
        # in preparation for replacing them with projected image features.
        def find_indices(tokens, image_ids):
            image_ids_tensor = torch.tensor(image_ids).to(tokens.device)
            return [
                [i, i + len(image_ids) - 1]
                for batch_idx in range(tokens.size(0))
                for i in range(tokens.size(1) - len(image_ids) + 1)
                if torch.equal(tokens[batch_idx, i:i + len(image_ids)], image_ids_tensor)
            ] or None

        image_indices = find_indices(tokens, self.image_ids)

        # If image encodings were provided, splice them in.
        if image_encoders is not None:
            vision_proj = self.vision_proj(image_encoders)
            if image_indices is not None:
                # Build a new tensor holding the spliced result.
                new_h = []
                for i in range(h.size(0)):
                    before = h[i, :image_indices[i][0], :]
                    after = h[i, image_indices[i][1] + 1:, :]
                    # Concatenate before, vision_proj, and after.
                    new_h_i = torch.cat((before, vision_proj[i], after), dim=0)[:seqlen]
                    new_h.append(new_h_i)
                # Stack the spliced sequences back into a batch.
                new_h = torch.stack(new_h, dim=0)
                return new_h

        return h

    def forward(self, tokens: Optional[torch.Tensor] = None, targets: Optional[torch.Tensor] = None,
                kv_cache=False, image_encoders=None, **keyargs):
        current_idx = 0
        if 'input_ids' in keyargs:
            tokens = keyargs['input_ids']
        if 'attention_mask' in keyargs:
            targets = keyargs['attention_mask']  # note: attention_mask is repurposed here as the training targets
        if 'current_idx' in keyargs:
            current_idx = int(keyargs['current_idx'])

        _bsz, seqlen = tokens.shape
        # language token embeddings
        h = self.tok_embeddings(tokens)
        h = self.dropout(h)
        # vision projection tokens
        h = self.count_vision_proj(tokens=tokens, h=h, image_encoders=image_encoders, seqlen=seqlen)

        pos_cis = self.pos_cis[current_idx:current_idx + seqlen]
        for idx, layer in enumerate(self.layers):
            h = layer(h, pos_cis, kv_cache)

        h = self.norm(h)

        if targets is not None:
            logits = self.output(h)
            self.last_loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=0,
                                             reduction='none')
        else:
            logits = self.output(h[:, [-1], :])
            self.last_loss = None

        self.OUT.__setitem__('logits', logits)
        self.OUT.__setitem__('last_loss', self.last_loss)
        return self.OUT

    @torch.inference_mode()
    def generate(self, idx, eos, max_new_tokens, temperature=0.7, top_k=8, stream=True, rp=1., kv_cache=True,
                 image_encoders=None):
        # rp: repetition penalty
        index = idx.shape[1]
        init_inference = True
        while idx.shape[1] < max_new_tokens - 1:
            if init_inference or not kv_cache:
                inference_res, init_inference = self(idx, kv_cache=kv_cache, image_encoders=image_encoders), False
            else:
                inference_res = self(idx[:, -1:], kv_cache=kv_cache, current_idx=idx.shape[1] - 1)

            logits = inference_res.logits
            logits = logits[:, -1, :]

            for token in set(idx.tolist()[0]):
                logits[:, token] /= rp

            if temperature == 0.0:
                _, idx_next = torch.topk(logits, k=1, dim=-1)
            else:
                logits = logits / temperature
                if top_k is not None:
                    v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                    logits[logits < v[:, [-1]]] = -float('Inf')

                probs = F.softmax(logits, dim=-1)
                idx_next = torch.multinomial(probs, num_samples=1, generator=None)

            if idx_next == eos:
                break

            idx = torch.cat((idx, idx_next), dim=1)
            if stream:
                yield idx[:, index:]

        if not stream:
            yield idx[:, index:]

    @torch.inference_mode()
    def eval_answer(self, idx):
        idx_cond = idx if idx.size(1) <= self.params.max_seq_len else idx[:, -self.params.max_seq_len:]
        inference_res = self(idx_cond)
        logits = inference_res.logits
        logits = logits[:, -1, :]
        return logits
```
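A hedged sketch of driving Calf.generate, which is a generator: with stream=True it yields, on every step, a tensor of all tokens produced after the prompt so far, and max_new_tokens bounds the total sequence length rather than only the new tokens. The repo id, eos=2, and the random prompt ids are placeholders (no tokenizer ships in this commit); going through AutoModel also satisfies model.py's relative import of LMConfig:

```python
import torch
from transformers import AutoModel

model = AutoModel.from_pretrained("<namespace>/<repo>", trust_remote_code=True).eval()

prompt = torch.randint(0, 6400, (1, 8))  # hypothetical token ids; no tokenizer is included here
out = prompt[:, :0]
for out in model.generate(prompt, eos=2, max_new_tokens=50,
                          temperature=0.7, top_k=8, stream=True, kv_cache=True):
    pass  # each `out` is a (1, n) tensor of everything generated after the prompt so far
print(out[0].tolist())
```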
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
```
version https://git-lfs.github.com/spec/v1
oid sha256:8ea9a26eb58e36f226abb6ef7b251e4bbfcc3633edfc8a7e1623b3f190e22979
size 437407386
```
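This file is a Git LFS pointer, not the weights themselves: the oid is the sha256 of the actual pytorch_model.bin and size is its byte count (about 437 MB, consistent with roughly 109M float32 parameters). A minimal sketch of fetching the resolved file with huggingface_hub, the repo id again being a placeholder:

```python
from huggingface_hub import hf_hub_download

# Downloads the real weights file that the LFS pointer above references.
path = hf_hub_download(repo_id="<namespace>/<repo>", filename="pytorch_model.bin")
```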