Alan Liu committed
Commit 79bea76
Parent(s): 5607124

fix len(modules)=0 case

Files changed: calc_util.py (+63 -55)

calc_util.py CHANGED
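The change wraps every `module_classes` branch in a `len(modules) > 0` guard, so that when `get_module_tensors_matched` finds nothing the function falls through to the config-based estimate instead of working off an empty list. A minimal sketch of the pattern for the first hunk below, with the helpers stubbed out under stated assumptions (`get_module_tensors_matched` returning matched parameter shapes, `matrix_operation` counting multiply-accumulates; the real definitions live elsewhere in calc_util.py and may differ):

    def get_module_tensors_matched(predicate, module_classes):
        # Assumption: module_classes maps parameter names to their shapes.
        return [shape for name, shape in module_classes.items() if predicate(name)]

    def matrix_operation(A, B):
        # Assumption: multiply-accumulate count for A=[b, s, k] times B=[k, n].
        b, s, k = A
        _, n = B
        return 2 * b * s * k * n

    def word_embedding_operation(model_config, inference_config):
        if model_config['module_classes']:
            modules = get_module_tensors_matched(
                lambda x: 'embed' in x and 'pos' not in x, model_config['module_classes'])
            if len(modules) > 0:  # the new guard: only trust matched shapes if any exist
                A = [inference_config['batchsize'],
                     inference_config['input_seq_length'], modules[0][0]]
                B = modules[0]
                return matrix_operation(A, B)
        # Fallback, now also reached when the match list is empty.
        A = [inference_config['batchsize'],
             inference_config['input_seq_length'], model_config['vocab_size']]
        B = [model_config['vocab_size'], model_config['hidden_size']]
        return matrix_operation(A, B)

In the new code the guarded branch still indexes `modules[0]` or iterates the matches, but an empty result now falls through to the estimate built from `model_config` alone.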
@@ -65,10 +65,11 @@ def word_embedding_operation(model_config, inference_config):
     #\end{equation}
     if model_config['module_classes']:
         modules = get_module_tensors_matched(lambda x: 'embed' in x and 'pos' not in x, model_config['module_classes'])
-
-
-
-
+        if len(modules) > 0:
+            A = [inference_config['batchsize'], inference_config['input_seq_length'], modules[0][0]]
+            B = modules[0]
+            op_count = matrix_operation(A, B)
+            return op_count

     A = [inference_config['batchsize'], inference_config['input_seq_length'], model_config['vocab_size']]
     B = [model_config['vocab_size'], model_config['hidden_size']]
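For scale, a worked example of the fallback branch above, under the assumption that `matrix_operation([b, s, k], [k, n])` counts 2*b*s*k*n multiply-accumulates (the helper is defined elsewhere in the file); the OPT-1.3B-like numbers are purely illustrative:

    batchsize, seq_len = 1, 512
    vocab_size, hidden_size = 50272, 2048  # illustrative, roughly OPT-1.3B sized

    word_embedding_ops = 2 * batchsize * seq_len * vocab_size * hidden_size
    print(f"{word_embedding_ops:.2e}")  # ~1.05e+11 when the lookup is modeled as a dense matmul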
@@ -79,7 +80,8 @@ def positional_embedding_operation(model_config, inference_config):
 def positional_embedding_operation(model_config, inference_config):
     if model_config['module_classes']:
         modules = get_module_tensors_matched(lambda x: 'embed' in x and 'pos' in x, model_config['module_classes'])
-
+        if len(modules) > 0:
+            return multiplication_in_int64([inference_config['batchsize'], inference_config['input_seq_length'], modules[0][-1]])

     return multiplication_in_int64([inference_config['batchsize'], inference_config['input_seq_length'], model_config['hidden_size']])

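`multiplication_in_int64` is not shown in this diff; assuming it simply multiplies the listed dimensions in 64-bit integers, the positional-embedding cost is just the element count of the activation the embedding is added to:

    import numpy as np

    def multiplication_in_int64(dims):
        # Assumption: product of the dimensions, accumulated in int64 to avoid overflow.
        return np.prod(np.array(dims, dtype=np.int64))

    print(multiplication_in_int64([1, 512, 2048]))  # 1048576 adds for batch=1, seq=512, hidden=2048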
@@ -87,15 +89,16 @@ def attention_K_operation(model_config, inference_config, seq_length):
 def attention_K_operation(model_config, inference_config, seq_length):
     if model_config['module_classes']:
         modules = get_module_tensors_matched(lambda x: 'att' in x and 'k' in x , model_config['module_classes'])
-
-
-
-
-
-
-
-
-
+        if len(modules) > 0:
+            total = 0
+            for module in modules:
+                if len(module) > 1:
+                    A = [inference_config['batchsize'], seq_length, model_config['hidden_size']]
+                    B = [model_config['hidden_size'], model_config['hidden_size_per_head']]
+                    total += model_config['num_attention_heads']*matrix_operation(A, B)
+                else:
+                    total += model_config['hidden_size']
+            return total

     A = [inference_config['batchsize'], seq_length, model_config['hidden_size']]
     B = [model_config['hidden_size'], model_config['hidden_size_per_head']]
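The guarded branch above counts one [hidden_size, hidden_size_per_head] projection per attention head for every matched 2-D tensor, and a bias-sized add for 1-D tensors; the same pattern repeats in attention_Q_operation and attention_V_operation below. A quick check, again assuming the 2*b*s*k*n multiply-accumulate convention for matrix_operation, that the per-head sum equals one full [hidden_size, hidden_size] projection:

    batchsize, seq_length = 1, 512
    hidden_size, num_attention_heads = 2048, 32        # illustrative values
    hidden_size_per_head = hidden_size // num_attention_heads

    per_head = 2 * batchsize * seq_length * hidden_size * hidden_size_per_head
    k_projection_ops = num_attention_heads * per_head

    # num_heads * (2*b*s*h*(h/heads)) == 2*b*s*h*h
    assert k_projection_ops == 2 * batchsize * seq_length * hidden_size * hidden_size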
@@ -104,15 +107,16 @@ def attention_Q_operation(model_config, inference_config, seq_length):
 def attention_Q_operation(model_config, inference_config, seq_length):
     if model_config['module_classes']:
         modules = get_module_tensors_matched(lambda x: 'att' in x and 'q' in x , model_config['module_classes'])
-
-
-
-
-
-
-
-
-
+        if len(modules) > 0:
+            total = 0
+            for module in modules:
+                if len(module) > 1:
+                    A = [inference_config['batchsize'], seq_length, model_config['hidden_size']]
+                    B = [model_config['hidden_size'], model_config['hidden_size_per_head']]
+                    total += model_config['num_attention_heads']*matrix_operation(A, B)
+                else:
+                    total += model_config['hidden_size']
+            return total

     A = [inference_config['batchsize'], seq_length, model_config['hidden_size']]
     B = [model_config['hidden_size'], model_config['hidden_size_per_head']]
@@ -121,15 +125,16 @@ def attention_V_operation(model_config, inference_config, seq_length):
 def attention_V_operation(model_config, inference_config, seq_length):
     if model_config['module_classes']:
         modules = get_module_tensors_matched(lambda x: 'att' in x and 'v' in x , model_config['module_classes'])
-
-
-
-
-
-
-
-
-
+        if len(modules) > 0:
+            total = 0
+            for module in modules:
+                if len(module) > 1:
+                    A = [inference_config['batchsize'], seq_length, model_config['hidden_size']]
+                    B = [model_config['hidden_size'], model_config['hidden_size_per_head']]
+                    total += model_config['num_attention_heads']*matrix_operation(A, B)
+                else:
+                    total += model_config['hidden_size']
+            return total

     A = [inference_config['batchsize'], seq_length, model_config['hidden_size']]
     B = [model_config['hidden_size'], model_config['hidden_size_per_head']]
@@ -155,15 +160,16 @@ def attention_out_operation(model_config, inference_config, seq_length):
 def attention_out_operation(model_config, inference_config, seq_length):
     if model_config['module_classes']:
         modules = get_module_tensors_matched(lambda x: 'att' in x and 'k' in x , model_config['module_classes'])
-
-
-
-
-
-
-
-
-
+        if len(modules) > 0:
+            total = 0
+            for module in modules:
+                if len(module) > 1:
+                    A = [inference_config['batchsize'], seq_length, model_config['hidden_size']]
+                    B = [model_config['hidden_size'], model_config['hidden_size']]
+                    total += matrix_operation(A, B)
+                else:
+                    total += model_config['hidden_size']
+            return total

     A = [inference_config['batchsize'], seq_length, model_config['hidden_size']]
     B = [model_config['hidden_size'], model_config['hidden_size']]
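Unlike the K/Q/V branches, the output projection above uses the full [hidden_size, hidden_size] weight once, with no per-head multiplier. Under the same assumed multiply-accumulate convention:

    batchsize, seq_length, hidden_size = 1, 512, 2048  # illustrative values

    attention_out_ops = 2 * batchsize * seq_length * hidden_size * hidden_size
    print(f"{attention_out_ops:.2e}")  # ~4.29e+09, matching the combined per-head K (or Q, V) count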
@@ -174,11 +180,12 @@ def layernorm_operation(model_config, inference_config, seq_length):
     # 5 is a modeled value
     if model_config['module_classes']:
         modules = get_module_tensors_matched(lambda x: 'norm' in x, model_config['module_classes'])
-
-
-
-
-
+        if len(modules) > 0:
+            total = 0
+            for module in modules:
+                total += model_config['hidden_size']
+            return 5*total
+
     layernorm_operation = (5*inference_config['batchsize']*seq_length*model_config['hidden_size'])
     return model_config['num_hidden_layers'] * model_config['layernorm_operation'] * layernorm_operation

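The fallback path above models each LayerNorm as 5 operations per activation element. A worked example, where the stand-in `layernorms_per_layer` plays the role the code gives to model_config['layernorm_operation'] (an assumption about that key's meaning):

    batchsize, seq_length, hidden_size = 1, 512, 2048  # illustrative values
    num_hidden_layers, layernorms_per_layer = 24, 2    # e.g. pre-attention and pre-MLP norms

    per_layernorm = 5 * batchsize * seq_length * hidden_size      # "5 is a modeled value"
    total_layernorm_ops = num_hidden_layers * layernorms_per_layer * per_layernorm
    print(f"{total_layernorm_ops:.2e}")  # ~2.52e+08

Note that the guarded branch returns `5*total` summed over matched tensor widths only, while the fallback scales with batch size and sequence length.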
@@ -186,15 +193,16 @@ def mlp_operation(model_config, inference_config, seq_length):
 def mlp_operation(model_config, inference_config, seq_length):
     if model_config['module_classes']:
         modules = get_module_tensors_matched(lambda x: 'fc' in x or 'mlp' in x, model_config['module_classes'])
-
-
-
-
-
-
-
-
-
+        if len(modules) > 0:
+            total = 0
+            for module in modules:
+                if len(module) > 1:
+                    A = [inference_config['batchsize'], seq_length, module[1]]
+                    B = [module[1], module[0]]
+                    total += matrix_operation(A, B)
+                else:
+                    total += modules[-1][0]
+            return total

     A = [inference_config['batchsize'], seq_length, model_config['hidden_size']]
     B = [model_config['hidden_size'], model_config['intermediate_size']]
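The MLP branch above is the only one that reads shapes off the matched tensors themselves, with module[1] as the input width and module[0] as the output width (consistent with PyTorch's [out_features, in_features] weight layout; an assumption, since the file's shape convention isn't shown in this hunk). A sketch with illustrative fc1/fc2 shapes:

    batchsize, seq_length = 1, 512

    def matmul_ops(b, s, k, n):
        # Assumption: 2*b*s*k*n multiply-accumulates per [b, s, k] x [k, n] product.
        return 2 * b * s * k * n

    fc1, fc2 = [8192, 2048], [2048, 8192]   # illustrative [out_features, in_features] shapes
    total = 0
    for out_features, in_features in (fc1, fc2):
        total += matmul_ops(batchsize, seq_length, in_features, out_features)
    print(f"{total:.2e}")  # ~3.44e+10 for one layer's two MLP matmuls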