fffiloni committed
Commit e88dc2e · verified · 1 parent: 341f84d

Update text2vid_torch2.py

Files changed (1)
  1. text2vid_torch2.py +48 -1
text2vid_torch2.py CHANGED
@@ -167,7 +167,8 @@ class AttnProcessor2_0:
         hidden_states = hidden_states / attn.rescale_output_factor
 
         return hidden_states
-
+
+    """
     def get_qk(
             self, query, key):
         r"""
@@ -221,7 +222,53 @@ class AttnProcessor2_0:
 
 
         return query, key, dynamic_lambda, key1
+    """
+
+    def get_qk(
+            self, query, key):
+        r"""
+        Compute the attention scores.
+        Args:
+            query (`torch.Tensor`): The query tensor.
+            key (`torch.Tensor`): The key tensor.
+            attention_mask (`torch.Tensor`, *optional*): The attention mask to use. If `None`, no mask is applied.
+        Returns:
+            `torch.Tensor`: The attention probabilities/scores.
+        """
+        q_old = query.clone()
+        k_old = key.clone()
+        dynamic_lambda = None
+        key1 = None
+
+        if self.use_last_attn_slice:
+            if self.last_attn_slice is not None:
+
+                query_list = self.last_attn_slice[0]
+                key_list = self.last_attn_slice[1]
+
+                if query.shape[1] == self.num_frames and query.shape == key.shape:
+                    key1 = key.clone()
+                    key1[:, :1, :key_list.shape[2]] = key_list[:, :1]
+                    dynamic_lambda = torch.tensor([1 + self.LAMBDA * (i / 50) for i in range(self.num_frames)]).to(key.dtype).cuda()
+
+                if q_old.shape == k_old.shape and q_old.shape[1] != self.num_frames:
+                    batch_dim = query_list.shape[0] // self.bs
+                    all_dim = query.shape[0] // self.bs
+                    for i in range(self.bs):
+                        # Ensure slice dimensions match
+                        target_size = min(query[i * all_dim:(i * all_dim) + batch_dim, :query_list.shape[1], :query_list.shape[2]].size(0),
+                                          query_list[i * batch_dim:(i + 1) * batch_dim].size(0))
+
+                        # Assign values from query_list to query
+                        query[i * all_dim:(i * all_dim) + target_size, :query_list.shape[1], :query_list.shape[2]] = \
+                            query_list[i * batch_dim:i * batch_dim + target_size]
+
+        if self.save_last_attn_slice:
+            self.last_attn_slice = [query, key]
+            self.save_last_attn_slice = False
+
+        return query, key, dynamic_lambda, key1
+
 
 def init_attention_func(unet):
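The new get_qk reads several attributes that are presumably initialized elsewhere in the file (e.g. by init_attention_func): use_last_attn_slice, save_last_attn_slice, last_attn_slice, num_frames, LAMBDA, and bs. Below is a minimal, self-contained sketch, not part of the commit, of how the frame-shaped branch behaves: one pass caches query/key, a later pass swaps the cached first-frame key into key1 and builds the per-frame ramp dynamic_lambda[i] = 1 + LAMBDA * i / 50. The QKCacheDemo class, the tensor shapes, and the (batch*heads, frames, channels) layout are illustrative assumptions, and dynamic_lambda is created on the input's device rather than with the commit's .cuda() so the sketch runs on CPU.

import torch

class QKCacheDemo:
    # Illustrative stand-in for AttnProcessor2_0's caching state (assumption).
    def __init__(self, num_frames=8, LAMBDA=0.5):
        self.num_frames = num_frames
        self.LAMBDA = LAMBDA
        self.use_last_attn_slice = False
        self.save_last_attn_slice = False
        self.last_attn_slice = None

    def get_qk(self, query, key):
        # Mirrors the committed logic for the query.shape[1] == num_frames case only.
        dynamic_lambda = None
        key1 = None
        if self.use_last_attn_slice and self.last_attn_slice is not None:
            key_list = self.last_attn_slice[1]
            if query.shape[1] == self.num_frames and query.shape == key.shape:
                # Replace the first frame's key with the cached one ...
                key1 = key.clone()
                key1[:, :1, :key_list.shape[2]] = key_list[:, :1]
                # ... and build the per-frame weighting ramp
                # (the commit moves this tensor to CUDA with .cuda()).
                dynamic_lambda = torch.tensor(
                    [1 + self.LAMBDA * (i / 50) for i in range(self.num_frames)]
                ).to(key.dtype).to(key.device)
        if self.save_last_attn_slice:
            self.last_attn_slice = [query, key]
            self.save_last_attn_slice = False
        return query, key, dynamic_lambda, key1

demo = QKCacheDemo(num_frames=8, LAMBDA=0.5)
q = torch.randn(2, 8, 64)  # assumed (batch*heads, frames, channels) layout
k = torch.randn(2, 8, 64)

demo.save_last_attn_slice = True
demo.get_qk(q, k)  # first pass: cache query/key

demo.use_last_attn_slice = True
_, _, lam, k1 = demo.get_qk(torch.randn(2, 8, 64), torch.randn(2, 8, 64))
print(lam)       # tensor([1.0000, 1.0100, 1.0200, ..., 1.0700])
print(k1.shape)  # torch.Size([2, 8, 64])

The returned key1 and dynamic_lambda are presumably consumed downstream in the attention score computation, so the cached first-frame key is weighted progressively more strongly for later frames. The other branch (q_old.shape[1] != num_frames) instead re-injects the cached queries slice-by-slice across the bs batches; the sketch omits it.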