Commit
•
d44a6a5
1
Parent(s):
e54ca32
Update modeling_florence2.py
Browse files — modeling_florence2.py +0 -4
modeling_florence2.py
CHANGED
@@ -721,10 +721,6 @@ class DaViT(nn.Module):
|
|
721 |
|
722 |
|
723 |
|
724 |
-
if is_flash_attn_2_available():
|
725 |
-
from flash_attn import flash_attn_func, flash_attn_varlen_func
|
726 |
-
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
|
727 |
-
|
728 |
# Copied from transformers.models.llama.modeling_llama._get_unpad_data
|
729 |
def _get_unpad_data(attention_mask):
|
730 |
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
|
|
|
721 |
|
722 |
|
723 |
|
|
|
|
|
|
|
|
|
724 |
# Copied from transformers.models.llama.modeling_llama._get_unpad_data
|
725 |
def _get_unpad_data(attention_mask):
|
726 |
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
|