czczup committed on
Commit
9983100
1 Parent(s): 8eaeb45

Delete flash_attention.py

Files changed (1)
  1. flash_attention.py +0 -76
flash_attention.py DELETED
@@ -1,76 +0,0 @@
- # https://github.com/Dao-AILab/flash-attention/blob/v0.2.8/flash_attn/flash_attention.py
- import torch
- import torch.nn as nn
- from einops import rearrange
-
- try:  # v1
-     from flash_attn.flash_attn_interface import \
-         flash_attn_unpadded_qkvpacked_func
- except:  # v2
-     from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func
-
- from flash_attn.bert_padding import pad_input, unpad_input
-
-
- class FlashAttention(nn.Module):
-     """Implement the scaled dot product attention with softmax.
-     Arguments
-     ---------
-         softmax_scale: The temperature to use for the softmax attention.
-                        (default: 1/sqrt(d_keys) where d_keys is computed at
-                        runtime)
-         attention_dropout: The dropout rate to apply to the attention
-                            (default: 0.0)
-     """
-
-     def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
-         super().__init__()
-         self.softmax_scale = softmax_scale
-         self.dropout_p = attention_dropout
-
-     def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
-                 max_s=None, need_weights=False):
-         """Implements the multihead softmax attention.
-         Arguments
-         ---------
-             qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None
-                 if unpadded: (nnz, 3, h, d)
-             key_padding_mask: a bool tensor of shape (B, S)
-         """
-         assert not need_weights
-         assert qkv.dtype in [torch.float16, torch.bfloat16]
-         assert qkv.is_cuda
-
-         if cu_seqlens is None:
-             batch_size = qkv.shape[0]
-             seqlen = qkv.shape[1]
-             if key_padding_mask is None:
-                 qkv = rearrange(qkv, 'b s ... -> (b s) ...')
-                 max_s = seqlen
-                 cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
-                                           device=qkv.device)
-                 output = flash_attn_unpadded_qkvpacked_func(
-                     qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
-                     softmax_scale=self.softmax_scale, causal=causal
-                 )
-                 output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
-             else:
-                 nheads = qkv.shape[-2]
-                 x = rearrange(qkv, 'b s three h d -> b s (three h d)')
-                 x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
-                 x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
-                 output_unpad = flash_attn_unpadded_qkvpacked_func(
-                     x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
-                     softmax_scale=self.softmax_scale, causal=causal
-                 )
-                 output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
-                                              indices, batch_size, seqlen),
-                                    'b s (h d) -> b s h d', h=nheads)
-         else:
-             assert max_s is not None
-             output = flash_attn_unpadded_qkvpacked_func(
-                 qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
-                 softmax_scale=self.softmax_scale, causal=causal
-             )
-
-         return output, None
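For context, a minimal sketch of how the deleted wrapper was typically invoked (not part of this commit): it assumes the flash-attn package is installed, a CUDA device is available, and the packed QKV tensor follows the (B, S, 3, H, D) fp16/bf16 layout described in the docstring above.

```python
import torch

# Hypothetical usage sketch of the deleted FlashAttention module.
# Assumes flash-attn is installed and a CUDA GPU is present.
attn = FlashAttention(softmax_scale=None, attention_dropout=0.0).cuda().half()

B, S, H, D = 2, 128, 8, 64  # batch, sequence length, heads, head dim
qkv = torch.randn(B, S, 3, H, D, dtype=torch.float16, device='cuda')

# forward() returns (output, None); output has shape (B, S, H, D).
out, _ = attn(qkv, key_padding_mask=None, causal=False)
```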