"mmdet/models/layers/se_layer.py" did not exist on "2f6baaee5db2641711a85f745e9e0a57a4049a1f"
test_flash_attn.py 84.4 KB
Newer Older
Tri Dao's avatar
Tri Dao committed
1
2
import math

import pytest
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from flash_attn import (
    flash_attn_func,
    flash_attn_kvpacked_func,
    flash_attn_qkvpacked_func,
    flash_attn_varlen_func,
    flash_attn_varlen_kvpacked_func,
    flash_attn_varlen_qkvpacked_func,
    flash_attn_with_kvcache,
)
from flash_attn.bert_padding import pad_input, unpad_input
from flash_attn.flash_attn_interface import _get_block_size
from flash_attn.layers.rotary import apply_rotary_emb

MAX_HEADDIM_SM8x = 192
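# The backward pass for head dims above this needs the larger shared memory of
# sm80/sm90; the tests use it to skip gradient checks on other sm8x GPUs (see
# the `d <= MAX_HEADDIM_SM8x or (is_sm80 or is_sm90)` guards below).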


is_sm75 = torch.cuda.get_device_capability("cuda") == (7, 5)
is_sm8x = torch.cuda.get_device_capability("cuda")[0] == 8
is_sm80 = torch.cuda.get_device_capability("cuda") == (8, 0)
is_sm90 = torch.cuda.get_device_capability("cuda") == (9, 0)
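# These capability flags gate what gets exercised: sm75 (Turing) has no bf16
# tensor-core support, so the dtype parametrizations below fall back to fp16
# only, while sm80/sm90 additionally lift the head-dim limit for the backward.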


def attn_bias_from_alibi_slopes(
    slopes, seqlen_q, seqlen_k, query_padding_mask=None, key_padding_mask=None, causal=False
):
    batch, nheads = slopes.shape
    device = slopes.device
    slopes = rearrange(slopes, "b h -> b h 1 1")
    if causal:
        return torch.arange(-seqlen_k + 1, 1, device=device, dtype=torch.float32) * slopes
    else:
        row_idx = rearrange(torch.arange(seqlen_q, device=device, dtype=torch.long), "s -> s 1")
        col_idx = torch.arange(seqlen_k, device=device, dtype=torch.long)
        sk = (
            seqlen_k
            if key_padding_mask is None
            else rearrange(key_padding_mask.sum(-1), "b -> b 1 1 1")
        )
        sq = (
            seqlen_q
            if query_padding_mask is None
            else rearrange(query_padding_mask.sum(-1), "b -> b 1 1 1")
        )
        relative_pos = torch.abs(row_idx + sk - sq - col_idx)
        return -slopes * relative_pos.to(dtype=slopes.dtype)
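
# Usage sketch for attn_bias_from_alibi_slopes (illustrative, not used by the
# tests directly): with slopes of shape (batch, nheads), the causal branch
# broadcasts to (batch, nheads, 1, seqlen_k) since the bias depends only on the
# key position, while the non-causal branch materializes the full
# (batch, nheads, seqlen_q, seqlen_k) bias, e.g.
#   slopes = torch.rand(2, 4, device="cuda") * 0.3
#   bias = attn_bias_from_alibi_slopes(slopes, 128, 128)  # (2, 4, 128, 128)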


def generate_random_padding_mask(max_seqlen, batch_size, device, mode="random"):
    assert mode in ["full", "random", "third"]
    if mode == "full":
        lengths = torch.full((batch_size, 1), max_seqlen, device=device, dtype=torch.int32)
    elif mode == "random":
        lengths = torch.randint(
            max(1, max_seqlen - 20), max_seqlen + 1, (batch_size, 1), device=device
        )
    elif mode == "third":
        lengths = torch.randint(max_seqlen // 3, max_seqlen + 1, (batch_size, 1), device=device)
    padding_mask = (
        repeat(torch.arange(max_seqlen, device=device), "s -> b s", b=batch_size) < lengths
    )
    return padding_mask
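
# Example of the resulting layout (illustrative): for max_seqlen=8 and sampled
# lengths [5, 8], the mask is
#   [[True, True, True, True, True, False, False, False],
#    [True, True, True, True, True, True, True, True]]
# i.e. each row keeps its first `length` positions.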


def generate_qkv(
    q, k, v, query_padding_mask=None, key_padding_mask=None, kvpacked=False, qkvpacked=False
):
    """
    Arguments:
        q: (batch_size, seqlen_q, nheads, d)
        k: (batch_size, seqlen_k, nheads_k, d)
        v: (batch_size, seqlen_k, nheads_k, d)
        query_padding_mask: (batch_size, seqlen_q), bool
        key_padding_mask: (batch_size, seqlen_k), bool
    """
    assert not (kvpacked and qkvpacked)
    batch_size, seqlen_q, nheads, d = q.shape
    _, seqlen_k, nheads_k, _ = k.shape
    assert k.shape == (batch_size, seqlen_k, nheads_k, d)
    assert v.shape == (batch_size, seqlen_k, nheads_k, d)

    if query_padding_mask is not None:
        q_unpad, indices_q, cu_seqlens_q, max_seqlen_q = unpad_input(q, query_padding_mask)
        output_pad_fn = lambda output_unpad: pad_input(
            output_unpad, indices_q, batch_size, seqlen_q
        )
    else:
        q_unpad = rearrange(q, "b s h d -> (b s) h d")
        cu_seqlens_q = torch.arange(
            0, (batch_size + 1) * seqlen_q, step=seqlen_q, dtype=torch.int32, device=q_unpad.device
        )
        max_seqlen_q = seqlen_q
        output_pad_fn = lambda output_unpad: rearrange(
            output_unpad, "(b s) h d -> b s h d", b=batch_size
        )

    if key_padding_mask is not None:
        k_unpad, indices_k, cu_seqlens_k, max_seqlen_k = unpad_input(k, key_padding_mask)
        v_unpad, _, _, _ = unpad_input(v, key_padding_mask)
    else:
        k_unpad = rearrange(k, "b s h d -> (b s) h d")
        v_unpad = rearrange(v, "b s h d -> (b s) h d")
        cu_seqlens_k = torch.arange(
            0, (batch_size + 1) * seqlen_k, step=seqlen_k, dtype=torch.int32, device=k_unpad.device
        )
        max_seqlen_k = seqlen_k

    if qkvpacked:
        assert (query_padding_mask == key_padding_mask).all()
        assert nheads == nheads_k
        qkv_unpad = torch.stack([q_unpad, k_unpad, v_unpad], dim=1)
        qkv = torch.stack([q, k, v], dim=2)
        if query_padding_mask is not None:
            dqkv_pad_fn = lambda dqkv_unpad: pad_input(dqkv_unpad, indices_q, batch_size, seqlen_q)
        else:
            dqkv_pad_fn = lambda dqkv_unpad: rearrange(
                dqkv_unpad, "(b s) t h d -> b s t h d", b=batch_size
            )
        return (
            qkv_unpad.detach().requires_grad_(),
            cu_seqlens_q,
            max_seqlen_q,
            qkv.detach().requires_grad_(),
            output_pad_fn,
            dqkv_pad_fn,
        )
    elif kvpacked:
        kv_unpad = torch.stack([k_unpad, v_unpad], dim=1)
        kv = torch.stack([k, v], dim=2)
        dq_pad_fn = output_pad_fn
        if key_padding_mask is not None:
            dkv_pad_fn = lambda dkv_unpad: pad_input(dkv_unpad, indices_k, batch_size, seqlen_k)
        else:
            dkv_pad_fn = lambda dkv_unpad: rearrange(
                dkv_unpad, "(b s) t h d -> b s t h d", b=batch_size
            )
        return (
            q_unpad.detach().requires_grad_(),
            kv_unpad.detach().requires_grad_(),
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_q,
            max_seqlen_k,
            q.detach().requires_grad_(),
            kv.detach().requires_grad_(),
            output_pad_fn,
            dq_pad_fn,
            dkv_pad_fn,
        )
    else:
        dq_pad_fn = output_pad_fn
        if key_padding_mask is not None:
            dk_pad_fn = lambda dk_unpad: pad_input(dk_unpad, indices_k, batch_size, seqlen_k)
        else:
            dk_pad_fn = lambda dk_unpad: rearrange(dk_unpad, "(b s) h d -> b s h d", b=batch_size)
        return (
            q_unpad.detach().requires_grad_(),
            k_unpad.detach().requires_grad_(),
            v_unpad.detach().requires_grad_(),
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_q,
            max_seqlen_k,
            q.detach().requires_grad_(),
            k.detach().requires_grad_(),
            v.detach().requires_grad_(),
            output_pad_fn,
            dq_pad_fn,
            dk_pad_fn,
        )
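
# Calling-convention sketch for generate_qkv (illustrative): the varlen kernels
# consume flattened tokens plus cumulative boundaries, so per-sequence lengths
# [3, 5] yield q_unpad of shape (8, nheads, d) with
# cu_seqlens_q = tensor([0, 3, 8], dtype=torch.int32), and output_pad_fn
# scatters the unpadded output back to (batch_size, seqlen_q, nheads, d).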


def construct_local_mask(
    seqlen_q,
    seqlen_k,
    window_size=(-1, -1),  # -1 means infinite window size
    query_padding_mask=None,
    key_padding_mask=None,
    device=None,
):
    row_idx = rearrange(torch.arange(seqlen_q, device=device, dtype=torch.long), "s -> s 1")
    col_idx = torch.arange(seqlen_k, device=device, dtype=torch.long)
    sk = (
        seqlen_k
        if key_padding_mask is None
        else rearrange(key_padding_mask.sum(-1), "b -> b 1 1 1")
    )
    sq = (
        seqlen_q
        if query_padding_mask is None
        else rearrange(query_padding_mask.sum(-1), "b -> b 1 1 1")
    )
    if window_size[0] < 0:
        return col_idx > row_idx + sk - sq + window_size[1]
    else:
        sk = torch.full_like(col_idx, seqlen_k) if key_padding_mask is None else sk
        return torch.logical_or(
            col_idx > torch.minimum(row_idx + sk - sq + window_size[1], sk),
            col_idx < row_idx + sk - sq - window_size[0],
        )
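
# Worked micro-example (no padding): with seqlen_q == seqlen_k == 4 and
# window_size == (1, 0), i.e. a causal sliding window of one previous token,
# row i of the mask blocks every column outside {i-1, i}.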


def attention_ref(
    q,
    k,
    v,
    query_padding_mask=None,
    key_padding_mask=None,
    attn_bias=None,
    dropout_p=0.0,
    dropout_mask=None,
    causal=False,
    window_size=(-1, -1),  # -1 means infinite window size
    upcast=True,
    reorder_ops=False,
):
    """
    Arguments:
        q: (batch_size, seqlen_q, nheads, head_dim)
        k: (batch_size, seqlen_k, nheads_k, head_dim)
        v: (batch_size, seqlen_k, nheads_k, head_dim)
        query_padding_mask: (batch_size, seqlen_q)
        key_padding_mask: (batch_size, seqlen_k)
        attn_bias: broadcastable to (batch_size, nheads, seqlen_q, seqlen_k)
        dropout_p: float
        dropout_mask: (batch_size, nheads, seqlen_q, seqlen_k)
        causal: whether to apply causal masking
        window_size: (int, int), left and right window size
        upcast: whether to cast all inputs to fp32, do all computation in fp32, then cast
            output back to fp16/bf16.
        reorder_ops: whether to change the order of operations (scaling k instead of scaling q, etc.)
            without changing the math. This is to estimate the numerical error from operation
            reordering.
    Output:
        output: (batch_size, seqlen_q, nheads, head_dim)
        attention: (batch_size, nheads, seqlen_q, seqlen_k), softmax after dropout
    """
    if causal:
        window_size = (window_size[0], 0)
    dtype_og = q.dtype
    if upcast:
        q, k, v = q.float(), k.float(), v.float()
    seqlen_q, seqlen_k = q.shape[1], k.shape[1]
    k = repeat(k, "b s h d -> b s (h g) d", g=q.shape[2] // k.shape[2])
    v = repeat(v, "b s h d -> b s (h g) d", g=q.shape[2] // v.shape[2])
    d = q.shape[-1]
    if not reorder_ops:
        scores = torch.einsum("bthd,bshd->bhts", q / math.sqrt(d), k)
    else:
        scores = torch.einsum("bthd,bshd->bhts", q, k / math.sqrt(d))
    if key_padding_mask is not None:
        scores.masked_fill_(rearrange(~key_padding_mask, "b s -> b 1 1 s"), float("-inf"))
    if window_size[0] >= 0 or window_size[1] >= 0:
        local_mask = construct_local_mask(
            seqlen_q,
            seqlen_k,
            window_size,
            query_padding_mask,
            key_padding_mask,
            q.device,
        )
        scores.masked_fill_(local_mask, float("-inf"))
    if attn_bias is not None:
        scores = scores + attn_bias
    attention = torch.softmax(scores, dim=-1).to(v.dtype)
    # Some rows might be completely masked out so we fill them with zero instead of NaN
    if window_size[0] >= 0 or window_size[1] >= 0:
        attention = attention.masked_fill(torch.all(local_mask, dim=-1, keepdim=True), 0.0)
    # We want to mask here so that the attention matrix doesn't have any NaNs
    # Otherwise we'll get NaN in dV
    if query_padding_mask is not None:
        attention = attention.masked_fill(rearrange(~query_padding_mask, "b s -> b 1 s 1"), 0.0)
    dropout_scaling = 1.0 / (1 - dropout_p)
    # attention_drop = attention.masked_fill(~dropout_mask, 0.0) * dropout_scaling
    # output = torch.einsum('bhts,bshd->bthd', attention_drop , v)
    if dropout_mask is not None:
        attention_drop = attention.masked_fill(~dropout_mask, 0.0)
    else:
        attention_drop = attention
    output = torch.einsum("bhts,bshd->bthd", attention_drop, v * dropout_scaling)
    if query_padding_mask is not None:
        output.masked_fill_(rearrange(~query_padding_mask, "b s -> b s 1 1"), 0.0)
    return output.to(dtype=dtype_og), attention.to(dtype=dtype_og)
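
# Note on the MQA/GQA handling above: k/v are expanded with
# repeat("b s h d -> b s (h g) d"), so for nheads=8 query heads and nheads_k=2
# kv heads (g=4), query heads 0-3 read kv head 0 and heads 4-7 read kv head 1,
# which is the grouping the tests assume for the fused kernels.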


def attention_kvpacked_ref(
    q,
    kv,
    query_padding_mask=None,
    key_padding_mask=None,
    attn_bias=None,
    dropout_p=0.0,
    dropout_mask=None,
    causal=False,
    window_size=(-1, -1),  # -1 means infinite window size
    upcast=True,
    reorder_ops=False,
):
    return attention_ref(
        q,
        kv[:, :, 0],
        kv[:, :, 1],
        query_padding_mask,
        key_padding_mask,
        attn_bias,
        dropout_p,
        dropout_mask,
        upcast=upcast,
        causal=causal,
        window_size=window_size,
        reorder_ops=reorder_ops,
    )


def attention_qkvpacked_ref(
    qkv,
    key_padding_mask=None,
    attn_bias=None,
    dropout_p=0.0,
    dropout_mask=None,
    causal=False,
    window_size=(-1, -1),  # -1 means infinite window size
    upcast=True,
    reorder_ops=False,
):
    return attention_ref(
        qkv[:, :, 0],
        qkv[:, :, 1],
        qkv[:, :, 2],
        key_padding_mask,
        key_padding_mask,
        attn_bias,
        dropout_p,
        dropout_mask,
        upcast=upcast,
        causal=causal,
        window_size=window_size,
        reorder_ops=reorder_ops,
    )


def generate_sparsity_mask(seqlen, sparsity=0.3):
    repeats = seqlen // 16 // 2
    # mask = torch.stack([torch.tensor([1, 0] * repeats, dtype=torch.bool, device='cuda'),
    #                     torch.tensor([0, 1] * repeats, dtype=torch.bool, device='cuda')], dim=-1)
    # mask = torch.stack([torch.tensor([1, 1] * repeats, dtype=torch.bool, device='cuda'),
    #                     torch.tensor([1, 1] * repeats, dtype=torch.bool, device='cuda')], dim=-1)
    # mask = torch.stack([torch.tensor([1, 1] * repeats, dtype=torch.bool, device='cuda')], dim=-1)
    # mask = torch.stack([torch.tensor([1, 0] * repeats, dtype=torch.bool, device='cuda')], dim=-1)
    nrow, ncol = seqlen // 16, seqlen // 256
    mask = torch.rand(nrow, ncol, device="cuda") < sparsity
    return mask
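
# The mask above is at block granularity: entry (i, j) gates the 16x256 tile of
# rows 16*i:16*(i+1) and columns 256*j:256*(j+1); attention_blocksparse_ref
# expands it to per-element resolution with repeat(...) below.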


def attention_blocksparse_ref(qkv, blockmask, attn_mask, dropout_p, dropout_mask):
    """
    Arguments:
        qkv: (batch_size, seqlen, 3, nheads, head_dim)
        blockmask: (seqlen / 16, seqlen / 256)
        attn_mask: (batch_size, seqlen)
        dropout_p: float
        dropout_mask: (batch_size, nheads, seqlen, seqlen)
    Output:
        output: (batch_size, seqlen, nheads, head_dim)
        attention: softmax after dropout
    """
    q, k, v = qkv.float().unbind(dim=2)
    d = qkv.shape[-1]
    seqlen = qkv.shape[1]
    scores = torch.einsum("bthd,bshd->bhts", q / math.sqrt(d), k)
    scores.masked_fill_(rearrange(~attn_mask, "b s -> b 1 1 s"), float("-inf"))
    blockmask = repeat(blockmask, "s_16 s_256 -> (s_16 16) (s_256 256)")
    blockmask = blockmask[:seqlen, :seqlen]
    scores.masked_fill_(rearrange(~blockmask, "t s -> 1 1 t s"), float("-inf"))
    attention = torch.softmax(scores, dim=-1)
    attention = attention.masked_fill(rearrange(~attn_mask, "b s -> b 1 s 1"), 0.0)
    attention = attention.masked_fill_(rearrange(~blockmask, "t s -> 1 1 t s"), 0.0)
    attention_drop = attention.masked_fill(~dropout_mask, 0.0) / (1 - dropout_p)
    output = torch.einsum("bhts,bshd->bthd", attention_drop, v)
    output.masked_fill_(rearrange(~attn_mask, "b s -> b s 1 1"), 0)
    return output.to(dtype=qkv.dtype), attention.to(dtype=qkv.dtype)


def convert_flash_attn_S_to_softmax(
    S,
    seqlen_q,
    seqlen_k,
    query_padding_mask,
    key_padding_mask,
    head_dim,
    is_dropout,
    causal=False,
    window_size=(-1, -1),  # -1 means infinite window size
):
    """FlashAttention stores the S matrix in a different way.
    Arguments:
        S: (batch_size, nheads, seqlen_q_rounded, seqlen_k_rounded)
        query_padding_mask: (batch_size, seqlen_q)
        key_padding_mask: (batch_size, seqlen_k)
    """
    if causal:
        window_size = (window_size[0], 0)
    seqlen_q_rounded, seqlen_k_rounded = S.shape[-2:]
    warps_n = 4
    blocksize_m, blocksize_n = _get_block_size(S.device, head_dim, is_dropout, causal)
    nblocks_n = (seqlen_k_rounded + blocksize_n - 1) // blocksize_n
    nblocks_m = (seqlen_q_rounded + blocksize_m - 1) // blocksize_m
    mmas_n = (blocksize_n + 16 - 1) // 16
    S_flat = rearrange(
        S,
        "b h (nblocks_m blocksize_m) (nblocks_n blocksize_n) -> b h nblocks_m nblocks_n (blocksize_m blocksize_n)",
        blocksize_m=blocksize_m,
        blocksize_n=blocksize_n,
    )
    S_converted = rearrange(
        S_flat,
        "b h nblocks_m nblocks_n (mmas_n mmas_m warps_n eight four c2 c1 c0) -> b h (nblocks_m mmas_m warps_n c1 eight) (nblocks_n mmas_n c2 four c0)",
        mmas_n=mmas_n,
        warps_n=warps_n,
        eight=8,
        c0=2,
        c1=2,
        c2=2,
        four=4,
    )

    if window_size[0] >= 0 or window_size[1] >= 0:
        local_mask = construct_local_mask(
            seqlen_q,
            seqlen_k,
            window_size,
            query_padding_mask,
            key_padding_mask,
            S.device,
        )
        local_mask = F.pad(
            local_mask,
            (0, seqlen_k_rounded - seqlen_k, 0, seqlen_q_rounded - seqlen_q),
            value=True,
        )
        S_converted.masked_fill_(local_mask, 0.0)

    # Need to zero out things not in attention_mask in case S was initialized with random values
    # and some of those values aren't overwritten.
    seqlen_q_og = (
        query_padding_mask.shape[-1] if query_padding_mask is not None else seqlen_q_rounded
    )
    if query_padding_mask is not None:
        query_padding_mask = F.pad(query_padding_mask, (0, seqlen_q_rounded - seqlen_q_og))
        S_converted = S_converted.masked_fill(rearrange(~query_padding_mask, "b s -> b 1 s 1"), 0.0)
    seqlen_k_og = key_padding_mask.shape[-1] if key_padding_mask is not None else seqlen_k
    if key_padding_mask is not None:
        key_padding_mask = F.pad(key_padding_mask, (0, seqlen_k_rounded - seqlen_k_og))
        S_converted = S_converted.masked_fill(rearrange(~key_padding_mask, "b s -> b 1 1 s"), 0.0)
    S_converted = F.pad(S_converted, (0, 0, 0, seqlen_q_og - seqlen_q_rounded))
    S_converted = F.pad(S_converted, (0, seqlen_k_og - seqlen_k_rounded))
    return S_converted[:, :, :seqlen_q, :seqlen_k]
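
# Note on how the tests consume this: the kernel encodes dropout in the sign of
# the stored values, so after conversion `S_dmask_converted >= 0` recovers the
# dropout mask and `.abs()` the unnormalized attention (see the dropout branches
# of the tests below).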


def normalize_flash_attn_S(
    attn_unnorm,
    q,
    k,
    v,
    query_padding_mask=None,
    key_padding_mask=None,
    attn_bias=None,
    is_dropout=False,
    causal=False,
    window_size=(-1, -1),  # -1 means infinite window size
):
    """
    Arguments:
        q: (batch_size, seqlen_q, nheads, head_dim)
        k, v: (batch_size, seqlen_k, nheads, head_dim)
        query_padding_mask: (batch_size, seqlen_q)
        key_padding_mask: (batch_size, seqlen_k)
        attn_bias: broadcastable to (batch_size, nheads, seqlen_q, seqlen_k)
    Output:
        attn_norm: (batch_size, nheads, seqlen_q, seqlen_k), the attention
            probabilities (softmax) reconstructed from the unnormalized values.
    """
    if causal:
        window_size = (window_size[0], 0)
    q, k, v = q.float(), k.float(), v.float()
    _, seqlen_q, _, head_dim = q.shape
    seqlen_k = k.shape[1]
    scores = torch.einsum("bthd,bshd->bhts", q / math.sqrt(head_dim), k)
    if key_padding_mask is not None:
        scores.masked_fill_(rearrange(~key_padding_mask, "b s -> b 1 1 s"), float("-inf"))
    if window_size[0] >= 0 or window_size[1] >= 0:
        local_mask = construct_local_mask(
            seqlen_q,
            seqlen_k,
            window_size,
            query_padding_mask,
            key_padding_mask,
            q.device,
        )
        scores.masked_fill_(local_mask, float("-inf"))
    if attn_bias is not None:
        scores = scores + attn_bias.to(dtype=scores.dtype)
    _, block_size_n = _get_block_size(scores.device, head_dim, is_dropout, causal)
    scores_block = scores.split(block_size_n, dim=-1)
    lse_block = torch.stack([torch.logsumexp(s, dim=-1) for s in scores_block], dim=-1)
    lse = torch.logsumexp(lse_block, dim=-1)
    # lse could be -inf (i.e. all values in scores are -inf), and we want to set those to inf
    # so that when we do torch.exp(m - lse), we get 0.0 instead of NaN.
    lse[lse == float("-inf")] = float("inf")
    scores_max_block = torch.stack([torch.amax(s, dim=-1) for s in scores_block], dim=-1)
    cummax_block = torch.cummax(scores_max_block.flip(-1), dim=-1).values.flip(-1).unbind(dim=-1)
    attn_unnorm_block = attn_unnorm.split(block_size_n, dim=-1)
    attn_norm = torch.cat(
        [
            a * rearrange(torch.exp(m - lse), "b h s -> b h s 1")
            for a, m in zip(attn_unnorm_block, cummax_block)
        ],
        dim=-1,
    )
    if query_padding_mask is not None:
        attn_norm.masked_fill_(rearrange(~query_padding_mask, "b s -> b 1 s 1"), 0.0)
    return attn_norm.to(dtype=attn_unnorm.dtype)
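
# The renormalization above is the log-sum-exp split: with per-block
# lse_j = logsumexp(s_j), the global lse = log(sum_j exp(lse_j)), and a value
# stored unnormalized as exp(s - m_j) becomes the true softmax exp(s - lse)
# after multiplying by exp(m_j - lse).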


def get_dropout_fraction(
    dropout_mask,
    query_padding_mask=None,
    key_padding_mask=None,
    causal=False,
    window_size=(-1, -1),  # -1 means infinite window size
):
    """
    dropout_mask: (batch_size, nheads, seqlen_q, seqlen_k), bool. True means keep, False means drop.
    query_padding_mask: (batch_size, seqlen_q)
    key_padding_mask: (batch_size, seqlen_k)
    """
    if causal:
        window_size = (window_size[0], 0)
    batch_size, nheads, seqlen_q, seqlen_k = dropout_mask.shape
    dropped = ~dropout_mask
    valid = torch.ones_like(dropout_mask)
    if query_padding_mask is not None:
        dropped.masked_fill_(rearrange(~query_padding_mask, "b s -> b 1 s 1"), False)
        valid.masked_fill_(rearrange(~query_padding_mask, "b s -> b 1 s 1"), False)
    if key_padding_mask is not None:
        dropped.masked_fill_(rearrange(~key_padding_mask, "b s -> b 1 1 s"), False)
        valid.masked_fill_(rearrange(~key_padding_mask, "b s -> b 1 1 s"), False)
    if window_size[0] >= 0 or window_size[1] >= 0:
        local_mask = construct_local_mask(
            seqlen_q,
            seqlen_k,
            window_size,
            query_padding_mask,
            key_padding_mask,
            dropout_mask.device,
        )
        dropped.masked_fill_(local_mask, False)
        valid.masked_fill_(local_mask, False)
    dropped_total = dropped.sum()
    return dropped_total / valid.sum()
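
# The fraction above is an empirical estimate over the valid (unmasked,
# in-window) entries; the tests compare it against dropout_p with a tolerance
# of 0.01, relaxed to 0.025 for local attention, presumably because fewer
# entries are counted.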


@pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize("dtype", [torch.float16])
@pytest.mark.parametrize("alibi", [False, True])
# @pytest.mark.parametrize("alibi", [True])
@pytest.mark.parametrize("local", [False, True])
# @pytest.mark.parametrize("local", [True])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize("causal", [False])
@pytest.mark.parametrize("d", [32, 40, 59, 64, 80, 96, 111, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 64, 96, 128])
# @pytest.mark.parametrize("d", [64])
# @pytest.mark.parametrize('seqlen', [128, 256, 384, 512, 768, 1024, 2048])
@pytest.mark.parametrize("seqlen", [97, 128, 200, 384, 768, 1024, 1025, 2048])
# @pytest.mark.parametrize("seqlen", [97])
@pytest.mark.parametrize("dropout_p", [0.0, 0.17])
# @pytest.mark.parametrize("dropout_p", [0.0])
def test_flash_attn_qkvpacked(seqlen, d, dropout_p, causal, local, alibi, dtype):
    if seqlen >= 2048 and torch.cuda.get_device_properties("cuda").total_memory <= 16 * 2**30:
        pytest.skip()  # Reference implementation OOM
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    batch_size = 8
    nheads = 9
    window_size = (-1, -1) if not local else torch.randint(0, seqlen, (2,))
    qkv = torch.randn(
        batch_size, seqlen, 3, nheads, d, device=device, dtype=dtype, requires_grad=True
    )
    if alibi:
        alibi_slopes = torch.rand(batch_size, nheads, device=device, dtype=torch.float32) * 0.3
        attn_bias = attn_bias_from_alibi_slopes(alibi_slopes, seqlen, seqlen, causal=causal)
    else:
        alibi_slopes, attn_bias = None, None
    out, lse, S_dmask = flash_attn_qkvpacked_func(
        qkv,
        dropout_p,
        causal=causal,
        window_size=window_size,
        alibi_slopes=alibi_slopes,
        return_attn_probs=True,
    )
    if dropout_p > 0.0:
        S_dmask_converted = convert_flash_attn_S_to_softmax(
            S_dmask,
            seqlen,
            seqlen,
            None,
            None,
            d,
            dropout_p > 0.0,
            causal=causal,
            window_size=window_size,
        )
        dropout_mask = S_dmask_converted >= 0
        attn_unnorm = S_dmask_converted.abs()
        attn = normalize_flash_attn_S(
            attn_unnorm,
            qkv[:, :, 0],
            qkv[:, :, 1],
            qkv[:, :, 2],
            None,
            None,
            attn_bias,
            dropout_p > 0.0,
            causal=causal,
            window_size=window_size,
        )
        dropout_fraction = get_dropout_fraction(
            dropout_mask, None, None, causal=causal, window_size=window_size
        ).item()
        print(f"Actual dropout fraction: {dropout_fraction}")
    else:
        dropout_mask = None

    out_ref, attn_ref = attention_qkvpacked_ref(
        qkv, None, attn_bias, dropout_p, dropout_mask, causal=causal, window_size=window_size
    )
    out_pt, attn_pt = attention_qkvpacked_ref(
        qkv,
        None,
        attn_bias,
        dropout_p,
        dropout_mask,
        causal=causal,
        window_size=window_size,
        upcast=False,
        reorder_ops=True,
    )
    # v = qkv[:, :, 2].float()
    # qk = torch.einsum('bshd,bthd->bhst', qkv[:, :, 0], qkv[:, :, 1]).float()
    # if causal:
    #     causal_mask = torch.triu(torch.ones(seqlen, seqlen, dtype=torch.bool, device=qkv.device), 1)
    #     qk.masked_fill_(causal_mask, float('-inf'))
    # m = qk.amax(-1, keepdim=True)
    # s_tmp = torch.exp((qk - m) / math.sqrt(d))
    # p_tmp = torch.softmax(qk / math.sqrt(d), -1)
    # p_dropped = p_tmp if dropout_mask is None else p_tmp.masked_fill(~dropout_mask, 0)
    # lse_ref = torch.logsumexp(qk / math.sqrt(d), -1)
    # qk_max1 = torch.max(qk[:, :, 128:, 192:], -1, keepdim=True).values
    # qk_max2 = torch.max(qk[:, :, 128:, 128:], -1, keepdim=True).values
    # qk_max3 = torch.max(qk[:, :, 128:, 64:], -1, keepdim=True).values
    # qk_max4 = torch.max(qk[:, :, 128:, :], -1, keepdim=True).values
    # o1 = torch.einsum('bhst,bthd->bshd', torch.exp((qk[:, :, 128:, 192:] - qk_max1) / math.sqrt(d)), v[:, 192:])
    # o2 = torch.einsum('bhst,bthd->bshd', torch.exp((qk[:, :, 128:, 128:] - qk_max2) / math.sqrt(d)), v[:, 128:])
    # o3 = torch.einsum('bhst,bthd->bshd', torch.exp((qk[:, :, 128:, 64:] - qk_max3) / math.sqrt(d)), v[:, 64:])
    # o4 = torch.einsum('bhst,bthd->bshd', torch.exp((qk[:, :, 128:, :] - qk_max4) / math.sqrt(d)), v[:, :])
    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
    print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
    if dropout_p > 0.0:
        print(f"Attention max diff: {(attn - attn_ref).abs().max().item()}")
        print(f"Attention Pytorch max diff: {(attn_pt - attn_ref).abs().max().item()}")

    g = torch.randn_like(out)
    # do_o = (g.float() * out.float()).sum(-1)
    # dv_tmp = torch.einsum('bhts,bthd->bshd', attn_pt[:, :, :64], g[:, :64])
    # dv_tmp1 = torch.einsum('bhts,bthd->bshd', attn_pt[:, :, 64:], g[:, 64:])
    if d <= MAX_HEADDIM_SM8x or (is_sm80 or is_sm90):
        (dqkv,) = torch.autograd.grad(out, qkv, g)
        (dqkv_ref,) = torch.autograd.grad(out_ref, qkv, g)
        (dqkv_pt,) = torch.autograd.grad(out_pt, qkv, g)
        print(f"dQ max diff: {(dqkv[:, :, 0] - dqkv_ref[:, :, 0]).abs().max().item()}")
        print(f"dK max diff: {(dqkv[:, :, 1] - dqkv_ref[:, :, 1]).abs().max().item()}")
        print(f"dV max diff: {(dqkv[:, :, 2] - dqkv_ref[:, :, 2]).abs().max().item()}")
        print(f"dQKV mean diff: {(dqkv - dqkv_ref).abs().mean().item()}")
        print(f"dQ Pytorch max diff: {(dqkv_pt[:, :, 0] - dqkv_ref[:, :, 0]).abs().max().item()}")
        print(f"dK Pytorch max diff: {(dqkv_pt[:, :, 1] - dqkv_ref[:, :, 1]).abs().max().item()}")
        print(f"dV Pytorch max diff: {(dqkv_pt[:, :, 2] - dqkv_ref[:, :, 2]).abs().max().item()}")
        print(f"dQKV Pytorch mean diff: {(dqkv_pt - dqkv_ref).abs().mean().item()}")

    # Check that FlashAttention's numerical error is at most twice the numerical error
    # of a Pytorch implementation.
    assert (out - out_ref).abs().max().item() <= 2 * (out_pt - out_ref).abs().max().item()

    if dropout_p > 0.0:
        assert (attn - attn_ref).abs().max().item() <= 2 * (attn_pt - attn_ref).abs().max().item()
        # With alibi, many of the prob values are 0.0 & -0.0 so dropout_fraction isn't accurate
        if not alibi:
            assert abs(dropout_fraction - dropout_p) <= (0.01 if not local else 0.025)

    if d <= MAX_HEADDIM_SM8x or (is_sm80 or is_sm90):
        assert (dqkv - dqkv_ref).abs().max().item() <= 2 * (dqkv_pt - dqkv_ref).abs().max().item()


@pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize('dtype', [torch.float16])
@pytest.mark.parametrize("alibi", [False, True])
# @pytest.mark.parametrize("alibi", [True])
@pytest.mark.parametrize("local", [False, True])
# @pytest.mark.parametrize("local", [True])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize('causal', [False])
@pytest.mark.parametrize("d", [32, 59, 64, 80, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [64])
@pytest.mark.parametrize("seqlen", [97, 128, 200, 257, 384, 512, 768, 1025, 2048])
# @pytest.mark.parametrize('seqlen', [128])
@pytest.mark.parametrize("dropout_p", [0.0, 0.17])
# @pytest.mark.parametrize('dropout_p', [0.0])
def test_flash_attn_varlen_qkvpacked(seqlen, d, dropout_p, causal, local, alibi, dtype):
    if seqlen >= 2048 and torch.cuda.get_device_properties("cuda").total_memory <= 16 * 2**30:
        pytest.skip()  # Reference implementation OOM
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    batch_size = 5
    nheads = 6
    window_size = (-1, -1) if not local else torch.randint(0, seqlen, (2,))
    qkv = torch.randn(
        batch_size, seqlen, 3, nheads, d, device=device, dtype=dtype, requires_grad=True
    )

    key_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode="random")
    # key_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='full')
    if alibi:
        alibi_slopes = torch.rand(batch_size, nheads, device=device, dtype=torch.float32) * 0.3
        attn_bias = attn_bias_from_alibi_slopes(
            alibi_slopes, seqlen, seqlen, key_padding_mask, key_padding_mask, causal=causal
        )
    else:
        alibi_slopes, attn_bias = None, None

    qkv_unpad, cu_seqlens, max_seqlen, qkv, output_pad_fn, dqkv_pad_fn = generate_qkv(
        *qkv.unbind(dim=2), key_padding_mask, key_padding_mask, qkvpacked=True
    )

    out_unpad, sm_lse, S_dmask = flash_attn_varlen_qkvpacked_func(
        qkv_unpad,
        cu_seqlens,
        max_seqlen,
        dropout_p,
        causal=causal,
        window_size=window_size,
        alibi_slopes=alibi_slopes,
        return_attn_probs=True,
    )
    out = output_pad_fn(out_unpad)
    if dropout_p > 0.0:
        S_dmask_converted = convert_flash_attn_S_to_softmax(
            S_dmask,
            seqlen,
            seqlen,
            key_padding_mask,
            key_padding_mask,
            d,
            dropout_p > 0.0,
            causal=causal,
            window_size=window_size,
        )
        dropout_mask = S_dmask_converted >= 0
        attn_unnorm = S_dmask_converted.abs()
        attn = normalize_flash_attn_S(
            attn_unnorm,
            qkv[:, :, 0],
            qkv[:, :, 1],
            qkv[:, :, 2],
            key_padding_mask,
            key_padding_mask,
            attn_bias,
            dropout_p > 0.0,
            causal=causal,
            window_size=window_size,
        )
        dropout_fraction = get_dropout_fraction(
            dropout_mask, key_padding_mask, key_padding_mask, causal=causal, window_size=window_size
        ).item()
        print(f"Actual dropout fraction: {dropout_fraction}")
    else:
        dropout_mask = None

    out_ref, attn_ref = attention_qkvpacked_ref(
        qkv,
        key_padding_mask,
        attn_bias,
        dropout_p,
        dropout_mask,
        causal=causal,
        window_size=window_size,
    )
    out_pt, attn_pt = attention_qkvpacked_ref(
        qkv,
        key_padding_mask,
        attn_bias,
        dropout_p,
        dropout_mask,
        causal=causal,
        window_size=window_size,
        upcast=False,
        reorder_ops=True,
    )
    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
    print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
    if dropout_p > 0.0:
        print(f"Attention max diff: {(attn - attn_ref).abs().max().item()}")
        print(f"Attention Pytorch max diff: {(attn_pt - attn_ref).abs().max().item()}")

    g = torch.randn_like(out)
    if d <= MAX_HEADDIM_SM8x or (is_sm80 or is_sm90):
        (dqkv_unpad,) = torch.autograd.grad(out, qkv_unpad, g)
        dqkv = dqkv_pad_fn(dqkv_unpad)
        (dqkv_ref,) = torch.autograd.grad(out_ref, qkv, g)
        (dqkv_pt,) = torch.autograd.grad(out_pt, qkv, g)
        print(f"dQ max diff: {(dqkv[:, :, 0] - dqkv_ref[:, :, 0]).abs().max().item()}")
        print(f"dK max diff: {(dqkv[:, :, 1] - dqkv_ref[:, :, 1]).abs().max().item()}")
        print(f"dV max diff: {(dqkv[:, :, 2] - dqkv_ref[:, :, 2]).abs().max().item()}")
        print(f"dQKV mean diff: {(dqkv - dqkv_ref).abs().mean().item()}")
        print(f"dQ Pytorch max diff: {(dqkv_pt[:, :, 0] - dqkv_ref[:, :, 0]).abs().max().item()}")
        print(f"dK Pytorch max diff: {(dqkv_pt[:, :, 1] - dqkv_ref[:, :, 1]).abs().max().item()}")
        print(f"dV Pytorch max diff: {(dqkv_pt[:, :, 2] - dqkv_ref[:, :, 2]).abs().max().item()}")
        print(f"dQKV Pytorch mean diff: {(dqkv_pt - dqkv_ref).abs().mean().item()}")

    # Check that FlashAttention's numerical error is at most twice the numerical error
    # of a Pytorch implementation.
    assert (out - out_ref).abs().max().item() <= 2 * (out_pt - out_ref).abs().max().item()

    if dropout_p > 0.0:
        assert (attn - attn_ref).abs().max().item() <= 2 * (attn_pt - attn_ref).abs().max().item()
        # With alibi, many of the prob values are 0.0 & -0.0 so dropout_fraction isn't accurate
        if not alibi:
            assert abs(dropout_fraction - dropout_p) <= (0.01 if not local else 0.025)

    if d <= MAX_HEADDIM_SM8x or (is_sm80 or is_sm90):
        assert (dqkv - dqkv_ref).abs().max().item() <= 2 * (dqkv_pt - dqkv_ref).abs().max().item()


@pytest.mark.parametrize("kvpacked", [True, False])
# @pytest.mark.parametrize("kvpacked", [False])
@pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("mha_type", ["mha", "mqa", "gqa"])
# @pytest.mark.parametrize("mha_type", ["mha"])
@pytest.mark.parametrize("alibi", [False, True])
# @pytest.mark.parametrize("alibi", [True])
@pytest.mark.parametrize("local", [False, True])
# @pytest.mark.parametrize("local", [True])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize("causal", [True])
@pytest.mark.parametrize("d", [32, 40, 59, 64, 96, 111, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [32, 64, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [56, 80])
# @pytest.mark.parametrize("d", [64])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_k",
    [
        (113, 203),
        (128, 217),
        (113, 211),
        (108, 256),
        (256, 512),
        (512, 256),
        (1024, 1024),
        (1023, 1024),
        (1024, 1023),
        (2048, 2048),
    ],
)
# @pytest.mark.parametrize('seqlen_q,seqlen_k', [(256, 128)])
@pytest.mark.parametrize("dropout_p", [0.0, 0.17])
# @pytest.mark.parametrize("dropout_p", [0.17])
def test_flash_attn_output(
    seqlen_q, seqlen_k, d, dropout_p, causal, local, alibi, mha_type, dtype, kvpacked
):
    if (
        max(seqlen_q, seqlen_k) >= 2048
        and torch.cuda.get_device_properties("cuda").total_memory <= 16 * 2**30
    ):
        pytest.skip()  # Reference implementation OOM
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    batch_size = 8
    nheads = 9
    nheads_k = nheads if mha_type == "mha" else (1 if mha_type == "mqa" else 3)
    assert nheads % nheads_k == 0
    window_size = (-1, -1) if not local else torch.randint(0, seqlen_k, (2,))
    q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype, requires_grad=True)
    if kvpacked:
        kv = torch.randn(
            batch_size, seqlen_k, 2, nheads_k, d, device=device, dtype=dtype, requires_grad=True
        )
    else:
        k = torch.randn(
            batch_size, seqlen_k, nheads_k, d, device=device, dtype=dtype, requires_grad=True
        )
        v = torch.randn(
            batch_size, seqlen_k, nheads_k, d, device=device, dtype=dtype, requires_grad=True
        )
    if alibi:
        alibi_slopes = torch.rand(batch_size, nheads, device=device, dtype=torch.float32) * 0.3
        attn_bias = attn_bias_from_alibi_slopes(alibi_slopes, seqlen_q, seqlen_k, causal=causal)
    else:
        alibi_slopes, attn_bias = None, None

    if kvpacked:
        out, lse, S_dmask = flash_attn_kvpacked_func(
            q,
            kv,
            dropout_p,
            causal=causal,
            window_size=window_size,
            alibi_slopes=alibi_slopes,
            return_attn_probs=True,
        )
    else:
        out, lse, S_dmask = flash_attn_func(
            q,
            k,
            v,
            dropout_p,
            causal=causal,
            window_size=window_size,
            alibi_slopes=alibi_slopes,
            return_attn_probs=True,
        )
    if dropout_p > 0.0:
        S_dmask_converted = convert_flash_attn_S_to_softmax(
            S_dmask,
            seqlen_q,
            seqlen_k,
            None,
            None,
            d,
            dropout_p > 0.0,
            causal=causal,
            window_size=window_size,
        )
        dropout_mask = S_dmask_converted >= 0
        attn_unnorm = S_dmask_converted.abs()
        if kvpacked:
            kv_rep = repeat(kv, "b s two h d -> b s two (h g) d", g=nheads // nheads_k)
            k_rep, v_rep = kv_rep.unbind(dim=2)
        else:
            k_rep = repeat(k, "b s h d -> b s (h g) d", g=nheads // nheads_k)
            v_rep = repeat(v, "b s h d -> b s (h g) d", g=nheads // nheads_k)
        attn = normalize_flash_attn_S(
            attn_unnorm,
            q,
            k_rep,
            v_rep,
            None,
            None,
            attn_bias,
            dropout_p > 0.0,
            causal=causal,
            window_size=window_size,
        )
        dropout_fraction = get_dropout_fraction(
            dropout_mask, None, None, causal=causal, window_size=window_size
        ).item()
        print(f"Actual dropout fraction: {dropout_fraction}")
    else:
        dropout_mask = None

    if kvpacked:
        out_ref, attn_ref = attention_kvpacked_ref(
            q,
            kv,
            None,
            None,
            attn_bias,
            dropout_p,
            dropout_mask,
            causal=causal,
            window_size=window_size,
        )
        out_pt, attn_pt = attention_kvpacked_ref(
            q,
            kv,
            None,
            None,
            attn_bias,
            dropout_p,
            dropout_mask,
            causal=causal,
            window_size=window_size,
            upcast=False,
            reorder_ops=True,
        )
    else:
        out_ref, attn_ref = attention_ref(
            q,
            k,
            v,
            None,
            None,
            attn_bias,
            dropout_p,
            dropout_mask,
            causal=causal,
            window_size=window_size,
        )
        out_pt, attn_pt = attention_ref(
            q,
            k,
            v,
            None,
            None,
            attn_bias,
            dropout_p,
            dropout_mask,
            causal=causal,
            window_size=window_size,
            upcast=False,
            reorder_ops=True,
        )

    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
    print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
    if dropout_p > 0.0:
        print(f"Attention max diff: {(attn - attn_ref).abs().max().item()}")
        print(f"Attention Pytorch max diff: {(attn_pt - attn_ref).abs().max().item()}")

    g = torch.randn_like(out)
    do_o = (g.float() * out.float()).sum(-1)
    if d <= MAX_HEADDIM_SM8x or (is_sm80 or is_sm90):
        if kvpacked:
            (
                dq,
                dkv,
            ) = torch.autograd.grad(out, (q, kv), g)
            dk, dv = dkv.unbind(2)
            (
                dq_ref,
                dkv_ref,
            ) = torch.autograd.grad(out_ref, (q, kv), g)
            dk_ref, dv_ref = dkv_ref.unbind(2)
            (
                dq_pt,
                dkv_pt,
            ) = torch.autograd.grad(out_pt, (q, kv), g)
Tri Dao's avatar
Tri Dao committed
1065
1066
            dk_pt, dv_pt = dkv_pt.unbind(2)
        else:
Tri Dao's avatar
Tri Dao committed
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
            (
                dq,
                dk,
                dv,
            ) = torch.autograd.grad(out, (q, k, v), g)
            (
                dq_ref,
                dk_ref,
                dv_ref,
            ) = torch.autograd.grad(out_ref, (q, k, v), g)
            (
                dq_pt,
                dk_pt,
                dv_pt,
            ) = torch.autograd.grad(out_pt, (q, k, v), g)
        print(f"dQ max diff: {(dq - dq_ref).abs().max().item()}")
        print(f"dK max diff: {(dk - dk_ref).abs().max().item()}")
        print(f"dV max diff: {(dv - dv_ref).abs().max().item()}")
        print(f"dQ mean diff: {(dq - dq_ref).abs().mean().item()}")
        print(f"dK mean diff: {(dk - dk_ref).abs().mean().item()}")
        print(f"dV mean diff: {(dv - dv_ref).abs().mean().item()}")
        print(f"dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}")
        print(f"dK Pytorch max diff: {(dk_pt - dk_ref).abs().max().item()}")
        print(f"dV Pytorch max diff: {(dv_pt - dv_ref).abs().max().item()}")
        print(f"dQ Pytorch mean diff: {(dq_pt - dq_ref).abs().mean().item()}")
        print(f"dK Pytorch mean diff: {(dk_pt - dk_ref).abs().mean().item()}")
        print(f"dV Pytorch mean diff: {(dv_pt - dv_ref).abs().mean().item()}")

    # Check that FlashAttention's numerical error is at most twice the numerical error
    # of a Pytorch implementation.
    assert (out - out_ref).abs().max().item() <= 2 * (out_pt - out_ref).abs().max().item()

    if dropout_p > 0.0:
        assert (attn - attn_ref).abs().max().item() <= 2 * (attn_pt - attn_ref).abs().max().item()
        # With alibi, many of the prob values are 0.0 & -0.0 so dropout_fraction isn't accurate
        if not alibi:
            assert abs(dropout_fraction - dropout_p) <= (0.01 if not local else 0.025)

    if d <= MAX_HEADDIM_SM8x or (is_sm80 or is_sm90):
        assert (dq - dq_ref).abs().max().item() <= 2 * (dq_pt - dq_ref).abs().max().item()
        assert (dk - dk_ref).abs().max().item() <= 2 * (dk_pt - dk_ref).abs().max().item()
        assert (dv - dv_ref).abs().max().item() <= 2 * (dv_pt - dv_ref).abs().max().item()


@pytest.mark.parametrize("kvpacked", [True, False])
# @pytest.mark.parametrize('kvpacked', [False])
@pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize('dtype', [torch.float16])
@pytest.mark.parametrize("mha_type", ["mha", "mqa", "gqa"])
# @pytest.mark.parametrize('mha_type', ["mqa"])
@pytest.mark.parametrize("alibi", [False, True])
# @pytest.mark.parametrize("alibi", [True])
@pytest.mark.parametrize("local", [False, True])
# @pytest.mark.parametrize("local", [True])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize('causal', [True])
@pytest.mark.parametrize("d", [32, 59, 64, 80, 96, 111, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [64])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_k",
    [
        (1, 147),
        (113, 203),
        (128, 217),
        (113, 211),
        (108, 256),
        (256, 512),
        (512, 256),
        (1024, 1024),
        (1023, 1024),
        (1024, 1023),
        (2048, 2048),
    ],
)
# @pytest.mark.parametrize('seqlen_q,seqlen_k', [(128, 128)])
@pytest.mark.parametrize("dropout_p", [0.0, 0.17])
# @pytest.mark.parametrize('dropout_p', [0.0])
def test_flash_attn_varlen_output(
    seqlen_q, seqlen_k, d, dropout_p, causal, local, alibi, mha_type, dtype, kvpacked
):
    if (
        max(seqlen_q, seqlen_k) >= 2048
        and torch.cuda.get_device_properties("cuda").total_memory <= 16 * 2**30
    ):
        pytest.skip()  # Reference implementation OOM
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    batch_size = 8
    nheads = 9
    nheads_k = nheads if mha_type == "mha" else (1 if mha_type == "mqa" else 3)
    assert nheads % nheads_k == 0
    window_size = (-1, -1) if not local else torch.randint(0, seqlen_k, (2,))
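    # Note (explanatory comment, not from the original): window_size = (left, right)
    # selects sliding-window ("local") attention, and (-1, -1) means an unbounded
    # window, i.e. plain full or causal attention. For example, window_size = (64, 0)
    # with causal=True lets query position i attend to keys in [i - 64, i].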
    q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype, requires_grad=True)
    if kvpacked:
        kv = torch.randn(
            batch_size, seqlen_k, 2, nheads_k, d, device=device, dtype=dtype, requires_grad=True
        )
    else:
        k = torch.randn(
            batch_size, seqlen_k, nheads_k, d, device=device, dtype=dtype, requires_grad=True
        )
        v = torch.randn(
            batch_size, seqlen_k, nheads_k, d, device=device, dtype=dtype, requires_grad=True
        )

    query_padding_mask = generate_random_padding_mask(seqlen_q, batch_size, device, mode="random")
    key_padding_mask = generate_random_padding_mask(seqlen_k, batch_size, device, mode="random")
    # key_padding_mask = generate_random_padding_mask(seqlen_k, batch_size, device, mode='full')
    if alibi:
        alibi_slopes = torch.rand(batch_size, nheads, device=device, dtype=torch.float32) * 0.3
        attn_bias = attn_bias_from_alibi_slopes(
            alibi_slopes, seqlen_q, seqlen_k, query_padding_mask, key_padding_mask, causal=causal
        )
    else:
        alibi_slopes, attn_bias = None, None
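    # Explanatory note: the flash kernels below receive the raw per-(batch, head)
    # slopes (alibi_slopes) and rebuild the ALiBi bias internally, whereas the
    # reference implementations consume the precomputed dense bias (attn_bias);
    # both are meant to describe the same attention.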

    if kvpacked:
        (
            q_unpad,
            kv_unpad,
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_q,
            max_seqlen_k,
            q,
            kv,
            output_pad_fn,
            dq_pad_fn,
            dkv_pad_fn,
        ) = generate_qkv(q, *kv.unbind(dim=2), query_padding_mask, key_padding_mask, kvpacked=True)
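        # Explanatory note: generate_qkv packs the padded batch into flat
        # (total_tokens, ...) tensors, with cu_seqlens_* holding cumulative sequence
        # lengths, e.g. per-sequence lengths [3, 5, 4] -> cu_seqlens = [0, 3, 8, 12]
        # (illustrative values only).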
        out_unpad, sm_lse, S_dmask = flash_attn_varlen_kvpacked_func(
            q_unpad,
            kv_unpad,
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_q,
            max_seqlen_k,
            dropout_p,
            causal=causal,
            window_size=window_size,
            alibi_slopes=alibi_slopes,
            return_attn_probs=True,
        )
    else:
        (
            q_unpad,
            k_unpad,
            v_unpad,
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_q,
            max_seqlen_k,
            q,
            k,
            v,
            output_pad_fn,
            dq_pad_fn,
            dk_pad_fn,
        ) = generate_qkv(q, k, v, query_padding_mask, key_padding_mask, kvpacked=False)
        out_unpad, sm_lse, S_dmask = flash_attn_varlen_func(
            q_unpad,
            k_unpad,
            v_unpad,
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_q,
            max_seqlen_k,
            dropout_p,
            causal=causal,
            window_size=window_size,
            alibi_slopes=alibi_slopes,
            return_attn_probs=True,
        )
    out = output_pad_fn(out_unpad)
    if dropout_p > 0.0:
        S_dmask_converted = convert_flash_attn_S_to_softmax(
            S_dmask,
            seqlen_q,
            seqlen_k,
            query_padding_mask,
            key_padding_mask,
            d,
            dropout_p > 0.0,
            causal=causal,
            window_size=window_size,
        )
        dropout_mask = S_dmask_converted >= 0
        attn_unnorm = S_dmask_converted.abs()
        if kvpacked:
            kv_rep = repeat(kv, "b s two h d -> b s two (h g) d", g=nheads // nheads_k)
            k_rep, v_rep = kv_rep.unbind(dim=2)
        else:
            k_rep = repeat(k, "b s h d -> b s (h g) d", g=nheads // nheads_k)
            v_rep = repeat(v, "b s h d -> b s (h g) d", g=nheads // nheads_k)
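        # For MQA/GQA the reference needs one K/V head per query head, so the
        # nheads_k heads are repeated nheads // nheads_k times before normalizing S.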
        attn = normalize_flash_attn_S(
            attn_unnorm,
            q,
            k_rep,
            v_rep,
            query_padding_mask,
            key_padding_mask,
            attn_bias,
            dropout_p > 0.0,
            causal=causal,
            window_size=window_size,
        )
        dropout_fraction = get_dropout_fraction(
            dropout_mask,
            query_padding_mask,
            key_padding_mask,
            causal=causal,
            window_size=window_size,
        ).item()
        print(f"Actual dropout fraction: {dropout_fraction}")
    else:
        dropout_mask = None

    if kvpacked:
        out_ref, attn_ref = attention_kvpacked_ref(
            q,
            kv,
            query_padding_mask,
            key_padding_mask,
            attn_bias,
            dropout_p,
            dropout_mask,
            causal=causal,
            window_size=window_size,
        )
        out_pt, attn_pt = attention_kvpacked_ref(
            q,
            kv,
            query_padding_mask,
            key_padding_mask,
            attn_bias,
            dropout_p,
            dropout_mask,
            causal=causal,
            window_size=window_size,
            upcast=False,
            reorder_ops=True,
        )
    else:
        out_ref, attn_ref = attention_ref(
            q,
            k,
            v,
            query_padding_mask,
            key_padding_mask,
            attn_bias,
            dropout_p,
            dropout_mask,
            causal=causal,
            window_size=window_size,
        )
        out_pt, attn_pt = attention_ref(
            q,
            k,
            v,
            query_padding_mask,
            key_padding_mask,
            attn_bias,
            dropout_p,
            dropout_mask,
            causal=causal,
            window_size=window_size,
            upcast=False,
            reorder_ops=True,
        )

    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
    print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
    if dropout_p > 0.0:
        print(f"Attention max diff: {(attn - attn_ref).abs().max().item()}")
        print(f"Attention Pytorch max diff: {(attn_pt - attn_ref).abs().max().item()}")

    g = torch.randn_like(out)
    if d <= MAX_HEADDIM_SM8x or (is_sm80 or is_sm90):
        if kvpacked:
            (
                dq_unpad,
                dkv_unpad,
            ) = torch.autograd.grad(out, (q_unpad, kv_unpad), g)
            dk, dv = dkv_pad_fn(dkv_unpad).unbind(2)
            (
                dq_ref,
                dkv_ref,
            ) = torch.autograd.grad(out_ref, (q, kv), g)
            dk_ref, dv_ref = dkv_ref.unbind(2)
            (
                dq_pt,
                dkv_pt,
            ) = torch.autograd.grad(out_pt, (q, kv), g)
            dk_pt, dv_pt = dkv_pt.unbind(2)
        else:
            (
                dq_unpad,
                dk_unpad,
                dv_unpad,
            ) = torch.autograd.grad(out, (q_unpad, k_unpad, v_unpad), g)
            dk = dk_pad_fn(dk_unpad)
            dv = dk_pad_fn(dv_unpad)
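            # dv_unpad has the same (total_k, nheads_k, d) layout as dk_unpad, so the
            # same padding helper can be reused for both gradients.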
            (
                dq_ref,
                dk_ref,
                dv_ref,
            ) = torch.autograd.grad(out_ref, (q, k, v), g)
            (
                dq_pt,
                dk_pt,
                dv_pt,
            ) = torch.autograd.grad(out_pt, (q, k, v), g)
        dq = dq_pad_fn(dq_unpad)
        print(f"dQ max diff: {(dq - dq_ref).abs().max().item()}")
        print(f"dK max diff: {(dk - dk_ref).abs().max().item()}")
        print(f"dV max diff: {(dv - dv_ref).abs().max().item()}")
        print(f"dQ mean diff: {(dq - dq_ref).abs().mean().item()}")
        print(f"dK mean diff: {(dk - dk_ref).abs().mean().item()}")
        print(f"dV mean diff: {(dv - dv_ref).abs().mean().item()}")
        print(f"dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}")
        print(f"dK Pytorch max diff: {(dk_pt - dk_ref).abs().max().item()}")
        print(f"dV Pytorch max diff: {(dv_pt - dv_ref).abs().max().item()}")
        print(f"dQ Pytorch mean diff: {(dq_pt - dq_ref).abs().mean().item()}")
        print(f"dK Pytorch mean diff: {(dk_pt - dk_ref).abs().mean().item()}")
        print(f"dV Pytorch mean diff: {(dv_pt - dv_ref).abs().mean().item()}")

    # Check that FlashAttention's numerical error is at most twice the numerical error
    # of a Pytorch implementation.
    assert (out - out_ref).abs().max().item() <= 2 * (out_pt - out_ref).abs().max().item()

    if dropout_p > 0.0:
        assert (attn - attn_ref).abs().max().item() <= 2 * (attn_pt - attn_ref).abs().max().item()
        # With alibi, many of the prob values are 0.0 & -0.0 so dropout_fraction isn't accurate
        if not alibi:
            assert abs(dropout_fraction - dropout_p) <= (0.01 if not local else 0.025)

    if d <= MAX_HEADDIM_SM8x or (is_sm80 or is_sm90):
        assert (dq - dq_ref).abs().max().item() <= 3 * (dq_pt - dq_ref).abs().max().item()
        assert (dk - dk_ref).abs().max().item() <= 3 * (dk_pt - dk_ref).abs().max().item()
        assert (dv - dv_ref).abs().max().item() <= 3 * (dv_pt - dv_ref).abs().max().item()


@pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("local", [False, True])
# @pytest.mark.parametrize("local", [True])
@pytest.mark.parametrize("d", [32, 40, 59, 64, 80, 96, 111, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [32, 64, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [56, 80])
# @pytest.mark.parametrize("d", [64, 128])
@pytest.mark.parametrize("swap_sq_sk", [False, True])
# @pytest.mark.parametrize("swap_sq_sk", [True])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_k",
    [
        (1, 239),
        (3, 799),
        (127, 512),
        (127, 513),
        (113, 203),
        (128, 217),
        (113, 211),
        (108, 256),
        (256, 512),
        (1023, 1024),
    ],
)
# @pytest.mark.parametrize('seqlen_q,seqlen_k', [(256, 128)])
def test_flash_attn_causal(seqlen_q, seqlen_k, swap_sq_sk, d, local, dtype):
    if (
        max(seqlen_q, seqlen_k) >= 2048
        and torch.cuda.get_device_properties("cuda").total_memory <= 16 * 2**30
    ):
        pytest.skip()  # Reference implementation OOM
    if swap_sq_sk:
        seqlen_q, seqlen_k = seqlen_k, seqlen_q
    device = "cuda"
    causal = True
    # set seed
    torch.random.manual_seed(0)
    batch_size = 8
    nheads = 9
    window_size = (-1, -1) if not local else torch.randint(0, seqlen_k, (2,))
    q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype, requires_grad=True)
    k = torch.randn(batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True)
    v = torch.randn(batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True)
    out = flash_attn_func(q, k, v, 0.0, causal=causal, window_size=window_size)
    out_ref, attn_ref = attention_ref(
        q, k, v, None, None, None, 0.0, None, causal=causal, window_size=window_size
    )
    out_pt, attn_pt = attention_ref(
        q,
        k,
        v,
        None,
        None,
        None,
        0.0,
        None,
        causal=causal,
        window_size=window_size,
        upcast=False,
        reorder_ops=True,
    )

    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
    print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")

    g = torch.randn_like(out)
    do_o = (g.float() * out.float()).sum(-1)
    if d <= MAX_HEADDIM_SM8x or (is_sm80 or is_sm90):
        (
            dq,
            dk,
            dv,
        ) = torch.autograd.grad(out, (q, k, v), g)
        (
            dq_ref,
            dk_ref,
            dv_ref,
        ) = torch.autograd.grad(out_ref, (q, k, v), g)
        (
            dq_pt,
            dk_pt,
            dv_pt,
        ) = torch.autograd.grad(out_pt, (q, k, v), g)
        print(f"dQ max diff: {(dq - dq_ref).abs().max().item()}")
        print(f"dK max diff: {(dk - dk_ref).abs().max().item()}")
        print(f"dV max diff: {(dv - dv_ref).abs().max().item()}")
        print(f"dQ mean diff: {(dq - dq_ref).abs().mean().item()}")
        print(f"dK mean diff: {(dk - dk_ref).abs().mean().item()}")
        print(f"dV mean diff: {(dv - dv_ref).abs().mean().item()}")
        print(f"dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}")
        print(f"dK Pytorch max diff: {(dk_pt - dk_ref).abs().max().item()}")
        print(f"dV Pytorch max diff: {(dv_pt - dv_ref).abs().max().item()}")
        print(f"dQ Pytorch mean diff: {(dq_pt - dq_ref).abs().mean().item()}")
        print(f"dK Pytorch mean diff: {(dk_pt - dk_ref).abs().mean().item()}")
        print(f"dV Pytorch mean diff: {(dv_pt - dv_ref).abs().mean().item()}")

    # Check that FlashAttention's numerical error is at most twice the numerical error
    # of a Pytorch implementation.
    assert (out - out_ref).abs().max().item() <= 2 * (out_pt - out_ref).abs().max().item() + 1e-5

    if d <= MAX_HEADDIM_SM8x or (is_sm80 or is_sm90):
        assert (dq - dq_ref).abs().max().item() <= 2 * (dq_pt - dq_ref).abs().max().item() + 1e-5
        assert (dk - dk_ref).abs().max().item() <= 2 * (dk_pt - dk_ref).abs().max().item() + 1e-5
        assert (dv - dv_ref).abs().max().item() <= 2 * (dv_pt - dv_ref).abs().max().item() + 1e-5


@pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("local", [False, True])
# @pytest.mark.parametrize("local", [True])
@pytest.mark.parametrize("d", [32, 40, 59, 64, 80, 96, 111, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [32, 64, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [56, 80])
# @pytest.mark.parametrize("d", [64])
@pytest.mark.parametrize("swap_sq_sk", [False, True])
# @pytest.mark.parametrize("swap_sq_sk", [True])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_k",
    [
        (1, 239),
        (3, 799),
        (127, 512),
        (127, 513),
        (113, 203),
        (128, 217),
        (113, 211),
        (108, 256),
        (256, 512),
        (1023, 1024),
    ],
)
# @pytest.mark.parametrize("seqlen_q,seqlen_k", [(256, 128)])
def test_flash_attn_varlen_causal(seqlen_q, seqlen_k, swap_sq_sk, d, local, dtype):
    if (
        max(seqlen_q, seqlen_k) >= 2048
        and torch.cuda.get_device_properties("cuda").total_memory <= 16 * 2**30
    ):
        pytest.skip()  # Reference implementation OOM
    if swap_sq_sk:
        seqlen_q, seqlen_k = seqlen_k, seqlen_q
    device = "cuda"
    causal = True
    # set seed
    torch.random.manual_seed(0)
    batch_size = 8
    nheads = 9
    window_size = (-1, -1) if not local else torch.randint(0, seqlen_k, (2,))
    q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype, requires_grad=True)
    k = torch.randn(batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True)
    v = torch.randn(batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True)
    query_padding_mask = generate_random_padding_mask(seqlen_q, batch_size, device, mode="random")
    key_padding_mask = generate_random_padding_mask(seqlen_k, batch_size, device, mode="random")
    (
        q_unpad,
        k_unpad,
        v_unpad,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        q,
        k,
        v,
        output_pad_fn,
        dq_pad_fn,
        dk_pad_fn,
    ) = generate_qkv(q, k, v, query_padding_mask, key_padding_mask, kvpacked=False)
    out_unpad = flash_attn_varlen_func(
        q_unpad,
        k_unpad,
        v_unpad,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        0.0,
        causal=causal,
        window_size=window_size,
    )
    out = output_pad_fn(out_unpad)
    out_ref, attn_ref = attention_ref(
        q,
        k,
        v,
        query_padding_mask,
        key_padding_mask,
        None,
        0.0,
        None,
        causal=causal,
        window_size=window_size,
    )
    out_pt, attn_pt = attention_ref(
        q,
        k,
        v,
        query_padding_mask,
        key_padding_mask,
        None,
        0.0,
        None,
        causal=causal,
        window_size=window_size,
        upcast=False,
        reorder_ops=True,
    )

    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
    print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")

    g = torch.randn_like(out)
    do_o = (g.float() * out.float()).sum(-1)
    if d <= MAX_HEADDIM_SM8x or (is_sm80 or is_sm90):
        (
            dq_unpad,
            dk_unpad,
            dv_unpad,
        ) = torch.autograd.grad(out, (q_unpad, k_unpad, v_unpad), g)
        dq = dq_pad_fn(dq_unpad)
        dk = dk_pad_fn(dk_unpad)
        dv = dk_pad_fn(dv_unpad)
        (
            dq_ref,
            dk_ref,
            dv_ref,
        ) = torch.autograd.grad(out_ref, (q, k, v), g)
        (
            dq_pt,
            dk_pt,
            dv_pt,
        ) = torch.autograd.grad(out_pt, (q, k, v), g)
        print(f"dQ max diff: {(dq - dq_ref).abs().max().item()}")
        print(f"dK max diff: {(dk - dk_ref).abs().max().item()}")
        print(f"dV max diff: {(dv - dv_ref).abs().max().item()}")
        print(f"dQ mean diff: {(dq - dq_ref).abs().mean().item()}")
        print(f"dK mean diff: {(dk - dk_ref).abs().mean().item()}")
        print(f"dV mean diff: {(dv - dv_ref).abs().mean().item()}")
        print(f"dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}")
        print(f"dK Pytorch max diff: {(dk_pt - dk_ref).abs().max().item()}")
        print(f"dV Pytorch max diff: {(dv_pt - dv_ref).abs().max().item()}")
        print(f"dQ Pytorch mean diff: {(dq_pt - dq_ref).abs().mean().item()}")
        print(f"dK Pytorch mean diff: {(dk_pt - dk_ref).abs().mean().item()}")
        print(f"dV Pytorch mean diff: {(dv_pt - dv_ref).abs().mean().item()}")

    # Check that FlashAttention's numerical error is at most twice the numerical error
    # of a Pytorch implementation.
    assert (out - out_ref).abs().max().item() <= 2 * (out_pt - out_ref).abs().max().item() + 1e-5

    if d <= MAX_HEADDIM_SM8x or (is_sm80 or is_sm90):
        assert (dq - dq_ref).abs().max().item() <= 2 * (dq_pt - dq_ref).abs().max().item() + 1e-5
        assert (dk - dk_ref).abs().max().item() <= 2 * (dk_pt - dk_ref).abs().max().item() + 1e-5
        assert (dv - dv_ref).abs().max().item() <= 2 * (dv_pt - dv_ref).abs().max().item() + 1e-5


@pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize("dtype", [torch.float16])
@pytest.mark.parametrize("alibi", [False, True])
# @pytest.mark.parametrize("alibi", [True])
@pytest.mark.parametrize("local", [False, True])
# @pytest.mark.parametrize("local", [False])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize("causal", [True])
@pytest.mark.parametrize("d", [32, 40, 59, 64, 80, 96, 111, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [32, 64, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [56, 80])
# @pytest.mark.parametrize("d", [64])
@pytest.mark.parametrize("swap_sq_sk", [False, True])
# @pytest.mark.parametrize("swap_sq_sk", [False])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_k",
    [
        (3, 1024),
        (1, 339),
        (64, 800),
        (3, 799),
        (64, 2048),
        (16, 20000),
        (16, 100000),
        (128, 128),
        (256, 256),
    ],
)
# @pytest.mark.parametrize('seqlen_q,seqlen_k', [(256, 128)])
def test_flash_attn_splitkv(seqlen_q, seqlen_k, swap_sq_sk, d, causal, local, alibi, dtype):
    if swap_sq_sk:
        seqlen_q, seqlen_k = seqlen_k, seqlen_q
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    batch_size = 1
    nheads = 12
    window_size = (-1, -1) if not local else torch.randint(0, seqlen_k, (2,))
    q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype, requires_grad=True)
    k = torch.randn(batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True)
    v = torch.randn(batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True)
    if alibi:
        alibi_slopes = torch.rand(batch_size, nheads, device=device, dtype=torch.float32) * 0.3
        attn_bias = attn_bias_from_alibi_slopes(alibi_slopes, seqlen_q, seqlen_k, causal=causal)
    else:
        alibi_slopes, attn_bias = None, None
    out, lse, _ = flash_attn_func(
        q,
        k,
        v,
        0.0,
        causal=causal,
        window_size=window_size,
        alibi_slopes=alibi_slopes,
        return_attn_probs=True,
    )
    out_ref, attn_ref = attention_ref(
        q, k, v, None, None, attn_bias, 0.0, None, causal=causal, window_size=window_size
    )
    out_pt, attn_pt = attention_ref(
        q,
        k,
        v,
        None,
        None,
        attn_bias,
        0.0,
        None,
        causal=causal,
        window_size=window_size,
        upcast=False,
        reorder_ops=True,
    )

    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
    print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")

    g = torch.randn_like(out)
    do_o = (g.float() * out.float()).sum(-1)
    if d <= MAX_HEADDIM_SM8x or (is_sm80 or is_sm90):
        (
            dq,
            dk,
            dv,
        ) = torch.autograd.grad(out, (q, k, v), g)
        (
            dq_ref,
            dk_ref,
            dv_ref,
        ) = torch.autograd.grad(out_ref, (q, k, v), g)
        (
            dq_pt,
            dk_pt,
            dv_pt,
        ) = torch.autograd.grad(out_pt, (q, k, v), g)
        print(f"dQ max diff: {(dq - dq_ref).abs().max().item()}")
        print(f"dK max diff: {(dk - dk_ref).abs().max().item()}")
        print(f"dV max diff: {(dv - dv_ref).abs().max().item()}")
        print(f"dQ mean diff: {(dq - dq_ref).abs().mean().item()}")
        print(f"dK mean diff: {(dk - dk_ref).abs().mean().item()}")
        print(f"dV mean diff: {(dv - dv_ref).abs().mean().item()}")
        print(f"dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}")
        print(f"dK Pytorch max diff: {(dk_pt - dk_ref).abs().max().item()}")
        print(f"dV Pytorch max diff: {(dv_pt - dv_ref).abs().max().item()}")
        print(f"dQ Pytorch mean diff: {(dq_pt - dq_ref).abs().mean().item()}")
        print(f"dK Pytorch mean diff: {(dk_pt - dk_ref).abs().mean().item()}")
        print(f"dV Pytorch mean diff: {(dv_pt - dv_ref).abs().mean().item()}")

    # Check that FlashAttention's numerical error is at most twice the numerical error
    # of a Pytorch implementation.
    assert (out - out_ref).abs().max().item() <= 2 * (out_pt - out_ref).abs().max().item() + 1e-5

    mult = 2 if not alibi else 8
    if d <= MAX_HEADDIM_SM8x or (is_sm80 or is_sm90):
        assert (dq - dq_ref).abs().max().item() <= mult * (dq_pt - dq_ref).abs().max().item() + 2e-4
        assert (dk - dk_ref).abs().max().item() <= mult * (dk_pt - dk_ref).abs().max().item() + 2e-4
        assert (dv - dv_ref).abs().max().item() <= mult * (dv_pt - dv_ref).abs().max().item() + 2e-4


# @pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
@pytest.mark.parametrize("dtype", [torch.float16])
@pytest.mark.parametrize("num_splits", [1, 0])
# @pytest.mark.parametrize("num_splits", [1])
@pytest.mark.parametrize("mha_type", ["mha", "mqa", "gqa"])
# @pytest.mark.parametrize("mha_type", ["mha"])
@pytest.mark.parametrize("new_kv", [False, True])
# @pytest.mark.parametrize("new_kv", [False])
@pytest.mark.parametrize("alibi", [False, True])
# @pytest.mark.parametrize("alibi", [True])
@pytest.mark.parametrize("local", [False, True])
# @pytest.mark.parametrize("local", [False])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize("causal", [False])
@pytest.mark.parametrize("seqlen_new_eq_seqlen_q", [True, False])
# @pytest.mark.parametrize("seqlen_new_eq_seqlen_q", [True])
@pytest.mark.parametrize("rotary_interleaved", [False, True])
# @pytest.mark.parametrize("rotary_interleaved", [False])
@pytest.mark.parametrize("rotary_fraction", [0.0, 0.5, 1.0])
# @pytest.mark.parametrize("rotary_fraction", [0.0])
@pytest.mark.parametrize("has_batch_idx", [False, True])
# @pytest.mark.parametrize("has_batch_idx", [False])
@pytest.mark.parametrize("d", [32, 59, 64, 80, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [56, 80])
# @pytest.mark.parametrize("d", [128])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_k",
    [
        (1, 128),
        (1, 339),
        (3, 1024),
        (64, 800),
        (64, 256),
        (3, 799),
        (64, 2048),
        (16, 20000),
        (1, 128 * 1024),
        (16, 128 * 1024),
        (128, 128),
    ],
)
# @pytest.mark.parametrize('seqlen_q,seqlen_k', [(256, 128)])
def test_flash_attn_kvcache(
    seqlen_q,
    seqlen_k,
    d,
    has_batch_idx,
    rotary_fraction,
    rotary_interleaved,
    seqlen_new_eq_seqlen_q,
    causal,
    local,
    alibi,
    new_kv,
    mha_type,
    num_splits,
    dtype,
):
    if seqlen_q > seqlen_k and new_kv:
        pytest.skip()
    if not new_kv and rotary_fraction > 0.0:
        pytest.skip()
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    batch_size = 2
    batch_size_cache = batch_size if not has_batch_idx else batch_size * 2
    nheads = 6
    # rotary_dim must be a multiple of 16, and must be <= d
    rotary_dim = math.floor(int(rotary_fraction * d) / 16) * 16
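    # Worked example (illustrative only): with d = 128, rotary_fraction = 0.5 gives
    # int(0.5 * 128) = 64 -> rotary_dim = 64; with d = 59 and rotary_fraction = 0.5,
    # int(29.5) = 29 is floored to the multiple of 16 below it, so rotary_dim = 16.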
    nheads_k = nheads if mha_type == "mha" else (1 if mha_type == "mqa" else 3)
    assert nheads % nheads_k == 0
    window_size = (-1, -1) if not local else torch.randint(0, seqlen_k, (2,))
    q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype)
    seqlen_new = seqlen_q if seqlen_new_eq_seqlen_q else torch.randint(1, seqlen_q + 1, (1,)).item()
    if new_kv:
        k = torch.randn(batch_size, seqlen_new, nheads_k, d, device=device, dtype=dtype)
        v = torch.randn(batch_size, seqlen_new, nheads_k, d, device=device, dtype=dtype)
    else:
        k, v = None, None
    k_cache = torch.randn(batch_size_cache, seqlen_k, nheads_k, d, device=device, dtype=dtype)
    v_cache = torch.randn(batch_size_cache, seqlen_k, nheads_k, d, device=device, dtype=dtype)
    cache_seqlens = torch.randint(
        0,
        # If we don't use seqlen_q in the case of causal and rotary, cos/sin won't be long enough
        (seqlen_k - (seqlen_q if (causal or local) and rotary_dim > 1 else seqlen_new) + 1)
        if new_kv
        else (seqlen_k + 1),
        (batch_size,),
        dtype=torch.int32,
        device=device,
    )
    arange = rearrange(torch.arange(seqlen_k, device=device), "s -> 1 s")
    cache_seqlens_expanded = rearrange(cache_seqlens, "b -> b 1")
    key_padding_mask = arange < cache_seqlens_expanded + (seqlen_new if new_kv else 0)
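    # key_padding_mask marks the cache positions the reference may attend to: the
    # cache_seqlens tokens already present plus, when new_kv is set, the seqlen_new
    # tokens that flash_attn_with_kvcache is about to append.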
    if has_batch_idx:
        cache_batch_idx = torch.randperm(batch_size_cache, dtype=torch.int32, device=device)[
            :batch_size
        ]
    else:
        cache_batch_idx = None
    if alibi:
        alibi_slopes = torch.rand(batch_size, nheads, device=device, dtype=torch.float32) * 0.3
        attn_bias = attn_bias_from_alibi_slopes(
            alibi_slopes, seqlen_q, seqlen_k, None, key_padding_mask, causal=causal
        )
    else:
        alibi_slopes, attn_bias = None, None
    # cache_seqlens = torch.tensor([64], dtype=torch.int32, device=device)
    if rotary_dim > 0:
        angle = torch.rand(seqlen_k, rotary_dim // 2, device=device) * 2 * math.pi
        cos = torch.cos(angle).to(dtype=dtype)
        sin = torch.sin(angle).to(dtype=dtype)
        if causal or local:
            q_ro = apply_rotary_emb(
                q, cos, sin, seqlen_offsets=cache_seqlens, interleaved=rotary_interleaved
            )
        else:
            q_ro = rearrange(
                apply_rotary_emb(
                    rearrange(q, "b s h d -> b 1 (s h) d"),
                    cos,
                    sin,
                    seqlen_offsets=cache_seqlens,
                    interleaved=rotary_interleaved,
                ),
                "b 1 (s h) d -> b s h d",
                s=seqlen_q,
            )
        # q_ro = q
        k_ro = apply_rotary_emb(
            k, cos, sin, seqlen_offsets=cache_seqlens, interleaved=rotary_interleaved
        )
    else:
        cos, sin = None, None
        q_ro, k_ro = q, k
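    # Explanatory note on the reference rotation above: with causal or local attention
    # each query row i is rotated at position cache_seqlens + i, while in the
    # non-causal branch every query is rotated at the same offset cache_seqlens, which
    # is why q is flattened to a single sequence position before apply_rotary_emb.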
    # k_cache[:, 64:] = -1
    k_cache_ref = (k_cache if not has_batch_idx else k_cache[cache_batch_idx]).clone()
    v_cache_ref = (v_cache if not has_batch_idx else v_cache[cache_batch_idx]).clone()
    if new_kv:
        update_mask = torch.logical_and(
            cache_seqlens_expanded <= arange, arange < cache_seqlens_expanded + seqlen_new
        )
        k_cache_ref[update_mask] = rearrange(k_ro, "b s ... -> (b s) ...")
        v_cache_ref[update_mask] = rearrange(v, "b s ... -> (b s) ...")
    k_cache_rep = repeat(k_cache_ref, "b s h d -> b s (h g) d", g=nheads // nheads_k)
    v_cache_rep = repeat(v_cache_ref, "b s h d -> b s (h g) d", g=nheads // nheads_k)
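    # k_cache_ref / v_cache_ref now model what the kernel is expected to write back:
    # the new keys/values occupy rows [cache_seqlens, cache_seqlens + seqlen_new) of
    # each batch entry and the rest of the cache stays untouched.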
    out = flash_attn_with_kvcache(
        q,
        k_cache,
        v_cache,
        k,
        v,
        cos,
        sin,
        cache_seqlens,
        cache_batch_idx,
        causal=causal,
        window_size=window_size,
        rotary_interleaved=rotary_interleaved,
        alibi_slopes=alibi_slopes,
        num_splits=num_splits,
    )
    # out = flash_attn_with_kvcache(
    #     q, k_cache, v_cache, cache_seqlens=cache_seqlens, causal=causal, window_size=window_size
    # )
    # out = flash_attn_with_kvcache(q, k_cache, v_cache, causal=causal, window_size=window_size)
    # qk = torch.einsum("bqhd,bkhd->bhqk", q, k_cache_ref)
    # m = qk.amax(-1, keepdim=True)
    # s_tmp = torch.exp((qk - m) / math.sqrt(d))
    # o1 = torch.einsum('bhst,bthd->bshd', s_tmp, v_cache_ref)
    # lse_ref = torch.logsumexp(qk / math.sqrt(d), -1)
    # probs = torch.softmax(qk, dim=-1)
    out_ref, _ = attention_ref(
        q_ro,
        k_cache_rep,
        v_cache_rep,
        None,
        key_padding_mask,
        attn_bias,
        0.0,
        None,
        causal=causal,
        window_size=window_size,
    )
    out_pt, _ = attention_ref(
        q_ro,
        k_cache_rep,
        v_cache_rep,
        None,
        key_padding_mask,
        attn_bias,
        0.0,
        None,
        causal=causal,
        window_size=window_size,
        upcast=False,
        reorder_ops=True,
    )
    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
    print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")

    # Check that FlashAttention's numerical error is at most twice the numerical error
    # of a Pytorch implementation.
    if new_kv:
        k_cache_select = k_cache if not has_batch_idx else k_cache[cache_batch_idx]
        v_cache_select = v_cache if not has_batch_idx else v_cache[cache_batch_idx]
        assert torch.allclose(k_cache_select, k_cache_ref, rtol=1e-3, atol=1e-3)
        assert torch.equal(v_cache_select, v_cache_ref)
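        # Explanatory note: k passes through the rotary embedding before being written
        # into the cache, so the cached keys may differ from k_cache_ref by rounding
        # (hence allclose), while v is copied verbatim and must match exactly.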
    mult = 3 if not alibi else 5
    assert (out - out_ref).abs().max().item() <= mult * (out_pt - out_ref).abs().max().item() + 1e-5


# @pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
@pytest.mark.parametrize("dtype", [torch.float16])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize('causal', [True])
@pytest.mark.parametrize("d", [32, 40, 59, 64, 80, 96, 111, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 56, 64, 80, 96, 128])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [128])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_k",
    [
        (1, 239),
        (239, 1),
        (3, 799),
        (799, 3),
        (1024, 128),
        (97, 97),
        (128, 128),
        (200, 200),
        (256, 256),
        (257, 257),
        (384, 384),
        (512, 512),
        (768, 768),
        (1024, 1024),
    ],
)
@pytest.mark.parametrize("dropout_p", [0.0, 0.17])
# @pytest.mark.parametrize("dropout_p", [0.0])
def test_flash_attn_race_condition(seqlen_q, seqlen_k, d, dropout_p, causal, dtype):
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    batch_size = 60  # Sometimes we need large batch size for the race conditions to trigger
    nheads = 4
    q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype, requires_grad=True)
    k = torch.randn(batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True)
    v = torch.randn(batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True)
    torch.random.manual_seed(42)
    out0, lse0, _ = flash_attn_func(q, k, v, dropout_p, causal=causal, return_attn_probs=True)
    g = torch.randn_like(out0)
    if d <= MAX_HEADDIM_SM8x or (is_sm80 or is_sm90):
        (
            dq0,
            dk0,
            dv0,
        ) = torch.autograd.grad(out0, (q, k, v), g)
        # Numerical error if we just do any arithmetic on dq
        dq_atol = 2 * ((dq0 + 0.3 - 0.3) - dq0).abs().max().item()
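        # In other words, dq_atol is twice the rounding noise of a trivial round trip
        # (dq0 + 0.3 - 0.3); any run-to-run difference beyond that would indicate
        # nondeterminism rather than ordinary float rounding. (Explanatory note.)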

    for i in range(250):
        torch.random.manual_seed(42)
        out, lse, _ = flash_attn_func(q, k, v, dropout_p, causal=causal, return_attn_probs=True)
        assert torch.equal(out, out0)
        assert torch.equal(lse, lse0)

        if d <= MAX_HEADDIM_SM8x or (is_sm80 or is_sm90):
            (
                dq,
                dk,
                dv,
            ) = torch.autograd.grad(out, (q, k, v), g)
            dq_equal = torch.allclose(dq, dq0, atol=dq_atol)
            if not dq_equal:
                print(f"Iter {i}, {dq_atol = }, dQ max diff: {(dq - dq0).abs().max().item()}")
            assert torch.equal(dv, dv0)
            assert torch.equal(dk, dk0)
            assert dq_equal


@pytest.mark.parametrize("dtype", [torch.float16])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize('causal', [False])
@pytest.mark.parametrize("d", [16, 32, 64])
# @pytest.mark.parametrize('d', [16])
@pytest.mark.parametrize("seqlen", [1, 2, 5, 17, 128])
# @pytest.mark.parametrize('seqlen', [2])
def test_flash_attn_bwd_overflow(seqlen, d, causal, dtype):
    """We previously had a bug where not masking elements beyond seqlen_k caused NaN in dQ,
    in the case where seqlen % 128 != 0.
    """
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    batch_size = 2
    nheads = 5
    q = torch.randn([batch_size, seqlen, nheads, d], dtype=dtype, device="cuda") * 5
    k, v = [
        torch.randn([batch_size, seqlen, nheads, d], dtype=dtype, device="cuda") * 3
        for _ in range(2)
    ]
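    # Explanatory note (assumption about intent): the inputs are scaled up (q by 5,
    # k and v by 3) so the softmax sees large logits, stressing the overflow/masking
    # path in the backward pass rather than the average-case accuracy.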
    q.requires_grad_(True)
    k.requires_grad_(True)
    v.requires_grad_(True)
    out = flash_attn_func(q, k, v, causal=causal)
    g = torch.randn_like(out)
    out.backward(g)
    q_pt = q.detach().clone().requires_grad_(True)
    k_pt = k.detach().clone().requires_grad_(True)
    v_pt = v.detach().clone().requires_grad_(True)
    out_pt, _ = attention_ref(q_pt, k_pt, v_pt, causal=causal, upcast=False, reorder_ops=True)
    out_pt.backward(g)
    q_ref = q.detach().clone().requires_grad_(True)
    k_ref = k.detach().clone().requires_grad_(True)
    v_ref = v.detach().clone().requires_grad_(True)
    out_ref, attn_ref = attention_ref(q_ref, k_ref, v_ref, causal=causal)
    out_ref.backward(g)
    print(f"dQ max diff: {(q.grad - q_ref.grad).abs().max().item()}")
    print(f"dK max diff: {(k.grad - k_ref.grad).abs().max().item()}")
    print(f"dV max diff: {(v.grad - v_ref.grad).abs().max().item()}")
    print(f"dQ Pytorch max diff: {(q_pt.grad - q_ref.grad).abs().max().item()}")
    print(f"dK Pytorch max diff: {(k_pt.grad - k_ref.grad).abs().max().item()}")
    print(f"dV Pytorch max diff: {(v_pt.grad - v_ref.grad).abs().max().item()}")
    assert (out - out_ref).abs().max().item() <= 2 * (out_pt - out_ref).abs().max().item()
    assert (q.grad - q_ref.grad).abs().max().item() <= 5 * (
        q_pt.grad - q_ref.grad
    ).abs().max().item() + 1e-3
    assert (k.grad - k_ref.grad).abs().max().item() <= 5 * (
        k_pt.grad - k_ref.grad
    ).abs().max().item() + 1e-3
    assert (v.grad - v_ref.grad).abs().max().item() <= 5 * (
        v_pt.grad - v_ref.grad
    ).abs().max().item() + 1e-3


@pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize('dtype', [torch.bfloat16])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize('causal', [False])
@pytest.mark.parametrize("d", [64, 128])
# @pytest.mark.parametrize('d', [64])
@pytest.mark.parametrize("seqlen", [97, 128, 200, 256])
# @pytest.mark.parametrize('seqlen', [128])
def test_flash_attn_bwd_transpose(seqlen, d, causal, dtype):
    """We previously had a bug where we were using the wrong strides of dout, which shows up
    when dout is not contiguous.
    """
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    batch_size = 5
    nheads = 2
    q, k, v = [
        torch.randn([batch_size, seqlen, nheads, d], dtype=dtype, device="cuda", requires_grad=True)
        for _ in range(3)
    ]
    out = rearrange(flash_attn_func(q, k, v, causal=causal), "b s ... -> s b ...")
    # So g is not contiguous
    g = torch.randn(seqlen, 2 * batch_size, nheads, d, dtype=dtype, device="cuda")[:, ::2]
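    # Slicing every other row of a (seqlen, 2 * batch_size, ...) tensor doubles the
    # stride along the batch dimension, so dout reaches the backward kernel
    # non-contiguously, exercising the stride handling described in the docstring.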
    out.backward(g)
    q_pt = q.detach().clone().requires_grad_(True)
    k_pt = k.detach().clone().requires_grad_(True)
    v_pt = v.detach().clone().requires_grad_(True)
    out_pt, attn_pt = attention_ref(q_pt, k_pt, v_pt, causal=causal, upcast=False, reorder_ops=True)
    out_pt = rearrange(out_pt, "b s ... -> s b ...")
    out_pt.backward(g)
    q_ref = q.detach().clone().requires_grad_(True)
    k_ref = k.detach().clone().requires_grad_(True)
    v_ref = v.detach().clone().requires_grad_(True)
    out_ref, attn_ref = attention_ref(q_ref, k_ref, v_ref, causal=causal)
    out_ref = rearrange(out_ref, "b s ... -> s b ...")
    out_ref.backward(g)
    print(f"dQ max diff: {(q.grad - q_ref.grad).abs().max().item()}")
    print(f"dK max diff: {(k.grad - k_ref.grad).abs().max().item()}")
    print(f"dV max diff: {(v.grad - v_ref.grad).abs().max().item()}")
    print(f"dQ Pytorch max diff: {(q_pt.grad - q_ref.grad).abs().max().item()}")
    print(f"dK Pytorch max diff: {(k_pt.grad - k_ref.grad).abs().max().item()}")
    print(f"dV Pytorch max diff: {(v_pt.grad - v_ref.grad).abs().max().item()}")
    assert (out - out_ref).abs().max().item() <= 2 * (out_pt - out_ref).abs().max().item()
    assert (q.grad - q_ref.grad).abs().max().item() <= 2 * (
        q_pt.grad - q_ref.grad
    ).abs().max().item()
    assert (k.grad - k_ref.grad).abs().max().item() <= 2 * (
        k_pt.grad - k_ref.grad
    ).abs().max().item()
    assert (v.grad - v_ref.grad).abs().max().item() <= 2 * (
        v_pt.grad - v_ref.grad
    ).abs().max().item()


@pytest.mark.parametrize("dtype", [torch.float16])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize('causal', [False])
@pytest.mark.parametrize("d", [16, 32, 64])
# @pytest.mark.parametrize('d', [16])
def test_flash_attn_bwd_varlen_overflow(d, causal, dtype):
    """We previously had a bug where not masking elements beyond seqlen_k caused NaN in dQ,
    in the case where seqlen % 128 != 0 or varlen.
    """
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    nheads = 5
    q_cuseqlen = torch.tensor([0, 76, 110, 256], device=device, dtype=torch.int32)
    k_cuseqlen = torch.tensor([0, 1, 2, 3], device=device, dtype=torch.int32)
    Mq = 256
    Mk = 3
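    # Explanatory note: this packs three sequences with query lengths 76, 34 and 146
    # (cumulative [0, 76, 110, 256]) against single-token key sequences
    # (cumulative [0, 1, 2, 3]), so seqlen_k is far from a multiple of 128.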

    q = torch.randn([Mq, nheads, d], dtype=dtype, device=device) * 3
    k, v = [torch.randn([Mk, nheads, d], dtype=dtype, device=device) * 3 for _ in range(2)]
    q.requires_grad_(True)
    k.requires_grad_(True)
    v.requires_grad_(True)

    out = flash_attn_varlen_func(q, k, v, q_cuseqlen, k_cuseqlen, Mq, Mk, causal=causal)
    g = torch.randn_like(out)
    out.backward(g)

    assert not q.grad.isnan().any()
    assert not k.grad.isnan().any()
    assert not v.grad.isnan().any()