import math

import torch
import torch.nn.functional as F

import tilelang
import tilelang.language as T


def get_sparse_attn_mask_from_topk(x, topk, use_dense_for_last_block=False):
    bsz, num_head, downsample_len, _ = x.shape
    # N_CTX = downsample_len * BLOCK
    sparse_index = torch.topk(x, topk, dim=-1).indices
    dense_mask = torch.full([bsz, num_head, downsample_len, downsample_len], False, dtype=torch.bool, device=x.device)
    dense_mask.scatter_(-1, sparse_index, True)
    if use_dense_for_last_block:
        dense_mask[:, :, -2:, :] = True
    dense_mask.tril_()
    return dense_mask


def get_sparse_attn_mask_from_threshold(x, threshold, use_dense_for_last_block=False):
    dense_mask = x > threshold
    if use_dense_for_last_block:
        dense_mask[:, :, -2:, :] = True
    dense_mask.tril_()
    return dense_mask
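

# A minimal usage sketch of the two mask builders above (a hypothetical helper,
# not part of the original example): with 4 blocks per side, topk=2 keeps each
# query-block row's two highest-scoring key blocks, and tril_() restricts both
# masks to the causal lower triangle.
def _demo_block_mask():
    scores = torch.randn(1, 1, 4, 4)  # [batch, heads, blocks, blocks]
    topk_mask = get_sparse_attn_mask_from_topk(scores, topk=2)
    threshold_mask = get_sparse_attn_mask_from_threshold(scores, threshold=0.0)
    # Both masks are boolean tensors of shape [1, 1, 4, 4].
    print(topk_mask)
    print(threshold_mask)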


@tilelang.jit(
    out_idx=[4],
    pass_configs={
        tilelang.PassConfigKey.TL_ENABLE_FAST_MATH: True,
    },
)
def blocksparse_flashattn(batch, heads, seq_len, dim, downsample_len, is_causal):
    block_M = 64  # query rows per tile
    block_N = 64  # key/value rows per tile
    num_stages = 1
    threads = 128
    scale = (1.0 / dim) ** 0.5 * 1.44269504  # 1/sqrt(dim) folded with log2(e) for exp2

    shape = [batch, heads, seq_len, dim]
    block_mask_shape = [batch, heads, downsample_len, downsample_len]

    dtype = T.float16
    accum_dtype = T.float32  # accumulate scores and output in fp32 for stability
    block_mask_dtype = T.bool

    def kernel_func(block_M, block_N, num_stages, threads):
        @T.macro
        def MMA0(
            K: T.Tensor(shape, dtype),
            Q_shared: T.SharedBuffer([block_M, dim], dtype),
            K_shared: T.SharedBuffer([block_N, dim], dtype),
            acc_s: T.FragmentBuffer([block_M, block_N], accum_dtype),
            k: T.int32,
            bx: T.int32,
            by: T.int32,
            bz: T.int32,
        ):
            T.copy(K[bz, by, k * block_N : (k + 1) * block_N, :], K_shared)
            if is_causal:
                # Pre-fill acc_s with 0 where visible and -inf where causally
                # masked; the GEMM below accumulates into acc_s, so masked
                # entries stay at -inf.
                for i, j in T.Parallel(block_M, block_N):
                    acc_s[i, j] = T.if_then_else(bx * block_M + i >= k * block_N + j, 0, -T.infinity(acc_s.dtype))
            else:
                T.clear(acc_s)
            T.gemm(Q_shared, K_shared, acc_s, transpose_B=True, policy=T.GemmWarpPolicy.FullRow)

        @T.macro
        def MMA1(
            V: T.Tensor(shape, dtype),
            V_shared: T.SharedBuffer([block_N, dim], dtype),
            acc_s_cast: T.FragmentBuffer([block_M, block_N], dtype),
            acc_o: T.FragmentBuffer([block_M, dim], accum_dtype),
            k: T.int32,
            by: T.int32,
            bz: T.int32,
        ):
            T.copy(V[bz, by, k * block_N : (k + 1) * block_N, :], V_shared)
            T.gemm(acc_s_cast, V_shared, acc_o, policy=T.GemmWarpPolicy.FullRow)

        @T.macro
        def Softmax(
            acc_s: T.FragmentBuffer([block_M, block_N], accum_dtype),
            acc_s_cast: T.FragmentBuffer([block_M, block_N], dtype),
            scores_max: T.FragmentBuffer([block_M], accum_dtype),
            scores_max_prev: T.FragmentBuffer([block_M], accum_dtype),
            scores_scale: T.FragmentBuffer([block_M], accum_dtype),
            scores_sum: T.FragmentBuffer([block_M], accum_dtype),
            logsum: T.FragmentBuffer([block_M], accum_dtype),
        ):
            # Online softmax: stash the previous running max so state that was
            # already accumulated can be rescaled against the new max.
            T.copy(scores_max, scores_max_prev)
            T.fill(scores_max, -T.infinity(accum_dtype))
            T.reduce_max(acc_s, scores_max, dim=1, clear=False)
            # For causal softmax, scores_max would need to be reset to 0 when it
            # is -inf. This step is called Check_inf in the FlashAttention-3 code,
            # and it only needs to be done in the first ceil_div(kBlockM, kBlockN) steps.
            # for i in T.Parallel(block_M):
            #     scores_max[i] = T.if_then_else(scores_max[i] == -T.infinity(accum_dtype), 0, scores_max[i])
            for i in T.Parallel(block_M):
                scores_scale[i] = T.exp2(scores_max_prev[i] * scale - scores_max[i] * scale)
            for i, j in T.Parallel(block_M, block_N):
                # Instead of computing exp(x - max), compute exp2(x * log2(e) -
                # max * log2(e)). This lets the compiler emit a single ffma
                # instead of separate fadd and fmul instructions (see the
                # plain-PyTorch sketch after this kernel builder).
                acc_s[i, j] = T.exp2(acc_s[i, j] * scale - scores_max[i] * scale)
            T.reduce_sum(acc_s, scores_sum, dim=1)
            for i in T.Parallel(block_M):
                logsum[i] = logsum[i] * scores_scale[i] + scores_sum[i]
            T.copy(acc_s, acc_s_cast)

        @T.macro
        def Rescale(
            acc_o: T.FragmentBuffer([block_M, dim], accum_dtype),
            scores_scale: T.FragmentBuffer([block_M], accum_dtype),
        ):
            # Earlier output contributions used exponentials taken against the
            # old running max; scale them down to match the new max.
            for i, j in T.Parallel(block_M, dim):
                acc_o[i, j] *= scores_scale[i]

        @T.prim_func
        def blocksparse_flashattn(
            Q: T.Tensor(shape, dtype),
            K: T.Tensor(shape, dtype),
            V: T.Tensor(shape, dtype),
            BlockSparseMask: T.Tensor(block_mask_shape, block_mask_dtype),
            Output: T.Tensor(shape, dtype),
        ):
            with T.Kernel(T.ceildiv(seq_len, block_M), heads, batch, threads=threads) as (bx, by, bz):
                Q_shared = T.alloc_shared([block_M, dim], dtype)
                K_shared = T.alloc_shared([block_N, dim], dtype)
                V_shared = T.alloc_shared([block_N, dim], dtype)
                O_shared = T.alloc_shared([block_M, dim], dtype)
                acc_s = T.alloc_fragment([block_M, block_N], accum_dtype)
                acc_s_cast = T.alloc_fragment([block_M, block_N], dtype)
                acc_o = T.alloc_fragment([block_M, dim], accum_dtype)
                scores_max = T.alloc_fragment([block_M], accum_dtype)
                scores_max_prev = T.alloc_fragment([block_M], accum_dtype)
                scores_scale = T.alloc_fragment([block_M], accum_dtype)
                scores_sum = T.alloc_fragment([block_M], accum_dtype)
                logsum = T.alloc_fragment([block_M], accum_dtype)
                block_mask = T.alloc_local([downsample_len], block_mask_dtype)

                T.copy(Q[bz, by, bx * block_M : (bx + 1) * block_M, :], Q_shared)
                T.fill(acc_o, 0)
                T.fill(logsum, 0)
                T.fill(scores_max, -T.infinity(accum_dtype))

                # Stage this query block's row of the block mask into registers.
                for vj in T.serial(downsample_len):
                    block_mask[vj] = BlockSparseMask[bz, by, bx, vj]

                loop_range = (
                    T.min(T.ceildiv(seq_len, block_N), T.ceildiv((bx + 1) * block_M, block_N))
                    if is_causal
                    else T.ceildiv(seq_len, block_N)
                )

                for k in T.Pipelined(loop_range, num_stages=num_stages):
                    # Skip this K/V tile entirely when its block-mask entry is False.
                    if block_mask[k] != 0:
                        MMA0(K, Q_shared, K_shared, acc_s, k, bx, by, bz)
                        Softmax(acc_s, acc_s_cast, scores_max, scores_max_prev, scores_scale, scores_sum, logsum)
                        Rescale(acc_o, scores_scale)
                        MMA1(V, V_shared, acc_s_cast, acc_o, k, by, bz)

                # Normalize by the accumulated softmax denominator and write back.
                for i, j in T.Parallel(block_M, dim):
                    acc_o[i, j] /= logsum[i]
                T.copy(acc_o, O_shared)
                T.copy(O_shared, Output[bz, by, bx * block_M : (bx + 1) * block_M, :])

        return blocksparse_flashattn

    return kernel_func(block_M, block_N, num_stages, threads)
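

# A plain-PyTorch sketch (a hypothetical helper, not part of the kernel above)
# of the base-2 online-softmax identity the Softmax macro relies on:
# exp(x - m) == exp2(x * log2(e) - m * log2(e)), so the running max and the
# running denominator can be updated block by block with exp2 and fused
# multiply-adds.
def _online_softmax_reference(score_blocks):
    log2e = 1.44269504
    running_max = torch.full(score_blocks[0].shape[:-1], float("-inf"))
    denom = torch.zeros_like(running_max)  # running softmax denominator
    for s in score_blocks:  # each s: [rows, block_N] of raw scores
        new_max = torch.maximum(running_max, s.max(dim=-1).values)
        rescale = torch.exp2((running_max - new_max) * log2e)  # == exp(old - new)
        p = torch.exp2((s - new_max.unsqueeze(-1)) * log2e)  # == exp(s - new_max)
        denom = denom * rescale + p.sum(dim=-1)
        running_max = new_max
    return running_max, denom  # analogous to scores_max / logsum in the kernel
# Usage sketch: _online_softmax_reference(torch.randn(8, 256).split(64, dim=-1))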


def test_topk_sparse_attention():
    # Config
    BATCH, N_HEADS, SEQ_LEN, D_HEAD = 1, 1, 256, 64
    TOPK = 2  # Keep the top 2 key blocks per query-block row
    BLOCK = 64
    torch.manual_seed(0)

    # Create inputs
    q = torch.randn(BATCH, N_HEADS, SEQ_LEN, D_HEAD, device="cuda", dtype=torch.float16)
    k = torch.randn(BATCH, N_HEADS, SEQ_LEN, D_HEAD, device="cuda", dtype=torch.float16)
    v = torch.randn(BATCH, N_HEADS, SEQ_LEN, D_HEAD, device="cuda", dtype=torch.float16)

    sm_scale = 1.0 / (D_HEAD**0.5)

    # Create sparse mask (downsampled to block level)
    downsample_factor = BLOCK
    downsample_len = math.ceil(SEQ_LEN / downsample_factor)
    x_ds = torch.randn([BATCH, N_HEADS, downsample_len, downsample_len], device="cuda", dtype=torch.bfloat16)
    x_ds[:, :, :, 0] = 100  # force the first key block so no query row is fully masked
    block_mask = get_sparse_attn_mask_from_topk(x_ds, topk=TOPK)

    # Run tilelang kernel
    kernel = blocksparse_flashattn(BATCH, N_HEADS, SEQ_LEN, D_HEAD, downsample_len, is_causal=True)

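    # out_idx=[4] in the @tilelang.jit decorator marks argument 4 (Output) as a
    # kernel-allocated output, so the call below passes only Q, K, V and the
    # block mask and receives Output as the return value.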
    tilelang_output = kernel(q, k, v, block_mask)

    # Compute reference
    # Expand the block mask to a full attention mask: torch.kron turns each
    # block-level boolean into a BLOCK x BLOCK tile.
    full_mask = torch.kron(block_mask.float(), torch.ones(BLOCK, BLOCK, device="cuda"))
    full_mask = full_mask[..., :SEQ_LEN, :SEQ_LEN].bool()
    full_mask = full_mask & torch.tril(torch.ones_like(full_mask))  # Apply causal mask

    # PyTorch reference implementation
    attn = torch.einsum("bhsd,bhtd->bhst", q, k) * sm_scale
    attn = attn.masked_fill(~full_mask, float("-inf"))
    attn = F.softmax(attn, dim=-1)
    ref_output = torch.einsum("bhst,bhtd->bhsd", attn, v)

    print("ref_output", ref_output)
    print("tilelang_output", tilelang_output)

    # Verify accuracy
    torch.testing.assert_close(tilelang_output, ref_output, atol=1e-2, rtol=1e-2)
    print("Pass topk sparse attention test with qlen == klen")


def main():
    test_topk_sparse_attention()


if __name__ == "__main__":
    main()