import random
from typing import List, Optional, Tuple

import pytest
import torch
from xformers import ops as xops
from xformers.ops.fmha.attn_bias import BlockDiagonalCausalMask

from vllm._C import ops, cache_ops
from vllm.utils import get_max_shared_memory_bytes
from vllm.utils import is_hip
from allclose_default import get_default_atol, get_default_rtol

FLOAT32_BYTES = torch.finfo(torch.float).bits // 8
# This will change depending on the compute capability.
# - 512 as a buffer
MAX_SEQ_LEN = get_max_shared_memory_bytes() // FLOAT32_BYTES - 512
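# NOTE: The paged attention kernel buffers roughly one float32 logit per
# context token in shared memory, so the longest testable context is bounded
# by the device's shared-memory size; the `- 512` above leaves headroom.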
# There may not be enough GPU memory due to the large NUM_BLOCKS.
# Reduce NUM_BLOCKS if an out-of-memory error occurs.
NUM_BLOCKS = 4321  # Arbitrary values for testing
PARTITION_SIZE = 512
# flshattF and tritonflashattF supported: {torch.float16, torch.bfloat16}
DTYPES = [torch.half, torch.bfloat16, torch.float
          ] if not is_hip() else [torch.half, torch.bfloat16]
NUM_GEN_SEQS = [7]  # Arbitrary values for testing
NUM_PREFILL_SEQS = [3]  # Arbitrary values for testing
NUM_HEADS = [(40, 40), (64, 8)]  # Arbitrary values for testing

# FlashAttention forward only supports head dimension at most 128
# https://github.com/ROCmSoftwarePlatform/flash-attention/blob/3d2b6f5d037782cc2c906909a46fb7e2e1b48b25/csrc/flash_attn_rocm/flash_api.cpp#L62
HEAD_SIZES = [64, 80, 96, 112, 128, 256
              ] if not is_hip() else [64, 80, 96, 112, 128]

BLOCK_SIZES = [16, 32]
USE_ALIBI = [False, True]
KV_CACHE_DTYPE = ["auto", "fp8_e5m2"]
SEEDS = [0]
CUDA_DEVICES = [
    f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)
]


def ref_masked_attention(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    scale: float,
    attn_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
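    # Shapes, as implied by the einsum subscripts below:
    #   query:       [num_query_tokens, num_heads, head_size]
    #   key / value: [num_kv_tokens, num_heads, head_size]
    #   attn_mask:   broadcastable to [num_heads, num_query_tokens, num_kv_tokens]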
    attn_weights = scale * torch.einsum("qhd,khd->hqk", query, key).float()
    if attn_mask is not None:
        attn_weights = attn_weights + attn_mask.float()
    attn_weights = torch.softmax(attn_weights, dim=-1).to(value.dtype)
    out = torch.einsum("hqk,khd->qhd", attn_weights, value)
    return out


def ref_single_query_cached_kv_attention(
    output: torch.Tensor,
    query: torch.Tensor,
    num_queries_per_kv: int,
    key_cache: torch.Tensor,
    value_cache: torch.Tensor,
    block_tables: torch.Tensor,
    context_lens: torch.Tensor,
    scale: float,
    alibi_slopes: Optional[torch.Tensor],
) -> None:
    num_query_heads = query.shape[1]
    num_kv_heads = value_cache.shape[1]
    head_size = value_cache.shape[2]
    block_size = value_cache.shape[3]
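    # Cache layouts assumed by the indexing below:
    #   key_cache:   [num_blocks, num_kv_heads, head_size // x, block_size, x]
    #   value_cache: [num_blocks, num_kv_heads, head_size, block_size]
    # where x is the number of key elements packed along the last dimension.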
    num_seqs = query.shape[0]

    block_tables = block_tables.cpu().tolist()
    context_lens = context_lens.cpu().tolist()
    for i in range(num_seqs):
        q = query[i].unsqueeze(0)
        block_table = block_tables[i]
        context_len = int(context_lens[i])

        keys = []
        values = []
        for j in range(context_len):
            block_number = int(block_table[j // block_size])
            block_offset = j % block_size

            k = key_cache[block_number, :, :, block_offset, :]
            k = k.reshape(num_kv_heads, head_size)
            keys.append(k)

            v = value_cache[block_number, :, :, block_offset]
            values.append(v)
        keys = torch.stack(keys, dim=0)
        values = torch.stack(values, dim=0)
        if num_queries_per_kv > 1:
            # Handle MQA and GQA
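            # Each KV head serves num_queries_per_kv query heads, so the keys
            # and values are expanded along the head dimension to line up with
            # the query heads for the reference attention.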
            keys = torch.repeat_interleave(keys, num_queries_per_kv, dim=1)
            values = torch.repeat_interleave(values, num_queries_per_kv, dim=1)

        alibi_bias = None
        if alibi_slopes is not None:
            # Create the ALiBi bias used in the paged attention kernel.
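            # The bias for position j is slope * (j - context_len + 1): zero
            # for the last (current) token and increasingly negative for
            # tokens further in the past, with one slope per attention head.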
            position_ids = torch.arange(context_len).int()
            alibi_bias = (position_ids - context_len + 1).float()
            alibi_bias = alibi_slopes.view(-1, 1, 1) * alibi_bias.view(
                1, 1, -1)

        out = ref_masked_attention(q, keys, values, scale, alibi_bias)
        out = out.view(num_query_heads, head_size)
        output[i].copy_(out, non_blocking=True)


@pytest.mark.parametrize("version", ["v1", "v2"])
@pytest.mark.parametrize("num_seqs", NUM_GEN_SEQS)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", HEAD_SIZES)
@pytest.mark.parametrize("use_alibi", USE_ALIBI)
@pytest.mark.parametrize("block_size", BLOCK_SIZES)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("kv_cache_dtype", KV_CACHE_DTYPE)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
def test_paged_attention(
    kv_cache_factory,
    version: str,
    num_seqs: int,
    num_heads: Tuple[int, int],
    head_size: int,
    use_alibi: bool,
    block_size: int,
    dtype: torch.dtype,
    kv_cache_dtype: str,
    seed: int,
    device: str,
) -> None:
    random.seed(seed)
    torch.random.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    torch.set_default_device(device)
    scale = float(1.0 / (head_size**0.5))
    num_query_heads, num_kv_heads = num_heads
    query = torch.empty(num_seqs, num_query_heads, head_size, dtype=dtype)
    query.uniform_(-scale, scale)

    assert num_query_heads % num_kv_heads == 0
    num_queries_per_kv = num_query_heads // num_kv_heads
    alibi_slopes = None
    if use_alibi:
        alibi_slopes = torch.randn(num_query_heads, dtype=torch.float)

    context_lens = [random.randint(1, MAX_SEQ_LEN) for _ in range(num_seqs)]
    context_lens[-1] = MAX_SEQ_LEN
    max_context_len = max(context_lens)
    context_lens = torch.tensor(context_lens, dtype=torch.int)

    # Create the block tables.
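    # A block table maps a sequence's logical block indices to physical block
    # indices in the KV cache. Random (possibly repeated) entries are fine
    # here because the kernel and the reference read through the same table.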
    max_num_blocks_per_seq = (max_context_len + block_size - 1) // block_size
    block_tables = []
    for _ in range(num_seqs):
        block_table = [
            random.randint(0, NUM_BLOCKS - 1)
            for _ in range(max_num_blocks_per_seq)
        ]
        block_tables.append(block_table)
    block_tables = torch.tensor(block_tables, dtype=torch.int)

    # Create the KV caches.
    key_caches, value_caches = kv_cache_factory(NUM_BLOCKS, block_size, 1,
                                                num_kv_heads, head_size,
                                                kv_cache_dtype, dtype, seed,
                                                device)
    key_cache, value_cache = key_caches[0], value_caches[0]

    # Call the paged attention kernel.
    output = torch.empty_like(query)
    if version == "v1":
        ops.paged_attention_v1(
            output,
            query,
            key_cache,
            value_cache,
            num_kv_heads,
            scale,
            block_tables,
            context_lens,
            block_size,
            max_context_len,
            alibi_slopes,
            kv_cache_dtype,
        )
    elif version == "v2":
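        # v2 splits each context into PARTITION_SIZE-token partitions that are
        # processed independently: tmp_output holds per-partition outputs and
        # exp_sums/max_logits hold per-partition softmax statistics, which the
        # op reduces into the final output.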
        num_partitions = ((max_context_len + PARTITION_SIZE - 1) //
                          PARTITION_SIZE)
        assert PARTITION_SIZE % block_size == 0
        num_seqs, num_heads, head_size = output.shape
        tmp_output = torch.empty(
            size=(num_seqs, num_heads, num_partitions, head_size),
            dtype=output.dtype,
        )
        exp_sums = torch.empty(
            size=(num_seqs, num_heads, num_partitions),
            dtype=torch.float32,
        )
        max_logits = torch.empty_like(exp_sums)
        ops.paged_attention_v2(
            output,
            exp_sums,
            max_logits,
            tmp_output,
            query,
            key_cache,
            value_cache,
            num_kv_heads,
            scale,
            block_tables,
            context_lens,
            block_size,
            max_context_len,
            alibi_slopes,
            kv_cache_dtype,
        )
    else:
        raise AssertionError(f"Unknown version: {version}")

    # Run the reference implementation.
    if kv_cache_dtype == "fp8_e5m2":
        # Convert cache data back to dtype.
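        # The reference implementation below operates on `dtype` tensors, so
        # the fp8_e5m2 cache is dequantized first; the comparison tolerance is
        # relaxed later to absorb the quantization error.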
        x = 16 // torch.tensor([], dtype=dtype).element_size()
        key_cache_shape = (NUM_BLOCKS, num_kv_heads, head_size // x,
                           block_size, x)
        dequantized_key_cache = torch.empty(size=key_cache_shape,
                                            dtype=dtype,
                                            device=device)
        cache_ops.convert_fp8_e5m2(key_cache, dequantized_key_cache)
        key_cache = dequantized_key_cache

        value_cache_shape = value_cache.shape
        dequantized_value_cache = torch.empty(size=value_cache_shape,
                                              dtype=dtype,
                                              device=device)
        cache_ops.convert_fp8_e5m2(value_cache, dequantized_value_cache)
        value_cache = dequantized_value_cache

    ref_output = torch.empty_like(query)
    ref_single_query_cached_kv_attention(
        ref_output,
        query,
        num_queries_per_kv,
        key_cache,
        value_cache,
        block_tables,
        context_lens,
        scale,
        alibi_slopes,
    )

    # NOTE(woosuk): Due to kernel-level differences between the two
    # implementations, there is a small numerical difference between their
    # outputs. Thus, we use a relaxed tolerance for the test.
    atol = get_default_atol(output) if is_hip() else 1e-3
    rtol = get_default_rtol(output) if is_hip() else 1e-5

    # NOTE(zhaoyang): FP8 KV Cache will introduce quantization error,
    # so we use a relaxed tolerance for the test.
    if kv_cache_dtype == "fp8_e5m2":
        atol, rtol = 1e-2, 1e-5
    assert torch.allclose(output, ref_output, atol=atol, rtol=rtol)


def ref_multi_query_kv_attention(
    cu_seq_lens: List[int],
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    scale: float,
    dtype: torch.dtype,
) -> torch.Tensor:
    num_seqs = len(cu_seq_lens) - 1
    ref_outputs = []
    for i in range(num_seqs):
        start_idx = cu_seq_lens[i]
        end_idx = cu_seq_lens[i + 1]
        seq_len = end_idx - start_idx

        # Create attention mask.
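        # Causal mask: entries above the diagonal are set to the dtype's most
        # negative value so that they vanish after the softmax.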
        attn_mask = torch.triu(torch.ones(seq_len, seq_len, dtype=dtype),
                               diagonal=1)
        attn_mask = attn_mask * torch.finfo(dtype).min
        attn_mask = attn_mask.to(dtype=dtype)

        ref_output = ref_masked_attention(
            query[start_idx:end_idx],
            key[start_idx:end_idx],
            value[start_idx:end_idx],
            scale,
            attn_mask=attn_mask,
        )
        ref_outputs.append(ref_output)
    ref_output = torch.cat(ref_outputs, dim=0)
    return ref_output


# TODO(woosuk): Add tests for USE_ALIBI=True.
@pytest.mark.parametrize("num_seqs", NUM_PREFILL_SEQS)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", HEAD_SIZES)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
@torch.inference_mode()
def test_multi_query_kv_attention(
    num_seqs: int,
    num_heads: Tuple[int, int],
    head_size: int,
    dtype: torch.dtype,
    seed: int,
    device: str,
) -> None:
    random.seed(seed)
    torch.random.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    torch.set_default_device(device)
    # MAX_SEQ_LEN sometimes causes OOM in the reference implementation.
    # As the xformers library is already covered by its own test suite, we can
    # use a smaller max sequence length here.
    max_len = min(MAX_SEQ_LEN, 4096)
    seq_lens = random.sample(range(1, max_len), num_seqs)
    num_tokens = sum(seq_lens)

    scale = float(1.0 / (head_size**0.5))
    num_query_heads, num_kv_heads = num_heads
    qkv = torch.empty(num_tokens,
                      num_query_heads + 2 * num_kv_heads,
                      head_size,
                      dtype=dtype)
    qkv.uniform_(-scale, scale)
    query, key, value = qkv.split(
        [num_query_heads, num_kv_heads, num_kv_heads], dim=1)

    num_queries_per_kv = num_query_heads // num_kv_heads
    if num_queries_per_kv > 1:
        # Handle MQA and GQA
        key = torch.repeat_interleave(key, num_queries_per_kv, dim=1)
        value = torch.repeat_interleave(value, num_queries_per_kv, dim=1)
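    # BlockDiagonalCausalMask packs the variable-length sequences into a
    # single concatenated token dimension: attention is causal within each
    # sequence and masked out across sequence boundaries.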
    attn_bias = BlockDiagonalCausalMask.from_seqlens(seq_lens)
    output = xops.memory_efficient_attention_forward(
        query.unsqueeze(0),
        key.unsqueeze(0),
        value.unsqueeze(0),
        attn_bias=attn_bias,
        p=0.0,
        scale=scale,
    )
    output = output.squeeze(0)

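    # Cumulative sequence lengths delimit each sequence in the packed token
    # dimension, e.g. seq_lens [2, 3] -> cu_seq_lens [0, 2, 5]; the reference
    # slices query/key/value per sequence using these offsets.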
    cu_seq_lens = [0]
    for seq_len in seq_lens:
        cu_seq_lens.append(cu_seq_lens[-1] + seq_len)
    ref_output = ref_multi_query_kv_attention(
        cu_seq_lens,
        query,
        key,
        value,
        scale,
        dtype,
    )
    atol = get_default_atol(output) if is_hip() else 1e-3
    rtol = get_default_rtol(output) if is_hip() else 1e-5
    assert torch.allclose(output, ref_output, atol=atol, rtol=rtol)