"""Multi-head attention."""
from typing import Any, Dict, List, Optional

import torch
import torch.nn as nn
from xformers import ops as xops
from xformers.ops.fmha.attn_bias import (BlockDiagonalCausalMask,
                                         LowerTriangularMaskWithTensorBias)

from vllm import attention_ops
from vllm import cache_ops
from vllm.model_executor.input_metadata import InputMetadata
from vllm.model_executor.layers.rotary_embedding import (
    DynamicNTKScalingRotaryEmbedding, LinearScalingRotaryEmbedding,
    RotaryEmbedding, YaRNScalingRotaryEmbedding)

_SUPPORTED_HEAD_SIZES = [64, 80, 96, 112, 128, 256]
# Should be the same as PARTITION_SIZE in `paged_attention_v2_launcher`.
_PARTITION_SIZE = 512


class PagedAttention(nn.Module):
    # pylint: disable=line-too-long
    """GPT-style multi-head PagedAttention.

    This class takes query, key, and value tensors as input. The input tensors
    can either contain prompt tokens or generation tokens, in addition to
    paddings.

    The class does the following:
    1. Perform multi_query_kv_attention for the prompts. This operation does
        not use the KV cache.
    2. Wait for the cache operations (e.g., swap, copy) to finish. The cache
        operations are issued by the cache engine before executing the forward
        pass of the model, and they are executed asynchronously.
    3. Reshape and store the input key and value tensors in the KV cache.
    4. Perform single_query_cached_kv_attention for the generation tokens.
        This operation reads the previous key and value tensors from the KV
        cache.
    5. Return the output tensor.
    """

    def __init__(self,
                 num_heads: int,
                 head_size: int,
                 scale: float,
                 num_kv_heads: Optional[int] = None,
                 sliding_window: Optional[int] = None) -> None:
        super().__init__()
        self.num_heads = num_heads
        self.head_size = head_size
        self.scale = float(scale)
        self.num_kv_heads = num_heads if num_kv_heads is None else num_kv_heads
        self.sliding_window = sliding_window

        assert self.num_heads % self.num_kv_heads == 0
        self.num_queries_per_kv = self.num_heads // self.num_kv_heads
        self.head_mapping = torch.repeat_interleave(
            torch.arange(self.num_kv_heads, dtype=torch.int32, device="cuda"),
            self.num_queries_per_kv)
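        # Example (illustrative numbers): with num_heads = 8 and
        # num_kv_heads = 2, num_queries_per_kv = 4 and
        # head_mapping = [0, 0, 0, 0, 1, 1, 1, 1], i.e. query heads 0-3 read
        # KV head 0 and query heads 4-7 read KV head 1.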

        if self.head_size not in _SUPPORTED_HEAD_SIZES:
            raise ValueError(f"head_size ({self.head_size}) is not supported. "
                             f"Supported head sizes: {_SUPPORTED_HEAD_SIZES}.")

    def set_attn_bias(
        self,
        input_metadata: InputMetadata,
        dtype: torch.dtype,
    ) -> None:
        del dtype  # Unused.
        if input_metadata.attn_bias is not None:
            # Already set by a previous layer.
            return
        prompt_lens = [input_metadata.max_prompt_len
                       ] * input_metadata.num_prompts
        attn_bias = BlockDiagonalCausalMask.from_seqlens(prompt_lens)
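        # For example (illustrative numbers): num_prompts = 2 and
        # max_prompt_len = 3 yield a block-diagonal causal mask over the 6
        # flattened prompt tokens, so each prompt attends causally only to
        # its own tokens.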
        if self.sliding_window is not None:
            attn_bias = attn_bias.make_local_attention(self.sliding_window)
        input_metadata.attn_bias = attn_bias

    def multi_query_kv_attention(
        self,
        output: torch.Tensor,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        input_metadata: InputMetadata,
    ) -> torch.Tensor:
        """Normal attention for the prompt tokens.

        Args:
            output: shape = [num_prompt_tokens, num_heads, head_size]
            query: shape = [num_prompt_tokens, num_heads, head_size]
            key: shape = [num_prompt_tokens, num_kv_heads, head_size]
            value: shape = [num_prompt_tokens, num_kv_heads, head_size]
            input_metadata: metadata for paged attention.
        """
        if self.num_kv_heads != self.num_heads:
            # Project the key and value tensors to the desired number of heads.
            key = torch.repeat_interleave(key, self.num_queries_per_kv, dim=1)
            value = torch.repeat_interleave(value,
                                            self.num_queries_per_kv,
                                            dim=1)
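            # Example (illustrative numbers): with num_kv_heads = 2 and
            # num_queries_per_kv = 4, key goes from
            # [num_prompt_tokens, 2, head_size] to
            # [num_prompt_tokens, 8, head_size] so it lines up with the
            # query heads.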

        # TODO(woosuk): The unsqueeze op may incur some CPU overhead. Optimize.
        out = xops.memory_efficient_attention_forward(
            query.unsqueeze(0),
            key.unsqueeze(0),
            value.unsqueeze(0),
            attn_bias=input_metadata.attn_bias,
            p=0.0,
            scale=self.scale,
        )
        # TODO(woosuk): Unnecessary copy. Optimize.
        output.copy_(out.squeeze(0))
        return output

    def get_alibi_slopes(self) -> Optional[torch.Tensor]:
        """Returns the slopes for the alibi attention bias.

        Returns:
            slopes: shape = [num_heads]
        """
        return None

    def single_query_cached_kv_attention(
        self,
        output: torch.Tensor,
        query: torch.Tensor,
        key_cache: torch.Tensor,
        value_cache: torch.Tensor,
        input_metadata: InputMetadata,
        alibi_slopes: Optional[torch.Tensor],
    ) -> None:
        """PagedAttention for the generation tokens.

        Args:
            output: shape = [num_generation_tokens, num_heads, head_size]
            query: shape = [num_generation_tokens, num_heads, head_size]
            key_cache: shape = [num_blocks, num_kv_heads, head_size/x,
                block_size, x]
            value_cache: shape = [num_blocks, num_kv_heads, head_size,
                block_size]
            input_metadata: metadata for paged attention.
            alibi_slopes: shape = [num_heads]
        """
        block_size = value_cache.shape[3]
        num_seqs, num_heads, head_size = query.shape
        max_num_partitions = (
            (input_metadata.max_context_len + _PARTITION_SIZE - 1) //
            _PARTITION_SIZE)
        # NOTE(woosuk): We use a simple heuristic to decide whether to use
        # PagedAttention V1 or V2. If the number of partitions is 1, we use
        # V1 to avoid the overhead of reduction. Also, if the number of
        # sequences or heads is large, we use V1 since there is enough work
        # to parallelize.
        # TODO(woosuk): Tune this heuristic.
        # For context len > 8192, use V2 kernel to avoid shared memory shortage.
        use_v1 = input_metadata.max_context_len <= 8192 and (
            max_num_partitions == 1 or num_seqs * num_heads > 512)
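        # Worked example (illustrative numbers): max_context_len = 1024 gives
        # max_num_partitions = 2, and with num_seqs = 8, num_heads = 32
        # (8 * 32 = 256 <= 512) the V2 kernel is chosen; a 300-token context
        # fits in a single partition and uses V1 instead.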
        if use_v1:
            # Run PagedAttention V1.
            attention_ops.paged_attention_v1(
                output,
                query,
                key_cache,
                value_cache,
                self.head_mapping,
                self.scale,
                input_metadata.block_tables,
                input_metadata.context_lens,
                block_size,
                input_metadata.max_context_len,
                alibi_slopes,
            )
        else:
            # Run PagedAttention V2.
            assert _PARTITION_SIZE % block_size == 0
            tmp_output = torch.empty(
                size=(num_seqs, num_heads, max_num_partitions, head_size),
                dtype=output.dtype,
                device=output.device,
            )
            exp_sums = torch.empty(
                size=(num_seqs, num_heads, max_num_partitions),
                dtype=torch.float32,
                device=output.device,
            )
            max_logits = torch.empty_like(exp_sums)
            attention_ops.paged_attention_v2(
                output,
                exp_sums,
                max_logits,
                tmp_output,
                query,
                key_cache,
                value_cache,
                self.head_mapping,
                self.scale,
                input_metadata.block_tables,
                input_metadata.context_lens,
                block_size,
                input_metadata.max_context_len,
                alibi_slopes,
            )

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        key_cache: Optional[torch.Tensor],
        value_cache: Optional[torch.Tensor],
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
    ) -> torch.Tensor:
        """PagedAttention forward pass.

        NOTE: The query, key, and value tensors must be sliced from a qkv
        tensor of shape [batch_size, seq_len, 3 * num_heads * head_size].

        Args:
            query: shape = [batch_size, seq_len, num_heads * head_size]
            key: shape = [batch_size, seq_len, num_kv_heads * head_size]
            value: shape = [batch_size, seq_len, num_kv_heads * head_size]
            key_cache: shape = [num_blocks, num_kv_heads, head_size/x,
                block_size, x]
            value_cache: shape = [num_blocks, num_kv_heads, head_size,
                block_size]
            input_metadata: metadata for paged attention.
            cache_event: event to wait for the cache operations to finish.

        Returns:
            shape = [batch_size, seq_len, num_heads * head_size]
        """
        batch_size, seq_len, _ = query.shape
        # Reshape the query, key, and value tensors.
        query = query.view(-1, self.num_heads, self.head_size)
        key = key.view(-1, self.num_kv_heads, self.head_size)
        value = value.view(-1, self.num_kv_heads, self.head_size)
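        # Example (illustrative numbers): batch_size = 2, seq_len = 3,
        # num_heads = 8, head_size = 64 reshapes query from [2, 3, 512] to
        # [6, 8, 64], i.e. one row per token.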

        # Pre-allocate the output tensor.
        output = torch.empty_like(query)

        # Compute the attention op for prompts.
        num_prompt_tokens = input_metadata.num_prompt_tokens
        if num_prompt_tokens > 0:
            # Prompt run.
            assert input_metadata.num_generation_tokens == 0
            self.set_attn_bias(input_metadata, dtype=query.dtype)
            self.multi_query_kv_attention(
                output,
                query,
                key,
                value,
                input_metadata,
            )

        # Wait until the cache op is done.
        if cache_event is not None:
            cache_event.wait()

        # Reshape the keys and values and store them in the cache.
        # When key_cache and value_cache are not provided, the new key
        # and value vectors will not be cached.
        if key_cache is not None and value_cache is not None:
            key_to_cache = key
            value_to_cache = value
            slot_mapping = input_metadata.slot_mapping.view(-1)
            if input_metadata.to_cache is not None:
                key_to_cache = key_to_cache[input_metadata.to_cache]
                value_to_cache = value_to_cache[input_metadata.to_cache]
                slot_mapping = slot_mapping[input_metadata.to_cache]

            cache_ops.reshape_and_cache(
                key_to_cache,
                value_to_cache,
                key_cache,
                value_cache,
                slot_mapping,
            )

        if input_metadata.num_generation_tokens > 0:
            # Decoding run.
            assert input_metadata.num_prompt_tokens == 0
            assert key_cache is not None and value_cache is not None, (
                "key_cache and value_cache must be provided when "
                "generating tokens.")
            # Compute the attention op for generation tokens.
            self.single_query_cached_kv_attention(output, query, key_cache,
                                                  value_cache, input_metadata,
                                                  self.get_alibi_slopes())

        # Reshape the output tensor.
        # NOTE(woosuk): The output tensor may include paddings.
        return output.view(batch_size, seq_len,
                           self.num_heads * self.head_size)


class PagedAttentionWithRoPE(PagedAttention):
    """PagedAttention with rotary positional embedding."""

    def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        rotary_dim: int,
        max_position: int = 8192,
        base: int = 10000,
        num_kv_heads: Optional[int] = None,
        is_neox_style: bool = True,
        rope_scaling: Optional[Dict[str, Any]] = None,
        sliding_window: Optional[int] = None,
    ) -> None:
        super().__init__(num_heads,
                         head_size,
                         scale,
                         num_kv_heads,
                         sliding_window=sliding_window)
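        # rope_scaling mirrors the HF-style config dict, e.g. (illustrative)
        # {"type": "linear", "factor": 4.0} for position-interpolated models.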
        if rope_scaling is None:
            self.rotary_emb = RotaryEmbedding(head_size, rotary_dim,
                                              max_position, base,
                                              is_neox_style)
        else:
            scaling_type = rope_scaling["type"]
            scaling_factor = rope_scaling["factor"]
            if scaling_type == "linear":
                self.rotary_emb = LinearScalingRotaryEmbedding(
                    head_size, rotary_dim, max_position, base, is_neox_style,
                    scaling_factor)
            elif scaling_type == "dynamic":
                self.rotary_emb = DynamicNTKScalingRotaryEmbedding(
                    head_size, rotary_dim, max_position, base, is_neox_style,
                    scaling_factor)
            elif scaling_type == "yarn":
                original_max_position = rope_scaling[
                    "original_max_position_embeddings"]
                assert max_position == original_max_position * scaling_factor
                extra_kwargs = {
                    k: v
                    for k, v in rope_scaling.items()
                    if k in ("extrapolation_factor", "attn_factor",
                             "beta_fast", "beta_slow")
                }
                self.rotary_emb = YaRNScalingRotaryEmbedding(
                    head_size, rotary_dim, original_max_position, base,
                    is_neox_style, scaling_factor, **extra_kwargs)
            else:
                raise ValueError(f"Unknown RoPE scaling type {scaling_type}")

    def forward(
        self,
        positions: torch.Tensor,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        key_cache: torch.Tensor,
        value_cache: torch.Tensor,
        input_metadata: InputMetadata,
        cache_event: Optional[torch.cuda.Event],
    ) -> torch.Tensor:
        """PagedAttention forward pass with rotary embedding.

        Args:
            positions: shape = [batch_size, seq_len]
            query: shape = [batch_size, seq_len, num_heads * head_size]
            key: shape = [batch_size, seq_len, num_kv_heads * head_size]
            value: shape = [batch_size, seq_len, num_kv_heads * head_size]
            key_cache: shape = [num_blocks, num_kv_heads, head_size/x,
                block_size, x]
            value_cache: shape = [num_blocks, num_kv_heads, head_size,
                block_size]
            input_metadata: metadata for paged attention.
            cache_event: event to wait for the cache operations to finish.

        Returns:
            shape = [batch_size, seq_len, num_heads * head_size]
        """

        # Apply rotary embedding to the query and key before passing them
        # to the attention op.
        query, key = self.rotary_emb(positions, query, key)
        return super().forward(
            query,
            key,
            value,
            key_cache,
            value_cache,
            input_metadata,
            cache_event,
        )
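# A minimal usage sketch (hypothetical values; the real call sites live in the
# model implementations, which construct the layer from the model config):
#
#     attn = PagedAttentionWithRoPE(num_heads=32,
#                                   head_size=128,
#                                   scale=128**-0.5,
#                                   rotary_dim=128,
#                                   max_position=4096,
#                                   num_kv_heads=32)
#     out = attn(positions, query, key, value, key_cache, value_cache,
#                input_metadata, cache_event)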


class PagedAttentionWithALiBi(PagedAttention):
    """PagedAttention with ALiBi attention bias."""

    def __init__(self,
                 num_heads: int,
                 head_size: int,
                 scale: float,
                 slopes: List[float],
                 num_kv_heads: Optional[int] = None) -> None:
        super().__init__(num_heads, head_size, scale, num_kv_heads)
        assert len(slopes) == num_heads

        slopes = torch.tensor(slopes, dtype=torch.float32)
        self.register_buffer("alibi_slopes", slopes, persistent=False)
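        # Example (illustrative): for num_heads = 8 the commonly used ALiBi
        # slopes are 2**-1, 2**-2, ..., 2**-8; the model code computes them
        # and passes them in as `slopes`.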

    def set_attn_bias(self, input_metadata: InputMetadata,
                      dtype: torch.dtype) -> None:
        if input_metadata.attn_bias is not None:
            # Already set by a previous layer.
            return
        # Generates ALiBi mask based on the max prompt length.
        max_prompt_len = input_metadata.max_prompt_len
        bias = torch.arange(max_prompt_len, dtype=dtype)
        # NOTE(zhuohan): HF uses
        #     `bias = bias[None, :].repeat(prompt_len, 1)`
        # here. We find that both biases give the same results, but
        # the bias below more accurately follows the original ALiBi
        # paper.
        bias = bias[None, :] - bias[:, None]
        bias = bias.to(self.alibi_slopes.device)

        # When using custom attention bias, xformers requires the bias to
        # be sliced from a tensor whose length is a multiple of 8.
        padded_len = (max_prompt_len + 7) // 8 * 8
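        # For example (illustrative): max_prompt_len = 10 gives
        # padded_len = 16; the bias below is allocated with a last dimension
        # of 16 and then sliced back to 10, keeping the underlying storage
        # 8-aligned for xformers.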
        bias = torch.empty(
            input_metadata.num_prompts,
            self.num_heads,
            max_prompt_len,
            padded_len,
            device=self.alibi_slopes.device,
            dtype=dtype,
        )[:, :, :, :max_prompt_len].copy_(bias)
        bias.mul_(self.alibi_slopes[:, None, None])
        attn_bias = LowerTriangularMaskWithTensorBias(bias)
        input_metadata.attn_bias = attn_bias

    def multi_query_kv_attention(
        self,
        output: torch.Tensor,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        input_metadata: InputMetadata,
    ) -> torch.Tensor:
        """Attention with ALiBi bias for the prompt tokens.

        Args:
            output: shape = [num_prompt_tokens, num_heads, head_size]
            query: shape = [num_prompt_tokens, num_heads, head_size]
            key: shape = [num_prompt_tokens, num_kv_heads, head_size]
            value: shape = [num_prompt_tokens, num_kv_heads, head_size]
            input_metadata: metadata for paged attention.
        """
        if self.num_kv_heads != self.num_heads:
            # Project the key and value tensors to the desired number of heads.
            key = torch.repeat_interleave(key, self.num_queries_per_kv, dim=1)
            value = torch.repeat_interleave(value,
                                            self.num_queries_per_kv,
                                            dim=1)
        batch_size = input_metadata.num_prompts
        seq_len = input_metadata.max_prompt_len

        out = xops.memory_efficient_attention_forward(
            query.view(batch_size, seq_len, self.num_heads, self.head_size),
            key.view(batch_size, seq_len, self.num_heads, self.head_size),
            value.view(batch_size, seq_len, self.num_heads, self.head_size),
            attn_bias=input_metadata.attn_bias,
            p=0.0,
            scale=self.scale,
        )
        # TODO(woosuk): Unnecessary copy. Optimize.
        output.copy_(out.view(-1, self.num_heads, self.head_size))
        return output

    def get_alibi_slopes(self) -> Optional[torch.Tensor]:
        return self.alibi_slopes