# input_metadata.py

from typing import Dict, List, Tuple

import torch
from xformers.ops import AttentionBias

from vllm.sampling_params import SamplingParams
from vllm.sequence import SequenceData


class InputMetadata:
    """Metadata for input sequences. Used for PagedAttention.

    Args:
        seq_groups: List of (seq_ids, sampling_params).
        seq_data: Seq_id -> SequenceData.
        prompt_lens: Lengths of prompts.
        slot_mapping: The KV cache slot that each token's new key/value is written to.
        context_lens: The length of the attention context for each generation token.
        max_context_len: The maximum context length.
        block_tables: The block tables (seq_id -> list of physical block numbers).
    """

    def __init__(
        self,
        seq_groups: List[Tuple[List[int], SamplingParams]],
        seq_data: Dict[int, SequenceData],
        prompt_lens: List[int],
        slot_mapping: torch.Tensor,
        context_lens: torch.Tensor,
        max_context_len: int,
        block_tables: torch.Tensor,
    ) -> None:
        self.seq_groups = seq_groups
        self.seq_data = seq_data
        self.prompt_lens = prompt_lens
        self.slot_mapping = slot_mapping
        self.context_lens = context_lens
        self.max_context_len = max_context_len
        self.block_tables = block_tables

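        # Derived sizes: slot_mapping holds one KV cache slot per valid input
        # token, while context_lens and block_tables hold one entry per
        # generation (decode) token, so the counts below follow from shapes.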
        self.num_prompts = len(prompt_lens)
        self.num_prompt_tokens = sum(prompt_lens)
        self.num_generation_tokens = context_lens.shape[0]
        self.num_valid_tokens = slot_mapping.shape[0]
        if block_tables.numel() > 0:
            self.max_num_blocks_per_seq = block_tables.shape[1]
        else:
            self.max_num_blocks_per_seq = 0
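        # Sanity checks: both tensors must have exactly one entry per
        # generation token (i.e. per decoding sequence).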
        assert block_tables.shape[0] == self.num_generation_tokens
        assert context_lens.shape[0] == self.num_generation_tokens

        # Set during the execution of the first attention op.
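        # The bias is built lazily by the first attention layer (e.g. a
        # block-diagonal causal mask over prompt_lens) and cached here so
        # that subsequent layers can reuse it.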
        self.attn_bias: List[AttentionBias] = []

    def __repr__(self) -> str:
        # Print only useful metadata.
        return (f'InputMetadata('
                f'num_valid_tokens={self.num_valid_tokens}, '
                f'num_prompt_tokens={self.num_prompt_tokens}, '
                f'num_prompts={self.num_prompts}, '
                f'prompt_lens={self.prompt_lens}, '
                f'num_generation_tokens={self.num_generation_tokens}, '
                f'context_lens={self.context_lens}, '
                f'max_context_len={self.max_context_len}, '
                f'max_num_blocks_per_seq={self.max_num_blocks_per_seq}, '
                f'block_tables={self.block_tables}, '
                f'slot_mapping={self.slot_mapping})')
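

# A minimal, hypothetical usage sketch: one three-token prompt and no
# in-flight decode sequences. It assumes SequenceData takes the list of
# prompt token ids and that SamplingParams has usable defaults; the dummy
# token ids and slot indices below are illustrative only.
if __name__ == '__main__':
    metadata = InputMetadata(
        seq_groups=[([0], SamplingParams())],
        seq_data={0: SequenceData([101, 102, 103])},
        prompt_lens=[3],
        slot_mapping=torch.tensor([0, 1, 2], dtype=torch.int),
        context_lens=torch.empty(0, dtype=torch.int),
        max_context_len=0,
        block_tables=torch.empty((0, 0), dtype=torch.int),
    )
    print(metadata)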