"""CacheEngine class for managing the KV cache."""
from typing import Dict, List, Tuple

import torch

from vllm._C import cache_ops
from vllm.config import CacheConfig, ModelConfig, ParallelConfig
from vllm.logger import init_logger
from vllm.utils import in_wsl, STR_DTYPE_TO_TORCH_DTYPE

logger = init_logger(__name__)

KVCache = Tuple[torch.Tensor, torch.Tensor]


class CacheEngine:
    """Manages the KV cache.

    This class is responsible for initializing and managing the GPU and CPU KV
    caches. It also provides methods for performing KV cache operations, such
    as swapping and copying.
    """

    def __init__(
        self,
        cache_config: CacheConfig,
        model_config: ModelConfig,
        parallel_config: ParallelConfig,
    ) -> None:
        self.cache_config = cache_config
        self.model_config = model_config
        self.parallel_config = parallel_config

        self.head_size = model_config.get_head_size()
        self.num_layers = model_config.get_num_layers(parallel_config)
        self.num_heads = model_config.get_num_kv_heads(parallel_config)

        self.block_size = cache_config.block_size
        self.num_gpu_blocks = cache_config.num_gpu_blocks
        self.num_cpu_blocks = cache_config.num_cpu_blocks

        if cache_config.cache_dtype == "auto":
            self.dtype = model_config.dtype
        else:
            self.dtype = STR_DTYPE_TO_TORCH_DTYPE[cache_config.cache_dtype]

        # Initialize the cache.
        self.gpu_cache = self.allocate_gpu_cache()
        self.cpu_cache = self.allocate_cpu_cache()

        # Initialize the stream for caching operations.
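        # Swaps run on a dedicated stream so that block transfers can overlap
        # with model execution on the default stream.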
        self.cache_stream = torch.cuda.Stream()
        assert self.cache_stream != torch.cuda.current_stream()
        # Initialize the events for stream synchronization.
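        # One event per layer: each swap records its layer's event, so later
        # work can wait on an individual layer's blocks instead of the whole
        # transfer.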
        self.events = [torch.cuda.Event() for _ in range(self.num_layers)]

    def get_key_block_shape(self) -> Tuple[int, int, int, int]:
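        # A key block is stored as (num_heads, head_size // x, block_size, x),
        # where x is the number of elements that fit in 16 bytes. This packing
        # lets the attention kernel load keys with 16-byte vectorized accesses.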
        element_size = torch.tensor([], dtype=self.dtype).element_size()
        x = 16 // element_size
        return (
            self.num_heads,
            self.head_size // x,
            self.block_size,
            x,
        )

    def get_value_block_shape(self) -> Tuple[int, int, int]:
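        # Value blocks keep the natural (num_heads, head_size, block_size)
        # layout; no extra packing is needed for them.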
        return (
            self.num_heads,
            self.head_size,
            self.block_size,
        )

    def allocate_gpu_cache(self) -> List[KVCache]:
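        # Allocate one (key, value) tensor pair per layer, each with a leading
        # dimension of num_gpu_blocks.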
        gpu_cache: List[KVCache] = []
        key_block_shape = self.get_key_block_shape()
        value_block_shape = self.get_value_block_shape()
        for _ in range(self.num_layers):
            key_blocks = torch.empty(
                size=(self.num_gpu_blocks, *key_block_shape),
                dtype=self.dtype,
                device="cuda",
            )
            value_blocks = torch.empty(
                size=(self.num_gpu_blocks, *value_block_shape),
                dtype=self.dtype,
                device="cuda",
            )
            gpu_cache.append((key_blocks, value_blocks))
        return gpu_cache

    def allocate_cpu_cache(self) -> List[KVCache]:
        cpu_cache: List[KVCache] = []
        key_block_shape = self.get_key_block_shape()
        value_block_shape = self.get_value_block_shape()
        pin_memory = not in_wsl()
        if not pin_memory:
            # Pinning memory in WSL is not supported.
            # https://docs.nvidia.com/cuda/wsl-user-guide/index.html#known-limitations-for-linux-cuda-applications
            logger.warning("Using 'pin_memory=False' as WSL is detected. "
                           "This may slow down performance.")
        for _ in range(self.num_layers):
            key_blocks = torch.empty(
                size=(self.num_cpu_blocks, *key_block_shape),
                dtype=self.dtype,
                pin_memory=pin_memory,
            )
            value_blocks = torch.empty(
                size=(self.num_cpu_blocks, *value_block_shape),
                dtype=self.dtype,
                pin_memory=pin_memory,
            )
            cpu_cache.append((key_blocks, value_blocks))
        return cpu_cache

    def _swap(
        self,
        src: List[KVCache],
        dst: List[KVCache],
        src_to_dst: Dict[int, int],
    ) -> None:
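        # src_to_dst maps block numbers in `src` to block numbers in `dst`.
        # The copies are issued on the cache stream, and a per-layer event is
        # recorded so callers can synchronize with each layer's transfer.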
        with torch.cuda.stream(self.cache_stream):
            for i in range(self.num_layers):
                src_key_cache, src_value_cache = src[i]
                dst_key_cache, dst_value_cache = dst[i]
                # Copy the key blocks.
                cache_ops.swap_blocks(src_key_cache, dst_key_cache, src_to_dst)
                # Copy the value blocks.
                cache_ops.swap_blocks(src_value_cache, dst_value_cache,
                                      src_to_dst)
                event = self.events[i]
                event.record(stream=self.cache_stream)

    def swap_in(self, src_to_dst: Dict[int, int]) -> None:
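        # Move the given blocks from the CPU cache into the GPU cache.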
        self._swap(self.cpu_cache, self.gpu_cache, src_to_dst)

    def swap_out(self, src_to_dst: Dict[int, int]) -> None:
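        # Move the given blocks from the GPU cache out to the CPU cache.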
        self._swap(self.gpu_cache, self.cpu_cache, src_to_dst)

    def copy(self, src_to_dsts: Dict[int, List[int]]) -> None:
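        # src_to_dsts maps a source block number to one or more destination
        # block numbers; this is used for copy-on-write when sequences that
        # share a block diverge.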
        key_caches = [key_cache for key_cache, _ in self.gpu_cache]
        value_caches = [value_cache for _, value_cache in self.gpu_cache]
        # NOTE(woosuk): This operation implicitly synchronizes the CPU and GPU.
        cache_ops.copy_blocks(key_caches, value_caches, src_to_dsts)

    @staticmethod
    def get_cache_block_size(
        block_size: int,
        cache_dtype: str,
        model_config: ModelConfig,
        parallel_config: ParallelConfig,
    ) -> int:
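        # Size of one cache block in bytes:
        #   2 (key + value) * num_layers * block_size * num_heads * head_size
        #   * dtype_size.
        # Illustrative numbers only: with block_size=16, num_heads=32,
        # head_size=128, num_layers=32 and fp16, this is
        # 2 * 32 * 16 * 32 * 128 * 2 bytes = 8 MiB per block.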
        head_size = model_config.get_head_size()
        num_heads = model_config.get_num_kv_heads(parallel_config)
        num_layers = model_config.get_num_layers(parallel_config)

        key_cache_block = block_size * num_heads * head_size
        value_cache_block = key_cache_block
        total = num_layers * (key_cache_block + value_cache_block)
        if cache_dtype == "auto":
            dtype = model_config.dtype
        else:
            dtype = STR_DTYPE_TO_TORCH_DTYPE[cache_dtype]
        dtype_size = _get_dtype_size(dtype)
        return dtype_size * total


def _get_dtype_size(dtype: torch.dtype) -> int:
    return torch.tensor([], dtype=dtype).element_size()