"""A GPU worker class."""
import gc
import os
from typing import Dict, List, Tuple, Set, Optional

import torch
import torch.distributed

from vllm.config import (CacheConfig, DeviceConfig, ModelConfig,
                         ParallelConfig, SchedulerConfig, LoRAConfig)
from vllm.model_executor import set_random_seed
from vllm.model_executor.parallel_utils import cupy_utils
from vllm.model_executor.parallel_utils.communication_op import (
    broadcast_tensor_dict)
from vllm.model_executor.parallel_utils.custom_all_reduce import init_custom_ar
from vllm.model_executor.parallel_utils.parallel_state import (
    ensure_model_parallel_initialized)
from vllm.sequence import SamplerOutput, SequenceGroupMetadata
from vllm.worker.cache_engine import CacheEngine
from vllm.worker.model_runner import ModelRunner
from vllm.lora.request import LoRARequest
from vllm.utils import is_hip


class Worker:
    """A worker class that executes (a partition of) the model on a GPU.

    Each worker is associated with a single GPU. The worker is responsible for
    maintaining the KV cache and executing the model on the GPU. In case of
    distributed inference, each worker is assigned a partition of the model.
    """

    def __init__(
        self,
        model_config: ModelConfig,
        parallel_config: ParallelConfig,
        scheduler_config: SchedulerConfig,
        device_config: DeviceConfig,
        local_rank: int,
        rank: int,
        distributed_init_method: str,
        lora_config: Optional[LoRAConfig] = None,
        kv_cache_dtype: Optional[str] = "auto",
        is_driver_worker: bool = False,
    ) -> None:
        self.model_config = model_config
        self.parallel_config = parallel_config
        self.scheduler_config = scheduler_config
        self.device_config = device_config
        self.local_rank = local_rank
        self.rank = rank
        self.distributed_init_method = distributed_init_method
        self.lora_config = lora_config
        self.is_driver_worker = is_driver_worker
        if self.is_driver_worker:
            assert self.rank == 0, "The driver worker must have rank 0."

        self.model_runner = ModelRunner(model_config,
                                        parallel_config,
                                        scheduler_config,
                                        device_config,
                                        lora_config=self.lora_config,
                                        kv_cache_dtype=kv_cache_dtype,
                                        is_driver_worker=is_driver_worker)

        # Uninitialized cache engine. Will be initialized by
        # self.init_cache_engine().
        self.cache_config = None
        self.cache_engine = None
        self.cache_events = None
        self.gpu_cache = None

    def init_model(self, cupy_port: Optional[int] = None) -> None:
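        """Initialize the device, the distributed environment, and the seed."""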
        if self.device_config.device.type == "cuda":
            # torch.distributed.all_reduce does not free the input tensor until
            # the synchronization point. This causes the memory usage to grow
            # as the number of all_reduce calls increases. This env var disables
            # this behavior.
            # Related issue:
            # https://discuss.pytorch.org/t/cuda-allocation-lifetime-for-inputs-to-distributed-all-reduce/191573
            os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"

            # This env var set by Ray causes exceptions with graph building.
            os.environ.pop("NCCL_ASYNC_ERROR_HANDLING", None)
            self.device = torch.device(f"cuda:{self.local_rank}")
            torch.cuda.set_device(self.device)

            _check_if_gpu_supports_dtype(self.model_config.dtype)
            torch.cuda.empty_cache()
            self.init_gpu_memory = torch.cuda.mem_get_info()[0]
        else:
            raise RuntimeError(
                f"Unsupported device type: {self.device_config.device}")
        # Initialize the distributed environment.
        init_distributed_environment(self.parallel_config, self.rank,
                                     cupy_port, self.distributed_init_method)
        if not self.parallel_config.disable_custom_all_reduce:
            init_custom_ar()
        # Initialize the model.
        set_random_seed(self.model_config.seed)

    def load_model(self):
        self.model_runner.load_model()

    @torch.inference_mode()
    def profile_num_available_blocks(
        self,
        block_size: int,
        gpu_memory_utilization: float,
        cpu_swap_space: int,
        cache_dtype: str,
    ) -> Tuple[int, int]:
        """Profiles the peak memory usage of the model and returns the maximum
        number of GPU and CPU cache blocks that can be allocated.

        Args:
            block_size: The size of the cache block.
            gpu_memory_utilization: The fraction of the total GPU memory to use.
            cpu_swap_space: The size of the CPU swap space in bytes.
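            cache_dtype: The data type of the KV cache.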
        """
        # Profile the memory usage of the model and get the maximum number of
        # cache blocks that can be allocated with the remaining free memory.
        torch.cuda.empty_cache()

        # Execute a forward pass with dummy inputs to profile the memory usage
        # of the model.
        self.model_runner.profile_run()

        # Calculate the number of blocks that can be allocated with the
        # profiled peak memory.
        torch.cuda.synchronize()
        free_gpu_memory, total_gpu_memory = torch.cuda.mem_get_info()
        # NOTE(woosuk): Here we assume that the other processes using the same
        # GPU did not change their memory usage during the profiling.
        peak_memory = self.init_gpu_memory - free_gpu_memory

        cache_block_size = CacheEngine.get_cache_block_size(
            block_size, cache_dtype, self.model_config, self.parallel_config)
        num_gpu_blocks = int(
            (total_gpu_memory * gpu_memory_utilization - peak_memory) //
            cache_block_size)
        num_cpu_blocks = int(cpu_swap_space // cache_block_size)
        num_gpu_blocks = max(num_gpu_blocks, 0)
        num_cpu_blocks = max(num_cpu_blocks, 0)
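        # (Illustrative arithmetic for the block counts above, with made-up
        # numbers: 80 GiB of total GPU memory, gpu_memory_utilization=0.9, a
        # profiled peak of 60 GiB, and a 2 MiB cache block give
        # (80 GiB * 0.9 - 60 GiB) / 2 MiB = 6144 GPU blocks.)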
        if self.model_runner.lora_manager:
            self.model_runner.remove_all_loras()
        gc.collect()
        torch.cuda.empty_cache()
        return num_gpu_blocks, num_cpu_blocks

    def init_cache_engine(self, cache_config: CacheConfig) -> None:
        self.cache_config = cache_config
        self.cache_engine = CacheEngine(self.cache_config, self.model_config,
                                        self.parallel_config)
        self.cache_events = self.cache_engine.events
        self.gpu_cache = self.cache_engine.gpu_cache
        self.model_runner.set_block_size(self.cache_engine.block_size)

    def warm_up_model(self) -> None:
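        # Unless eager execution is enforced, capture the model into CUDA
        # graphs to cut per-step CPU launch overhead during decoding.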
        if not self.model_config.enforce_eager:
            self.model_runner.capture_model(self.gpu_cache)
        # Reset the seed to ensure that the random state is not affected by
        # the model initialization and profiling.
        set_random_seed(self.model_config.seed)

    def cache_swap(
        self,
        blocks_to_swap_in: Dict[int, int],
        blocks_to_swap_out: Dict[int, int],
        blocks_to_copy: Dict[int, List[int]],
    ) -> None:
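        # blocks_to_swap_in maps CPU block numbers to GPU block numbers,
        # blocks_to_swap_out maps GPU block numbers to CPU block numbers, and
        # blocks_to_copy maps a source GPU block to its destination GPU blocks.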
        # Issue cache operations.
        issued_cache_op = False
        if blocks_to_swap_in:
            self.cache_engine.swap_in(blocks_to_swap_in)
            issued_cache_op = True
        if blocks_to_swap_out:
            self.cache_engine.swap_out(blocks_to_swap_out)
            issued_cache_op = True
        if blocks_to_copy:
            self.cache_engine.copy(blocks_to_copy)
            issued_cache_op = True

        cache_events = self.cache_events if issued_cache_op else None

        # Wait for cache operations to finish.
        # TODO(woosuk): Profile swapping overhead and optimize if needed.
        if cache_events is not None:
            for event in cache_events:
                event.wait()

    @torch.inference_mode()
    def execute_model(
        self,
        seq_group_metadata_list: Optional[List[SequenceGroupMetadata]] = None,
        blocks_to_swap_in: Optional[Dict[int, int]] = None,
        blocks_to_swap_out: Optional[Dict[int, int]] = None,
        blocks_to_copy: Optional[Dict[int, List[int]]] = None,
    ) -> Optional[SamplerOutput]:
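        # The driver worker (rank 0) receives the scheduler output directly
        # and broadcasts the swap/copy metadata to the other workers, which
        # block on the broadcast below instead of reading the arguments.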
        if self.is_driver_worker:
            assert seq_group_metadata_list is not None
            num_seq_groups = len(seq_group_metadata_list)
            assert blocks_to_swap_in is not None
            assert blocks_to_swap_out is not None
            assert blocks_to_copy is not None
            data = {
                "num_seq_groups": num_seq_groups,
                "blocks_to_swap_in": blocks_to_swap_in,
                "blocks_to_swap_out": blocks_to_swap_out,
                "blocks_to_copy": blocks_to_copy,
            }
            broadcast_tensor_dict(data, src=0)
        else:
            data = broadcast_tensor_dict(src=0)
            num_seq_groups = data["num_seq_groups"]
            blocks_to_swap_in = data["blocks_to_swap_in"]
            blocks_to_swap_out = data["blocks_to_swap_out"]
            blocks_to_copy = data["blocks_to_copy"]

        self.cache_swap(blocks_to_swap_in, blocks_to_swap_out, blocks_to_copy)

        # If there is no input, we don't need to execute the model.
        if num_seq_groups == 0:
            return {}

        output = self.model_runner.execute_model(seq_group_metadata_list,
                                                 self.gpu_cache)
        return output

    def add_lora(self, lora_request: LoRARequest) -> bool:
        return self.model_runner.add_lora(lora_request)

    def remove_lora(self, lora_id: int) -> bool:
        return self.model_runner.remove_lora(lora_id)

    def list_loras(self) -> Set[int]:
        return self.model_runner.list_loras()


def init_distributed_environment(
    parallel_config: ParallelConfig,
    rank: int,
    cupy_port: Optional[int],
    distributed_init_method: Optional[str] = None,
) -> None:
    """Initialize the distributed environment."""
    if torch.distributed.is_initialized():
        torch_world_size = torch.distributed.get_world_size()
        if torch_world_size != parallel_config.world_size:
            raise RuntimeError(
                "torch.distributed is already initialized but the torch world "
                "size does not match parallel_config.world_size "
                f"({torch_world_size} vs. {parallel_config.world_size}).")
    elif not distributed_init_method:
        raise ValueError(
            "distributed_init_method must be set if torch.distributed "
            "is not already initialized")
    else:
        torch.distributed.init_process_group(
            backend="nccl",
            world_size=parallel_config.world_size,
            rank=rank,
            init_method=distributed_init_method,
        )

    if cupy_utils.is_initialized():
        cupy_world_size = cupy_utils.get_world_size()
        if cupy_world_size != parallel_config.world_size:
            raise RuntimeError(
                "cupy.distributed is already initialized but the cupy world "
                "size does not match parallel_config.world_size "
                f"({cupy_world_size} vs. {parallel_config.world_size}).")
    elif (parallel_config.world_size > 1 and cupy_port is not None
          and not is_hip()):
        # NOTE(woosuk): We don't initialize CuPy process group when world size
        # is 1.
        # TODO(woosuk): Support multi-node connection.
        cupy_utils.init_process_group(
            world_size=parallel_config.world_size,
            rank=rank,
            host="localhost",
            port=cupy_port,
        )

    # A small all_reduce for warmup.
    torch.distributed.all_reduce(torch.zeros(1).cuda())
    if cupy_utils.is_initialized():
        cupy_utils.all_reduce(torch.zeros(1).cuda())
    ensure_model_parallel_initialized(parallel_config.tensor_parallel_size,
                                      parallel_config.pipeline_parallel_size)


def _check_if_gpu_supports_dtype(torch_dtype: torch.dtype):
    # Check if the GPU supports the dtype.
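    # bfloat16 requires compute capability >= 8.0 (e.g. A100/Ampere or newer);
    # older GPUs such as V100 (7.0) or T4 (7.5) only support float16.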
    if torch_dtype == torch.bfloat16:
        compute_capability = torch.cuda.get_device_capability()
        if compute_capability[0] < 8:
            gpu_name = torch.cuda.get_device_name()
            raise ValueError(
                "Bfloat16 is only supported on GPUs with compute capability "
                f"of at least 8.0. Your {gpu_name} GPU has compute capability "
                f"{compute_capability[0]}.{compute_capability[1]}. "
                "You can use float16 instead by explicitly setting the"
                "`dtype` flag in CLI, for example: --dtype=half.")