from typing import Optional

import torch
from transformers import PretrainedConfig

from vllm.logger import init_logger
from vllm.transformers_utils.config import get_config
from vllm.utils import get_cpu_memory

logger = init_logger(__name__)

_GB = 1 << 30


class ModelConfig:
    """Configuration for the model.

    Args:
        model: Name or path of the huggingface model to use.
        tokenizer: Name or path of the huggingface tokenizer to use.
        tokenizer_mode: Tokenizer mode. "auto" will use the fast tokenizer if
            available, and "slow" will always use the slow tokenizer.
        trust_remote_code: Trust remote code (e.g., from HuggingFace) when
            downloading the model and tokenizer.
        download_dir: Directory to download and load the weights. Defaults to
            the default cache directory of Hugging Face.
        load_format: The format of the model weights to load:
            "auto" will try to load the weights in the safetensors format and
                fall back to the pytorch bin format if safetensors format is
                not available.
            "pt" will load the weights in the pytorch bin format.
            "safetensors" will load the weights in the safetensors format.
            "npcache" will load the weights in pytorch format and store
                a numpy cache to speed up the loading.
            "dummy" will initialize the weights with random values, which is
                mainly for profiling.
        dtype: Data type for model weights and activations. The "auto" option
            will use FP16 precision for FP32 and FP16 models, and BF16 precision
            for BF16 models.
        seed: Random seed for reproducibility.
        revision: The specific model version to use. It can be a branch name,
            a tag name, or a commit id. If unspecified, will use the default
            version.
        tokenizer_revision: The specific tokenizer version to use. It can be a
            branch name, a tag name, or a commit id. If unspecified, will use
            the default version.
        max_model_len: Maximum length of a sequence (including prompt and
            output). If None, will be derived from the model.
        quantization: Quantization method that was used to quantize the model
            weights. If None, we assume the model weights are not quantized.
    """

    def __init__(
        self,
        model: str,
        tokenizer: str,
        tokenizer_mode: str,
        trust_remote_code: bool,
        download_dir: Optional[str],
        load_format: str,
        dtype: str,
        seed: int,
        revision: Optional[str] = None,
        tokenizer_revision: Optional[str] = None,
        max_model_len: Optional[int] = None,
        quantization: Optional[str] = None,
    ) -> None:
        self.model = model
        self.tokenizer = tokenizer
        self.tokenizer_mode = tokenizer_mode
        self.trust_remote_code = trust_remote_code
        self.download_dir = download_dir
        self.load_format = load_format
        self.seed = seed
        self.revision = revision
        self.tokenizer_revision = tokenizer_revision
        self.quantization = quantization

        self.hf_config = get_config(model, trust_remote_code, revision)
        self.dtype = _get_and_verify_dtype(self.hf_config, dtype)
        self.max_model_len = _get_and_verify_max_len(self.hf_config,
                                                     max_model_len)
        self._verify_load_format()
        self._verify_tokenizer_mode()
        self._verify_quantization()

    def _verify_load_format(self) -> None:
        load_format = self.load_format.lower()
        if load_format not in [
                "auto", "pt", "safetensors", "npcache", "dummy"
        ]:
            raise ValueError(
                f"Unknown load format: {self.load_format}. Must be one of "
                "'auto', 'pt', 'safetensors', 'npcache', or 'dummy'.")
        self.load_format = load_format

    def _verify_tokenizer_mode(self) -> None:
        tokenizer_mode = self.tokenizer_mode.lower()
        if tokenizer_mode not in ["auto", "slow"]:
            raise ValueError(
                f"Unknown tokenizer mode: {self.tokenizer_mode}. Must be "
                "either 'auto' or 'slow'.")
        self.tokenizer_mode = tokenizer_mode

    def _verify_quantization(self) -> None:
        supported_quantization = ["awq", "squeezellm"]
        if self.quantization is None:
            return
        quantization = self.quantization.lower()
        if quantization not in supported_quantization:
            raise ValueError(
                f"Unknown quantization: {self.quantization}. Must be one of "
                f"{supported_quantization}.")
        self.quantization = quantization

    def verify_with_parallel_config(
        self,
        parallel_config: "ParallelConfig",
    ) -> None:
        total_num_attention_heads = self.hf_config.num_attention_heads
        tensor_parallel_size = parallel_config.tensor_parallel_size
        if total_num_attention_heads % tensor_parallel_size != 0:
            raise ValueError(
                f"Total number of attention heads ({total_num_attention_heads})"
                " must be divisible by tensor parallel size "
                f"({tensor_parallel_size}).")

        total_num_hidden_layers = self.hf_config.num_hidden_layers
        pipeline_parallel_size = parallel_config.pipeline_parallel_size
        if total_num_hidden_layers % pipeline_parallel_size != 0:
            raise ValueError(
                f"Total number of hidden layers ({total_num_hidden_layers}) "
                "must be divisible by pipeline parallel size "
                f"({pipeline_parallel_size}).")

    def get_hidden_size(self) -> int:
        return self.hf_config.hidden_size

    def get_head_size(self) -> int:
        # FIXME(woosuk): This may not be true for all models.
        return self.hf_config.hidden_size // self.hf_config.num_attention_heads

    def get_num_kv_heads(self, parallel_config: "ParallelConfig") -> int:
        """Returns the number of KV heads per GPU worker."""
        # For GPTBigCode & Falcon:
        # NOTE: for falcon, when new_decoder_architecture is True, the
        # multi_query flag is ignored and we use n_head_kv for the number of
        # KV heads.
        falcon_model_types = ["falcon", "RefinedWeb", "RefinedWebModel"]
        new_decoder_arch_falcon = (
            self.hf_config.model_type in falcon_model_types
            and getattr(self.hf_config, "new_decoder_architecture", False))
        if not new_decoder_arch_falcon and getattr(self.hf_config,
                                                   "multi_query", False):
            # Multi-query attention, only one KV head.
            # Currently, tensor parallelism is not supported in this case.
            return 1
        # For Falcon:
        if getattr(self.hf_config, "n_head_kv", None) is not None:
            return (self.hf_config.n_head_kv //
                    parallel_config.tensor_parallel_size)
        if getattr(self.hf_config, "num_kv_heads", None) is not None:
            return (self.hf_config.num_kv_heads //
                    parallel_config.tensor_parallel_size)
        # For LLaMA-2:
        if getattr(self.hf_config, "num_key_value_heads", None) is not None:
            return (self.hf_config.num_key_value_heads //
                    parallel_config.tensor_parallel_size)
        # For ChatGLM-2:
        if getattr(self.hf_config, "multi_query_group_num", None) is not None:
            return (self.hf_config.multi_query_group_num //
                    parallel_config.tensor_parallel_size)
        total_num_attention_heads = self.hf_config.num_attention_heads
        return total_num_attention_heads // parallel_config.tensor_parallel_size

    def get_num_layers(self, parallel_config: "ParallelConfig") -> int:
        total_num_hidden_layers = self.hf_config.num_hidden_layers
        return total_num_hidden_layers // parallel_config.pipeline_parallel_size
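
# Usage sketch (illustrative only, not part of the original file; the model
# name below is a stand-in, and get_config() must be able to reach its
# config.json):
#
#     model_config = ModelConfig(
#         model="facebook/opt-125m",
#         tokenizer="facebook/opt-125m",
#         tokenizer_mode="auto",
#         trust_remote_code=False,
#         download_dir=None,
#         load_format="auto",
#         dtype="auto",
#         seed=0,
#     )
#
# On KV heads: a LLaMA-2-style config with num_key_value_heads=8 under
# tensor_parallel_size=2 makes get_num_kv_heads() return 8 // 2 = 4, while a
# multi_query (MQA) model always yields a single KV head.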


class CacheConfig:
    """Configuration for the KV cache.

    Args:
        block_size: Size of a cache block in number of tokens.
        gpu_memory_utilization: Fraction of GPU memory to use for the
            vLLM execution.
        swap_space: Size of the CPU swap space per GPU (in GiB).
        sliding_window: Size of the sliding window, for models that use
            sliding-window attention. None if the model does not use it.
    """

    def __init__(
        self,
        block_size: int,
        gpu_memory_utilization: float,
        swap_space: int,
        sliding_window: Optional[int] = None,
    ) -> None:
        self.block_size = block_size
        self.gpu_memory_utilization = gpu_memory_utilization
        self.swap_space_bytes = swap_space * _GB
        self.sliding_window = sliding_window
        self._verify_args()

        # Will be set after profiling.
        self.num_gpu_blocks = None
        self.num_cpu_blocks = None

    def _verify_args(self) -> None:
        if self.gpu_memory_utilization > 1.0:
            raise ValueError(
                "GPU memory utilization must be less than 1.0. Got "
                f"{self.gpu_memory_utilization}.")

    def verify_with_parallel_config(
        self,
        parallel_config: "ParallelConfig",
    ) -> None:
        total_cpu_memory = get_cpu_memory()
        # FIXME(woosuk): Here, it is assumed that the GPUs in a tensor parallel
        # group are in the same node. However, the GPUs may span multiple nodes.
        num_gpus_per_node = parallel_config.tensor_parallel_size
        cpu_memory_usage = self.swap_space_bytes * num_gpus_per_node

        msg = (f"{cpu_memory_usage / _GB:.2f} GiB out of "
               f"the {total_cpu_memory / _GB:.2f} GiB total CPU memory is "
               "allocated for the swap space.")
        if cpu_memory_usage > 0.7 * total_cpu_memory:
            raise ValueError("Too large swap space. " + msg)
        elif cpu_memory_usage > 0.4 * total_cpu_memory:
            logger.warning("Possibly too large swap space. " + msg)

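# Usage sketch (illustrative only, not part of the original file): with
# swap_space=4 (GiB) and tensor_parallel_size=2, verify_with_parallel_config
# checks 2 * 4 GiB = 8 GiB of CPU swap against total host memory, raising
# above 70% of it and warning above 40%.
#
#     cache_config = CacheConfig(block_size=16,
#                                gpu_memory_utilization=0.9,
#                                swap_space=4)
#     cache_config.verify_with_parallel_config(parallel_config)
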

class ParallelConfig:
    """Configuration for the distributed execution.

    Args:
        pipeline_parallel_size: Number of pipeline parallel groups.
        tensor_parallel_size: Number of tensor parallel groups.
        worker_use_ray: Whether to use Ray for model workers. Will be set to
            True if either pipeline_parallel_size or tensor_parallel_size is
            greater than 1.
    """

    def __init__(
        self,
        pipeline_parallel_size: int,
        tensor_parallel_size: int,
        worker_use_ray: bool,
    ) -> None:
        self.pipeline_parallel_size = pipeline_parallel_size
        self.tensor_parallel_size = tensor_parallel_size
        self.worker_use_ray = worker_use_ray

        self.world_size = pipeline_parallel_size * tensor_parallel_size
        if self.world_size > 1:
            self.worker_use_ray = True
        self._verify_args()

    def _verify_args(self) -> None:
        if self.pipeline_parallel_size > 1:
            raise NotImplementedError(
                "Pipeline parallelism is not supported yet.")

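# Usage sketch (illustrative only, not part of the original file): Ray is
# enabled automatically whenever more than one worker is required.
#
#     parallel_config = ParallelConfig(pipeline_parallel_size=1,
#                                      tensor_parallel_size=2,
#                                      worker_use_ray=False)
#     assert parallel_config.world_size == 2
#     assert parallel_config.worker_use_ray  # flipped to True above
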

class SchedulerConfig:
    """Scheduler configuration.

    Args:
        max_num_batched_tokens: Maximum number of tokens to be processed in
            a single iteration.
        max_num_seqs: Maximum number of sequences to be processed in a single
            iteration.
        max_model_len: Maximum length of a sequence (including prompt
            and generated text).
        max_paddings: Maximum number of paddings to be added to a batch.
    """

    def __init__(
        self,
        max_num_batched_tokens: Optional[int],
        max_num_seqs: int,
        max_model_len: int,
        max_paddings: int,
    ) -> None:
        if max_num_batched_tokens is not None:
            self.max_num_batched_tokens = max_num_batched_tokens
        else:
            # If max_model_len is too short, use 2048 as the default value for
            # higher throughput.
            self.max_num_batched_tokens = max(max_model_len, 2048)
        self.max_num_seqs = max_num_seqs
        self.max_model_len = max_model_len
        self.max_paddings = max_paddings
        self._verify_args()

    def _verify_args(self) -> None:
        if self.max_num_batched_tokens < self.max_model_len:
            raise ValueError(
                f"max_num_batched_tokens ({self.max_num_batched_tokens}) is "
                f"smaller than max_model_len ({self.max_model_len}). "
                "This effectively limits the maximum sequence length to "
                "max_num_batched_tokens and makes vLLM reject longer "
                "sequences. Please increase max_num_batched_tokens or "
                "decrease max_model_len.")
        if self.max_num_batched_tokens < self.max_num_seqs:
            raise ValueError(
                f"max_num_batched_tokens ({self.max_num_batched_tokens}) must "
                "be greater than or equal to max_num_seqs "
                f"({self.max_num_seqs}).")

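# Usage sketch (illustrative only, not part of the original file): with
# max_num_batched_tokens left unset and max_model_len=1024, the constructor
# picks max(1024, 2048) = 2048 batched tokens per iteration for higher
# throughput.
#
#     scheduler_config = SchedulerConfig(max_num_batched_tokens=None,
#                                        max_num_seqs=256,
#                                        max_model_len=1024,
#                                        max_paddings=256)
#     assert scheduler_config.max_num_batched_tokens == 2048
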

_STR_DTYPE_TO_TORCH_DTYPE = {
    "half": torch.float16,
    "float16": torch.float16,
    "float": torch.float32,
    "float32": torch.float32,
    "bfloat16": torch.bfloat16,
}


def _get_and_verify_dtype(
    config: PretrainedConfig,
    dtype: str,
) -> torch.dtype:
    # NOTE: getattr(config, "torch_dtype", torch.float32) is not correct
    # because config.torch_dtype can be None.
    config_dtype = getattr(config, "torch_dtype", None)
    if config_dtype is None:
        config_dtype = torch.float32

    dtype = dtype.lower()
    if dtype == "auto":
        if config_dtype == torch.float32:
            # Following the common practice, we use float16 for float32 models.
            torch_dtype = torch.float16
        else:
            torch_dtype = config_dtype
    else:
        if dtype not in _STR_DTYPE_TO_TORCH_DTYPE:
            raise ValueError(f"Unknown dtype: {dtype}")
        torch_dtype = _STR_DTYPE_TO_TORCH_DTYPE[dtype]

    # Verify the dtype.
    if torch_dtype != config_dtype:
        if torch_dtype == torch.float32:
            # Upcasting to float32 is allowed.
            pass
        elif config_dtype == torch.float32:
            # Downcasting from float32 to float16 or bfloat16 is allowed.
            pass
        else:
            # Casting between float16 and bfloat16 is allowed with a warning.
            logger.warning(f"Casting {config_dtype} to {torch_dtype}.")

    return torch_dtype
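
# Example (illustrative only, not part of the original file): under "auto", a
# config whose torch_dtype is float32 resolves to float16, and an explicit
# "bfloat16" request on the same config is a permitted downcast.
#
#     cfg = PretrainedConfig(torch_dtype=torch.float32)
#     assert _get_and_verify_dtype(cfg, "auto") == torch.float16
#     assert _get_and_verify_dtype(cfg, "bfloat16") == torch.bfloat16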


def _get_and_verify_max_len(
    hf_config: PretrainedConfig,
    max_model_len: Optional[int],
) -> int:
    """Get and verify the model's maximum length."""
    derived_max_model_len = float("inf")
    possible_keys = [
        # OPT
        "max_position_embeddings",
        # GPT-2
        "n_positions",
        # MPT
        "max_seq_len",
        # Others
        "max_sequence_length",
        "max_seq_length",
        "seq_len",
    ]
    max_len_key = None
    for key in possible_keys:
        max_len = getattr(hf_config, key, None)
        if max_len is not None and max_len < derived_max_model_len:
            # Remember which config key produced the bound, so the error
            # message below can name it.
            max_len_key = key
            derived_max_model_len = max_len
    if derived_max_model_len == float("inf"):
        if max_model_len is not None:
            # If max_model_len is specified, we use it.
            return max_model_len

        default_max_len = 2048
        logger.warning(
            "The model's config.json does not contain any of the following "
            "keys to determine the original maximum length of the model: "
            f"{possible_keys}. Assuming the model's maximum length is "
            f"{default_max_len}.")
        derived_max_model_len = default_max_len

    rope_scaling = getattr(hf_config, "rope_scaling", None)
    if rope_scaling is not None:
        assert "factor" in rope_scaling
        scaling_factor = rope_scaling["factor"]
        if rope_scaling["type"] == "yarn":
            derived_max_model_len = rope_scaling[
                "original_max_position_embeddings"]
        derived_max_model_len *= scaling_factor

    if max_model_len is None:
        max_model_len = derived_max_model_len
    elif max_model_len > derived_max_model_len:
        raise ValueError(
            f"User-specified max_model_len ({max_model_len}) is greater than "
            f"the derived max_model_len ({max_len_key}={derived_max_model_len}"
            " in model's config.json). This may lead to incorrect model "
            "outputs or CUDA errors. Make sure the value is correct and "
            "within the model context size.")
    return int(max_model_len)
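
# Example (illustrative only, not part of the original file): a config with
# max_position_embeddings=4096 and rope_scaling={"type": "linear",
# "factor": 2.0} derives a maximum length of 4096 * 2.0 = 8192, so
# max_model_len=8192 is accepted while max_model_len=16384 raises ValueError.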