arg_utils.py 15.7 KB
Newer Older
1
import argparse
2
3
4
import dataclasses
from dataclasses import dataclass
from typing import Optional, Tuple
5

6
7
from vllm.config import (CacheConfig, DeviceConfig, ModelConfig,
                         ParallelConfig, SchedulerConfig, LoRAConfig)
8
9


10
@dataclass
class EngineArgs:
    """Arguments for vLLM engine.

    Mirrors the constructor arguments of the engine configs; every field has
    a matching CLI flag registered in `add_cli_args` so that an instance can
    be rebuilt from parsed arguments via `from_cli_args`.
    """
    model: str
    tokenizer: Optional[str] = None
    tokenizer_mode: str = 'auto'
    trust_remote_code: bool = False
    download_dir: Optional[str] = None
    load_format: str = 'auto'
    dtype: str = 'auto'
    kv_cache_dtype: str = 'auto'
    seed: int = 0
    max_model_len: Optional[int] = None
    worker_use_ray: bool = False
    pipeline_parallel_size: int = 1
    tensor_parallel_size: int = 1
    max_parallel_loading_workers: Optional[int] = None
    block_size: int = 16
    swap_space: int = 4  # GiB
    gpu_memory_utilization: float = 0.90
    max_num_batched_tokens: Optional[int] = None
    max_num_seqs: int = 256
    max_paddings: int = 256
    disable_log_stats: bool = False
    revision: Optional[str] = None
    code_revision: Optional[str] = None
    tokenizer_revision: Optional[str] = None
    quantization: Optional[str] = None
    enforce_eager: bool = False
    max_context_len_to_capture: int = 8192
    disable_custom_all_reduce: bool = False
    enable_lora: bool = False
    max_loras: int = 1
    max_lora_rank: int = 16
    lora_extra_vocab_size: int = 256
    # BUGFIX: this previously lacked a type annotation (`lora_dtype = 'auto'`),
    # which made it a plain class attribute rather than a dataclass field.
    # `dataclasses.fields()` therefore did not report it, and `from_cli_args`
    # silently discarded any `--lora-dtype` value passed on the command line.
    lora_dtype: str = 'auto'
    max_cpu_loras: Optional[int] = None
    device: str = 'cuda'

    def __post_init__(self):
        # Default the tokenizer to the model name/path when not given.
        if self.tokenizer is None:
            self.tokenizer = self.model

    @staticmethod
    def add_cli_args(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Shared CLI arguments for vLLM engine."""
        # NOTE: If you update any of the arguments below, please also
        # make sure to update docs/source/models/engine_args.rst

        # Model arguments
        parser.add_argument(
            '--model',
            type=str,
            default='facebook/opt-125m',
            help='name or path of the huggingface model to use')
        parser.add_argument(
            '--tokenizer',
            type=str,
            default=EngineArgs.tokenizer,
            help='name or path of the huggingface tokenizer to use')
        parser.add_argument(
            '--revision',
            type=str,
            default=None,
            help='the specific model version to use. It can be a branch '
            'name, a tag name, or a commit id. If unspecified, will use '
            'the default version.')
        parser.add_argument(
            '--code-revision',
            type=str,
            default=None,
            help='the specific revision to use for the model code on '
            'Hugging Face Hub. It can be a branch name, a tag name, or a '
            'commit id. If unspecified, will use the default version.')
        parser.add_argument(
            '--tokenizer-revision',
            type=str,
            default=None,
            help='the specific tokenizer version to use. It can be a branch '
            'name, a tag name, or a commit id. If unspecified, will use '
            'the default version.')
        parser.add_argument('--tokenizer-mode',
                            type=str,
                            default=EngineArgs.tokenizer_mode,
                            choices=['auto', 'slow'],
                            help='tokenizer mode. "auto" will use the fast '
                            'tokenizer if available, and "slow" will '
                            'always use the slow tokenizer.')
        parser.add_argument('--trust-remote-code',
                            action='store_true',
                            help='trust remote code from huggingface')
        parser.add_argument('--download-dir',
                            type=str,
                            default=EngineArgs.download_dir,
                            help='directory to download and load the weights, '
                            'default to the default cache dir of '
                            'huggingface')
        parser.add_argument(
            '--load-format',
            type=str,
            default=EngineArgs.load_format,
            choices=['auto', 'pt', 'safetensors', 'npcache', 'dummy'],
            help='The format of the model weights to load. '
            '"auto" will try to load the weights in the safetensors format '
            'and fall back to the pytorch bin format if safetensors format '
            'is not available. '
            '"pt" will load the weights in the pytorch bin format. '
            '"safetensors" will load the weights in the safetensors format. '
            '"npcache" will load the weights in pytorch format and store '
            'a numpy cache to speed up the loading. '
            '"dummy" will initialize the weights with random values, '
            'which is mainly for profiling.')
        parser.add_argument(
            '--dtype',
            type=str,
            default=EngineArgs.dtype,
            choices=[
                'auto', 'half', 'float16', 'bfloat16', 'float', 'float32'
            ],
            help='data type for model weights and activations. '
            'The "auto" option will use FP16 precision '
            'for FP32 and FP16 models, and BF16 precision '
            'for BF16 models.')
        parser.add_argument(
            '--kv-cache-dtype',
            type=str,
            choices=['auto', 'fp8_e5m2'],
            default=EngineArgs.kv_cache_dtype,
            help='Data type for kv cache storage. If "auto", will use model '
            'data type. Note FP8 is not supported when cuda version is '
            'lower than 11.8.')
        parser.add_argument('--max-model-len',
                            type=int,
                            default=EngineArgs.max_model_len,
                            help='model context length. If unspecified, '
                            'will be automatically derived from the model.')
        # Parallel arguments
        parser.add_argument('--worker-use-ray',
                            action='store_true',
                            help='use Ray for distributed serving, will be '
                            'automatically set when using more than 1 GPU')
        parser.add_argument('--pipeline-parallel-size',
                            '-pp',
                            type=int,
                            default=EngineArgs.pipeline_parallel_size,
                            help='number of pipeline stages')
        parser.add_argument('--tensor-parallel-size',
                            '-tp',
                            type=int,
                            default=EngineArgs.tensor_parallel_size,
                            help='number of tensor parallel replicas')
        parser.add_argument(
            '--max-parallel-loading-workers',
            type=int,
            default=EngineArgs.max_parallel_loading_workers,
            help='load model sequentially in multiple batches, '
            'to avoid RAM OOM when using tensor '
            'parallel and large models')
        # KV cache arguments
        parser.add_argument('--block-size',
                            type=int,
                            default=EngineArgs.block_size,
                            choices=[8, 16, 32],
                            help='token block size')
        # TODO(woosuk): Support fine-grained seeds (e.g., seed per request).
        parser.add_argument('--seed',
                            type=int,
                            default=EngineArgs.seed,
                            help='random seed')
        parser.add_argument('--swap-space',
                            type=int,
                            default=EngineArgs.swap_space,
                            help='CPU swap space size (GiB) per GPU')
        parser.add_argument(
            '--gpu-memory-utilization',
            type=float,
            default=EngineArgs.gpu_memory_utilization,
            help='the fraction of GPU memory to be used for '
            'the model executor, which can range from 0 to 1.'
            'If unspecified, will use the default value of 0.9.')
        parser.add_argument('--max-num-batched-tokens',
                            type=int,
                            default=EngineArgs.max_num_batched_tokens,
                            help='maximum number of batched tokens per '
                            'iteration')
        parser.add_argument('--max-num-seqs',
                            type=int,
                            default=EngineArgs.max_num_seqs,
                            help='maximum number of sequences per iteration')
        parser.add_argument('--max-paddings',
                            type=int,
                            default=EngineArgs.max_paddings,
                            help='maximum number of paddings in a batch')
        parser.add_argument('--disable-log-stats',
                            action='store_true',
                            help='disable logging statistics')
        # Quantization settings.
        parser.add_argument('--quantization',
                            '-q',
                            type=str,
                            choices=['awq', 'gptq', 'squeezellm', None],
                            default=EngineArgs.quantization,
                            help='Method used to quantize the weights. If '
                            'None, we first check the `quantization_config` '
                            'attribute in the model config file. If that is '
                            'None, we assume the model weights are not '
                            'quantized and use `dtype` to determine the data '
                            'type of the weights.')
        parser.add_argument('--enforce-eager',
                            action='store_true',
                            help='Always use eager-mode PyTorch. If False, '
                            'will use eager mode and CUDA graph in hybrid '
                            'for maximal performance and flexibility.')
        parser.add_argument('--max-context-len-to-capture',
                            type=int,
                            default=EngineArgs.max_context_len_to_capture,
                            help='maximum context length covered by CUDA '
                            'graphs. When a sequence has context length '
                            'larger than this, we fall back to eager mode.')
        parser.add_argument('--disable-custom-all-reduce',
                            action='store_true',
                            default=EngineArgs.disable_custom_all_reduce,
                            help='See ParallelConfig')
        # LoRA related configs
        parser.add_argument('--enable-lora',
                            action='store_true',
                            help='If True, enable handling of LoRA adapters.')
        parser.add_argument('--max-loras',
                            type=int,
                            default=EngineArgs.max_loras,
                            help='Max number of LoRAs in a single batch.')
        parser.add_argument('--max-lora-rank',
                            type=int,
                            default=EngineArgs.max_lora_rank,
                            help='Max LoRA rank.')
        parser.add_argument(
            '--lora-extra-vocab-size',
            type=int,
            default=EngineArgs.lora_extra_vocab_size,
            help=('Maximum size of extra vocabulary that can be '
                  'present in a LoRA adapter (added to the base '
                  'model vocabulary).'))
        parser.add_argument(
            '--lora-dtype',
            type=str,
            default=EngineArgs.lora_dtype,
            choices=['auto', 'float16', 'bfloat16', 'float32'],
            help=('Data type for LoRA. If auto, will default to '
                  'base model dtype.'))
        parser.add_argument(
            '--max-cpu-loras',
            type=int,
            default=EngineArgs.max_cpu_loras,
            help=('Maximum number of LoRAs to store in CPU memory. '
                  'Must be >= than max_num_seqs. '
                  'Defaults to max_num_seqs.'))
        parser.add_argument(
            "--device",
            type=str,
            default=EngineArgs.device,
            choices=["cuda"],
            help=('Device type for vLLM execution. '
                  'Currently, only CUDA-compatible devices are supported.'))
        return parser

    @classmethod
    def from_cli_args(cls, args: argparse.Namespace) -> 'EngineArgs':
        """Build an EngineArgs instance from a parsed argparse namespace.

        Only attributes that are dataclass fields are read from `args`, so
        every field must have a matching flag in `add_cli_args`.
        """
        # Get the list of attributes of this dataclass.
        attrs = [attr.name for attr in dataclasses.fields(cls)]
        # Set the attributes from the parsed arguments.
        engine_args = cls(**{attr: getattr(args, attr) for attr in attrs})
        return engine_args

    def create_engine_configs(
        self,
    ) -> ('Tuple[ModelConfig, CacheConfig, ParallelConfig, SchedulerConfig, '
          'DeviceConfig, Optional[LoRAConfig]]'):
        # The return annotation is a forward-reference string so that it is
        # not evaluated at class-definition time.
        """Translate these arguments into the engine's config objects.

        Returns a tuple of (model, cache, parallel, scheduler, device, lora)
        configs; the LoRA config is None unless `enable_lora` is set.
        """
        device_config = DeviceConfig(self.device)
        model_config = ModelConfig(
            self.model, self.tokenizer, self.tokenizer_mode,
            self.trust_remote_code, self.download_dir, self.load_format,
            self.dtype, self.seed, self.revision, self.code_revision,
            self.tokenizer_revision, self.max_model_len, self.quantization,
            self.enforce_eager, self.max_context_len_to_capture)
        cache_config = CacheConfig(self.block_size,
                                   self.gpu_memory_utilization,
                                   self.swap_space, self.kv_cache_dtype,
                                   model_config.get_sliding_window())
        parallel_config = ParallelConfig(self.pipeline_parallel_size,
                                         self.tensor_parallel_size,
                                         self.worker_use_ray,
                                         self.max_parallel_loading_workers,
                                         self.disable_custom_all_reduce)
        scheduler_config = SchedulerConfig(self.max_num_batched_tokens,
                                           self.max_num_seqs,
                                           model_config.max_model_len,
                                           self.max_paddings)
        lora_config = LoRAConfig(
            max_lora_rank=self.max_lora_rank,
            max_loras=self.max_loras,
            lora_extra_vocab_size=self.lora_extra_vocab_size,
            lora_dtype=self.lora_dtype,
            max_cpu_loras=self.max_cpu_loras if self.max_cpu_loras
            and self.max_cpu_loras > 0 else None) if self.enable_lora else None
        return (model_config, cache_config, parallel_config, scheduler_config,
                device_config, lora_config)
318
319


320
@dataclass
class AsyncEngineArgs(EngineArgs):
    """Arguments for asynchronous vLLM engine.

    Extends `EngineArgs` with options that only matter when the engine is
    run behind an async server frontend.
    """
    engine_use_ray: bool = False
    disable_log_requests: bool = False
    max_log_len: Optional[int] = None

    @staticmethod
    def add_cli_args(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Register async-engine CLI flags on top of the shared engine flags."""
        # Start from the shared engine arguments, then layer the
        # async-specific options on the same parser.
        async_parser = EngineArgs.add_cli_args(parser)
        async_parser.add_argument('--engine-use-ray',
                                  action='store_true',
                                  help='use Ray to start the LLM engine in a '
                                  'separate process as the server process.')
        async_parser.add_argument('--disable-log-requests',
                                  action='store_true',
                                  help='disable logging requests')
        async_parser.add_argument('--max-log-len',
                                  type=int,
                                  default=None,
                                  help='max number of prompt characters or '
                                  'prompt ID numbers being printed in log. '
                                  'Default: unlimited.')
        return async_parser