import argparse
import dataclasses
from dataclasses import dataclass
from typing import Optional, Tuple

from vllm.config import (CacheConfig, DeviceConfig, ModelConfig,
                         ParallelConfig, SchedulerConfig, LoRAConfig)

@dataclass
class EngineArgs:
    """Arguments for vLLM engine.

    Each dataclass field doubles as the default value of the matching CLI
    flag in `add_cli_args`, which keeps the programmatic and command-line
    interfaces in sync.
    """
    model: str
    tokenizer: Optional[str] = None
    tokenizer_mode: str = 'auto'
    trust_remote_code: bool = False
    download_dir: Optional[str] = None
    load_format: str = 'auto'
    dtype: str = 'auto'
    kv_cache_dtype: str = 'auto'
    seed: int = 0
    max_model_len: Optional[int] = None
    worker_use_ray: bool = False
    pipeline_parallel_size: int = 1
    tensor_parallel_size: int = 1
    max_parallel_loading_workers: Optional[int] = None
    block_size: int = 16
    swap_space: int = 4  # GiB
    gpu_memory_utilization: float = 0.90
    max_num_batched_tokens: Optional[int] = None
    max_num_seqs: int = 256
    max_paddings: int = 256
    disable_log_stats: bool = False
    revision: Optional[str] = None
    code_revision: Optional[str] = None
    tokenizer_revision: Optional[str] = None
    quantization: Optional[str] = None
    enforce_eager: bool = False
    max_context_len_to_capture: int = 8192
    disable_custom_all_reduce: bool = False
    enable_lora: bool = False
    max_loras: int = 1
    max_lora_rank: int = 16
    lora_extra_vocab_size: int = 256
    # FIX: the `: str` annotation is required. An unannotated class attribute
    # is NOT a dataclass field, so `dataclasses.fields()` skipped it and
    # `from_cli_args` silently discarded the parsed `--lora-dtype` value,
    # leaving it pinned to 'auto'.
    lora_dtype: str = 'auto'
    max_cpu_loras: Optional[int] = None
    device: str = 'auto'

    def __post_init__(self):
        # Default the tokenizer to the model path when not given explicitly.
        if self.tokenizer is None:
            self.tokenizer = self.model

    @staticmethod
    def add_cli_args(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Shared CLI arguments for vLLM engine.

        Registers every engine flag on `parser` (mutating it) and returns it
        for chaining. Defaults come from the `EngineArgs` fields above.
        """

        # NOTE: If you update any of the arguments below, please also
        # make sure to update docs/source/models/engine_args.rst

        # Model arguments
        parser.add_argument(
            '--model',
            type=str,
            default='facebook/opt-125m',
            help='name or path of the huggingface model to use')
        parser.add_argument(
            '--tokenizer',
            type=str,
            default=EngineArgs.tokenizer,
            help='name or path of the huggingface tokenizer to use')
        parser.add_argument(
            '--revision',
            type=str,
            default=None,
            help='the specific model version to use. It can be a branch '
            'name, a tag name, or a commit id. If unspecified, will use '
            'the default version.')
        parser.add_argument(
            '--code-revision',
            type=str,
            default=None,
            help='the specific revision to use for the model code on '
            'Hugging Face Hub. It can be a branch name, a tag name, or a '
            'commit id. If unspecified, will use the default version.')
        parser.add_argument(
            '--tokenizer-revision',
            type=str,
            default=None,
            help='the specific tokenizer version to use. It can be a branch '
            'name, a tag name, or a commit id. If unspecified, will use '
            'the default version.')
        parser.add_argument('--tokenizer-mode',
                            type=str,
                            default=EngineArgs.tokenizer_mode,
                            choices=['auto', 'slow'],
                            help='tokenizer mode. "auto" will use the fast '
                            'tokenizer if available, and "slow" will '
                            'always use the slow tokenizer.')
        parser.add_argument('--trust-remote-code',
                            action='store_true',
                            help='trust remote code from huggingface')
        parser.add_argument('--download-dir',
                            type=str,
                            default=EngineArgs.download_dir,
                            help='directory to download and load the weights, '
                            'default to the default cache dir of '
                            'huggingface')
        parser.add_argument(
            '--load-format',
            type=str,
            default=EngineArgs.load_format,
            choices=['auto', 'pt', 'safetensors', 'npcache', 'dummy'],
            help='The format of the model weights to load. '
            '"auto" will try to load the weights in the safetensors format '
            'and fall back to the pytorch bin format if safetensors format '
            'is not available. '
            '"pt" will load the weights in the pytorch bin format. '
            '"safetensors" will load the weights in the safetensors format. '
            '"npcache" will load the weights in pytorch format and store '
            'a numpy cache to speed up the loading. '
            '"dummy" will initialize the weights with random values, '
            'which is mainly for profiling.')
        parser.add_argument(
            '--dtype',
            type=str,
            default=EngineArgs.dtype,
            choices=[
                'auto', 'half', 'float16', 'bfloat16', 'float', 'float32'
            ],
            help='data type for model weights and activations. '
            'The "auto" option will use FP16 precision '
            'for FP32 and FP16 models, and BF16 precision '
            'for BF16 models.')
        parser.add_argument(
            '--kv-cache-dtype',
            type=str,
            choices=['auto', 'fp8_e5m2'],
            default=EngineArgs.kv_cache_dtype,
            help='Data type for kv cache storage. If "auto", will use model '
            'data type. Note FP8 is not supported when cuda version is '
            'lower than 11.8.')
        parser.add_argument('--max-model-len',
                            type=int,
                            default=EngineArgs.max_model_len,
                            help='model context length. If unspecified, '
                            'will be automatically derived from the model.')
        # Parallel arguments
        parser.add_argument('--worker-use-ray',
                            action='store_true',
                            help='use Ray for distributed serving, will be '
                            'automatically set when using more than 1 GPU')
        parser.add_argument('--pipeline-parallel-size',
                            '-pp',
                            type=int,
                            default=EngineArgs.pipeline_parallel_size,
                            help='number of pipeline stages')
        parser.add_argument('--tensor-parallel-size',
                            '-tp',
                            type=int,
                            default=EngineArgs.tensor_parallel_size,
                            help='number of tensor parallel replicas')
        parser.add_argument(
            '--max-parallel-loading-workers',
            type=int,
            default=EngineArgs.max_parallel_loading_workers,
            help='load model sequentially in multiple batches, '
            'to avoid RAM OOM when using tensor '
            'parallel and large models')
        # KV cache arguments
        parser.add_argument('--block-size',
                            type=int,
                            default=EngineArgs.block_size,
                            choices=[8, 16, 32, 128],
                            help='token block size')
        parser.add_argument('--seed',
                            type=int,
                            default=EngineArgs.seed,
                            help='random seed')
        parser.add_argument('--swap-space',
                            type=int,
                            default=EngineArgs.swap_space,
                            help='CPU swap space size (GiB) per GPU')
        parser.add_argument(
            '--gpu-memory-utilization',
            type=float,
            default=EngineArgs.gpu_memory_utilization,
            help='the fraction of GPU memory to be used for '
            'the model executor, which can range from 0 to 1. '
            'If unspecified, will use the default value of 0.9.')
        parser.add_argument('--max-num-batched-tokens',
                            type=int,
                            default=EngineArgs.max_num_batched_tokens,
                            help='maximum number of batched tokens per '
                            'iteration')
        parser.add_argument('--max-num-seqs',
                            type=int,
                            default=EngineArgs.max_num_seqs,
                            help='maximum number of sequences per iteration')
        parser.add_argument('--max-paddings',
                            type=int,
                            default=EngineArgs.max_paddings,
                            help='maximum number of paddings in a batch')
        parser.add_argument('--disable-log-stats',
                            action='store_true',
                            help='disable logging statistics')
        # Quantization settings.
        parser.add_argument('--quantization',
                            '-q',
                            type=str,
                            choices=['awq', 'gptq', 'squeezellm', None],
                            default=EngineArgs.quantization,
                            help='Method used to quantize the weights. If '
                            'None, we first check the `quantization_config` '
                            'attribute in the model config file. If that is '
                            'None, we assume the model weights are not '
                            'quantized and use `dtype` to determine the data '
                            'type of the weights.')
        parser.add_argument('--enforce-eager',
                            action='store_true',
                            help='Always use eager-mode PyTorch. If False, '
                            'will use eager mode and CUDA graph in hybrid '
                            'for maximal performance and flexibility.')
        parser.add_argument('--max-context-len-to-capture',
                            type=int,
                            default=EngineArgs.max_context_len_to_capture,
                            help='maximum context length covered by CUDA '
                            'graphs. When a sequence has context length '
                            'larger than this, we fall back to eager mode.')
        parser.add_argument('--disable-custom-all-reduce',
                            action='store_true',
                            default=EngineArgs.disable_custom_all_reduce,
                            help='See ParallelConfig')
        # LoRA related configs
        parser.add_argument('--enable-lora',
                            action='store_true',
                            help='If True, enable handling of LoRA adapters.')
        parser.add_argument('--max-loras',
                            type=int,
                            default=EngineArgs.max_loras,
                            help='Max number of LoRAs in a single batch.')
        parser.add_argument('--max-lora-rank',
                            type=int,
                            default=EngineArgs.max_lora_rank,
                            help='Max LoRA rank.')
        parser.add_argument(
            '--lora-extra-vocab-size',
            type=int,
            default=EngineArgs.lora_extra_vocab_size,
            help=('Maximum size of extra vocabulary that can be '
                  'present in a LoRA adapter (added to the base '
                  'model vocabulary).'))
        parser.add_argument(
            '--lora-dtype',
            type=str,
            default=EngineArgs.lora_dtype,
            choices=['auto', 'float16', 'bfloat16', 'float32'],
            help=('Data type for LoRA. If auto, will default to '
                  'base model dtype.'))
        parser.add_argument(
            '--max-cpu-loras',
            type=int,
            default=EngineArgs.max_cpu_loras,
            help=('Maximum number of LoRAs to store in CPU memory. '
                  'Must be >= than max_num_seqs. '
                  'Defaults to max_num_seqs.'))
        parser.add_argument("--device",
                            type=str,
                            default=EngineArgs.device,
                            choices=["auto", "cuda", "neuron"],
                            help='Device type for vLLM execution.')
        return parser

    @classmethod
    def from_cli_args(cls, args: argparse.Namespace) -> 'EngineArgs':
        """Build an `EngineArgs` from a parsed `argparse.Namespace`.

        Only dataclass fields are transferred, so every field must have a
        matching CLI flag registered in `add_cli_args`.
        """
        # Get the list of attributes of this dataclass.
        attrs = [attr.name for attr in dataclasses.fields(cls)]
        # Set the attributes from the parsed arguments.
        engine_args = cls(**{attr: getattr(args, attr) for attr in attrs})
        return engine_args

    def create_engine_configs(
        self,
    ) -> ('Tuple[ModelConfig, CacheConfig, ParallelConfig, SchedulerConfig,'
          ' DeviceConfig, Optional[LoRAConfig]]'):
        """Materialize the vllm.config objects from these arguments.

        Returns a 6-tuple: (model, cache, parallel, scheduler, device,
        lora) configs; the LoRA config is None unless `enable_lora` is set.
        """
        device_config = DeviceConfig(self.device)
        model_config = ModelConfig(
            self.model, self.tokenizer, self.tokenizer_mode,
            self.trust_remote_code, self.download_dir, self.load_format,
            self.dtype, self.seed, self.revision, self.code_revision,
            self.tokenizer_revision, self.max_model_len, self.quantization,
            self.enforce_eager, self.max_context_len_to_capture)
        cache_config = CacheConfig(self.block_size,
                                   self.gpu_memory_utilization,
                                   self.swap_space, self.kv_cache_dtype,
                                   model_config.get_sliding_window())
        parallel_config = ParallelConfig(self.pipeline_parallel_size,
                                         self.tensor_parallel_size,
                                         self.worker_use_ray,
                                         self.max_parallel_loading_workers,
                                         self.disable_custom_all_reduce)
        scheduler_config = SchedulerConfig(self.max_num_batched_tokens,
                                           self.max_num_seqs,
                                           model_config.max_model_len,
                                           self.max_paddings)
        # Non-positive max_cpu_loras means "use the LoRAConfig default".
        lora_config = LoRAConfig(
            max_lora_rank=self.max_lora_rank,
            max_loras=self.max_loras,
            lora_extra_vocab_size=self.lora_extra_vocab_size,
            lora_dtype=self.lora_dtype,
            max_cpu_loras=self.max_cpu_loras if self.max_cpu_loras
            and self.max_cpu_loras > 0 else None) if self.enable_lora else None
        return (model_config, cache_config, parallel_config, scheduler_config,
                device_config, lora_config)


@dataclass
class AsyncEngineArgs(EngineArgs):
    """Arguments for asynchronous vLLM engine."""
    engine_use_ray: bool = False
    disable_log_requests: bool = False
    max_log_len: Optional[int] = None

    @staticmethod
    def add_cli_args(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Register async-engine flags on top of the shared engine flags.

        Mutates `parser` and returns it for chaining.
        """
        parser = EngineArgs.add_cli_args(parser)
        parser.add_argument('--engine-use-ray',
                            action='store_true',
                            help='use Ray to start the LLM engine in a '
                            'separate process as the server process.')
        parser.add_argument('--disable-log-requests',
                            action='store_true',
                            help='disable logging requests')
        # Consistency fix: take the default from the dataclass field like
        # every other argument, instead of hard-coding None.
        parser.add_argument('--max-log-len',
                            type=int,
                            default=AsyncEngineArgs.max_log_len,
                            help='max number of prompt characters or prompt '
                            'ID numbers being printed in log. '
                            'Default: unlimited.')
        return parser