import argparse
import dataclasses
from dataclasses import dataclass
from typing import Optional, Tuple

from vllm.config import (CacheConfig, DeviceConfig, ModelConfig,
                         ParallelConfig, SchedulerConfig, LoRAConfig)
@dataclass
class EngineArgs:
    """Arguments for vLLM engine.

    Each field mirrors one CLI flag registered by :meth:`add_cli_args`;
    the field default is also used as the CLI default so the two cannot
    drift apart.  :meth:`from_cli_args` maps a parsed namespace back onto
    the dataclass fields by name.
    """
    model: str
    tokenizer: Optional[str] = None
    tokenizer_mode: str = 'auto'
    trust_remote_code: bool = False
    download_dir: Optional[str] = None
    load_format: str = 'auto'
    dtype: str = 'auto'
    kv_cache_dtype: str = 'auto'
    seed: int = 0
    max_model_len: Optional[int] = None
    worker_use_ray: bool = False
    pipeline_parallel_size: int = 1
    tensor_parallel_size: int = 1
    max_parallel_loading_workers: Optional[int] = None
    block_size: int = 16
    swap_space: int = 4  # GiB
    gpu_memory_utilization: float = 0.90
    max_num_batched_tokens: Optional[int] = None
    max_num_seqs: int = 256
    max_paddings: int = 256
    disable_log_stats: bool = False
    revision: Optional[str] = None
    tokenizer_revision: Optional[str] = None
    quantization: Optional[str] = None
    enforce_eager: bool = False
    max_context_len_to_capture: int = 8192
    disable_custom_all_reduce: bool = False
    enable_lora: bool = False
    max_loras: int = 1
    max_lora_rank: int = 16
    lora_extra_vocab_size: int = 256
    # BUGFIX: this attribute previously had no type annotation, so
    # @dataclass treated it as a plain class attribute rather than a field.
    # dataclasses.fields() therefore skipped it and from_cli_args()
    # silently ignored any --lora-dtype value passed on the command line.
    # Annotating it makes it a real (keyword, defaulted) field.
    lora_dtype: str = 'auto'
    max_cpu_loras: Optional[int] = None
    device: str = 'cuda'

    def __post_init__(self):
        # The tokenizer defaults to the model name/path when not given.
        if self.tokenizer is None:
            self.tokenizer = self.model

    @staticmethod
    def add_cli_args(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Shared CLI arguments for vLLM engine."""

        # NOTE: If you update any of the arguments below, please also
        # make sure to update docs/source/models/engine_args.rst

        # Model arguments
        parser.add_argument(
            '--model',
            type=str,
            default='facebook/opt-125m',
            help='name or path of the huggingface model to use')
        parser.add_argument(
            '--tokenizer',
            type=str,
            default=EngineArgs.tokenizer,
            help='name or path of the huggingface tokenizer to use')
        parser.add_argument(
            '--revision',
            type=str,
            default=None,
            help='the specific model version to use. It can be a branch '
            'name, a tag name, or a commit id. If unspecified, will use '
            'the default version.')
        parser.add_argument(
            '--tokenizer-revision',
            type=str,
            default=None,
            help='the specific tokenizer version to use. It can be a branch '
            'name, a tag name, or a commit id. If unspecified, will use '
            'the default version.')
        parser.add_argument('--tokenizer-mode',
                            type=str,
                            default=EngineArgs.tokenizer_mode,
                            choices=['auto', 'slow'],
                            help='tokenizer mode. "auto" will use the fast '
                            'tokenizer if available, and "slow" will '
                            'always use the slow tokenizer.')
        parser.add_argument('--trust-remote-code',
                            action='store_true',
                            help='trust remote code from huggingface')
        parser.add_argument('--download-dir',
                            type=str,
                            default=EngineArgs.download_dir,
                            help='directory to download and load the weights, '
                            'default to the default cache dir of '
                            'huggingface')
        parser.add_argument(
            '--load-format',
            type=str,
            default=EngineArgs.load_format,
            choices=['auto', 'pt', 'safetensors', 'npcache', 'dummy'],
            help='The format of the model weights to load. '
            '"auto" will try to load the weights in the safetensors format '
            'and fall back to the pytorch bin format if safetensors format '
            'is not available. '
            '"pt" will load the weights in the pytorch bin format. '
            '"safetensors" will load the weights in the safetensors format. '
            '"npcache" will load the weights in pytorch format and store '
            'a numpy cache to speed up the loading. '
            '"dummy" will initialize the weights with random values, '
            'which is mainly for profiling.')
        parser.add_argument(
            '--dtype',
            type=str,
            default=EngineArgs.dtype,
            choices=[
                'auto', 'half', 'float16', 'bfloat16', 'float', 'float32'
            ],
            help='data type for model weights and activations. '
            'The "auto" option will use FP16 precision '
            'for FP32 and FP16 models, and BF16 precision '
            'for BF16 models.')
        parser.add_argument(
            '--kv-cache-dtype',
            type=str,
            choices=['auto', 'fp8_e5m2'],
            default=EngineArgs.kv_cache_dtype,
            help='Data type for kv cache storage. If "auto", will use model '
            'data type. Note FP8 is not supported when cuda version is '
            'lower than 11.8.')
        parser.add_argument('--max-model-len',
                            type=int,
                            default=EngineArgs.max_model_len,
                            help='model context length. If unspecified, '
                            'will be automatically derived from the model.')
        # Parallel arguments
        parser.add_argument('--worker-use-ray',
                            action='store_true',
                            help='use Ray for distributed serving, will be '
                            'automatically set when using more than 1 GPU')
        parser.add_argument('--pipeline-parallel-size',
                            '-pp',
                            type=int,
                            default=EngineArgs.pipeline_parallel_size,
                            help='number of pipeline stages')
        parser.add_argument('--tensor-parallel-size',
                            '-tp',
                            type=int,
                            default=EngineArgs.tensor_parallel_size,
                            help='number of tensor parallel replicas')
        parser.add_argument(
            '--max-parallel-loading-workers',
            type=int,
            default=EngineArgs.max_parallel_loading_workers,
            help='load model sequentially in multiple batches, '
            'to avoid RAM OOM when using tensor '
            'parallel and large models')
        # KV cache arguments
        parser.add_argument('--block-size',
                            type=int,
                            default=EngineArgs.block_size,
                            choices=[8, 16, 32],
                            help='token block size')
        # TODO(woosuk): Support fine-grained seeds (e.g., seed per request).
        parser.add_argument('--seed',
                            type=int,
                            default=EngineArgs.seed,
                            help='random seed')
        parser.add_argument('--swap-space',
                            type=int,
                            default=EngineArgs.swap_space,
                            help='CPU swap space size (GiB) per GPU')
        parser.add_argument(
            '--gpu-memory-utilization',
            type=float,
            default=EngineArgs.gpu_memory_utilization,
            help='the fraction of GPU memory to be used for '
            'the model executor, which can range from 0 to 1.'
            'If unspecified, will use the default value of 0.9.')
        parser.add_argument('--max-num-batched-tokens',
                            type=int,
                            default=EngineArgs.max_num_batched_tokens,
                            help='maximum number of batched tokens per '
                            'iteration')
        parser.add_argument('--max-num-seqs',
                            type=int,
                            default=EngineArgs.max_num_seqs,
                            help='maximum number of sequences per iteration')
        parser.add_argument('--max-paddings',
                            type=int,
                            default=EngineArgs.max_paddings,
                            help='maximum number of paddings in a batch')
        parser.add_argument('--disable-log-stats',
                            action='store_true',
                            help='disable logging statistics')
        # Quantization settings.
        parser.add_argument('--quantization',
                            '-q',
                            type=str,
                            choices=['awq', 'gptq', 'squeezellm', None],
                            default=EngineArgs.quantization,
                            help='Method used to quantize the weights. If '
                            'None, we first check the `quantization_config` '
                            'attribute in the model config file. If that is '
                            'None, we assume the model weights are not '
                            'quantized and use `dtype` to determine the data '
                            'type of the weights.')
        parser.add_argument('--enforce-eager',
                            action='store_true',
                            help='Always use eager-mode PyTorch. If False, '
                            'will use eager mode and CUDA graph in hybrid '
                            'for maximal performance and flexibility.')
        parser.add_argument('--max-context-len-to-capture',
                            type=int,
                            default=EngineArgs.max_context_len_to_capture,
                            help='maximum context length covered by CUDA '
                            'graphs. When a sequence has context length '
                            'larger than this, we fall back to eager mode.')
        parser.add_argument('--disable-custom-all-reduce',
                            action='store_true',
                            default=EngineArgs.disable_custom_all_reduce,
                            help='See ParallelConfig')
        # LoRA related configs
        parser.add_argument('--enable-lora',
                            action='store_true',
                            help='If True, enable handling of LoRA adapters.')
        parser.add_argument('--max-loras',
                            type=int,
                            default=EngineArgs.max_loras,
                            help='Max number of LoRAs in a single batch.')
        parser.add_argument('--max-lora-rank',
                            type=int,
                            default=EngineArgs.max_lora_rank,
                            help='Max LoRA rank.')
        parser.add_argument(
            '--lora-extra-vocab-size',
            type=int,
            default=EngineArgs.lora_extra_vocab_size,
            help=('Maximum size of extra vocabulary that can be '
                  'present in a LoRA adapter (added to the base '
                  'model vocabulary).'))
        parser.add_argument(
            '--lora-dtype',
            type=str,
            default=EngineArgs.lora_dtype,
            choices=['auto', 'float16', 'bfloat16', 'float32'],
            help=('Data type for LoRA. If auto, will default to '
                  'base model dtype.'))
        parser.add_argument(
            '--max-cpu-loras',
            type=int,
            default=EngineArgs.max_cpu_loras,
            help=('Maximum number of LoRAs to store in CPU memory. '
                  'Must be >= than max_num_seqs. '
                  'Defaults to max_num_seqs.'))
        parser.add_argument(
            "--device",
            type=str,
            default=EngineArgs.device,
            choices=["cuda"],
            help=('Device type for vLLM execution. '
                  'Currently, only CUDA-compatible devices are supported.'))
        return parser

    @classmethod
    def from_cli_args(cls, args: argparse.Namespace) -> 'EngineArgs':
        """Create an instance from a parsed argparse namespace.

        Every dataclass field is looked up by name on ``args``, so the
        CLI flag names (with dashes mapped to underscores) must match the
        field names exactly.
        """
        # Get the list of attributes of this dataclass.
        attrs = [attr.name for attr in dataclasses.fields(cls)]
        # Set the attributes from the parsed arguments.
        engine_args = cls(**{attr: getattr(args, attr) for attr in attrs})
        return engine_args

    # NOTE: the return annotation is a string so the config classes are
    # not evaluated at class-definition time.
    def create_engine_configs(
        self,
    ) -> ('Tuple[ModelConfig, CacheConfig, ParallelConfig, SchedulerConfig, '
          'DeviceConfig, Optional[LoRAConfig]]'):
        """Translate the flat argument set into the engine config objects.

        Returns a tuple of (model, cache, parallel, scheduler, device,
        lora) configs; the LoRA config is None unless --enable-lora was
        given.
        """
        device_config = DeviceConfig(self.device)
        model_config = ModelConfig(self.model, self.tokenizer,
                                   self.tokenizer_mode, self.trust_remote_code,
                                   self.download_dir, self.load_format,
                                   self.dtype, self.seed, self.revision,
                                   self.tokenizer_revision, self.max_model_len,
                                   self.quantization, self.enforce_eager,
                                   self.max_context_len_to_capture)
        cache_config = CacheConfig(self.block_size,
                                   self.gpu_memory_utilization,
                                   self.swap_space, self.kv_cache_dtype,
                                   model_config.get_sliding_window())
        parallel_config = ParallelConfig(self.pipeline_parallel_size,
                                         self.tensor_parallel_size,
                                         self.worker_use_ray,
                                         self.max_parallel_loading_workers,
                                         self.disable_custom_all_reduce)
        scheduler_config = SchedulerConfig(self.max_num_batched_tokens,
                                           self.max_num_seqs,
                                           model_config.max_model_len,
                                           self.max_paddings)
        # Non-positive --max-cpu-loras means "use the library default".
        lora_config = LoRAConfig(
            max_lora_rank=self.max_lora_rank,
            max_loras=self.max_loras,
            lora_extra_vocab_size=self.lora_extra_vocab_size,
            lora_dtype=self.lora_dtype,
            max_cpu_loras=self.max_cpu_loras if self.max_cpu_loras
            and self.max_cpu_loras > 0 else None) if self.enable_lora else None
        return (model_config, cache_config, parallel_config, scheduler_config,
                device_config, lora_config)
@dataclass
class AsyncEngineArgs(EngineArgs):
    """Arguments for asynchronous vLLM engine."""
    # Run the engine in a Ray process separate from the server process.
    engine_use_ray: bool = False
    # Suppress per-request logging.
    disable_log_requests: bool = False
    # Truncation limit for prompts / prompt ids in logs; None = unlimited.
    max_log_len: Optional[int] = None

    @staticmethod
    def add_cli_args(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Register async-engine flags on top of the shared engine flags."""
        parser = EngineArgs.add_cli_args(parser)
        parser.add_argument('--engine-use-ray',
                            action='store_true',
                            help='use Ray to start the LLM engine in a '
                            'separate process as the server process.')
        parser.add_argument('--disable-log-requests',
                            action='store_true',
                            help='disable logging requests')
        parser.add_argument('--max-log-len',
                            type=int,
                            # Consistency: reference the field default (still
                            # None) like every other flag in this file,
                            # instead of a hard-coded literal.
                            default=AsyncEngineArgs.max_log_len,
                            help='max number of prompt characters or prompt '
                            'ID numbers being printed in log. '
                            'Default: unlimited.')
        return parser