import argparse
import dataclasses
from dataclasses import dataclass
from typing import Optional, Tuple

from vllm.config import (CacheConfig, ModelConfig, ParallelConfig,
                         SchedulerConfig, LoRAConfig)


@dataclass
class EngineArgs:
    """Arguments for vLLM engine."""
    model: str
    tokenizer: Optional[str] = None
    tokenizer_mode: str = 'auto'
    trust_remote_code: bool = False
    download_dir: Optional[str] = None
    load_format: str = 'auto'
    dtype: str = 'auto'
    kv_cache_dtype: str = 'auto'
    seed: int = 0
    max_model_len: Optional[int] = None
    worker_use_ray: bool = False
    pipeline_parallel_size: int = 1
    tensor_parallel_size: int = 1
    max_parallel_loading_workers: Optional[int] = None
    block_size: int = 16
    swap_space: int = 4  # GiB
    gpu_memory_utilization: float = 0.90
    max_num_batched_tokens: Optional[int] = None
    max_num_seqs: int = 256
    max_paddings: int = 256
    disable_log_stats: bool = False
    revision: Optional[str] = None
    tokenizer_revision: Optional[str] = None
    quantization: Optional[str] = None
    enforce_eager: bool = False
    max_context_len_to_capture: int = 8192
    disable_custom_all_reduce: bool = False
    enable_lora: bool = False
    max_loras: int = 1
    max_lora_rank: int = 16
    lora_extra_vocab_size: int = 256
    lora_dtype: str = 'auto'
    max_cpu_loras: Optional[int] = None

    def __post_init__(self):
        if self.tokenizer is None:
            self.tokenizer = self.model

    @staticmethod
    def add_cli_args(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Shared CLI arguments for vLLM engine."""

        # NOTE: If you update any of the arguments below, please also
        # make sure to update docs/source/models/engine_args.rst

        # Model arguments
        parser.add_argument(
            '--model',
            type=str,
            default='facebook/opt-125m',
            help='name or path of the huggingface model to use')
        parser.add_argument(
            '--tokenizer',
            type=str,
            default=EngineArgs.tokenizer,
            help='name or path of the huggingface tokenizer to use')
        parser.add_argument(
            '--revision',
            type=str,
            default=None,
            help='the specific model version to use. It can be a branch '
            'name, a tag name, or a commit id. If unspecified, will use '
            'the default version.')
        parser.add_argument(
            '--tokenizer-revision',
            type=str,
            default=None,
            help='the specific tokenizer version to use. It can be a branch '
            'name, a tag name, or a commit id. If unspecified, will use '
            'the default version.')
        parser.add_argument('--tokenizer-mode',
                            type=str,
                            default=EngineArgs.tokenizer_mode,
                            choices=['auto', 'slow'],
                            help='tokenizer mode. "auto" will use the fast '
                            'tokenizer if available, and "slow" will '
                            'always use the slow tokenizer.')
        parser.add_argument('--trust-remote-code',
                            action='store_true',
                            help='trust remote code from huggingface')
        parser.add_argument('--download-dir',
                            type=str,
                            default=EngineArgs.download_dir,
                            help='directory to download and load the weights; '
                            'defaults to the default cache dir of '
                            'huggingface')
        parser.add_argument(
            '--load-format',
            type=str,
            default=EngineArgs.load_format,
            choices=['auto', 'pt', 'safetensors', 'npcache', 'dummy'],
            help='The format of the model weights to load. '
            '"auto" will try to load the weights in the safetensors format '
            'and fall back to the pytorch bin format if safetensors format '
            'is not available. '
            '"pt" will load the weights in the pytorch bin format. '
            '"safetensors" will load the weights in the safetensors format. '
            '"npcache" will load the weights in pytorch format and store '
            'a numpy cache to speed up the loading. '
            '"dummy" will initialize the weights with random values, '
            'which is mainly for profiling.')
        parser.add_argument(
            '--dtype',
            type=str,
            default=EngineArgs.dtype,
            choices=[
                'auto', 'half', 'float16', 'bfloat16', 'float', 'float32'
            ],
            help='data type for model weights and activations. '
            'The "auto" option will use FP16 precision '
            'for FP32 and FP16 models, and BF16 precision '
            'for BF16 models.')
        parser.add_argument(
            '--kv-cache-dtype',
            type=str,
            choices=['auto', 'fp8_e5m2'],
            default=EngineArgs.kv_cache_dtype,
            help='Data type for kv cache storage. If "auto", will use model '
            'data type. Note FP8 is not supported when CUDA version is '
            'lower than 11.8.')
        parser.add_argument('--max-model-len',
                            type=int,
                            default=EngineArgs.max_model_len,
                            help='model context length. If unspecified, '
                            'will be automatically derived from the model.')
        # Parallel arguments
        parser.add_argument('--worker-use-ray',
                            action='store_true',
                            help='use Ray for distributed serving, will be '
                            'automatically set when using more than 1 GPU')
        parser.add_argument('--pipeline-parallel-size',
                            '-pp',
                            type=int,
                            default=EngineArgs.pipeline_parallel_size,
                            help='number of pipeline stages')
        parser.add_argument('--tensor-parallel-size',
                            '-tp',
                            type=int,
                            default=EngineArgs.tensor_parallel_size,
                            help='number of tensor parallel replicas')
        parser.add_argument(
            '--max-parallel-loading-workers',
            type=int,
            default=EngineArgs.max_parallel_loading_workers,
            help='load model sequentially in multiple batches, '
            'to avoid RAM OOM when using tensor '
            'parallel and large models')
        # KV cache arguments
        parser.add_argument('--block-size',
                            type=int,
                            default=EngineArgs.block_size,
                            choices=[8, 16, 32],
                            help='token block size')
        # TODO(woosuk): Support fine-grained seeds (e.g., seed per request).
        parser.add_argument('--seed',
                            type=int,
                            default=EngineArgs.seed,
                            help='random seed')
        parser.add_argument('--swap-space',
                            type=int,
                            default=EngineArgs.swap_space,
                            help='CPU swap space size (GiB) per GPU')
        parser.add_argument(
            '--gpu-memory-utilization',
            type=float,
            default=EngineArgs.gpu_memory_utilization,
            help='the fraction of GPU memory to be used for '
            'the model executor, which can range from 0 to 1. '
            'If unspecified, will use the default value of 0.9.')
        parser.add_argument('--max-num-batched-tokens',
                            type=int,
                            default=EngineArgs.max_num_batched_tokens,
                            help='maximum number of batched tokens per '
                            'iteration')
        parser.add_argument('--max-num-seqs',
                            type=int,
                            default=EngineArgs.max_num_seqs,
                            help='maximum number of sequences per iteration')
        parser.add_argument('--max-paddings',
                            type=int,
                            default=EngineArgs.max_paddings,
                            help='maximum number of paddings in a batch')
        parser.add_argument('--disable-log-stats',
                            action='store_true',
                            help='disable logging statistics')
        # Quantization settings.
        parser.add_argument('--quantization',
                            '-q',
                            type=str,
                            choices=['awq', 'gptq', 'squeezellm', None],
                            default=None,
                            help='Method used to quantize the weights. If '
                            'None, we first check the `quantization_config` '
                            'attribute in the model config file. If that is '
                            'None, we assume the model weights are not '
                            'quantized and use `dtype` to determine the data '
                            'type of the weights.')
        parser.add_argument('--enforce-eager',
                            action='store_true',
                            help='Always use eager-mode PyTorch. If False, '
                            'will use eager mode and CUDA graph in hybrid '
                            'for maximal performance and flexibility.')
        parser.add_argument('--max-context-len-to-capture',
                            type=int,
                            default=EngineArgs.max_context_len_to_capture,
                            help='maximum context length covered by CUDA '
                            'graphs. When a sequence has context length '
                            'larger than this, we fall back to eager mode.')
        parser.add_argument('--disable-custom-all-reduce',
                            action='store_true',
                            default=EngineArgs.disable_custom_all_reduce,
                            help='disable the custom all-reduce kernel '
                            '(see ParallelConfig for details)')
        # LoRA related configs
        parser.add_argument('--enable-lora',
                            action='store_true',
                            help='If True, enable handling of LoRA adapters.')
        parser.add_argument('--max-loras',
                            type=int,
                            default=EngineArgs.max_loras,
                            help='Max number of LoRAs in a single batch.')
        parser.add_argument('--max-lora-rank',
                            type=int,
                            default=EngineArgs.max_lora_rank,
                            help='Max LoRA rank.')
        parser.add_argument(
            '--lora-extra-vocab-size',
            type=int,
            default=EngineArgs.lora_extra_vocab_size,
            help=('Maximum size of extra vocabulary that can be '
                  'present in a LoRA adapter (added to the base '
                  'model vocabulary).'))
        parser.add_argument(
            '--lora-dtype',
            type=str,
            default=EngineArgs.lora_dtype,
            choices=['auto', 'float16', 'bfloat16', 'float32'],
            help=('Data type for LoRA. If auto, will default to '
                  'base model dtype.'))
        parser.add_argument(
            '--max-cpu-loras',
            type=int,
            default=EngineArgs.max_cpu_loras,
            help=('Maximum number of LoRAs to store in CPU memory. '
                  'Must be >= max_num_seqs. '
                  'Defaults to max_num_seqs.'))
        return parser

    @classmethod
    def from_cli_args(cls, args: argparse.Namespace) -> 'EngineArgs':
        # Get the list of attributes of this dataclass.
        attrs = [attr.name for attr in dataclasses.fields(cls)]
        # Set the attributes from the parsed arguments.
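        # argparse turns '--some-flag' into the dest name 'some_flag', so the
        # parsed namespace attributes line up with the dataclass field names.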
        engine_args = cls(**{attr: getattr(args, attr) for attr in attrs})
        return engine_args

    def create_engine_configs(
        self,
    ) -> Tuple[ModelConfig, CacheConfig, ParallelConfig, SchedulerConfig,
               Optional[LoRAConfig]]:
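        # Build the configs in dependency order: ModelConfig comes first
        # because CacheConfig needs its sliding window and SchedulerConfig
        # needs its derived max_model_len.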
        model_config = ModelConfig(self.model, self.tokenizer,
                                   self.tokenizer_mode, self.trust_remote_code,
                                   self.download_dir, self.load_format,
                                   self.dtype, self.seed, self.revision,
                                   self.tokenizer_revision, self.max_model_len,
                                   self.quantization, self.enforce_eager,
                                   self.max_context_len_to_capture)
        cache_config = CacheConfig(self.block_size,
                                   self.gpu_memory_utilization,
                                   self.swap_space, self.kv_cache_dtype,
                                   model_config.get_sliding_window())
        parallel_config = ParallelConfig(self.pipeline_parallel_size,
                                         self.tensor_parallel_size,
                                         self.worker_use_ray,
                                         self.max_parallel_loading_workers,
                                         self.disable_custom_all_reduce)
        scheduler_config = SchedulerConfig(self.max_num_batched_tokens,
                                           self.max_num_seqs,
                                           model_config.max_model_len,
                                           self.max_paddings)
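        # LoRA is opt-in: when --enable-lora is not set, no LoRAConfig is
        # built and the last element of the returned tuple is None.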
        if self.enable_lora:
            lora_config = LoRAConfig(
                max_lora_rank=self.max_lora_rank,
                max_loras=self.max_loras,
                lora_extra_vocab_size=self.lora_extra_vocab_size,
                lora_dtype=self.lora_dtype,
                max_cpu_loras=self.max_cpu_loras
                if self.max_cpu_loras and self.max_cpu_loras > 0 else None)
        else:
            lora_config = None
        return (model_config, cache_config, parallel_config, scheduler_config,
                lora_config)


@dataclass
class AsyncEngineArgs(EngineArgs):
    """Arguments for asynchronous vLLM engine."""
    engine_use_ray: bool = False
    disable_log_requests: bool = False
    max_log_len: Optional[int] = None

    @staticmethod
    def add_cli_args(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        parser = EngineArgs.add_cli_args(parser)
        parser.add_argument('--engine-use-ray',
                            action='store_true',
                            help='use Ray to start the LLM engine in a '
                            'separate process as the server process.')
        parser.add_argument('--disable-log-requests',
                            action='store_true',
                            help='disable logging requests')
        parser.add_argument('--max-log-len',
                            type=int,
                            default=None,
                            help='max number of prompt characters or prompt '
                            'token IDs printed in the log. '
                            'Default: unlimited.')
        return parser
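
# Minimal usage sketch (illustrative only; 'facebook/opt-125m' is simply the
# parser's default model above):
#
#   parser = argparse.ArgumentParser()
#   parser = AsyncEngineArgs.add_cli_args(parser)
#   args = parser.parse_args(['--model', 'facebook/opt-125m'])
#   engine_args = AsyncEngineArgs.from_cli_args(args)
#   configs = engine_args.create_engine_configs()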