import argparse
import dataclasses
from dataclasses import dataclass
from typing import Optional, Tuple

from vllm.config import (CacheConfig, ModelConfig, ParallelConfig,
                         SchedulerConfig)


@dataclass
class EngineArgs:
    """Arguments for vLLM engine.

    Each field corresponds to a ``--kebab-case`` CLI flag registered by
    :meth:`add_cli_args`; :meth:`from_cli_args` maps a parsed namespace
    back onto this dataclass.  Field order is part of the public
    ``__init__`` signature and must not be changed.
    """
    # Model arguments.
    model: str
    tokenizer: Optional[str] = None  # defaults to `model` (see __post_init__)
    tokenizer_mode: str = 'auto'
    download_dir: Optional[str] = None
    use_np_weights: bool = False
    use_dummy_weights: bool = False
    dtype: str = 'auto'
    seed: int = 0
    # Parallel arguments.
    worker_use_ray: bool = False
    pipeline_parallel_size: int = 1
    tensor_parallel_size: int = 1
    # KV cache arguments.
    block_size: int = 16
    swap_space: int = 4  # GiB
    gpu_memory_utilization: float = 0.90
    # Scheduler arguments.
    max_num_batched_tokens: int = 2560
    max_num_seqs: int = 256
    disable_log_stats: bool = False

    def __post_init__(self):
        # The tokenizer defaults to the model's own name/path.
        if self.tokenizer is None:
            self.tokenizer = self.model
        # A batch can never hold more sequences than tokens, since every
        # sequence contributes at least one token per iteration.
        self.max_num_seqs = min(self.max_num_seqs, self.max_num_batched_tokens)

    @staticmethod
    def add_cli_args(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Shared CLI arguments for vLLM engine.

        Registers one flag per dataclass field on `parser` and returns
        the same parser for chaining.  Defaults are read from the class
        attributes so the dataclass stays the single source of truth.
        """
        # Model arguments
        parser.add_argument(
            '--model',
            type=str,
            default='facebook/opt-125m',
            help='name or path of the huggingface model to use')
        parser.add_argument(
            '--tokenizer',
            type=str,
            default=EngineArgs.tokenizer,
            help='name or path of the huggingface tokenizer to use')
        parser.add_argument('--tokenizer-mode',
                            type=str,
                            default=EngineArgs.tokenizer_mode,
                            choices=['auto', 'slow'],
                            help='tokenizer mode. "auto" will use the fast '
                            'tokenizer if available, and "slow" will '
                            'always use the slow tokenizer.')
        parser.add_argument('--download-dir',
                            type=str,
                            default=EngineArgs.download_dir,
                            help='directory to download and load the weights, '
                            'default to the default cache dir of '
                            'huggingface')
        parser.add_argument('--use-np-weights',
                            action='store_true',
                            help='save a numpy copy of model weights for '
                            'faster loading. This can increase the disk '
                            'usage by up to 2x.')
        parser.add_argument('--use-dummy-weights',
                            action='store_true',
                            help='use dummy values for model weights')
        # TODO(woosuk): Support FP32.
        parser.add_argument(
            '--dtype',
            type=str,
            default=EngineArgs.dtype,
            choices=['auto', 'half', 'bfloat16', 'float'],
            help='data type for model weights and activations. '
            'The "auto" option will use FP16 precision '
            'for FP32 and FP16 models, and BF16 precision '
            'for BF16 models.')
        # Parallel arguments
        parser.add_argument('--worker-use-ray',
                            action='store_true',
                            help='use Ray for distributed serving, will be '
                            'automatically set when using more than 1 GPU')
        parser.add_argument('--pipeline-parallel-size',
                            '-pp',
                            type=int,
                            default=EngineArgs.pipeline_parallel_size,
                            help='number of pipeline stages')
        parser.add_argument('--tensor-parallel-size',
                            '-tp',
                            type=int,
                            default=EngineArgs.tensor_parallel_size,
                            help='number of tensor parallel replicas')
        # KV cache arguments
        parser.add_argument('--block-size',
                            type=int,
                            default=EngineArgs.block_size,
                            choices=[8, 16, 32],
                            help='token block size')
        # TODO(woosuk): Support fine-grained seeds (e.g., seed per request).
        parser.add_argument('--seed',
                            type=int,
                            default=EngineArgs.seed,
                            help='random seed')
        parser.add_argument('--swap-space',
                            type=int,
                            default=EngineArgs.swap_space,
                            help='CPU swap space size (GiB) per GPU')
        parser.add_argument('--gpu-memory-utilization',
                            type=float,
                            default=EngineArgs.gpu_memory_utilization,
                            # Trailing space below fixes the help text, which
                            # previously rendered as "used forthe model ...".
                            help='the percentage of GPU memory to be used for '
                            'the model executor')
        parser.add_argument('--max-num-batched-tokens',
                            type=int,
                            default=EngineArgs.max_num_batched_tokens,
                            help='maximum number of batched tokens per '
                            'iteration')
        parser.add_argument('--max-num-seqs',
                            type=int,
                            default=EngineArgs.max_num_seqs,
                            help='maximum number of sequences per iteration')
        parser.add_argument('--disable-log-stats',
                            action='store_true',
                            help='disable logging statistics')
        return parser

    @classmethod
    def from_cli_args(cls, args: argparse.Namespace) -> 'EngineArgs':
        """Build an `EngineArgs` from a namespace produced by `add_cli_args`."""
        # Get the list of attributes of this dataclass.
        attrs = [attr.name for attr in dataclasses.fields(cls)]
        # Set the attributes from the parsed arguments.
        engine_args = cls(**{attr: getattr(args, attr) for attr in attrs})
        return engine_args

    def create_engine_configs(
        self,
    # NOTE: annotation is a string (PEP 563 style) so the class can be
    # defined without evaluating the vllm.config names at import time.
    ) -> 'Tuple[ModelConfig, CacheConfig, ParallelConfig, SchedulerConfig]':
        """Translate these arguments into the four engine config objects."""
        # Initialize the configs.
        model_config = ModelConfig(self.model, self.tokenizer,
                                   self.tokenizer_mode, self.download_dir,
                                   self.use_np_weights, self.use_dummy_weights,
                                   self.dtype, self.seed)
        cache_config = CacheConfig(self.block_size,
                                   self.gpu_memory_utilization,
                                   self.swap_space)
        parallel_config = ParallelConfig(self.pipeline_parallel_size,
                                         self.tensor_parallel_size,
                                         self.worker_use_ray)
        # Cap the scheduler's max sequence length by the model's context
        # window when the HF config exposes one (presumably set by
        # ModelConfig — TODO confirm hf_config is always populated).
        model_max_len = getattr(model_config.hf_config,
                                'max_position_embeddings', float('inf'))
        max_seq_len = min(self.max_num_batched_tokens, model_max_len)
        scheduler_config = SchedulerConfig(self.max_num_batched_tokens,
                                           self.max_num_seqs, max_seq_len)
        return model_config, cache_config, parallel_config, scheduler_config


@dataclass
class AsyncEngineArgs(EngineArgs):
    """Arguments for asynchronous vLLM engine."""
    # Extra knobs that only apply to the async (server) engine.
    engine_use_ray: bool = False
    disable_log_requests: bool = False

    @staticmethod
    def add_cli_args(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Register async-engine flags on top of the shared engine flags."""
        # Start from the flags shared with the synchronous engine.
        parser = EngineArgs.add_cli_args(parser)
        # Both async-only options are simple boolean switches.
        async_flags = (
            ('--engine-use-ray',
             'use Ray to start the LLM engine in a '
             'separate process as the server process.'),
            ('--disable-log-requests',
             'disable logging requests'),
        )
        for flag, flag_help in async_flags:
            parser.add_argument(flag, action='store_true', help=flag_help)
        return parser