server_args.py 12.3 KB
Newer Older
Lianmin Zheng's avatar
Lianmin Zheng committed
1
2
"""The arguments of the server."""

Lianmin Zheng's avatar
Lianmin Zheng committed
3
4
import argparse
import dataclasses
5
import random
6
from typing import List, Optional, Union
Lianmin Zheng's avatar
Lianmin Zheng committed
7
8
9
10


@dataclasses.dataclass
class ServerArgs:
    """Arguments for launching the server.

    The dataclass field defaults double as the argparse defaults registered in
    ``add_cli_args``, so programmatic construction and CLI parsing stay in sync.
    ``__post_init__`` fills in the defaults that depend on other fields.
    """

    # Model and tokenizer
    model_path: str
    tokenizer_path: Optional[str] = None
    tokenizer_mode: str = "auto"
    load_format: str = "auto"
    dtype: str = "auto"
    # NOTE(review): the CLI flag below uses action="store_true", so arguments
    # parsed from the command line default to False even though this dataclass
    # default is True — confirm which default is actually intended.
    trust_remote_code: bool = True
    context_length: Optional[int] = None
    quantization: Optional[str] = None
    chat_template: Optional[str] = None

    # Port
    host: str = "127.0.0.1"
    port: int = 30000
    additional_ports: Optional[Union[List[int], int]] = None

    # Memory and scheduling
    mem_fraction_static: Optional[float] = None
    max_prefill_tokens: Optional[int] = None
    max_running_requests: Optional[int] = None
    schedule_heuristic: str = "lpm"
    schedule_conservativeness: float = 0.8

    # Other runtime options
    tp_size: int = 1
    stream_interval: int = 8
    random_seed: Optional[int] = None

    # Logging
    log_level: str = "info"
    log_level_http: Optional[str] = None
    log_requests: bool = False
    show_time_cost: bool = False

    # Other
    api_key: str = ""

    # Data parallelism
    dp_size: int = 1
    load_balance_method: str = "round_robin"

    # Optimization/debug options
    disable_flashinfer: bool = False
    disable_radix_cache: bool = False
    disable_regex_jump_forward: bool = False
    disable_cuda_graph: bool = False
    disable_disk_cache: bool = False
    attention_reduce_in_fp32: bool = False
    enable_p2p_check: bool = False

    # Distributed args
    nccl_init_addr: Optional[str] = None
    nnodes: int = 1
    node_rank: Optional[int] = None

    def __post_init__(self):
        """Resolve defaults that depend on other fields.

        - ``tokenizer_path`` falls back to ``model_path``.
        - ``mem_fraction_static`` is picked from ``tp_size`` (larger
          tensor-parallel groups reserve a smaller static fraction).
        - ``additional_ports`` is normalized to a list.
        - ``random_seed`` is drawn randomly when unset.
        """
        if self.tokenizer_path is None:
            self.tokenizer_path = self.model_path

        if self.mem_fraction_static is None:
            if self.tp_size >= 16:
                self.mem_fraction_static = 0.74
            elif self.tp_size >= 8:
                self.mem_fraction_static = 0.78
            elif self.tp_size >= 4:
                self.mem_fraction_static = 0.82
            elif self.tp_size >= 2:
                self.mem_fraction_static = 0.85
            else:
                self.mem_fraction_static = 0.88

        # Normalize to a list so downstream code can always iterate.
        if isinstance(self.additional_ports, int):
            self.additional_ports = [self.additional_ports]
        elif self.additional_ports is None:
            self.additional_ports = []

        if self.random_seed is None:
            self.random_seed = random.randint(0, 1 << 30)

    @staticmethod
    def add_cli_args(parser: argparse.ArgumentParser):
        """Register every server argument on *parser*.

        Flag names mirror the dataclass field names (dashes for underscores),
        and defaults are taken from the dataclass so ``from_cli_args`` can
        round-trip a parsed namespace back into a ``ServerArgs``.
        """
        parser.add_argument(
            "--model-path",
            type=str,
            help="The path of the model weights. This can be a local folder or a Hugging Face repo ID.",
            required=True,
        )
        parser.add_argument(
            "--tokenizer-path",
            type=str,
            default=ServerArgs.tokenizer_path,
            help="The path of the tokenizer.",
        )
        parser.add_argument(
            "--host", type=str, default=ServerArgs.host, help="The host of the server."
        )
        parser.add_argument(
            "--port", type=int, default=ServerArgs.port, help="The port of the server."
        )
        parser.add_argument(
            "--additional-ports",
            type=int,
            nargs="*",
            default=[],
            help="The additional ports specified for the server.",
        )
        parser.add_argument(
            "--tokenizer-mode",
            type=str,
            default=ServerArgs.tokenizer_mode,
            choices=["auto", "slow"],
            help="Tokenizer mode. 'auto' will use the fast "
            "tokenizer if available, and 'slow' will "
            "always use the slow tokenizer.",
        )
        parser.add_argument(
            "--load-format",
            type=str,
            default=ServerArgs.load_format,
            choices=["auto", "pt", "safetensors", "npcache", "dummy"],
            help="The format of the model weights to load. "
            '"auto" will try to load the weights in the safetensors format '
            "and fall back to the pytorch bin format if safetensors format "
            "is not available. "
            '"pt" will load the weights in the pytorch bin format. '
            '"safetensors" will load the weights in the safetensors format. '
            '"npcache" will load the weights in pytorch format and store '
            "a numpy cache to speed up the loading. "
            '"dummy" will initialize the weights with random values, '
            "which is mainly for profiling.",
        )
        parser.add_argument(
            "--dtype",
            type=str,
            default=ServerArgs.dtype,
            choices=["auto", "half", "float16", "bfloat16", "float", "float32"],
            help="Data type for model weights and activations.\n\n"
            '* "auto" will use FP16 precision for FP32 and FP16 models, and '
            "BF16 precision for BF16 models.\n"
            '* "half" for FP16. Recommended for AWQ quantization.\n'
            '* "float16" is the same as "half".\n'
            '* "bfloat16" for a balance between precision and range.\n'
            '* "float" is shorthand for FP32 precision.\n'
            '* "float32" for FP32 precision.',
        )
        # NOTE(review): store_true makes the parsed default False, while the
        # dataclass default above is True — confirm the intended default.
        parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files.",
        )
        parser.add_argument(
            "--context-length",
            type=int,
            default=ServerArgs.context_length,
            help="The model's maximum context length. Defaults to None (will use the value from the model's config.json instead).",
        )
        parser.add_argument(
            "--quantization",
            type=str,
            default=ServerArgs.quantization,
            help="The quantization method.",
        )
        parser.add_argument(
            "--chat-template",
            type=str,
            default=ServerArgs.chat_template,
            help="The builtin chat template name or the path of the chat template file. This is only used for OpenAI-compatible API server.",
        )
        parser.add_argument(
            "--mem-fraction-static",
            type=float,
            default=ServerArgs.mem_fraction_static,
            help="The fraction of the memory used for static allocation (model weights and KV cache memory pool). Use a smaller value if you see out-of-memory errors.",
        )
        parser.add_argument(
            "--max-prefill-tokens",
            type=int,
            default=ServerArgs.max_prefill_tokens,
            help="The maximum number of tokens in a prefill batch. The real bound will be the maximum of this value and the model's maximum context length.",
        )
        parser.add_argument(
            "--max-running-requests",
            type=int,
            default=ServerArgs.max_running_requests,
            help="The maximum number of running requests.",
        )
        parser.add_argument(
            "--schedule-heuristic",
            type=str,
            default=ServerArgs.schedule_heuristic,
            choices=["lpm", "random", "fcfs", "dfs-weight"],
            help="The scheduling heuristic.",
        )
        parser.add_argument(
            "--schedule-conservativeness",
            type=float,
            default=ServerArgs.schedule_conservativeness,
            help="How conservative the schedule policy is. A larger value means more conservative scheduling. Use a larger value if you see requests being retracted frequently.",
        )
        parser.add_argument(
            "--tp-size",
            type=int,
            default=ServerArgs.tp_size,
            help="The tensor parallelism size.",
        )
        parser.add_argument(
            "--stream-interval",
            type=int,
            default=ServerArgs.stream_interval,
            help="The interval (or buffer size) for streaming in terms of the token length. A smaller value makes streaming smoother, while a larger value makes the throughput higher",
        )
        parser.add_argument(
            "--random-seed",
            type=int,
            default=ServerArgs.random_seed,
            help="The random seed.",
        )
        parser.add_argument(
            "--log-level",
            type=str,
            default=ServerArgs.log_level,
            help="The logging level of all loggers.",
        )
        parser.add_argument(
            "--log-level-http",
            type=str,
            default=ServerArgs.log_level_http,
            help="The logging level of HTTP server. If not set, reuse --log-level by default.",
        )
        parser.add_argument(
            "--log-requests",
            action="store_true",
            help="Log the inputs and outputs of all requests.",
        )
        parser.add_argument(
            "--show-time-cost",
            action="store_true",
            help="Show time cost of custom marks",
        )
        parser.add_argument(
            "--api-key",
            type=str,
            default=ServerArgs.api_key,
            help="Set API key of the server",
        )

        # Data parallelism
        parser.add_argument(
            "--dp-size",
            type=int,
            default=ServerArgs.dp_size,
            help="The data parallelism size.",
        )
        parser.add_argument(
            "--load-balance-method",
            type=str,
            default=ServerArgs.load_balance_method,
            help="The load balancing strategy for data parallelism.",
            choices=[
                "round_robin",
                "shortest_queue",
            ],
        )

        # Multi-node distributed serving args
        parser.add_argument(
            "--nccl-init-addr",
            type=str,
            help="The nccl init address of multi-node server.",
        )
        parser.add_argument(
            # Use the dataclass default for consistency with the other flags.
            "--nnodes", type=int, default=ServerArgs.nnodes, help="The number of nodes."
        )
        parser.add_argument("--node-rank", type=int, help="The node rank.")

        # Optimization/debug options
        parser.add_argument(
            "--disable-flashinfer",
            action="store_true",
            help="Disable flashinfer inference kernels",
        )
        parser.add_argument(
            "--disable-radix-cache",
            action="store_true",
            help="Disable RadixAttention",
        )
        parser.add_argument(
            "--disable-regex-jump-forward",
            action="store_true",
            help="Disable regex jump-forward",
        )
        parser.add_argument(
            "--disable-cuda-graph",
            action="store_true",
            help="Disable cuda graph.",
        )
        parser.add_argument(
            "--disable-disk-cache",
            action="store_true",
            help="Disable disk cache to avoid possible crashes related to file system or high concurrency.",
        )
        parser.add_argument(
            "--attention-reduce-in-fp32",
            action="store_true",
            help="Cast the intermediate attention results to fp32 to avoid possible crashes related to fp16."
            "This only affects Triton attention kernels",
        )
        parser.add_argument(
            "--enable-p2p-check",
            action="store_true",
            help="Enable P2P check for GPU access, otherwise the p2p access is allowed by default.",
        )

    @classmethod
    def from_cli_args(cls, args: argparse.Namespace):
        """Build a ``ServerArgs`` from a namespace parsed with ``add_cli_args``."""
        attrs = [attr.name for attr in dataclasses.fields(cls)]
        return cls(**{attr: getattr(args, attr) for attr in attrs})

    def url(self):
        """Return the base HTTP URL of the server."""
        return f"http://{self.host}:{self.port}"

    def print_mode_args(self):
        """Return a one-line summary of the optimization/debug flags."""
        return (
            f"disable_flashinfer={self.disable_flashinfer}, "
            f"attention_reduce_in_fp32={self.attention_reduce_in_fp32}, "
            f"disable_radix_cache={self.disable_radix_cache}, "
            f"disable_regex_jump_forward={self.disable_regex_jump_forward}, "
            f"disable_disk_cache={self.disable_disk_cache}, "
        )

Lianmin Zheng's avatar
Lianmin Zheng committed
339

340
341
342
@dataclasses.dataclass
class ModelPortArgs:
    """Network endpoints for one model worker group."""

    # Port used for distributed (NCCL) initialization.
    nccl_port: int
    # IPs of the model workers — presumably one entry per tensor-parallel
    # rank, paired index-wise with model_tp_ports; confirm with callers.
    model_tp_ips: List[str]
    # Ports of the model workers, paired index-wise with model_tp_ips.
    model_tp_ports: List[int]


Lianmin Zheng's avatar
Lianmin Zheng committed
347
348
349
350
351
@dataclasses.dataclass
class PortArgs:
    """Ports used by the server's internal processes."""

    # Port of the tokenizer process.
    tokenizer_port: int
    # Port of the router process.
    router_port: int
    # Port of the detokenizer process.
    detokenizer_port: int
    # Per-model-worker-group port assignments.
    model_port_args: List[ModelPortArgs]