"tests/pipelines/audioldm2/test_audioldm2.py" did not exist on "5e3f8fff40604ed2332e9f07b3796b15b43b91bb"
server_args.py 54.2 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
# Copyright 2023-2024 SGLang Team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The arguments of the server."""

import argparse
import dataclasses
import json
import logging
import os
import random
import tempfile
from typing import List, Literal, Optional

from sglang.srt.hf_transformers_utils import check_gguf_file, get_config
from sglang.srt.reasoning_parser import ReasoningParser
from sglang.srt.utils import (
    configure_ipv6,
    get_device,
    get_device_memory_capacity,
    is_flashinfer_available,
    is_hip,
    is_port_available,
    is_remote_url,
    is_valid_ipv6_address,
    nullable_str,
)

logger = logging.getLogger(__name__)


@dataclasses.dataclass
class ServerArgs:
    # Model and tokenizer
    model_path: str
    tokenizer_path: Optional[str] = None
    tokenizer_mode: str = "auto"
    skip_tokenizer_init: bool = False
    enable_tokenizer_batch_encode: bool = False
    load_format: str = "auto"
    trust_remote_code: bool = False
    dtype: str = "auto"
    kv_cache_dtype: str = "auto"
    quantization: Optional[str] = None
    quantization_param_path: Optional[str] = None
    context_length: Optional[int] = None
    device: Optional[str] = None
    served_model_name: Optional[str] = None
    chat_template: Optional[str] = None
    completion_template: Optional[str] = None
    is_embedding: bool = False
    revision: Optional[str] = None

    # Port for the HTTP server
    host: str = "127.0.0.1"
    port: int = 30000

    # Memory and scheduling
    mem_fraction_static: Optional[float] = None
    max_running_requests: Optional[int] = None
    max_total_tokens: Optional[int] = None
    chunked_prefill_size: Optional[int] = None
    max_prefill_tokens: int = 16384
    schedule_policy: str = "fcfs"
    schedule_conservativeness: float = 1.0
    cpu_offload_gb: int = 0
    page_size: int = 1

    # Other runtime options
    tp_size: int = 1
    stream_interval: int = 1
    stream_output: bool = False
    random_seed: Optional[int] = None
    constrained_json_whitespace_pattern: Optional[str] = None
    watchdog_timeout: float = 300
    dist_timeout: Optional[int] = None  # timeout for torch.distributed
    download_dir: Optional[str] = None
    base_gpu_id: int = 0
    gpu_id_step: int = 1

    # Logging
    log_level: str = "info"
    log_level_http: Optional[str] = None
    log_requests: bool = False
    log_requests_level: int = 0
    show_time_cost: bool = False
    enable_metrics: bool = False
    decode_log_interval: int = 40

    # API related
    api_key: Optional[str] = None
    file_storage_path: str = "sglang_storage"
    enable_cache_report: bool = False
    reasoning_parser: Optional[str] = None

    # Data parallelism
    dp_size: int = 1
    load_balance_method: str = "round_robin"

    # Expert parallelism
    ep_size: int = 1

    # Multi-node distributed serving
    dist_init_addr: Optional[str] = None
    nnodes: int = 1
    node_rank: int = 0

    # Model override args in JSON
    json_model_override_args: str = "{}"

    # LoRA
    lora_paths: Optional[List[str]] = None
    max_loras_per_batch: int = 8
    lora_backend: str = "triton"

    # Kernel backend
    attention_backend: Optional[str] = None
    sampling_backend: Optional[str] = None
    grammar_backend: Optional[str] = None

    # Speculative decoding
    speculative_algorithm: Optional[str] = None
    speculative_draft_model_path: Optional[str] = None
    speculative_num_steps: Optional[int] = None
    speculative_eagle_topk: Optional[int] = None
    speculative_num_draft_tokens: Optional[int] = None
    speculative_accept_threshold_single: float = 1.0
    speculative_accept_threshold_acc: float = 1.0
    speculative_token_map: Optional[str] = None

    # Double Sparsity
    enable_double_sparsity: bool = False
    ds_channel_config_path: Optional[str] = None
    ds_heavy_channel_num: int = 32
    ds_heavy_token_num: int = 256
    ds_heavy_channel_type: str = "qk"
    ds_sparse_decode_threshold: int = 4096

    # Optimization/debug options
    disable_radix_cache: bool = False
    disable_cuda_graph: bool = False
    disable_cuda_graph_padding: bool = False
    enable_nccl_nvls: bool = False
    disable_outlines_disk_cache: bool = False
    disable_custom_all_reduce: bool = False
    enable_multimodal: Optional[bool] = None
    disable_overlap_schedule: bool = False
    enable_mixed_chunk: bool = False
    enable_dp_attention: bool = False
    enable_ep_moe: bool = False
    enable_deepep_moe: bool = False
    deepep_mode: Optional[Literal["auto", "normal", "low_latency"]] = "auto"
    enable_torch_compile: bool = False
    torch_compile_max_bs: int = 32
    cuda_graph_max_bs: Optional[int] = None
    cuda_graph_bs: Optional[List[int]] = None
    torchao_config: str = ""
    enable_nan_detection: bool = False
    enable_p2p_check: bool = False
    triton_attention_reduce_in_fp32: bool = False
    triton_attention_num_kv_splits: int = 8
    num_continuous_decode_steps: int = 1
    delete_ckpt_after_loading: bool = False
    enable_memory_saver: bool = False
    allow_auto_truncate: bool = False
    enable_custom_logit_processor: bool = False
    tool_call_parser: Optional[str] = None
    enable_hierarchical_cache: bool = False
    hicache_ratio: float = 2.0
    hicache_size: int = 0
    hicache_write_policy: str = "write_through_selective"
    flashinfer_mla_disable_ragged: bool = False
    warmups: Optional[str] = None
    moe_dense_tp_size: Optional[int] = None
    n_share_experts_fusion: int = 0
    disable_chunked_prefix_cache: bool = False
    disable_fast_image_processor: bool = False

    # Debug tensor dumps
    debug_tensor_dump_output_folder: Optional[str] = None
    debug_tensor_dump_input_file: Optional[str] = None
    debug_tensor_dump_inject: bool = False

    # For PD disaggregation: can be "null" (not disaggregated), "prefill" (prefill-only), or "decode" (decode-only)
    disaggregation_mode: str = "null"
    disaggregation_bootstrap_port: int = 8998
    disaggregation_transfer_backend: str = "mooncake"
    disaggregation_ib_device: Optional[str] = None

    def __post_init__(self):
        # Expert parallelism
        if self.enable_ep_moe:
            self.ep_size = self.tp_size
            logger.warning(
                f"EP MoE is enabled. The expert parallel size is adjusted to be the same as the tensor parallel size[{self.tp_size}]."
            )

        # Set missing default values
        if self.tokenizer_path is None:
            self.tokenizer_path = self.model_path

        if self.device is None:
            self.device = get_device()

        if self.served_model_name is None:
            self.served_model_name = self.model_path

        if self.random_seed is None:
            self.random_seed = random.randint(0, 1 << 30)

        gpu_mem = get_device_memory_capacity(self.device)

        # Set mem fraction static, which depends on the tensor parallelism size
        if self.mem_fraction_static is None:
            if self.tp_size >= 16:
                self.mem_fraction_static = 0.79
            elif self.tp_size >= 8:
                self.mem_fraction_static = 0.81
            elif self.tp_size >= 4:
                self.mem_fraction_static = 0.85
            elif self.tp_size >= 2:
                self.mem_fraction_static = 0.87
            else:
                self.mem_fraction_static = 0.88
            if gpu_mem is not None and gpu_mem > 96 * 1024:
                mem_fraction = self.mem_fraction_static
                self.mem_fraction_static = min(
                    mem_fraction + 48 * 1024 * (1 - mem_fraction) / gpu_mem,
                    (gpu_mem - 1024 * 18)
                    / gpu_mem,  # 15 GB + additional 3GB for cuda graph
                )
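            # Worked example (illustrative): on a ~141 GB GPU (gpu_mem ≈ 144384 MB)
            # with tp_size = 1, the base fraction 0.88 becomes
            # min(0.88 + 48*1024*0.12/144384, (144384 - 18432)/144384)
            # ≈ min(0.921, 0.872) = 0.872.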

        # Set chunked prefill size, which depends on the gpu memory capacity
        if self.chunked_prefill_size is None:
            if gpu_mem is not None and gpu_mem < 25_000:
                self.chunked_prefill_size = 2048
            else:
                self.chunked_prefill_size = 8192
        assert self.chunked_prefill_size % self.page_size == 0
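        # Example (illustrative): the defaults 2048 and 8192 are multiples of every
        # page_size forced below (1, 64, 128), so this assert holds out of the box.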

        assert self.moe_dense_tp_size in {
            1,
            None,
        }, "moe_dense_tp_size currently only supports 1 and None"

        if self.attention_backend == "flashmla":
            logger.warning(
                "FlashMLA only supports a page_size of 64; changing page_size to 64."
            )
            self.page_size = 64

        if self.attention_backend == "cutlass_mla":
            logger.warning(
                "Cutlass MLA only supports a page_size of 128; changing page_size to 128."
            )
            self.page_size = 128

        # Set cuda graph max batch size
        if self.cuda_graph_max_bs is None:
            # Based on detailed statistics, when serving TP1/TP2 models on lower-end
            # GPUs with HBM < 25 GB, you can either disable cuda graph or set
            # `cuda_graph_max_bs` to a very small value to reduce the memory overhead
            # of creating cuda graphs, with almost no impact on performance. However,
            # when serving models with TP4 or TP8, we need to enable cuda graph to
            # maintain high performance. In this case, we can set `cuda_graph_max_bs`
            # to 80 (half of the default value 160) to reduce the memory overhead of
            # creating cuda graphs. Looking at the logs from TP4 serving of qwen2-72b,
            # a value of 80 is sufficient and can reduce the memory overhead of
            # creating cuda graphs on lower-end GPUs compared to the original 160,
            # avoiding OOM issues.
            if gpu_mem is not None and gpu_mem < 25_000:
                if self.tp_size < 4:
                    self.cuda_graph_max_bs = 8
                else:
                    self.cuda_graph_max_bs = 80

        # Set kernel backends for hpu device
        if self.device == "hpu":
            self.attention_backend = "torch_native"
            self.sampling_backend = "pytorch"

        # Set kernel backends
        if self.sampling_backend is None:
            self.sampling_backend = (
                "flashinfer" if is_flashinfer_available() else "pytorch"
            )

        if self.attention_backend == "torch_native":
            logger.warning(
                "Cuda graph is disabled because the torch native attention backend is used"
            )
            self.disable_cuda_graph = True

        # Choose grammar backend
        if self.grammar_backend is None:
            self.grammar_backend = "xgrammar"

        # Data parallelism attention
        if self.enable_dp_attention:
            self.schedule_conservativeness = self.schedule_conservativeness * 0.3
            assert (
                self.dp_size > 1
            ), "Please set a dp-size > 1. You can use 1 < dp-size <= tp-size."
            assert self.tp_size % self.dp_size == 0
            self.chunked_prefill_size = self.chunked_prefill_size // self.dp_size
            logger.warning(
                f"DP attention is enabled. The chunked prefill size is adjusted to {self.chunked_prefill_size} to avoid MoE kernel issues."
            )
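            # Example (illustrative): with tp_size = dp_size = 8 and the default
            # chunked_prefill_size of 8192, each attention-DP rank prefills
            # 8192 // 8 = 1024 tokens per chunk.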

        # DeepEP MoE
        self.enable_sp_layernorm = False
        if self.enable_deepep_moe:
            if self.deepep_mode == "auto":
                assert (
                    not self.enable_dp_attention
                ), "DeepEP MoE `auto` mode is not supported with DP Attention."
            self.ep_size = self.tp_size
            self.enable_sp_layernorm = (
                self.dp_size < self.tp_size if self.enable_dp_attention else True
            )
            logger.warning(
                f"DeepEP MoE is enabled. The expert parallel size is adjusted to be the same as the tensor parallel size[{self.tp_size}]."
            )

        # Speculative Decoding
        if self.speculative_algorithm == "NEXTN":
            # NEXTN shares the same implementation as EAGLE
            self.speculative_algorithm = "EAGLE"

        if self.speculative_algorithm in ("EAGLE", "EAGLE3"):
            if self.max_running_requests is None:
                self.max_running_requests = 48
            self.disable_overlap_schedule = True
            logger.warning(
                "Overlap scheduler is disabled because of using "
                "eagle speculative decoding."
            )

            model_arch = get_model_arch(self)

            # Auto-set draft_model_path for DeepSeek-V3/R1
            if self.speculative_draft_model_path is None and model_arch in [
                "DeepseekV3ForCausalLM"
            ]:
                self.speculative_draft_model_path = self.model_path

            # Auto choose parameters
            if self.speculative_num_steps is None:
                assert (
                    self.speculative_eagle_topk is None
                    and self.speculative_num_draft_tokens is None
                )
                (
                    self.speculative_num_steps,
                    self.speculative_eagle_topk,
                    self.speculative_num_draft_tokens,
                ) = auto_choose_speculative_params(model_arch)

            if self.page_size > 1 and self.speculative_eagle_topk > 1:
                self.speculative_eagle_topk = 1
                logger.warning(
                    "speculative_eagle_topk is adjusted to 1 when page_size > 1"
                )

            if (
                self.speculative_eagle_topk == 1
                and self.speculative_num_draft_tokens != self.speculative_num_steps + 1
            ):
                logger.warning(
                    "speculative_num_draft_tokens is adjusted to speculative_num_steps + 1 when speculative_eagle_topk == 1"
                )
                self.speculative_num_draft_tokens = self.speculative_num_steps + 1

            # The token generated from the verify step is counted.
            # If speculative_num_steps >= speculative_num_draft_tokens, the additional tokens will definitely be discarded.
            # assert self.speculative_num_steps < self.speculative_num_draft_tokens
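            # Example (illustrative): with speculative_num_steps = 5 and
            # speculative_eagle_topk = 1, speculative_num_draft_tokens is forced to
            # 6: the five chained draft tokens plus the token from the verify step.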

        # GGUF
        if (
            self.load_format == "auto" or self.load_format == "gguf"
        ) and check_gguf_file(self.model_path):
            self.quantization = self.load_format = "gguf"

        if is_remote_url(self.model_path):
            self.load_format = "remote"

        # AMD-specific Triton attention KV splits default number
        if is_hip():
            self.triton_attention_num_kv_splits = 16

        # PD disaggregation
        if self.disaggregation_mode == "prefill":
            self.disable_cuda_graph = True
            logger.warning("Cuda graph is disabled for prefill server")
        elif self.disaggregation_mode == "decode":
            self.disable_radix_cache = True
            logger.warning("KV cache is forced as chunk cache for decode server")

        os.environ["SGLANG_ENABLE_TORCH_COMPILE"] = (
            "1" if self.enable_torch_compile else "0"
        )

        # Set env var before grammar backends init
        os.environ["SGLANG_DISABLE_OUTLINES_DISK_CACHE"] = (
            "1" if self.disable_outlines_disk_cache else "0"
        )

    @staticmethod
    def add_cli_args(parser: argparse.ArgumentParser):
        # Model and port args
        parser.add_argument(
            "--model-path",
            type=str,
            help="The path of the model weights. This can be a local folder or a Hugging Face repo ID.",
            required=True,
        )
        parser.add_argument(
            "--tokenizer-path",
            type=str,
            default=ServerArgs.tokenizer_path,
            help="The path of the tokenizer.",
        )
        parser.add_argument(
            "--host", type=str, default=ServerArgs.host, help="The host of the server."
        )
        parser.add_argument(
            "--port", type=int, default=ServerArgs.port, help="The port of the server."
        )
        parser.add_argument(
            "--tokenizer-mode",
            type=str,
            default=ServerArgs.tokenizer_mode,
            choices=["auto", "slow"],
            help="Tokenizer mode. 'auto' will use the fast "
            "tokenizer if available, and 'slow' will "
            "always use the slow tokenizer.",
        )
        parser.add_argument(
            "--skip-tokenizer-init",
            action="store_true",
            help="If set, skip initializing the tokenizer and pass input_ids directly in generate requests.",
        )
        parser.add_argument(
            "--enable-tokenizer-batch-encode",
            action="store_true",
            help="Enable batch tokenization for improved performance when processing multiple text inputs. Do not use with image inputs, pre-tokenized input_ids, or input_embeds.",
        )
        parser.add_argument(
            "--load-format",
            type=str,
            default=ServerArgs.load_format,
            choices=[
                "auto",
                "pt",
                "safetensors",
                "npcache",
                "dummy",
                "sharded_state",
                "gguf",
                "bitsandbytes",
                "layered",
                "remote",
            ],
            help="The format of the model weights to load. "
            '"auto" will try to load the weights in the safetensors format '
            "and fall back to the pytorch bin format if safetensors format "
            "is not available. "
            '"pt" will load the weights in the pytorch bin format. '
            '"safetensors" will load the weights in the safetensors format. '
            '"npcache" will load the weights in pytorch format and store '
            "a numpy cache to speed up the loading. "
            '"dummy" will initialize the weights with random values, '
            "which is mainly for profiling. "
            '"gguf" will load the weights in the gguf format. '
            '"bitsandbytes" will load the weights using bitsandbytes '
            "quantization. "
            '"layered" loads weights layer by layer so that one can quantize a '
            "layer before loading another to make the peak memory envelope "
            "smaller.",
        )
        parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files.",
        )
        parser.add_argument(
            "--dtype",
            type=str,
            default=ServerArgs.dtype,
            choices=["auto", "half", "float16", "bfloat16", "float", "float32"],
            help="Data type for model weights and activations.\n\n"
            '* "auto" will use FP16 precision for FP32 and FP16 models, and '
            "BF16 precision for BF16 models.\n"
            '* "half" for FP16. Recommended for AWQ quantization.\n'
            '* "float16" is the same as "half".\n'
            '* "bfloat16" for a balance between precision and range.\n'
            '* "float" is shorthand for FP32 precision.\n'
            '* "float32" for FP32 precision.',
        )
        parser.add_argument(
            "--kv-cache-dtype",
            type=str,
            default=ServerArgs.kv_cache_dtype,
            choices=["auto", "fp8_e5m2", "fp8_e4m3"],
            help='Data type for kv cache storage. "auto" will use the model data type. "fp8_e5m2" and "fp8_e4m3" are supported for CUDA 11.8+.',
        )
        parser.add_argument(
            "--quantization",
            type=str,
            default=ServerArgs.quantization,
            choices=[
                "awq",
                "fp8",
                "gptq",
                "marlin",
                "gptq_marlin",
                "awq_marlin",
                "bitsandbytes",
                "gguf",
                "modelopt",
                "modelopt_fp4",
                "w8a8_int8",
                "w8a8_fp8",
                "moe_wna16",
            ],
            help="The quantization method.",
        )
        parser.add_argument(
            "--quantization-param-path",
            type=nullable_str,
            default=None,
            help="Path to the JSON file containing the KV cache "
            "scaling factors. This should generally be supplied when "
            "the KV cache dtype is FP8. Otherwise, KV cache scaling factors "
            "default to 1.0, which may cause accuracy issues.",
        )
        parser.add_argument(
            "--context-length",
            type=int,
            default=ServerArgs.context_length,
            help="The model's maximum context length. Defaults to None (will use the value from the model's config.json instead).",
        )
        parser.add_argument(
            "--device",
            type=str,
            default=ServerArgs.device,
            help="The device to use ('cuda', 'xpu', 'hpu', 'cpu'). Defaults to auto-detection if not specified.",
        )
        parser.add_argument(
            "--served-model-name",
            type=str,
            default=ServerArgs.served_model_name,
            help="Override the model name returned by the v1/models endpoint in OpenAI API server.",
        )
        parser.add_argument(
            "--chat-template",
            type=str,
            default=ServerArgs.chat_template,
            help="The built-in chat template name or the path of the chat template file. This is only used for the OpenAI-compatible API server.",
        )
        parser.add_argument(
            "--completion-template",
            type=str,
            default=ServerArgs.completion_template,
            help="The built-in completion template name or the path of the completion template file. This is only used for the OpenAI-compatible API server, and currently only for code completion.",
        )
        parser.add_argument(
            "--is-embedding",
            action="store_true",
            help="Whether to use a CausalLM as an embedding model.",
        )
        parser.add_argument(
            "--revision",
            type=str,
            default=None,
            help="The specific model version to use. It can be a branch "
            "name, a tag name, or a commit id. If unspecified, will use "
            "the default version.",
        )

        # Memory and scheduling
        parser.add_argument(
            "--mem-fraction-static",
            type=float,
            default=ServerArgs.mem_fraction_static,
            help="The fraction of the memory used for static allocation (model weights and KV cache memory pool). Use a smaller value if you see out-of-memory errors.",
        )
        parser.add_argument(
            "--max-running-requests",
            type=int,
            default=ServerArgs.max_running_requests,
            help="The maximum number of running requests.",
        )
        parser.add_argument(
            "--max-total-tokens",
            type=int,
            default=ServerArgs.max_total_tokens,
            help="The maximum number of tokens in the memory pool. If not specified, it will be automatically calculated based on the memory usage fraction. "
            "This option is typically used for development and debugging purposes.",
        )
        parser.add_argument(
            "--chunked-prefill-size",
            type=int,
            default=ServerArgs.chunked_prefill_size,
            help="The maximum number of tokens in a chunk for the chunked prefill. Setting this to -1 means disabling chunked prefill.",
        )
        parser.add_argument(
            "--max-prefill-tokens",
            type=int,
            default=ServerArgs.max_prefill_tokens,
            help="The maximum number of tokens in a prefill batch. The real bound will be the maximum of this value and the model's maximum context length.",
        )
        parser.add_argument(
            "--schedule-policy",
            type=str,
            default=ServerArgs.schedule_policy,
            choices=["lpm", "random", "fcfs", "dfs-weight"],
            help="The scheduling policy of the requests.",
        )
        parser.add_argument(
            "--schedule-conservativeness",
            type=float,
            default=ServerArgs.schedule_conservativeness,
            help="How conservative the schedule policy is. A larger value means more conservative scheduling. Use a larger value if you see requests being retracted frequently.",
        )
        parser.add_argument(
            "--cpu-offload-gb",
            type=int,
            default=ServerArgs.cpu_offload_gb,
            help="How many GBs of RAM to reserve for CPU offloading.",
        )
        parser.add_argument(
            "--page-size",
            type=int,
            default=ServerArgs.page_size,
            help="The number of tokens in a page.",
        )

        # Other runtime options
        parser.add_argument(
            "--tensor-parallel-size",
            "--tp-size",
            type=int,
            default=ServerArgs.tp_size,
            help="The tensor parallelism size.",
        )
        parser.add_argument(
            "--stream-interval",
            type=int,
            default=ServerArgs.stream_interval,
            help="The interval (or buffer size) for streaming in terms of the token length. A smaller value makes streaming smoother, while a larger value makes the throughput higher.",
        )
        parser.add_argument(
            "--stream-output",
            action="store_true",
            help="Whether to output as a sequence of disjoint segments.",
        )
        parser.add_argument(
            "--random-seed",
            type=int,
            default=ServerArgs.random_seed,
            help="The random seed.",
        )
        parser.add_argument(
            "--constrained-json-whitespace-pattern",
            type=str,
            default=ServerArgs.constrained_json_whitespace_pattern,
            help=r"Regex pattern for syntactic whitespaces allowed in JSON constrained output. For example, to allow the model to generate consecutive whitespaces, set the pattern to [\n\t ]*",
        )
        parser.add_argument(
            "--watchdog-timeout",
            type=float,
            default=ServerArgs.watchdog_timeout,
            help="Set watchdog timeout in seconds. If a forward batch takes longer than this, the server will crash to prevent hanging.",
        )
        parser.add_argument(
            "--dist-timeout",
            type=int,
            default=ServerArgs.dist_timeout,
            help="Set timeout for torch.distributed initialization.",
        )
        parser.add_argument(
            "--download-dir",
            type=str,
            default=ServerArgs.download_dir,
            help="Model download directory for huggingface.",
        )
        parser.add_argument(
            "--base-gpu-id",
            type=int,
            default=ServerArgs.base_gpu_id,
            help="The base GPU ID to start allocating GPUs from. Useful when running multiple instances on the same machine.",
        )
        parser.add_argument(
            "--gpu-id-step",
            type=int,
            default=ServerArgs.gpu_id_step,
            help="The delta between consecutive GPU IDs that are used. For example, setting it to 2 will use GPU 0,2,4,...",
        )

        # Logging
        parser.add_argument(
            "--log-level",
            type=str,
            default=ServerArgs.log_level,
            help="The logging level of all loggers.",
        )
        parser.add_argument(
            "--log-level-http",
            type=str,
            default=ServerArgs.log_level_http,
            help="The logging level of the HTTP server. If not set, reuses --log-level by default.",
        )
        parser.add_argument(
            "--log-requests",
            action="store_true",
            help="Log metadata, inputs, and outputs of all requests. The verbosity is decided by --log-requests-level.",
        )
        parser.add_argument(
            "--log-requests-level",
            type=int,
            default=0,
            help="0: Log metadata. 1: Log metadata and partial input/output. 2: Log every input/output.",
            choices=[0, 1, 2],
        )
        parser.add_argument(
            "--show-time-cost",
            action="store_true",
            help="Show time cost of custom marks.",
        )
        parser.add_argument(
            "--enable-metrics",
            action="store_true",
            help="Enable logging of Prometheus metrics.",
        )
        parser.add_argument(
            "--decode-log-interval",
            type=int,
            default=ServerArgs.decode_log_interval,
            help="The log interval of decode batches.",
        )

        # API related
        parser.add_argument(
            "--api-key",
            type=str,
            default=ServerArgs.api_key,
            help="Set the API key of the server. It is also used in the OpenAI API compatible server.",
        )
        parser.add_argument(
            "--file-storage-path",
            type=str,
            default=ServerArgs.file_storage_path,
            help="The path of the file storage in the backend.",
        )
        parser.add_argument(
            "--enable-cache-report",
            action="store_true",
            help="Return the number of cached tokens in usage.prompt_tokens_details for each openai request.",
        )
        parser.add_argument(
            "--reasoning-parser",
            type=str,
            choices=list(ReasoningParser.DetectorMap.keys()),
            default=ServerArgs.reasoning_parser,
            help=f"Specify the parser for reasoning models, supported parsers are: {list(ReasoningParser.DetectorMap.keys())}.",
        )

        # Data parallelism
        parser.add_argument(
            "--data-parallel-size",
            "--dp-size",
            type=int,
            default=ServerArgs.dp_size,
            help="The data parallelism size.",
        )
        parser.add_argument(
            "--load-balance-method",
            type=str,
            default=ServerArgs.load_balance_method,
            help="The load balancing strategy for data parallelism.",
            choices=[
                "round_robin",
                "shortest_queue",
            ],
        )

        # Expert parallelism
        parser.add_argument(
            "--expert-parallel-size",
            "--ep-size",
            type=int,
            default=ServerArgs.ep_size,
            help="The expert parallelism size.",
        )

        # Multi-node distributed serving
        parser.add_argument(
            "--dist-init-addr",
            "--nccl-init-addr",  # For backward compatibility. This will be removed in the future.
            type=str,
            help="The host address for initializing distributed backend (e.g., `192.168.0.2:25000`).",
        )
        parser.add_argument(
            "--nnodes", type=int, default=ServerArgs.nnodes, help="The number of nodes."
        )
        parser.add_argument(
            "--node-rank", type=int, default=ServerArgs.node_rank, help="The node rank."
        )
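        # Example (illustrative) of a two-node deployment, run once per node:
        #   node 0: --dist-init-addr 192.168.0.2:25000 --nnodes 2 --node-rank 0
        #   node 1: --dist-init-addr 192.168.0.2:25000 --nnodes 2 --node-rank 1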

        # Model override args
        parser.add_argument(
            "--json-model-override-args",
            type=str,
            help="A dictionary in JSON string format used to override default model configurations.",
            default=ServerArgs.json_model_override_args,
        )
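        # Example (illustrative; the key is a standard HF config field):
        #   --json-model-override-args '{"max_position_embeddings": 65536}'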

        # LoRA
        parser.add_argument(
            "--lora-paths",
            type=str,
            nargs="*",
            default=None,
            action=LoRAPathAction,
            help="The list of LoRA adapters. You can provide a list of either plain paths or renamed paths in the format {name}={path}.",
        )
        parser.add_argument(
            "--max-loras-per-batch",
            type=int,
            default=8,
            help="Maximum number of adapters for a running batch, including base-only requests.",
        )
        parser.add_argument(
            "--lora-backend",
            type=str,
            default="triton",
            help="Choose the kernel backend for multi-LoRA serving.",
        )

        # Kernel backend
        parser.add_argument(
            "--attention-backend",
            type=str,
            choices=[
                "flashinfer",
                "triton",
                "torch_native",
                "fa3",
                "flashmla",
                "cutlass_mla",
            ],
            default=ServerArgs.attention_backend,
            help="Choose the kernels for attention layers.",
        )
        parser.add_argument(
            "--sampling-backend",
            type=str,
            choices=["flashinfer", "pytorch"],
            default=ServerArgs.sampling_backend,
            help="Choose the kernels for sampling layers.",
        )
        parser.add_argument(
            "--grammar-backend",
            type=str,
            choices=["xgrammar", "outlines", "llguidance", "none"],
            default=ServerArgs.grammar_backend,
            help="Choose the backend for grammar-guided decoding.",
        )
        parser.add_argument(
            "--enable-flashinfer-mla",
            action=DeprecatedAction,
            help="--enable-flashinfer-mla is deprecated. Please use '--attention-backend flashinfer' instead.",
        )
        parser.add_argument(
            "--enable-flashmla",
            action=DeprecatedAction,
            help="--enable-flashmla is deprecated. Please use '--attention-backend flashmla' instead.",
        )
        parser.add_argument(
            "--flashinfer-mla-disable-ragged",
            action="store_true",
            help="Do not use the ragged prefill wrapper when running FlashInfer MLA.",
        )

        # Speculative decoding
        parser.add_argument(
            "--speculative-algorithm",
            type=str,
            choices=["EAGLE", "EAGLE3", "NEXTN"],
            help="Speculative algorithm.",
        )
        parser.add_argument(
            "--speculative-draft-model-path",
            type=str,
            help="The path of the draft model weights. This can be a local folder or a Hugging Face repo ID.",
        )
        parser.add_argument(
            "--speculative-num-steps",
            type=int,
            help="The number of steps sampled from the draft model in speculative decoding.",
            default=ServerArgs.speculative_num_steps,
        )
        parser.add_argument(
            "--speculative-eagle-topk",
            type=int,
            help="The number of tokens sampled from the draft model in EAGLE-2 at each step.",
            default=ServerArgs.speculative_eagle_topk,
        )
        parser.add_argument(
            "--speculative-num-draft-tokens",
            type=int,
            help="The number of tokens sampled from the draft model in speculative decoding.",
            default=ServerArgs.speculative_num_draft_tokens,
        )
        parser.add_argument(
            "--speculative-accept-threshold-single",
            type=float,
            help="Accept a draft token if its probability in the target model is greater than this threshold.",
            default=ServerArgs.speculative_accept_threshold_single,
        )
        parser.add_argument(
            "--speculative-accept-threshold-acc",
            type=float,
            help="The accept probability of a draft token is raised from its target probability p to min(1, p / threshold_acc).",
            default=ServerArgs.speculative_accept_threshold_acc,
        )
        parser.add_argument(
            "--speculative-token-map",
            type=str,
            help="The path of the draft model's small vocab table.",
            default=ServerArgs.speculative_token_map,
        )
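        # Example (illustrative) of a full EAGLE invocation; the draft model path
        # and the numeric values are placeholders:
        #   --speculative-algorithm EAGLE --speculative-draft-model-path <draft-model>
        #   --speculative-num-steps 5 --speculative-eagle-topk 4 --speculative-num-draft-tokens 8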

        # Double Sparsity
        parser.add_argument(
            "--enable-double-sparsity",
            action="store_true",
            help="Enable double sparsity attention",
        )
        parser.add_argument(
            "--ds-channel-config-path",
            type=str,
            default=ServerArgs.ds_channel_config_path,
            help="The path of the double sparsity channel config",
        )
        parser.add_argument(
            "--ds-heavy-channel-num",
            type=int,
            default=ServerArgs.ds_heavy_channel_num,
            help="The number of heavy channels in double sparsity attention",
        )
        parser.add_argument(
            "--ds-heavy-token-num",
            type=int,
            default=ServerArgs.ds_heavy_token_num,
            help="The number of heavy tokens in double sparsity attention",
        )
        parser.add_argument(
            "--ds-heavy-channel-type",
            type=str,
            default=ServerArgs.ds_heavy_channel_type,
            help="The type of heavy channels in double sparsity attention",
        )
        parser.add_argument(
            "--ds-sparse-decode-threshold",
            type=int,
            default=ServerArgs.ds_sparse_decode_threshold,
            help="The minimum decode sequence length before sparse decoding is used in double sparsity attention",
        )

        # Optimization/debug options
        parser.add_argument(
            "--disable-radix-cache",
            action="store_true",
            help="Disable RadixAttention for prefix caching.",
        )
        parser.add_argument(
            "--disable-cuda-graph",
            action="store_true",
            help="Disable cuda graph.",
        )
        parser.add_argument(
            "--disable-cuda-graph-padding",
            action="store_true",
            help="Disable cuda graph when padding is needed. Still uses cuda graph when padding is not needed.",
        )
        parser.add_argument(
            "--enable-nccl-nvls",
            action="store_true",
            help="Enable NCCL NVLS for prefill heavy requests when available.",
        )
        parser.add_argument(
            "--disable-outlines-disk-cache",
            action="store_true",
            help="Disable disk cache of outlines to avoid possible crashes related to file system or high concurrency.",
        )
        parser.add_argument(
            "--disable-custom-all-reduce",
            action="store_true",
            help="Disable the custom all-reduce kernel and fall back to NCCL.",
        )
        parser.add_argument(
            "--enable-multimodal",
            default=ServerArgs.enable_multimodal,
            action="store_true",
            help="Enable the multimodal functionality for the served model. If the model being served is not multimodal, nothing will happen.",
        )
        parser.add_argument(
            "--disable-overlap-schedule",
            action="store_true",
            help="Disable the overlap scheduler, which overlaps the CPU scheduler with the GPU model worker.",
        )
        parser.add_argument(
            "--enable-mixed-chunk",
            action="store_true",
            help="Enable mixing prefill and decode in a batch when using chunked prefill.",
        )
        parser.add_argument(
            "--enable-dp-attention",
            action="store_true",
            help="Enable data parallelism for attention and tensor parallelism for FFN. The dp size should be equal to the tp size. Currently only DeepSeek-V2 is supported.",
        )
        parser.add_argument(
            "--enable-ep-moe",
            action="store_true",
            help="Enable expert parallelism for MoE. The ep size is equal to the tp size.",
        )
        parser.add_argument(
            "--enable-torch-compile",
            action="store_true",
            help="Optimize the model with torch.compile. Experimental feature.",
        )
        parser.add_argument(
            "--torch-compile-max-bs",
            type=int,
            default=ServerArgs.torch_compile_max_bs,
            help="Set the maximum batch size when using torch compile.",
        )
        parser.add_argument(
            "--cuda-graph-max-bs",
            type=int,
            default=ServerArgs.cuda_graph_max_bs,
            help="Set the maximum batch size for cuda graph.",
        )
        parser.add_argument(
            "--cuda-graph-bs",
            type=int,
            nargs="+",
            help="Set the list of batch sizes for cuda graph.",
        )
        parser.add_argument(
            "--torchao-config",
            type=str,
            default=ServerArgs.torchao_config,
            help="Optimize the model with torchao. Experimental feature. Current choices are: int8dq, int8wo, int4wo-<group_size>, fp8wo, fp8dq-per_tensor, fp8dq-per_row",
        )
        parser.add_argument(
            "--enable-nan-detection",
            action="store_true",
            help="Enable the NaN detection for debugging purposes.",
        )
        parser.add_argument(
            "--enable-p2p-check",
            action="store_true",
            help="Enable P2P check for GPU access; otherwise, P2P access is allowed by default.",
        )
        parser.add_argument(
            "--triton-attention-reduce-in-fp32",
            action="store_true",
            help="Cast the intermediate attention results to fp32 to avoid possible crashes related to fp16. "
            "This only affects Triton attention kernels.",
        )
        parser.add_argument(
            "--triton-attention-num-kv-splits",
            type=int,
            default=ServerArgs.triton_attention_num_kv_splits,
            help="The number of KV splits in flash decoding Triton kernel. Larger value is better in longer context scenarios. The default value is 8.",
        )
        parser.add_argument(
            "--num-continuous-decode-steps",
            type=int,
            default=ServerArgs.num_continuous_decode_steps,
            help="Run multiple continuous decoding steps to reduce scheduling overhead. "
            "This can potentially increase throughput but may also increase time-to-first-token latency. "
            "The default value is 1, meaning only run one decoding step at a time.",
        )
        parser.add_argument(
            "--delete-ckpt-after-loading",
            action="store_true",
            help="Delete the model checkpoint after loading the model.",
        )
        parser.add_argument(
            "--enable-memory-saver",
            action="store_true",
            help="Allow saving memory using release_memory_occupation and resume_memory_occupation.",
        )
        parser.add_argument(
            "--allow-auto-truncate",
            action="store_true",
            help="Allow automatically truncating requests that exceed the maximum input length instead of returning an error.",
        )
        parser.add_argument(
            "--enable-custom-logit-processor",
            action="store_true",
            help="Enable users to pass custom logit processors to the server (disabled by default for security).",
        )
        parser.add_argument(
            "--tool-call-parser",
            type=str,
            choices=["qwen25", "mistral", "llama3", "deepseekv3"],
            default=ServerArgs.tool_call_parser,
            help="Specify the parser for handling tool-call interactions. Options include: 'qwen25', 'mistral', 'llama3', and 'deepseekv3'.",
        )
        parser.add_argument(
            "--enable-hierarchical-cache",
            action="store_true",
            help="Enable hierarchical cache.",
        )
        parser.add_argument(
            "--hicache-ratio",
            type=float,
            default=ServerArgs.hicache_ratio,
            help="The ratio of the size of host KV cache memory pool to the size of device pool.",
        )
        parser.add_argument(
            "--hicache-size",
            type=int,
            default=ServerArgs.hicache_size,
            help="The size of the host KV cache memory pool in gigabytes, which will override the hicache_ratio if set.",
        )
        parser.add_argument(
            "--hicache-write-policy",
            type=str,
            choices=["write_back", "write_through", "write_through_selective"],
            default=ServerArgs.hicache_write_policy,
            help="The write policy of hierarchical cache.",
        )
        parser.add_argument(
            "--enable-deepep-moe",
            action="store_true",
            help="Enable the DeepEP MoE implementation for EP MoE.",
        )
        parser.add_argument(
            "--moe-dense-tp-size",
            type=int,
            default=ServerArgs.moe_dense_tp_size,
            help="TP size for MoE dense MLP layers. This flag is useful when, with large TP size, there are errors caused by weights in MLP layers having dimension smaller than the min dimension GEMM supports.",
        )
        parser.add_argument(
            "--deepep-mode",
            type=str,
            choices=["normal", "low_latency", "auto"],
            default="auto",
            help="Select the mode when DeepEP MoE is enabled: `normal`, `low_latency`, or `auto`. Default is `auto`, which means `low_latency` for decode batches and `normal` for prefill batches.",
        )

1157
1158
1159
        parser.add_argument(
            "--n-share-experts-fusion",
            type=int,
1160
            default=0,
1161
1162
            help="The number of shared_experts need to be replicated to fuse with normal experts in deepseek v3/r1, "
            "set it to tp_size can get best optimized performace.",
1163
        )
1164
1165
1166
1167
1168
        parser.add_argument(
            "--disable-chunked-prefix-cache",
            action="store_true",
            help="Disable chunked prefix cache feature for deepseek, which should save overhead for short sequences.",
        )
        parser.add_argument(
            "--disable-fast-image-processor",
            action="store_true",
            help="Adopt base image processor instead of fast image processor.",
        )

        # Server warmups
        parser.add_argument(
            "--warmups",
            type=str,
            required=False,
            help="Specify custom warmup functions (csv) to run before server starts eg. --warmups=warmup_name1,warmup_name2 "
            "will run the functions `warmup_name1` and `warmup_name2` specified in warmup.py before the server starts listening for requests",
        )

        # Debug tensor dumps
        parser.add_argument(
            "--debug-tensor-dump-output-folder",
            type=str,
            default=ServerArgs.debug_tensor_dump_output_folder,
            help="The output folder for dumping tensors.",
        )
        parser.add_argument(
            "--debug-tensor-dump-input-file",
            type=str,
            default=ServerArgs.debug_tensor_dump_input_file,
            help="The input filename for dumping tensors",
        )
        parser.add_argument(
            "--debug-tensor-dump-inject",
            type=str,
            default=ServerArgs.debug_tensor_dump_inject,
            help="Inject the outputs from jax as the input of every layer.",
        )

        # Disaggregation
        parser.add_argument(
            "--disaggregation-mode",
            type=str,
            default="null",
            choices=["null", "prefill", "decode"],
            help='Only used for PD disaggregation. "prefill" for a prefill-only server, and "decode" for a decode-only server. If not specified, the server is not PD disaggregated.',
        )
        parser.add_argument(
            "--disaggregation-bootstrap-port",
            type=int,
            default=ServerArgs.disaggregation_bootstrap_port,
            help="Bootstrap server port on the prefill server. Default is 8998.",
        )
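        # Example (illustrative): a PD-disaggregated deployment pairs one
        # prefill-only server with one decode-only server, e.g.
        #   python -m sglang.launch_server ... --disaggregation-mode prefill
        #   python -m sglang.launch_server ... --disaggregation-mode decode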
        parser.add_argument(
            "--disaggregation-transfer-backend",
            type=str,
            default=ServerArgs.disaggregation_transfer_backend,
            choices=["mooncake", "nixl"],
            help="The backend for disaggregation transfer. Default is mooncake.",
        )
        parser.add_argument(
            "--disaggregation-ib-device",
            type=str,
            default=ServerArgs.disaggregation_ib_device,
            help="The ib device for disaggregation transfer. Default is None, it will be detected automatically if using the mooncake backend.",
        )

    @classmethod
    def from_cli_args(cls, args: argparse.Namespace):
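        # The CLI exposes the long flag names (--tensor-parallel-size, etc.);
        # copy their values onto the short dataclass field names before
        # constructing the ServerArgs instance.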
        args.tp_size = args.tensor_parallel_size
        args.dp_size = args.data_parallel_size
        args.ep_size = args.expert_parallel_size
        attrs = [attr.name for attr in dataclasses.fields(cls)]
        return cls(**{attr: getattr(args, attr) for attr in attrs})

    def url(self):
        if is_valid_ipv6_address(self.host):
            return f"http://[{self.host}]:{self.port}"
        else:
            return f"http://{self.host}:{self.port}"
Lianmin Zheng's avatar
Lianmin Zheng committed
1245

1246
1247
1248
1249
1250
    def check_server_args(self):
        assert (
            self.tp_size % self.nnodes == 0
        ), "tp_size must be divisible by number of nodes"
        assert not (
            self.dp_size > 1 and self.nnodes != 1 and not self.enable_dp_attention
        ), "multi-node data parallel is not supported unless dp attention is enabled!"
        assert (
            self.max_loras_per_batch > 0
            # FIXME
            and (self.lora_paths is None or self.disable_radix_cache)
        ), "compatibility of lora with cuda graph and radix attention is in progress"
        assert self.base_gpu_id >= 0, "base_gpu_id must be non-negative"
        assert self.gpu_id_step >= 1, "gpu_id_step must be positive"

        if isinstance(self.lora_paths, list):
            lora_paths = self.lora_paths
            self.lora_paths = {}
            for lora_path in lora_paths:
                if "=" in lora_path:
                    name, path = lora_path.split("=", 1)
                    self.lora_paths[name] = path
                else:
                    self.lora_paths[lora_path] = lora_path


def prepare_server_args(argv: List[str]) -> ServerArgs:
    """
    Prepare the server arguments from the command line arguments.

    Args:
        argv: The command line arguments. Typically, it should be `sys.argv[1:]`
            to ensure compatibility with `parse_args` when no arguments are passed.

    Returns:
        The server arguments.
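
    Example (illustrative; the model path is hypothetical):
        server_args = prepare_server_args(["--model-path", "my-org/my-model"])
        print(server_args.url())  # e.g. http://127.0.0.1:30000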
    """
    parser = argparse.ArgumentParser()
    ServerArgs.add_cli_args(parser)
    raw_args = parser.parse_args(argv)
    server_args = ServerArgs.from_cli_args(raw_args)
    return server_args


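# Offset added to the server port to derive the TCP port used for ZMQ
# communication when DP attention is enabled (see PortArgs.init_new below).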
ZMQ_TCP_PORT_DELTA = 233


@dataclasses.dataclass
class PortArgs:
    # The ipc filename for tokenizer to receive inputs from detokenizer (zmq)
    tokenizer_ipc_name: str
    # The ipc filename for scheduler (rank 0) to receive inputs from tokenizer (zmq)
    scheduler_input_ipc_name: str
    # The ipc filename for detokenizer to receive inputs from scheduler (zmq)
    detokenizer_ipc_name: str

    # The port for nccl initialization (torch.dist)
    nccl_port: int

    # The ipc filename for rpc call between Engine and Scheduler
    rpc_ipc_name: str

    @staticmethod
    def init_new(server_args, dp_rank: Optional[int] = None) -> "PortArgs":
        port = server_args.port + random.randint(100, 1000)
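        # Probe for a free port for NCCL init: start from a randomized offset,
        # step up by 42 while below 60000, then step back down by 43.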
        while True:
            if is_port_available(port):
                break
            if port < 60000:
                port += 42
            else:
                port -= 43

        if not server_args.enable_dp_attention:
            # Normal case, use IPC within a single node
            return PortArgs(
                tokenizer_ipc_name=f"ipc://{tempfile.NamedTemporaryFile(delete=False).name}",
                scheduler_input_ipc_name=f"ipc://{tempfile.NamedTemporaryFile(delete=False).name}",
                detokenizer_ipc_name=f"ipc://{tempfile.NamedTemporaryFile(delete=False).name}",
                nccl_port=port,
                rpc_ipc_name=f"ipc://{tempfile.NamedTemporaryFile(delete=False).name}",
            )
        else:
            # DP attention. Use TCP + port to handle both single-node and multi-node.
            if server_args.nnodes == 1 and server_args.dist_init_addr is None:
                dist_init_addr = ("127.0.0.1", server_args.port + ZMQ_TCP_PORT_DELTA)
            elif server_args.dist_init_addr.startswith("["):  # ipv6 address
                port_num, host = configure_ipv6(server_args.dist_init_addr)
                dist_init_addr = (host, str(port_num))
            else:
                dist_init_addr = server_args.dist_init_addr.split(":")
            assert (
                len(dist_init_addr) == 2
            ), "please provide --dist-init-addr as host:port of head node"

            dist_init_host, dist_init_port = dist_init_addr
            port_base = int(dist_init_port) + 1
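            # ZMQ port layout relative to port_base: +0 tokenizer, +1
            # detokenizer, +2 Engine<->Scheduler rpc, and +3 (plus 1 + dp_rank
            # per DP worker) for the scheduler input.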
            if dp_rank is None:
                scheduler_input_port = (
                    port_base + 3
                )  # TokenizerManager to DataParallelController
            else:
                scheduler_input_port = port_base + 3 + 1 + dp_rank

            return PortArgs(
                tokenizer_ipc_name=f"tcp://{dist_init_host}:{port_base}",
                scheduler_input_ipc_name=f"tcp://{dist_init_host}:{scheduler_input_port}",
                detokenizer_ipc_name=f"tcp://{dist_init_host}:{port_base + 1}",
                nccl_port=port,
                rpc_ipc_name=f"tcp://{dist_init_host}:{port_base + 2}",
            )


class LoRAPathAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, {})
        for lora_path in values:
            if "=" in lora_path:
                name, path = lora_path.split("=", 1)
                getattr(namespace, self.dest)[name] = path
            else:
                getattr(namespace, self.dest)[lora_path] = lora_path
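
# Example (illustrative; adapter names and paths hypothetical): with
#   parser.add_argument("--lora-paths", nargs="*", action=LoRAPathAction)
# passing `--lora-paths sql=/ckpts/sql-lora chat` yields
#   {"sql": "/ckpts/sql-lora", "chat": "chat"}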


class DeprecatedAction(argparse.Action):
    def __init__(self, option_strings, dest, nargs=0, **kwargs):
        super(DeprecatedAction, self).__init__(
            option_strings, dest, nargs=nargs, **kwargs
        )

    def __call__(self, parser, namespace, values, option_string=None):
        raise ValueError(self.help)
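
# Example (illustrative; the deprecated flag name is hypothetical): registering
# a removed flag with DeprecatedAction makes any use of it fail with a
# migration hint:
#   parser.add_argument(
#       "--disable-flashinfer",
#       action=DeprecatedAction,
#       help="--disable-flashinfer is deprecated; use --attention-backend instead.",
#   )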


def get_model_arch(args: ServerArgs):
    hf_config = get_config(
        args.model_path,
        trust_remote_code=args.trust_remote_code,
        revision=args.revision,
        model_override_args=json.loads(args.json_model_override_args),
    )
    return hf_config.architectures[0]
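
# Example (illustrative): get_model_arch(server_args) returns a string such as
# "LlamaForCausalLM", which auto_choose_speculative_params() switches on below.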


def auto_choose_speculative_params(arch: str):
    """
    Automatically choose the parameters for speculative decoding.

    You can tune them on your own models and prompts with scripts/playground/bench_speculative.py
    """
    if arch in ["LlamaForCausalLM"]:
        # The default value for llama
        return (5, 4, 8)
    elif arch in ["DeepseekV3ForCausalLM", "DeepseekV2ForCausalLM"]:
        # The default value for deepseek
        return (5, 4, 8)
    elif arch in ["Grok1ForCausalLM", "Grok1VForCausalLM"]:
        return (5, 4, 8)
    else:
        # The default value for all other models
        return (5, 4, 8)