f"Shared experts fusion optimization is default enabled in DeepSeek V3/R1, and n_share_experts_fusion is set to {self.tp_size}. You can tune it by setting --n_share_experts_fusion or disable it by setting --disable_shared_experts_fusion."
# GPU memory is not known yet or no GPU is available.
gpu_mem = None

if is_hip():
    self.disable_shared_experts_fusion = True

# Set mem_fraction_static, which depends on the tensor parallelism size
if self.mem_fraction_static is None:
    if self.tp_size >= 16:
...
...
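The defaulting rule described by the log message above can be summarized in a small sketch; the function name and signature here are illustrative and are not the actual ServerArgs code:

def resolve_n_share_experts_fusion(
    n_share_experts_fusion,
    tp_size: int,
    disable_shared_experts_fusion: bool,
) -> int:
    # Sketch of the defaulting rule: --disable-shared-experts-fusion forces 0,
    # an unset value falls back to tp_size, and an explicit value is kept as-is.
    if disable_shared_experts_fusion:
        return 0
    if n_share_experts_fusion is None:
        return tp_size
    return n_share_experts_fusion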
@@ -1102,6 +1107,19 @@ class ServerArgs:
help="Select the mode when enable DeepEP MoE, could be `normal`, `low_latency` or `auto`. Default is `auto`, which means `low_latency` for decode batch and `normal` for prefill batch.",
)
parser.add_argument(
"--n-share-experts-fusion",
type=int,
default=None,
help="The number of shared_experts need to be replica to fuse with normal experts in deepseek v3/r1 "
"we use tp_size by default.",
)
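# Hypothetical sanity check (not part of this patch): argparse converts the
# dashes in "--n-share-experts-fusion" to underscores, so the parsed value is
# read back as args.n_share_experts_fusion, defaulting to None when unset.
#
#   check = argparse.ArgumentParser()
#   check.add_argument("--n-share-experts-fusion", type=int, default=None)
#   assert check.parse_args(["--n-share-experts-fusion", "8"]).n_share_experts_fusion == 8
#   assert check.parse_args([]).n_share_experts_fusion is None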
parser.add_argument(
"--disable-shared-experts-fusion",
action="store_true",
help="Disable shared experts fusion by setting n_share_experts_fusion to 0.",