Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
change
sglang
Commits
55561e25
Unverified
Commit
55561e25
authored
Jun 15, 2025
by
JieXin Liang
Committed by
GitHub
Jun 14, 2025
Browse files
[fix] fix determine_num_fused_shared_experts (#7180)
parent
44733203
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
29 additions
and
47 deletions
+29
-47
python/sglang/srt/models/deepseek_v2.py
python/sglang/srt/models/deepseek_v2.py
+29
-47
No files found.
python/sglang/srt/models/deepseek_v2.py
View file @
55561e25
...
@@ -1709,53 +1709,35 @@ class DeepseekV2ForCausalLM(nn.Module):
...
@@ -1709,53 +1709,35 @@ class DeepseekV2ForCausalLM(nn.Module):
def determine_num_fused_shared_experts(
    self, architecture: str = "DeepseekV3ForCausalLM"
):
    """Decide how many shared experts are fused into the routed-expert MoE path.

    Sets ``self.num_fused_shared_experts`` to ``self.config.n_shared_experts``
    when the shared-experts fusion optimization is applicable; otherwise leaves
    it at 0 and records the fusion as globally disabled (with a logged reason).

    Args:
        architecture: The only architecture currently eligible for fusion
            (Deepseek V3/R1, i.e. ``"DeepseekV3ForCausalLM"``).
    """
    # Default: no fused shared experts.
    self.num_fused_shared_experts = 0
    if global_server_args_dict["disable_shared_experts_fusion"]:
        # Fusion was explicitly disabled via server args; nothing to decide.
        return

    # Only Deepseek V3/R1 can use shared experts fusion optimization now.
    disable_reason = None
    if (
        not _is_cuda
        or torch.cuda.get_device_capability("cuda") < (9, 0)
        or self.config.architectures[0] != architecture
        or self.config.n_routed_experts != 256
    ):
        disable_reason = "Only Deepseek V3/R1 on NV-platform with capability >= 90 can use shared experts fusion optimization."
    elif (
        global_server_args_dict["enable_deepep_moe"]
        or global_server_args_dict["enable_ep_moe"]
    ):
        disable_reason = "Deepseek V3/R1 can not use shared experts fusion optimization when in deepep_moe or ep_moe mode."

    if disable_reason is not None:
        # Record the decision globally so other components also skip fusion.
        global_server_args_dict["disable_shared_experts_fusion"] = True
        log_info_on_rank0(
            logger,
            f"{disable_reason} Shared experts fusion optimization is disabled.",
        )
        return

    # All eligibility checks passed: fuse every configured shared expert.
    self.num_fused_shared_experts = self.config.n_shared_experts
def get_input_embeddings(self) -> nn.Embedding:
    """Return the token-embedding module of the wrapped model."""
    return self.model.embed_tokens
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment