"vscode:/vscode.git/clone" did not exist on "fbe2fe55785f9873cf582ab27d6489eab7d7b922"
Unverified commit 55561e25 authored by JieXin Liang, committed by GitHub

[fix] fix determine_num_fused_shared_experts (#7180)

parent 44733203
@@ -1709,53 +1709,35 @@ class DeepseekV2ForCausalLM(nn.Module):
     def determine_num_fused_shared_experts(
         self, architecture: str = "DeepseekV3ForCausalLM"
     ):
-        self.num_fused_shared_experts = (
-            0
-            if global_server_args_dict["disable_shared_experts_fusion"]
-            else self.config.n_shared_experts
-        )
-        if self.num_fused_shared_experts > 0:
-            # Only Deepseek V3/R1 can use shared experts fusion optimization now.
-            if (
-                not _is_cuda
-                or self.config.architectures[0] != architecture
-                or self.config.n_routed_experts != 256
-            ):
-                self.num_fused_shared_experts = 0
-                global_server_args_dict["disable_shared_experts_fusion"] = True
-                log_info_on_rank0(
-                    logger,
-                    "Only Deepseek V3/R1 on NV-platform can use shared experts fusion optimization. Shared experts fusion optimization is disabled.",
-                )
-            elif (
-                global_server_args_dict["enable_deepep_moe"]
-                or global_server_args_dict["enable_ep_moe"]
-            ):
-                self.num_fused_shared_experts = 0
-                global_server_args_dict["disable_shared_experts_fusion"] = True
-                log_info_on_rank0(
-                    logger,
-                    "Deepseek V3/R1 can not use shared experts fusion optimization when in deepep_moe or ep_moe mode. Shared experts fusion optimization is disabled.",
-                )
-        elif self.num_fused_shared_experts == 0:
-            if (
-                _is_cuda
-                and torch.cuda.get_device_capability("cuda") >= (9, 0)
-                and self.config.architectures[0] == architecture
-                and self.config.n_routed_experts == 256
-                and (
-                    not (
-                        global_server_args_dict["enable_deepep_moe"]
-                        or global_server_args_dict["enable_ep_moe"]
-                    )
-                )
-            ):
-                self.num_fused_shared_experts = self.config.n_shared_experts
-                global_server_args_dict["disable_shared_experts_fusion"] = False
-                log_info_on_rank0(
-                    logger,
-                    "Deepseek V3/R1 with fp8/fp4 can use shared experts fusion optimization when SM version >=90. Shared experts fusion optimization is enabled.",
-                )
+        self.num_fused_shared_experts = 0
+        if global_server_args_dict["disable_shared_experts_fusion"]:
+            return
+
+        # Only Deepseek V3/R1 can use shared experts fusion optimization now.
+        disable_reason = None
+        if (
+            not _is_cuda
+            or torch.cuda.get_device_capability("cuda") < (9, 0)
+            or self.config.architectures[0] != architecture
+            or self.config.n_routed_experts != 256
+            or self.config.n_shared_experts != 1
+        ):
+            disable_reason = "Only Deepseek V3/R1 on NV-platform with capability >= 90 can use shared experts fusion optimization."
+        elif (
+            global_server_args_dict["enable_deepep_moe"]
+            or global_server_args_dict["enable_ep_moe"]
+        ):
+            disable_reason = "Deepseek V3/R1 can not use shared experts fusion optimization when in deepep_moe or ep_moe mode."
+
+        if disable_reason is not None:
+            global_server_args_dict["disable_shared_experts_fusion"] = True
+            log_info_on_rank0(
+                logger,
+                f"{disable_reason} Shared experts fusion optimization is disabled.",
+            )
+            return
+
+        self.num_fused_shared_experts = self.config.n_shared_experts
 
     def get_input_embeddings(self) -> nn.Embedding:
         return self.model.embed_tokens
...
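For reference, below is a minimal standalone sketch of the decision flow this patch introduces: an early return when fusion is explicitly disabled, a single disable_reason collected from the hardware/config and MoE-mode checks, and fusion enabled only when no reason was set. The config namespace, the simplified global_server_args_dict, the sm_capability/is_cuda parameters, and the print-based logging are stand-ins for illustration only, not the real SGLang objects or API.

from types import SimpleNamespace

# Simplified stand-in for SGLang's server-args dictionary (assumption, not the real object).
global_server_args_dict = {
    "disable_shared_experts_fusion": False,
    "enable_deepep_moe": False,
    "enable_ep_moe": False,
}

def determine_num_fused_shared_experts(
    config,
    is_cuda=True,
    sm_capability=(9, 0),
    architecture="DeepseekV3ForCausalLM",
):
    """Mirror the patched control flow and return the number of fused shared experts."""
    # Early exit: fusion was explicitly disabled by the user.
    if global_server_args_dict["disable_shared_experts_fusion"]:
        return 0

    # Collect at most one reason to disable the optimization.
    disable_reason = None
    if (
        not is_cuda
        or sm_capability < (9, 0)
        or config.architectures[0] != architecture
        or config.n_routed_experts != 256
        or config.n_shared_experts != 1
    ):
        disable_reason = (
            "Only Deepseek V3/R1 on NV-platform with capability >= 90 "
            "can use shared experts fusion optimization."
        )
    elif (
        global_server_args_dict["enable_deepep_moe"]
        or global_server_args_dict["enable_ep_moe"]
    ):
        disable_reason = (
            "Deepseek V3/R1 can not use shared experts fusion optimization "
            "when in deepep_moe or ep_moe mode."
        )

    if disable_reason is not None:
        # Record the decision and report it once, then bail out.
        global_server_args_dict["disable_shared_experts_fusion"] = True
        print(f"{disable_reason} Shared experts fusion optimization is disabled.")
        return 0

    # All checks passed: fuse the model's shared experts into the routed-expert kernel.
    return config.n_shared_experts

# Example: a DeepSeek-V3-like config (256 routed experts, 1 shared expert) enables fusion.
cfg = SimpleNamespace(
    architectures=["DeepseekV3ForCausalLM"], n_routed_experts=256, n_shared_experts=1
)
print(determine_num_fused_shared_experts(cfg))  # -> 1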