Unverified Commit b57d87c2 authored by fzyzcjy's avatar fzyzcjy Committed by GitHub
Browse files

Fix shared experts fusion + weight requant (#7177)

parent 98538822
...@@ -1960,14 +1960,15 @@ class DeepseekV2ForCausalLM(nn.Module): ...@@ -1960,14 +1960,15 @@ class DeepseekV2ForCausalLM(nn.Module):
) )
if layer_id in moe_layers: if layer_id in moe_layers:
shared_experts = layer.mlp.shared_experts shared_experts = getattr(layer.mlp, "shared_experts", None)
for module in [ if shared_experts is not None:
shared_experts.gate_up_proj, for module in [
shared_experts.down_proj, shared_experts.gate_up_proj,
]: shared_experts.down_proj,
requant_weight_ue8m0_inplace( ]:
module.weight, module.weight_scale_inv, weight_block_size requant_weight_ue8m0_inplace(
) module.weight, module.weight_scale_inv, weight_block_size
)
experts = layer.mlp.experts experts = layer.mlp.experts
if isinstance(experts, DeepEPMoE): if isinstance(experts, DeepEPMoE):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment