Unverified Commit c3948ba6 authored by Ke Bao, committed by GitHub

Reorder loop in shared expert weight loading (#5719)

parent 269c457e
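
The change moves the replica loop (num_repeat) inside the suffix loop: each shared-expert weight name is now formatted once per suffix and reused for every fused replica, instead of being rebuilt on every replica iteration. Below is a minimal, self-contained sketch of the reordered nesting; the suffix values, replica count, and the appended target names are illustrative stand-ins rather than the actual SGLang weight-loading code, and only the loop order mirrors the diff.

suffix_list = ["gate_proj.weight", "up_proj.weight", "down_proj.weight"]  # illustrative suffixes
n_share_experts_fusion = 2  # illustrative replica count
# Placeholder "tensors" keyed by the shared-expert weight names.
weights_dict = {
    f"model.layers.0.mlp.shared_experts.{s}": f"<tensor for {s}>" for s in suffix_list
}
weights_list = []
names_to_remove = []

# Reordered loops: build the shared-expert weight name once per suffix,
# then append one entry per fused replica that reuses the same tensor.
for suffix in suffix_list:
    shared_expert_weight_name = f"model.layers.0.mlp.shared_experts.{suffix}"
    for num_repeat in range(n_share_experts_fusion):
        weights_list.append(
            (
                # Hypothetical replicated-expert name; the real target name is
                # elided in the diff below.
                f"model.layers.0.mlp.experts.replica_{num_repeat}.{suffix}",
                weights_dict[shared_expert_weight_name],
            )
        )
    names_to_remove.append(shared_expert_weight_name)

print(len(weights_list))  # 6 entries: 3 suffixes x 2 replicas

With the previous nesting (num_repeat outermost), the shared_expert_weight_name f-string was re-evaluated for every replica of every suffix; after the reorder it is computed once per suffix.
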
@@ -215,11 +215,11 @@ class DeepseekV3ForCausalLMNextN(DeepseekV3ForCausalLM):
                 "up_proj.weight_scale_inv",
             ]
             names_to_remove = []
-            for num_repeat in range(self.n_share_experts_fusion):
             for suffix in suffix_list:
                 shared_expert_weight_name = (
                     f"model.layers.0.mlp.shared_experts.{suffix}"
                 )
+                for num_repeat in range(self.n_share_experts_fusion):
                     weights_list.append(
                         (
                             f"model.layers.0."
......
@@ -1650,11 +1650,11 @@ class DeepseekV2ForCausalLM(nn.Module):
                 desc=f"Cloning {self.n_share_experts_fusion} "
                 "replicas of the shared expert into MoE",
             ):
-                for num_repeat in range(self.n_share_experts_fusion):
                 for suffix in suffix_list:
                     shared_expert_weight_name = (
                         f"model.layers.{moe_layer}.mlp.shared_experts.{suffix}"
                     )
+                    for num_repeat in range(self.n_share_experts_fusion):
                         weights_list.append(
                             (
                                 f"model.layers.{moe_layer}."
......