Unverified Commit 81964328 authored by Cheng Wan, committed by GitHub


Set `num_fused_shared_experts` as `num_shared_experts` when shared_experts fusion is not disabled (#6736)
parent f0f84975
@@ -51,8 +51,8 @@ def moe_fused_gate(
     # the #experts is decided by the input tensor shape and we currently only support power of 2 #experts
     # and #experts should be divisible by num_expert_group. #expert/num_expert_group <= 32 is limited for now.
     # for non-supported case, we suggest to use the biased_grouped_topk func in sglang.srt.layers.moe.topk
-    # num_fused_shared_experts: if > 0, the last expert will be replaced with a round-robin shared expert
-    # routed_scaling_factor: if > 0, the last expert will be scaled by this factor
+    # num_fused_shared_experts: if > 0, the last several experts will be replaced with shared experts
+    # routed_scaling_factor: if > 0, the shared experts will be scaled by this factor
     return torch.ops.sgl_kernel.moe_fused_gate.default(
         input_tensor,
         bias,
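For reference, a minimal sketch of how the updated gate might be called with more than one fused shared expert. The import path, keyword names, and example values are assumptions based on this diff and the test change below, not guaranteed by it; running it requires a CUDA device with `sgl-kernel` installed.

```python
import torch

from sgl_kernel import moe_fused_gate  # assumed import path

seq_len, num_experts = 4, 256                 # 256 is a power of 2, as the comment requires
num_expert_group, topk_group, topk = 8, 4, 8  # 256 / 8 = 32 experts per group (<= 32 limit)
num_fused_shared_experts = 2                  # e.g. two shared experts fused into the gate

scores = torch.rand(seq_len, num_experts, dtype=torch.bfloat16, device="cuda")
bias = torch.rand(num_experts, dtype=torch.bfloat16, device="cuda")

# One extra top-k slot per fused shared expert, mirroring the test change below.
output, indices = moe_fused_gate(
    scores,
    bias,
    num_expert_group=num_expert_group,
    topk_group=topk_group,
    topk=topk + num_fused_shared_experts,
    num_fused_shared_experts=num_fused_shared_experts,
    routed_scaling_factor=2.5,  # assumed example value; per the comment, scales the shared experts
)
print(output.shape, indices.shape)  # expected (4, 10) for both, under these assumptions
```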
@@ -19,7 +19,7 @@ from sglang.srt.layers.moe.topk import biased_grouped_topk
         (512, 16, 8, 16),
     ],
 )
-@pytest.mark.parametrize("num_fused_shared_experts", [0, 1])
+@pytest.mark.parametrize("num_fused_shared_experts", [0, 1, 2])
 def test_moe_fused_gate_combined(seq_length, dtype, params, num_fused_shared_experts):
     num_experts, num_expert_group, topk_group, topk = params
@@ -27,7 +27,7 @@ def test_moe_fused_gate_combined(seq_length, dtype, params, num_fused_shared_exp
     tensor = torch.rand((seq_length, num_experts)).to(dtype).cuda()
     scores = tensor.clone()
     bias = torch.rand(num_experts).to(dtype).cuda()
-    topk = topk + min(1, num_fused_shared_experts)
+    topk = topk + num_fused_shared_experts
     output, indices = moe_fused_gate(
         tensor,
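The test now widens `topk` by the full `num_fused_shared_experts` rather than by at most one, so each fused shared expert gets its own top-k slot. A hedged continuation of the sketch above showing the resulting shape expectation (the assumption being that the returned tensors match the requested top-k width):

```python
# Every fused shared expert adds one slot, so the gate's outputs widen
# from `topk` to `topk + num_fused_shared_experts`.
effective_topk = topk + num_fused_shared_experts
assert output.shape == (seq_len, effective_topk)
assert indices.shape == (seq_len, effective_topk)
```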