# TODO: the moe_fused_gate kernel does not support n_share_experts_fusion > 0 yet.
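# Dispatch to the fused CUDA gating kernel only when all of the following constraints hold.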
if (
    _is_cuda
    and gating_output.shape[1] // num_expert_group
    <= 32  # The moe_fused_gate kernel currently requires num_experts / num_expert_group <= MAX_VPT=32. Once the kernel can handle MAX_VPT > 32, this check can be removed.