import torch


def moe_align_block_size(
    topk_ids,
    num_experts,
    block_size,
    sorted_token_ids,
    experts_ids,
    num_tokens_post_pad,
    token_cnts_buffer,
    cumsum_buffer,
):
    torch.ops.sgl_kernel.moe_align_block_size.default(
        topk_ids,
        num_experts,
        block_size,
        sorted_token_ids,
        experts_ids,
        num_tokens_post_pad,
        token_cnts_buffer,
        cumsum_buffer,
    )


def topk_softmax(
    topk_weights: torch.Tensor,
    topk_ids: torch.Tensor,
    token_expert_indices: torch.Tensor,
    gating_output: torch.Tensor,
) -> None:
    torch.ops.sgl_kernel.topk_softmax.default(
        topk_weights, topk_ids, token_expert_indices, gating_output
    )


def moe_fused_gate(
    input_tensor,
    bias,
    num_expert_group,
    topk_group,
    topk,
    n_share_experts_fusion=0,
    routed_scaling_factor=0,
):
    # This fused kernel selects the top-k experts in a hierarchical, two-level fashion:
    # it splits the experts into num_expert_group groups, uses the sum of the top-2
    # expert weights in each group as the group weight to select topk_group groups,
    # and then selects the top-k experts within the selected groups.
    # The number of experts is inferred from the input tensor shape. Currently only a
    # power-of-two number of experts is supported, the expert count must be divisible
    # by num_expert_group, and num_experts / num_expert_group <= 32.
    # For unsupported configurations, use biased_grouped_topk in
    # sglang.srt.layers.moe.topk instead.
    # n_share_experts_fusion: if > 0, the last expert is replaced with a round-robin
    # shared expert.
    # routed_scaling_factor: if > 0, the last expert is scaled by this factor.
    return torch.ops.sgl_kernel.moe_fused_gate.default(
        input_tensor,
        bias,
        num_expert_group,
        topk_group,
        topk,
        n_share_experts_fusion,
        routed_scaling_factor,
    )


def fp8_blockwise_scaled_grouped_mm(
    output,
    a_ptrs,
    b_ptrs,
    out_ptrs,
    a_scales_ptrs,
    b_scales_ptrs,
    a,
    b,
    scales_a,
    scales_b,
    stride_a,
    stride_b,
    stride_c,
    layout_sfa,
    layout_sfb,
    problem_sizes,
    expert_offsets,
    workspace,
):
    torch.ops.sgl_kernel.fp8_blockwise_scaled_grouped_mm.default(
        output,
        a_ptrs,
        b_ptrs,
        out_ptrs,
        a_scales_ptrs,
        b_scales_ptrs,
        a,
        b,
        scales_a,
        scales_b,
        stride_a,
        stride_b,
        stride_c,
        layout_sfa,
        layout_sfb,
        problem_sizes,
        expert_offsets,
        workspace,
    )


def prepare_moe_input(
    topk_ids,
    expert_offsets,
    problem_sizes1,
    problem_sizes2,
    input_permutation,
    output_permutation,
    num_experts,
    n,
    k,
):
    torch.ops.sgl_kernel.prepare_moe_input.default(
        topk_ids,
        expert_offsets,
        problem_sizes1,
        problem_sizes2,
        input_permutation,
        output_permutation,
        num_experts,
        n,
        k,
    )
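

# A minimal usage sketch for moe_fused_gate, assuming a DeepSeek-V3-style routing
# configuration (256 experts in 8 groups, top-8 experts drawn from the top-4 groups).
# The shapes, dtypes, device placement, and the assumption that the op returns a
# (topk_weights, topk_ids) pair are illustrative guesses, not the kernel's
# authoritative contract; running it requires the sgl_kernel extension and a CUDA device.
if __name__ == "__main__":
    num_tokens, num_experts = 16, 256
    num_expert_group, topk_group, topk = 8, 4, 8  # 256 / 8 = 32 experts per group (<= 32 limit)

    # Router logits and per-expert correction bias (both assumed float32 here).
    gating_output = torch.randn(num_tokens, num_experts, dtype=torch.float32, device="cuda")
    correction_bias = torch.zeros(num_experts, dtype=torch.float32, device="cuda")

    topk_weights, topk_ids = moe_fused_gate(
        gating_output,
        correction_bias,
        num_expert_group,
        topk_group,
        topk,
    )
    # Assumed output shapes: both [num_tokens, topk]; topk_ids indexes into the experts.
    print(topk_weights.shape, topk_ids.shape)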