import torch


def moe_align_block_size(
    topk_ids: torch.Tensor,
    num_experts: int,
    block_size: int,
    sorted_token_ids: torch.Tensor,
    experts_ids: torch.Tensor,
    num_tokens_post_pad: torch.Tensor,
    token_cnts_buffer: torch.Tensor,
    cumsum_buffer: torch.Tensor,
) -> None:
    # Sorts the (token, expert) assignments in topk_ids by expert and pads each
    # expert's token count up to a multiple of block_size, so downstream grouped
    # GEMMs can operate on fixed-size blocks. Results are written in place into
    # sorted_token_ids, experts_ids, and num_tokens_post_pad; token_cnts_buffer
    # and cumsum_buffer are scratch space for the kernel.
    torch.ops.sgl_kernel.moe_align_block_size.default(
        topk_ids,
        num_experts,
        block_size,
        sorted_token_ids,
        experts_ids,
        num_tokens_post_pad,
        token_cnts_buffer,
        cumsum_buffer,
    )
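

# A minimal usage sketch (illustrative, not part of sgl_kernel): allocates the
# output and scratch buffers for moe_align_block_size with shapes that follow the
# common fused-MoE convention of padding each expert's token count up to a multiple
# of block_size. The exact buffer shapes and sentinel values are assumptions here;
# verify them against the kernel before relying on this helper.
def _example_moe_align_block_size(topk_ids, num_experts, block_size):
    device = topk_ids.device
    # Worst case: every expert needs (block_size - 1) padding slots.
    max_num_tokens_padded = topk_ids.numel() + num_experts * (block_size - 1)
    sorted_token_ids = torch.empty(
        max_num_tokens_padded, dtype=torch.int32, device=device
    )
    # Padding slots are filled with an out-of-range token index.
    sorted_token_ids.fill_(topk_ids.numel())
    max_num_blocks = (max_num_tokens_padded + block_size - 1) // block_size
    experts_ids = torch.empty(max_num_blocks, dtype=torch.int32, device=device)
    num_tokens_post_pad = torch.empty(1, dtype=torch.int32, device=device)
    token_cnts_buffer = torch.zeros(
        (num_experts + 1) * num_experts, dtype=torch.int32, device=device
    )
    cumsum_buffer = torch.zeros(num_experts + 1, dtype=torch.int32, device=device)
    moe_align_block_size(
        topk_ids,
        num_experts,
        block_size,
        sorted_token_ids,
        experts_ids,
        num_tokens_post_pad,
        token_cnts_buffer,
        cumsum_buffer,
    )
    return sorted_token_ids, experts_ids, num_tokens_post_pad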


def topk_softmax(
    topk_weights: torch.Tensor,
    topk_ids: torch.Tensor,
    token_expert_indices: torch.Tensor,
    gating_output: torch.Tensor,
) -> None:
    # Applies softmax to gating_output and writes the top-k expert weights, expert
    # ids, and per-token expert indices in place into the provided output tensors.
    torch.ops.sgl_kernel.topk_softmax.default(
        topk_weights, topk_ids, token_expert_indices, gating_output
    )
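

# A minimal usage sketch (illustrative, not part of sgl_kernel): allocates the
# output buffers that topk_softmax fills in place. The (num_tokens, topk) shapes
# and the float32/int32 dtypes are assumptions based on common fused-MoE routing
# code; verify them against the kernel before relying on this helper.
def _example_topk_softmax(gating_output, topk):
    num_tokens = gating_output.shape[0]
    device = gating_output.device
    topk_weights = torch.empty(num_tokens, topk, dtype=torch.float32, device=device)
    topk_ids = torch.empty(num_tokens, topk, dtype=torch.int32, device=device)
    token_expert_indices = torch.empty(
        num_tokens, topk, dtype=torch.int32, device=device
    )
    topk_softmax(topk_weights, topk_ids, token_expert_indices, gating_output)
    return topk_weights, topk_ids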


def moe_fused_gate(input_tensor, bias, num_expert_group, topk_group, topk):
    # This fused kernel selects the top-k experts in a hierarchical, two-layer fashion:
    # it splits the experts into num_expert_group groups, uses the sum of the top-2
    # expert weights in each group as the group weight to select topk_group expert
    # groups, and then selects the top-k experts within the selected groups.
    # The number of experts is inferred from the input tensor shape. Currently only a
    # power-of-two number of experts is supported, the number of experts must be
    # divisible by num_expert_group, and num_experts / num_expert_group <= 32 is
    # required for now. For unsupported cases, use biased_grouped_topk from
    # sglang.srt.layers.moe.topk instead.
    return torch.ops.sgl_kernel.moe_fused_gate(
        input_tensor, bias, num_expert_group, topk_group, topk
    )
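

# A pure-PyTorch reference sketch of the hierarchical selection described above.
# This is not the kernel itself, and the scoring details are assumptions (sigmoid
# scoring with an additive bias, no weight renormalization); verify against the
# moe_fused_gate kernel before using it as a drop-in replacement.
def _reference_fused_gate(input_tensor, bias, num_expert_group, topk_group, topk):
    num_tokens, num_experts = input_tensor.shape
    scores = torch.sigmoid(input_tensor.float())
    scores_with_bias = scores + bias.float()
    # Group weight = sum of the top-2 biased scores inside each group.
    grouped = scores_with_bias.view(num_tokens, num_expert_group, -1)
    group_scores = grouped.topk(2, dim=-1).values.sum(dim=-1)
    # Keep only the topk_group best groups; mask out experts in the other groups.
    group_idx = group_scores.topk(topk_group, dim=-1).indices
    group_mask = torch.zeros_like(group_scores).scatter_(1, group_idx, 1.0)
    expert_mask = (
        group_mask.unsqueeze(-1).expand_as(grouped).reshape(num_tokens, num_experts)
    )
    masked_scores = scores_with_bias.masked_fill(expert_mask == 0, float("-inf"))
    # Select the final top-k experts and return the unbiased scores as weights.
    topk_ids = masked_scores.topk(topk, dim=-1).indices
    topk_weights = scores.gather(1, topk_ids)
    return topk_weights, topk_ids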