import torch


def moe_align_block_size(
    topk_ids,
    num_experts,
    block_size,
    sorted_token_ids,
    experts_ids,
    num_tokens_post_pad,
    token_cnts_buffer,
    cumsum_buffer,
):
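    """Group token indices by expert and pad each expert's segment to a
    multiple of ``block_size`` so the fused MoE kernels can process
    fixed-size blocks.

    Results are written in place: ``sorted_token_ids`` receives the token
    indices ordered by expert (with padding), ``experts_ids`` the expert
    assigned to each block, and ``num_tokens_post_pad`` the total token
    count after padding. ``token_cnts_buffer`` and ``cumsum_buffer`` are
    scratch buffers for per-expert counts and their prefix sums.
    """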
    torch.ops.sgl_kernel.moe_align_block_size(
        topk_ids,
        num_experts,
        block_size,
        sorted_token_ids,
        experts_ids,
        num_tokens_post_pad,
        token_cnts_buffer,
        cumsum_buffer,
    )
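

if __name__ == "__main__":
    # Rough usage sketch (an illustration, not part of the original file).
    # The buffer sizes below follow the convention used by fused MoE
    # kernels, where each expert's segment may need up to block_size - 1
    # padding slots; treat them as assumptions. Running this requires a
    # CUDA device and the sgl_kernel extension that registers
    # torch.ops.sgl_kernel.moe_align_block_size.
    num_tokens, top_k, num_experts, block_size = 8, 2, 4, 16
    device = "cuda"

    # Simulated router output: for each token, the ids of its top-k experts.
    topk_ids = torch.randint(
        0, num_experts, (num_tokens, top_k), dtype=torch.int32, device=device
    )

    # Worst case: every expert's segment needs block_size - 1 padding slots.
    max_num_tokens_padded = topk_ids.numel() + num_experts * (block_size - 1)
    max_num_blocks = (max_num_tokens_padded + block_size - 1) // block_size

    sorted_token_ids = torch.empty(
        max_num_tokens_padded, dtype=torch.int32, device=device
    )
    experts_ids = torch.empty(max_num_blocks, dtype=torch.int32, device=device)
    num_tokens_post_pad = torch.empty(1, dtype=torch.int32, device=device)

    # Scratch space for per-expert token counts and their prefix sums
    # (sizes assumed from the fused MoE convention).
    token_cnts_buffer = torch.zeros(
        (num_experts + 1) * num_experts, dtype=torch.int32, device=device
    )
    cumsum_buffer = torch.zeros(num_experts + 1, dtype=torch.int32, device=device)

    moe_align_block_size(
        topk_ids,
        num_experts,
        block_size,
        sorted_token_ids,
        experts_ids,
        num_tokens_post_pad,
        token_cnts_buffer,
        cumsum_buffer,
    )
    print("tokens after padding:", num_tokens_post_pad.item())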