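# Top-level exports for the sgl_kernel extension package: preload the CUDA
# runtime when available, then re-export the compiled kernel bindings.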
import ctypes
import os
import platform

import torch

SYSTEM_ARCH = platform.machine()

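# If a CUDA 12 runtime is installed at the standard target path for this
# architecture, dlopen it with RTLD_GLOBAL so the compiled extension modules
# imported below can resolve the cudart symbols.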
cuda_path = f"/usr/local/cuda/targets/{SYSTEM_ARCH}-linux/lib/libcudart.so.12"
if os.path.exists(cuda_path):
    ctypes.CDLL(cuda_path, mode=ctypes.RTLD_GLOBAL)

from sgl_kernel import common_ops
from sgl_kernel.allreduce import *
from sgl_kernel.attention import (
    cutlass_mla_decode,
    cutlass_mla_get_workspace_size,
    lightning_attention_decode,
    merge_state,
    merge_state_v2,
)
from sgl_kernel.cutlass_moe import cutlass_w4a8_moe_mm, get_cutlass_w4a8_moe_mm_data
from sgl_kernel.elementwise import (
    FusedSetKVBufferArg,
    apply_rope_with_cos_sin_cache_inplace,
    fused_add_rmsnorm,
    gelu_and_mul,
    gelu_tanh_and_mul,
    gemma_fused_add_rmsnorm,
    gemma_rmsnorm,
    rmsnorm,
    silu_and_mul,
)

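# gelu_quick is only exported by the ROCm (HIP) build of the elementwise kernels.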
if torch.version.hip is not None:
    from sgl_kernel.elementwise import gelu_quick

from sgl_kernel.fused_moe import fused_marlin_moe
from sgl_kernel.gemm import (
    awq_dequantize,
    bmm_fp8,
    cutlass_scaled_fp4_mm,
    dsv3_fused_a_gemm,
    dsv3_router_gemm,
    fp8_blockwise_scaled_mm,
    fp8_scaled_mm,
    gptq_gemm,
    gptq_marlin_gemm,
    gptq_shuffle,
    int8_scaled_mm,
    qserve_w4a8_per_chn_gemm,
    qserve_w4a8_per_group_gemm,
    scaled_fp4_experts_quant,
    scaled_fp4_quant,
    sgl_per_tensor_quant_fp8,
    sgl_per_token_group_quant_fp8,
    sgl_per_token_group_quant_int8,
    sgl_per_token_quant_fp8,
    shuffle_rows,
)
from sgl_kernel.grammar import apply_token_bitmask_inplace_cuda
from sgl_kernel.kvcacheio import (
    transfer_kv_all_layer,
    transfer_kv_all_layer_mla,
    transfer_kv_per_layer,
    transfer_kv_per_layer_mla,
)
from sgl_kernel.marlin import (
    awq_marlin_moe_repack,
    awq_marlin_repack,
    gptq_marlin_repack,
)
from sgl_kernel.memory import set_kv_buffer_kernel
from sgl_kernel.moe import (
    apply_shuffle_mul_sum,
    cutlass_fp4_group_mm,
    ep_moe_post_reorder,
    ep_moe_pre_reorder,
    ep_moe_silu_and_mul,
    fp8_blockwise_scaled_grouped_mm,
    moe_align_block_size,
    moe_fused_gate,
    prepare_moe_input,
    topk_softmax,
)
from sgl_kernel.sampling import (
    min_p_sampling_from_probs,
    top_k_mask_logits,
    top_k_renorm_prob,
    top_k_top_p_sampling_from_logits,
    top_k_top_p_sampling_from_probs,
    top_p_renorm_prob,
    top_p_sampling_from_probs,
)


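# Lazy wrappers: defer importing sgl_kernel.spatial until these helpers are
# actually called, so that importing sgl_kernel does not pull in the spatial
# (green-context) ops eagerly.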
def create_greenctx_stream_by_value(*args, **kwargs):
    from sgl_kernel.spatial import create_greenctx_stream_by_value as _impl

    return _impl(*args, **kwargs)


def get_sm_available(*args, **kwargs):
    from sgl_kernel.spatial import get_sm_available as _impl

    return _impl(*args, **kwargs)


from sgl_kernel.speculative import (
    build_tree_kernel_efficient,
    segment_packbits,
    tree_speculative_sampling_target_only,
    verify_tree_greedy,
)
from sgl_kernel.top_k import fast_topk
from sgl_kernel.version import __version__