Unverified Commit d88ef4a3 authored by Jinyan Chen's avatar Jinyan Chen Committed by GitHub
Browse files

limit sgl-kernel causal conv1d to cuda only (#10648)


Co-authored-by: Jinyan Chen <jinyanc@nvidia.com>
parent 6f993e8b
...@@ -13,9 +13,6 @@ from sglang.srt.layers.attention.fla.fused_recurrent import ( ...@@ -13,9 +13,6 @@ from sglang.srt.layers.attention.fla.fused_recurrent import (
from sglang.srt.layers.attention.fla.fused_sigmoid_gating_recurrent import ( from sglang.srt.layers.attention.fla.fused_sigmoid_gating_recurrent import (
fused_sigmoid_gating_delta_rule_update, fused_sigmoid_gating_delta_rule_update,
) )
from sglang.srt.layers.attention.mamba.causal_conv1d import (
causal_conv1d_fn as causal_conv1d_fn_sgl,
)
from sglang.srt.layers.attention.mamba.causal_conv1d_triton import ( from sglang.srt.layers.attention.mamba.causal_conv1d_triton import (
causal_conv1d_fn, causal_conv1d_fn,
causal_conv1d_update, causal_conv1d_update,
...@@ -26,9 +23,15 @@ from sglang.srt.model_executor.forward_batch_info import ForwardBatch, ForwardMo ...@@ -26,9 +23,15 @@ from sglang.srt.model_executor.forward_batch_info import ForwardBatch, ForwardMo
from sglang.srt.model_executor.model_runner import ModelRunner from sglang.srt.model_executor.model_runner import ModelRunner
from sglang.srt.models.qwen3_next import Qwen3HybridLinearDecoderLayer, fused_gdn_gating from sglang.srt.models.qwen3_next import Qwen3HybridLinearDecoderLayer, fused_gdn_gating
from sglang.srt.speculative.eagle_utils import EagleDraftInput, EagleVerifyInput from sglang.srt.speculative.eagle_utils import EagleDraftInput, EagleVerifyInput
from sglang.srt.utils import is_npu from sglang.srt.utils import is_cuda, is_npu
if is_cuda():
from sglang.srt.layers.attention.mamba.causal_conv1d import (
causal_conv1d_fn as causal_conv1d_fn_cuda,
)
if is_npu(): causal_conv1d_fn = causal_conv1d_fn_cuda
elif is_npu():
from sgl_kernel_npu.fla.chunk import chunk_gated_delta_rule_npu from sgl_kernel_npu.fla.chunk import chunk_gated_delta_rule_npu
from sgl_kernel_npu.fla.fused_sigmoid_gating_recurrent import ( from sgl_kernel_npu.fla.fused_sigmoid_gating_recurrent import (
fused_sigmoid_gating_delta_rule_update_npu, fused_sigmoid_gating_delta_rule_update_npu,
...@@ -350,7 +353,7 @@ class MambaAttnBackend(AttentionBackend): ...@@ -350,7 +353,7 @@ class MambaAttnBackend(AttentionBackend):
mixed_qkv_processed.transpose(1, 2).contiguous().view(seq_len, -1) mixed_qkv_processed.transpose(1, 2).contiguous().view(seq_len, -1)
) )
else: else:
mixed_qkv = causal_conv1d_fn_sgl( mixed_qkv = causal_conv1d_fn(
mixed_qkv.transpose(0, 1), mixed_qkv.transpose(0, 1),
conv_weights, conv_weights,
bias, bias,
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment