sglang · Commit d88ef4a3 (unverified)

limit sgl-kernel causal conv1d to cuda only (#10648)

Authored Sep 20, 2025 by Jinyan Chen; committed by GitHub on Sep 19, 2025
Co-authored-by: Jinyan Chen <jinyanc@nvidia.com>
Parent: 6f993e8b
Showing 1 changed file with 9 additions and 6 deletions.

python/sglang/srt/layers/attention/hybrid_linear_attn_backend.py  (+9, -6)
@@ -13,9 +13,6 @@ from sglang.srt.layers.attention.fla.fused_recurrent import (
 from sglang.srt.layers.attention.fla.fused_sigmoid_gating_recurrent import (
     fused_sigmoid_gating_delta_rule_update,
 )
-from sglang.srt.layers.attention.mamba.causal_conv1d import (
-    causal_conv1d_fn as causal_conv1d_fn_sgl,
-)
 from sglang.srt.layers.attention.mamba.causal_conv1d_triton import (
     causal_conv1d_fn,
     causal_conv1d_update,
@@ -26,9 +23,15 @@ from sglang.srt.model_executor.forward_batch_info import ForwardBatch, ForwardMo
 from sglang.srt.model_executor.model_runner import ModelRunner
 from sglang.srt.models.qwen3_next import Qwen3HybridLinearDecoderLayer, fused_gdn_gating
 from sglang.srt.speculative.eagle_utils import EagleDraftInput, EagleVerifyInput
-from sglang.srt.utils import is_npu
+from sglang.srt.utils import is_cuda, is_npu
 
-if is_npu():
+if is_cuda():
+    from sglang.srt.layers.attention.mamba.causal_conv1d import (
+        causal_conv1d_fn as causal_conv1d_fn_cuda,
+    )
+
+    causal_conv1d_fn = causal_conv1d_fn_cuda
+elif is_npu():
     from sgl_kernel_npu.fla.chunk import chunk_gated_delta_rule_npu
     from sgl_kernel_npu.fla.fused_sigmoid_gating_recurrent import (
         fused_sigmoid_gating_delta_rule_update_npu,
@@ -350,7 +353,7 @@ class MambaAttnBackend(AttentionBackend):
                 mixed_qkv_processed.transpose(1, 2).contiguous().view(seq_len, -1)
             )
         else:
-            mixed_qkv = causal_conv1d_fn_sgl(
+            mixed_qkv = causal_conv1d_fn(
                 mixed_qkv.transpose(0, 1),
                 conv_weights,
                 bias,
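
Taken together, the hunks leave the Triton causal_conv1d_fn as the default on every platform and swap in the sgl-kernel CUDA implementation only when CUDA is detected, so NPU (and any other non-CUDA) builds never import the CUDA extension. The sketch below is a minimal, hypothetical illustration of that dispatch pattern, not sglang's actual module: the is_cuda/is_npu helpers and the kernel bodies are stand-ins.

import torch


def is_cuda() -> bool:
    # Stand-in for sglang.srt.utils.is_cuda: true only on CUDA builds.
    return torch.cuda.is_available()


def is_npu() -> bool:
    # Stand-in for sglang.srt.utils.is_npu: true only when an NPU backend is present.
    return hasattr(torch, "npu") and torch.npu.is_available()


def causal_conv1d_fn_triton(x: torch.Tensor, weight: torch.Tensor, bias=None):
    # Portable fallback kernel; stands in for the Triton implementation
    # that the module always imports.
    raise NotImplementedError


# Default binding: works on every platform.
causal_conv1d_fn = causal_conv1d_fn_triton

if is_cuda():
    # Guarded path: only CUDA builds ship the sgl-kernel op, so the import
    # and the rebinding happen exclusively on CUDA.
    def causal_conv1d_fn_cuda(x: torch.Tensor, weight: torch.Tensor, bias=None):
        # Stand-in for sgl-kernel's fused CUDA causal conv1d.
        raise NotImplementedError

    causal_conv1d_fn = causal_conv1d_fn_cuda
elif is_npu():
    # NPU builds would import their sgl_kernel_npu counterparts here instead.
    pass

# Call sites keep a single name, matching the change in the hunk at line 350:
# mixed_qkv = causal_conv1d_fn(mixed_qkv.transpose(0, 1), conv_weights, bias, ...)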