Fix trtllm_mla slow concat kernel in MTP (#10777)

Unverified commit 063c3791, authored Sep 23, 2025 by fzyzcjy, committed by GitHub Sep 22, 2025
Parent: 632b7d8c
Showing 1 changed file with 9 additions and 5 deletions:

python/sglang/srt/layers/attention/trtllm_mla_backend.py (+9, -5)
@@ -505,10 +505,7 @@ class TRTLLMMLABackend(FlashInferMLAAttnBackend):
             q_rope_reshaped = q_rope.view(
                 -1, layer.tp_q_head_num, layer.head_dim - layer.v_head_dim
             )
-            if _is_cuda and q_nope.shape[-1] == 512 and q_rope_reshaped.shape[-1] == 64:
-                query = concat_mla_absorb_q(q_nope, q_rope_reshaped)
-            else:
-                query = torch.cat([q_nope, q_rope_reshaped], dim=-1)
+            query = _concat_mla_absorb_q_general(q_nope, q_rope_reshaped)
         else:
             # For FP8 path, we already have the query and rope parts merged because of the quantize_and_rope_for_fp8 function
             query = q.view(-1, layer.tp_q_head_num, layer.head_dim)
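
(Note on this hunk: this call site previously dispatched inline between the fused concat_mla_absorb_q CUDA kernel, which handles the DeepSeek MLA head layout of a 512-wide absorbed nope part plus a 64-wide rope part, and a plain torch.cat. The commit factors that dispatch into _concat_mla_absorb_q_general, defined at the end of the file, so every call site shares the same fast-path check.)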
@@ -591,7 +588,7 @@ class TRTLLMMLABackend(FlashInferMLAAttnBackend):
         q_rope = q_rope.view(
             -1, layer.tp_q_head_num, layer.head_dim - layer.v_head_dim
         )
-        q = torch.cat([q, q_rope], dim=-1)
+        q = _concat_mla_absorb_q_general(q, q_rope)
         q = q.view(-1, layer.tp_q_head_num, layer.head_dim)
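
(This appears to be the call site the commit title targets: it previously concatenated with torch.cat unconditionally, so the MTP path never reached the fused kernel. Routing it through the shared helper lets it use concat_mla_absorb_q whenever the 512/64 shapes match.)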
@@ -716,3 +713,10 @@ class TRTLLMMLAMultiStepDraftBackend(FlashInferMLAMultiStepDraftBackend):
                 kv_indptr_buf=self.kv_indptr[i],
                 q_indptr_decode_buf=self.q_indptr_decode,
             )
+
+
+def _concat_mla_absorb_q_general(q_nope, q_rope):
+    if _is_cuda and q_nope.shape[-1] == 512 and q_rope.shape[-1] == 64:
+        return concat_mla_absorb_q(q_nope, q_rope)
+    else:
+        return torch.cat([q_nope, q_rope], dim=-1)
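
For readers skimming the diff, here is a minimal runnable sketch of the dispatch pattern the new helper implements. The names _fused_concat_stub and concat_mla_absorb_q_general below are hypothetical stand-ins for illustration; in the real file, concat_mla_absorb_q is a fused CUDA kernel and _is_cuda gates its availability.

    import torch

    def _fused_concat_stub(q_nope, q_rope):
        # Hypothetical stand-in for the fused CUDA kernel. For the shapes the
        # real kernel supports, the result equals a plain concatenation.
        return torch.cat([q_nope, q_rope], dim=-1)

    def concat_mla_absorb_q_general(q_nope, q_rope, fused_available=True):
        # Take the fused path only for the DeepSeek MLA head layout the kernel
        # supports: a 512-wide absorbed nope part and a 64-wide rope part.
        if fused_available and q_nope.shape[-1] == 512 and q_rope.shape[-1] == 64:
            return _fused_concat_stub(q_nope, q_rope)
        # General fallback, identical to the pre-commit MTP behavior.
        return torch.cat([q_nope, q_rope], dim=-1)

    # Example: a draft step with 2 tokens and 16 query heads per rank.
    q_nope = torch.randn(2, 16, 512)
    q_rope = torch.randn(2, 16, 64)
    out = concat_mla_absorb_q_general(q_nope, q_rope)
    assert out.shape == (2, 16, 576)  # combined head dim: 512 + 64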