sglang · Commit 27778010 (Unverified)
Authored Sep 12, 2025 by Yi Zhang; committed by GitHub, Sep 11, 2025

fix dual stream bug (#10352)

Parent: 46d8fb1c
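Summary (inferred from the diff below): in Qwen2MoeSparseMoeBlock, the dual-stream forward previously handed the same hidden_states tensor to the shared-experts branch (current stream) and the routed-experts branch (alternate stream). The patch makes the shared-experts branch consume hidden_states.clone() instead, and additionally gates the dual-stream path on get_is_capture_mode(), newly imported from sglang.srt.model_executor.cuda_graph_runner, so that path is only taken during CUDA graph capture.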
Showing 1 changed file with 3 additions and 1 deletion:

python/sglang/srt/models/qwen2_moe.py (+3, -1)
python/sglang/srt/models/qwen2_moe.py @ 27778010:

@@ -62,6 +62,7 @@ from sglang.srt.layers.vocab_parallel_embedding import (
     VocabParallelEmbedding,
 )
 from sglang.srt.managers.schedule_batch import global_server_args_dict
+from sglang.srt.model_executor.cuda_graph_runner import get_is_capture_mode
 from sglang.srt.model_executor.forward_batch_info import ForwardBatch, PPProxyTensors
 from sglang.srt.model_loader.weight_utils import default_weight_loader
 from sglang.srt.two_batch_overlap import model_forward_maybe_tbo
@@ -194,7 +195,7 @@ class Qwen2MoeSparseMoeBlock(nn.Module):
     ) -> torch.Tensor:
         current_stream = torch.cuda.current_stream()
         self.alt_stream.wait_stream(current_stream)
-        shared_output = self._forward_shared_experts(hidden_states)
+        shared_output = self._forward_shared_experts(hidden_states.clone())
         with torch.cuda.stream(self.alt_stream):
             router_output = self._forward_router_experts(hidden_states)
@@ -217,6 +218,7 @@ class Qwen2MoeSparseMoeBlock(nn.Module):
             self.alt_stream is not None
             and hidden_states.shape[0] > 0
             and hidden_states.shape[0] <= DUAL_STREAM_TOKEN_THRESHOLD
+            and get_is_capture_mode()
         ):
             final_hidden_states, shared_output = self.forward_normal_dual_stream(
                 hidden_states
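For context, the shape of the pattern being patched is sketched below. This is a minimal, hypothetical reconstruction, not sglang's actual code: shared_experts and router_experts stand in for the real expert modules, and the hazard description is inferred from the patch rather than stated in it. Two CUDA streams read the same activations concurrently, and PyTorch's caching allocator tracks a tensor's memory against the stream it was allocated on, so cross-stream use of a buffer that is freed on its home stream can corrupt data unless the tensor is cloned or recorded on the consuming stream (Tensor.record_stream). Cloning hidden_states for the current-stream branch gives it a private buffer:

import torch

def dual_stream_moe_forward(hidden_states, shared_experts, router_experts, alt_stream):
    # hidden_states was produced on the current stream; make the side stream
    # wait for that work before it launches anything that reads the tensor.
    current_stream = torch.cuda.current_stream()
    alt_stream.wait_stream(current_stream)

    # Shared experts run on the current stream. The clone() (the fix) gives
    # this branch its own buffer, decoupling its lifetime and any in-place
    # ops from the tensor the alternate stream is still reading.
    shared_output = shared_experts(hidden_states.clone())

    # Routed experts run concurrently on the alternate stream.
    with torch.cuda.stream(alt_stream):
        router_output = router_experts(hidden_states)

    # Join the streams before combining the two partial results.
    current_stream.wait_stream(alt_stream)
    return router_output + shared_output

The second change, `and get_is_capture_mode()`, narrows the dispatch so the dual-stream path is only chosen while a CUDA graph is being captured (and therefore baked into the replayed graph); outside capture mode even small batches take the ordinary single-stream forward. The diff itself does not give the rationale; one plausible reading is that the stream overlap is only well-defined under the fixed launch order that graph capture and replay guarantee.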