Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
change
sglang
Commits
6c903611
"docker/vscode:/vscode.git/clone" did not exist on "cefc2cf82dbdb5e4f725374420f0f6a91eb69048"
Unverified
Commit
6c903611
authored
Jul 05, 2025
by
Cheng Wan
Committed by
GitHub
Jul 05, 2025
Browse files
Fix incorrect spec_num_draft_tokens in draft_extend (#7757)
parent
77cfea68
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
9 additions
and
1 deletion
+9
-1
python/sglang/srt/layers/dp_attention.py
python/sglang/srt/layers/dp_attention.py
+8
-0
python/sglang/srt/speculative/eagle_worker.py
python/sglang/srt/speculative/eagle_worker.py
+1
-1
No files found.
python/sglang/srt/layers/dp_attention.py
View file @
6c903611
...
...
@@ -237,6 +237,10 @@ def _dp_gather(
     assert (
         local_tokens.untyped_storage() is not global_tokens.untyped_storage()
     ), "aliasing between global_tokens and local_tokens not allowed"
+    # NOTE: During draft extend, the gathered_buffer is padded to num_tokens * (speculative_num_steps + 1).
+    # But the size of local_tokens is total accepted tokens. We need to reduce the local_num_tokens to the
+    # actual size of the accepted tokens.
+    if forward_batch.forward_mode.is_draft_extend():
+        shape_tensor = local_num_tokens.new_full((), local_tokens.shape[0])
+        local_num_tokens = torch.minimum(local_num_tokens, shape_tensor)
...
...
@@ -291,6 +295,10 @@ def dp_scatter(
     assert (
         local_tokens.untyped_storage() is not global_tokens.untyped_storage()
     ), "aliasing between local_tokens and global_tokens not allowed"
+    # NOTE: During draft extend, the gathered_buffer is padded to num_tokens * (speculative_num_steps + 1).
+    # But the size of local_tokens is total accepted tokens. We need to reduce the local_num_tokens to the
+    # actual size of the accepted tokens.
+    if forward_batch.forward_mode.is_draft_extend():
+        shape_tensor = local_num_tokens.new_full((), local_tokens.shape[0])
+        local_num_tokens = torch.minimum(local_num_tokens, shape_tensor)
...
...
python/sglang/srt/speculative/eagle_worker.py
View file @
6c903611
...
...
@@ -844,7 +844,7 @@ class EAGLEWorker(TpModelWorker):
         )
         batch.return_hidden_states = False
         model_worker_batch = batch.get_model_worker_batch()
-        model_worker_batch.spec_num_draft_tokens = self.speculative_num_draft_tokens
+        model_worker_batch.spec_num_draft_tokens = self.speculative_num_steps + 1
         assert model_worker_batch.capture_hidden_mode == CaptureHiddenMode.LAST
         forward_batch = ForwardBatch.init_new(
             model_worker_batch, self.draft_model_runner
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment