sglang · commit b7a065ea (unverified)
Authored Nov 19, 2024 by Lianmin Zheng; committed by GitHub, Nov 19, 2024
Parent: b1104538

    Use cuda event wait and synchronization instead of busy waiting (#2089)

6 changed files with 28 additions and 26 deletions:

    python/sglang/srt/managers/schedule_batch.py              +1   -1
    python/sglang/srt/managers/scheduler.py                   +0   -3
    python/sglang/srt/managers/tp_worker.py                   +3   -3
    python/sglang/srt/managers/tp_worker_overlap_thread.py    +17  -11
    test/srt/test_large_max_new_tokens.py                     +1   -1
    test/srt/test_srt_engine.py                               +6   -7
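Before this change, resolve_batch_result busy-waited on the device-to-host copy (`while not copy_event.query(): time.sleep(1e-5)`) and the scheduler forced an extra synchronization through `batch.seq_lens[0].item()`. The commit replaces both with three explicit signals: `compute_info_done`, a `torch.cuda.Event` the producer records before enqueueing a batch and the forward thread's stream waits on; `launch_done`, a `threading.Event` set once the forward pass has been launched; and `copy_done`, a `torch.cuda.Event` recorded after the non-blocking copy of the sampled tokens and awaited with `synchronize()`. A condensed, self-contained sketch of that flow (not the sglang source; it assumes the forward thread runs on its own CUDA stream, which is what makes the event ordering necessary):

    import queue
    import threading

    import torch

    input_queue: queue.Queue = queue.Queue()
    output_queue: queue.Queue = queue.Queue()
    launch_done = threading.Event()

    def forward_thread():
        stream = torch.cuda.Stream()
        with torch.cuda.stream(stream):
            batch, compute_info_done = input_queue.get()
            compute_info_done.wait()     # order this stream after the producer's queued work
            logits = batch @ batch       # stand-in for the model forward pass
            launch_done.set()            # CPU-side signal: forward has been launched
            host_out = logits.to("cpu", non_blocking=True)
            copy_done = torch.cuda.Event()
            copy_done.record()           # marks the completion point of the async copy
            output_queue.put((copy_done, host_out))

    def submit(batch):
        compute_info_done = torch.cuda.Event()
        compute_info_done.record()       # everything queued so far is covered
        input_queue.put((batch, compute_info_done))

    def resolve():
        copy_done, host_out = output_queue.get()
        copy_done.synchronize()          # blocking wait, replacing query()+sleep
        launch_done.wait()
        return host_out

    t = threading.Thread(target=forward_thread)
    t.start()
    submit(torch.randn(64, 64, device="cuda"))
    print(resolve().shape)
    t.join()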
--- a/python/sglang/srt/managers/schedule_batch.py
+++ b/python/sglang/srt/managers/schedule_batch.py
@@ -1063,7 +1063,7 @@ class ScheduleBatch:
             out_cache_loc=self.out_cache_loc,
             return_logprob=self.return_logprob,
             decoding_reqs=self.decoding_reqs,
-            sampling_info=dataclasses.replace(self.sampling_info),
+            sampling_info=self.sampling_info,
         )
 
     def __str__(self):
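The copy dropped here is not lost: `dataclasses.replace` on a dataclass instance builds a new object with the same field values, i.e. a shallow copy, and the overlap worker client already performs exactly that copy before queueing the batch (see the last hunk of tp_worker_overlap_thread.py below), so repeating it per batch in `ScheduleBatch` was redundant. A minimal sketch of the shallow-copy semantics, using a hypothetical stand-in class rather than sglang's real sampling-info type:

    import dataclasses

    @dataclasses.dataclass
    class SamplingInfo:            # illustrative stand-in, not the sglang class
        temperature: float
        top_p: float

    a = SamplingInfo(temperature=0.7, top_p=0.9)
    b = dataclasses.replace(a)     # new instance, same field values (shallow copy)
    assert b == a and b is not a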
--- a/python/sglang/srt/managers/scheduler.py
+++ b/python/sglang/srt/managers/scheduler.py
@@ -387,9 +387,6 @@ class Scheduler:
             batch = self.get_next_batch_to_run()
             self.cur_batch = batch
 
             if batch:
-                # We need a stream synchronization here. Otherwise, there will be cuda illegal memory access errors.
-                _ = batch.seq_lens[0].item()
-
                 result = self.run_batch(batch)
                 result_queue.append((batch.copy(), result))
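The deleted lines were a workaround: reading one element with `.item()` forces a device-to-host copy, which cannot complete until every queued kernel producing that tensor has run, so it acts as an implicit stream synchronization. The explicit events introduced below make the hack unnecessary. A minimal illustration of why `.item()` synchronizes (assumes a CUDA device):

    import torch

    x = torch.randn(1 << 20, device="cuda")
    y = x * 2            # queued asynchronously; may not have executed yet
    v = y[0].item()      # blocks until the kernel producing y has finished

    # The heavier, whole-device equivalent of the same implicit wait:
    torch.cuda.synchronize()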
--- a/python/sglang/srt/managers/tp_worker.py
+++ b/python/sglang/srt/managers/tp_worker.py
@@ -142,12 +142,12 @@ class TpModelWorker:
     def forward_batch_generation(
         self,
         model_worker_batch: ModelWorkerBatch,
-        launch_event: Optional[threading.Event] = None,
+        launch_done: Optional[threading.Event] = None,
     ):
         forward_batch = ForwardBatch.init_new(model_worker_batch, self.model_runner)
         logits_output = self.model_runner.forward(forward_batch)
-        if launch_event:
-            launch_event.set()
+        if launch_done:
+            launch_done.set()
         next_token_ids = self.model_runner.sample(logits_output, model_worker_batch)
         return logits_output, next_token_ids
 
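`launch_done` is a plain `threading.Event`: the worker sets it once the forward pass has been issued, and the consumer thread blocks on `.wait()` instead of polling shared state. A minimal sketch of that CPU-side handshake (names assumed for illustration):

    import threading
    import time

    launch_done = threading.Event()

    def worker():
        time.sleep(0.01)       # stand-in for launching the forward pass
        launch_done.set()      # signal: the launch has happened

    t = threading.Thread(target=worker)
    t.start()
    launch_done.wait()         # blocks until set(); no sleep/poll loop
    t.join()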
--- a/python/sglang/srt/managers/tp_worker_overlap_thread.py
+++ b/python/sglang/srt/managers/tp_worker_overlap_thread.py
@@ -96,19 +96,22 @@ class TpModelWorkerClient:
     @torch.no_grad()
     def forward_thread_func_(self):
         while True:
-            model_worker_batch, future_token_ids_ct = self.input_queue.get()
+            model_worker_batch, future_token_ids_ct, compute_info_done = (
+                self.input_queue.get()
+            )
             if not model_worker_batch:
                 break
-            self.launch_event = threading.Event()
-            copy_event = torch.cuda.Event()
+            self.launch_done = threading.Event()
+            copy_done = torch.cuda.Event()
 
             # Resolve future tokens in the input
             input_ids = model_worker_batch.input_ids
             resolve_future_token_ids(input_ids, self.future_token_ids_map)
 
             # Run forward
+            compute_info_done.wait()
             logits_output, next_token_ids = self.worker.forward_batch_generation(
-                model_worker_batch, self.launch_event
+                model_worker_batch, self.launch_done
             )
 
             # Update the future token ids map
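`compute_info_done.wait()` is a stream-level wait, not a CPU block: `record()` marks the producer stream's current position, and `wait()` makes the forward thread's stream hold subsequent kernels until that point has been reached, while the Python thread keeps running. A small illustration with two explicit streams (assumes CUDA; real code must also keep cross-stream memory safe, e.g. via `Tensor.record_stream`):

    import torch

    s1, s2 = torch.cuda.Stream(), torch.cuda.Stream()
    ev = torch.cuda.Event()

    with torch.cuda.stream(s1):
        a = torch.randn(2048, 2048, device="cuda")
        b = a @ a          # queued on s1
        ev.record()        # marks s1's position after the matmul

    with torch.cuda.stream(s2):
        ev.wait()          # s2 stalls on-device until ev fires; the CPU does not block
        c = b + 1          # ordered after the matmul, so safe to read b

    torch.cuda.synchronize()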
@@ -133,15 +136,14 @@ class TpModelWorkerClient:
                 )
             )
             next_token_ids = next_token_ids.to("cpu", non_blocking=True)
-            copy_event.record()
+            copy_done.record()
 
-            self.output_queue.put((copy_event, logits_output, next_token_ids))
+            self.output_queue.put((copy_done, logits_output, next_token_ids))
 
     def resolve_batch_result(self, bid: int):
-        copy_event, logits_output, next_token_ids = self.output_queue.get()
-        while not copy_event.query():
-            time.sleep(1e-5)
-        self.launch_event.wait()
+        copy_done, logits_output, next_token_ids = self.output_queue.get()
+        copy_done.synchronize()
+        self.launch_done.wait()
         if logits_output.next_token_logprobs is not None:
             logits_output.next_token_logprobs = (
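This hunk is the busy wait named in the commit title: the old loop polled `query()` from Python every 10 µs, burning a CPU core and adding up to a sleep quantum of latency, whereas `synchronize()` parks the thread in the CUDA driver until the device-to-host copy recorded by `copy_done` has finished. The two patterns in isolation (illustrative; assumes CUDA):

    import time
    import torch

    x = torch.randn(4096, 4096, device="cuda")
    y = x @ x
    done = torch.cuda.Event()
    done.record()

    # Old pattern: spin on the event from the CPU.
    while not done.query():
        time.sleep(1e-5)

    # New pattern: one blocking wait inside the driver.
    done.record()
    done.synchronize()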
@@ -162,7 +164,11 @@ class TpModelWorkerClient:
         model_worker_batch.sampling_info = dataclasses.replace(
             model_worker_batch.sampling_info
         )
-        self.input_queue.put((model_worker_batch, self.future_token_ids_ct))
+        compute_info_done = torch.cuda.Event()
+        compute_info_done.record()
+        self.input_queue.put(
+            (model_worker_batch, self.future_token_ids_ct, compute_info_done)
+        )
 
         # Allocate output future objects
         bs = len(model_worker_batch.seq_lens)
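Ordering matters on the producer side: a CUDA event captures the recording stream's position at `record()` time, so recording `compute_info_done` after the `sampling_info` copy and immediately before the `put()` means the forward thread's `wait()` covers exactly the work queued for this batch and nothing enqueued later. Distilled into a per-batch idiom (a sketch with assumed names, not the sglang source):

    import torch

    def enqueue(input_queue, model_worker_batch, future_token_ids_ct):
        compute_info_done = torch.cuda.Event()   # fresh event per batch
        compute_info_done.record()               # capture the current stream position now
        input_queue.put((model_worker_batch, future_token_ids_ct, compute_info_done))

Note the pairing the commit settles on: `threading.Event` for CPU-to-CPU signals (`launch_done`) and `torch.cuda.Event` for GPU ordering (`compute_info_done`, `copy_done`).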
--- a/test/srt/test_large_max_new_tokens.py
+++ b/test/srt/test_large_max_new_tokens.py
@@ -38,7 +38,7 @@ class TestLargeMaxNewTokens(unittest.TestCase):
             api_key=cls.api_key,
             other_args=(
                 "--max-total-token",
-                "1024",
+                "1536",
                 "--context-len",
                 "8192",
                 "--decode-log-interval",
--- a/test/srt/test_srt_engine.py
+++ b/test/srt/test_srt_engine.py
@@ -29,7 +29,7 @@ class TestSRTEngine(unittest.TestCase):
         sampling_params = {"temperature": 0, "max_new_tokens": 8}
 
-        engine = sgl.Engine(model_path=model_path, random_seed=42, log_level="error")
+        engine = sgl.Engine(model_path=model_path, random_seed=42)
         out1 = engine.generate(prompt, sampling_params)["text"]
         engine.shutdown()
 
@@ -51,7 +51,7 @@ class TestSRTEngine(unittest.TestCase):
         sampling_params = {"temperature": 0, "max_new_tokens": 8}
 
-        engine = sgl.Engine(model_path=model_path, random_seed=42, log_level="error")
+        engine = sgl.Engine(model_path=model_path, random_seed=42)
         engine.generate(prompt, sampling_params)
         engine.generate(prompt, sampling_params)
         engine.shutdown()
@@ -74,7 +74,6 @@ class TestSRTEngine(unittest.TestCase):
         # Create an LLM.
         llm = sgl.Engine(
             model_path=DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
-            log_level="error",
         )
 
         # 1. sync + non streaming
@@ -118,7 +117,9 @@ class TestSRTEngine(unittest.TestCase):
         prompt = "The capital of UK is"
         model_path = DEFAULT_SMALL_MODEL_NAME_FOR_TEST
 
-        engine = sgl.Engine(model_path=model_path, random_seed=42, log_level="error")
+        engine = sgl.Engine(
+            model_path=model_path, random_seed=42, disable_radix_cache=True
+        )
         sampling_params = {"temperature": 0, "max_new_tokens": 8}
         out1 = engine.generate(prompt, sampling_params)["text"]
@@ -141,9 +142,7 @@ class TestSRTEngine(unittest.TestCase):
         prompt = "Today is a sunny day and I like"
         model_path = DEFAULT_SMALL_EMBEDDING_MODEL_NAME_FOR_TEST
 
-        engine = sgl.Engine(
-            model_path=model_path, is_embedding=True, random_seed=42, log_level="error"
-        )
+        engine = sgl.Engine(model_path=model_path, is_embedding=True, random_seed=42)
         out1 = torch.tensor(engine.encode(prompt)["embedding"])
         engine.shutdown()