norm / vllm · Commits

Commit eb52db1b authored Feb 13, 2023 by Woosuk Kwon

Fix can_swap_in

Parent: a2a9869c
Showing 1 changed file with 10 additions and 4 deletions.

cacheflow/master/block_manager.py: +10 -4
cacheflow/master/block_manager.py (view file @ eb52db1b)

@@ -70,13 +70,14 @@ class BlockSpaceManager:
         self.block_tables: Dict[int, BlockTable] = {}
 
     def can_allocate(self, seq_group: SequenceGroup) -> bool:
+        # NOTE: Here we assume that all sequences in the group have the same prompt.
         seq = seq_group.seqs[0]
         num_required_blocks = len(seq.logical_token_blocks)
         num_free_gpu_blocks = self.gpu_allocator.get_num_free_blocks()
         return num_required_blocks <= num_free_gpu_blocks
 
     def allocate(self, seq_group: SequenceGroup) -> None:
-        # Here, we assume that all sequences in the group have the same prompt.
+        # NOTE: Here we assume that all sequences in the group have the same prompt.
         seq = seq_group.seqs[0]
 
         # Allocate new physical token blocks that will store the prompt tokens.
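For readers skimming the hunk above: can_allocate admits a sequence group only if every logical block of the shared prompt can be backed by a free physical GPU block. The sketch below restates that reservation check in isolation; ToyAllocator and the free-standing can_allocate function are hypothetical stand-ins for illustration, not the cacheflow classes.

# Hypothetical stand-ins for illustration; not the cacheflow implementation.
class ToyAllocator:
    def __init__(self, num_blocks: int) -> None:
        self._num_free = num_blocks

    def get_num_free_blocks(self) -> int:
        return self._num_free


def can_allocate(num_prompt_blocks: int, allocator: ToyAllocator) -> bool:
    # Same shape as the check in the diff: every logical block of the prompt
    # must be backed by a free physical GPU block before the group is admitted.
    return num_prompt_blocks <= allocator.get_num_free_blocks()


allocator = ToyAllocator(num_blocks=8)
print(can_allocate(6, allocator))   # True: 6 <= 8 free blocks
print(can_allocate(10, allocator))  # False: 10 > 8 free blocks

Because the comment added in the diff states that all sequences in the group have the same prompt, checking seqs[0] is sufficient for this admission test.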
@@ -124,10 +125,10 @@ class BlockSpaceManager:
         self.gpu_allocator.free(last_block)
         return last_block.block_number, new_block.block_number
 
-    def fork(self, src_seq: Sequence, child_seq: Sequence) -> None:
+    def fork(self, parent_seq: Sequence, child_seq: Sequence) -> None:
         # NOTE: fork does not allocate a new physical block.
         # Thus, it is always safe from OOM.
-        src_block_table = self.block_tables[src_seq.seq_id]
+        src_block_table = self.block_tables[parent_seq.seq_id]
         self.block_tables[child_seq.seq_id] = src_block_table.copy()
         for block in src_block_table:
             block.ref_count += 1
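The rename from src_seq to parent_seq does not change behavior: fork still shares the parent's block table with the child and bumps each block's reference count instead of copying any data. Below is a minimal, runnable sketch of that sharing scheme; Block and ToyBlockManager are simplified stand-ins, not the cacheflow types.

# Hypothetical stand-ins for illustration; not the cacheflow types.
from dataclasses import dataclass, field
from typing import Dict, List


@dataclass
class Block:
    block_number: int
    ref_count: int = 1


@dataclass
class ToyBlockManager:
    block_tables: Dict[int, List[Block]] = field(default_factory=dict)

    def fork(self, parent_seq_id: int, child_seq_id: int) -> None:
        # Share the parent's physical blocks with the child instead of
        # copying their contents; only the (shallow) table is copied.
        src_block_table = self.block_tables[parent_seq_id]
        self.block_tables[child_seq_id] = src_block_table.copy()
        for block in src_block_table:
            block.ref_count += 1


manager = ToyBlockManager({0: [Block(7), Block(8)]})
manager.fork(parent_seq_id=0, child_seq_id=1)
assert manager.block_tables[1][0] is manager.block_tables[0][0]  # same block object
assert manager.block_tables[0][0].ref_count == 2                 # now shared by two sequences

Since only reference counts change, fork cannot run out of memory, which is what the NOTE in the diff points out.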
@@ -146,7 +147,12 @@ class BlockSpaceManager:
 
     def can_swap_in(self, seq_group: SequenceGroup) -> bool:
         blocks = self._get_physical_blocks(seq_group)
-        return len(blocks) <= self.gpu_allocator.get_num_free_blocks()
+        num_running_seqs = seq_group.num_seqs(status=SequenceStatus.RUNNING)
+        num_free_blocks = self.gpu_allocator.get_num_free_blocks()
+        # NOTE: Conservatively, we assume that every sequence will allocate
+        # at least one free block right after the swap-in.
+        # NOTE: This should match the logic in can_append().
+        return len(blocks) + num_running_seqs <= num_free_blocks
 
     def swap_in(self, seq_group: SequenceGroup) -> Dict[int, int]:
         # src_block_number -> dst_block_number
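This hunk is the fix named in the commit message. The old check only required the group's physical blocks to fit into free GPU memory; the new check additionally reserves one free block per sequence, because each sequence is assumed to allocate at least one block immediately after being swapped in (mirroring can_append(), per the added comment). The sketch below contrasts the two checks with made-up numbers; the helper functions are illustrative, not part of cacheflow.

# Illustrative helpers, not cacheflow code: they contrast the old and new
# admission tests for swapping a sequence group back onto the GPU.
def can_swap_in_old(num_blocks_to_swap_in: int, num_free_gpu_blocks: int) -> bool:
    # Old check: the swapped-out blocks merely have to fit.
    return num_blocks_to_swap_in <= num_free_gpu_blocks


def can_swap_in_new(num_blocks_to_swap_in: int, num_seqs: int,
                    num_free_gpu_blocks: int) -> bool:
    # New check: also reserve one block per sequence, since each sequence is
    # assumed to allocate at least one block right after the swap-in.
    return num_blocks_to_swap_in + num_seqs <= num_free_gpu_blocks


# 10 swapped-out blocks, 2 sequences, exactly 10 free GPU blocks.
print(can_swap_in_old(10, 10))     # True: fits now, but the very next append could fail
print(can_swap_in_new(10, 2, 10))  # False: wait until at least 12 blocks are free

With 10 blocks to bring back, 2 sequences, and exactly 10 free GPU blocks, the old check would admit the group and risk an immediate shortage, while the new check waits for more headroom.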