sglang / Commits / 7906d1d2

Commit 7906d1d2 (unverified), authored Jan 18, 2025 by Lianmin Zheng, committed by GitHub on Jan 18, 2025

Remove the unused write_with_records (#2972)
parent 81d27c8e

Showing 3 changed files with 1 addition and 29 deletions (+1, -29):

python/sglang/srt/managers/schedule_batch.py      +0  -1
python/sglang/srt/mem_cache/memory_pool.py        +1  -27
python/sglang/srt/model_executor/model_runner.py  +0  -1
python/sglang/srt/managers/schedule_batch.py

@@ -158,7 +158,6 @@ class ImageInputs:
     im_end_id: Optional[torch.Tensor] = None
     slice_start_id: Optional[torch.Tensor] = None
     slice_end_id: Optional[torch.Tensor] = None
     tgt_sizes: Optional[list] = None

     @staticmethod
python/sglang/srt/mem_cache/memory_pool.py

@@ -49,7 +49,6 @@ class ReqToTokenPool:
         size: int,
         max_context_len: int,
         device: str,
-        use_records: bool,
         enable_memory_saver: bool,
     ):
         memory_saver_adapter = TorchMemorySaverAdapter.create(

@@ -64,17 +63,9 @@ class ReqToTokenPool:
             (size, max_context_len), dtype=torch.int32, device=device
         )
         self.free_slots = list(range(size))
-        self.write_records = []
-        self.use_records = use_records
-
-        if self.use_records:
-            self.write = self.write_with_records
-        else:
-            self.write = self.write_without_records

     def write(self, indices, values):
-        # Keep the signature for type checking. It will be assigned during runtime.
-        raise NotImplementedError()
+        self.req_to_token[indices] = values

     def available_size(self):
         return len(self.free_slots)

@@ -96,23 +87,6 @@ class ReqToTokenPool:
     def clear(self):
         self.free_slots = list(range(self.size))
-        self.write_records = []
-
-    def write_without_records(self, indices, values):
-        self.req_to_token[indices] = values
-
-    def write_with_records(self, indices, values):
-        self.req_to_token[indices] = values
-        self.write_records.append((indices, values))
-
-    def get_write_records(self):
-        ret = self.write_records
-        self.write_records = []
-        return ret
-
-    def apply_write_records(self, write_records: List[Tuple]):
-        for indices, values in write_records:
-            self.req_to_token[indices] = values


 class BaseTokenToKVPool:
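Taken together, the memory_pool.py changes make ReqToTokenPool.write a plain tensor assignment and drop all of the write_records bookkeeping. Below is a minimal, self-contained sketch of the resulting behavior; it is not the actual sglang class, and MiniReqToTokenPool and the toy sizes are illustrative assumptions.

import torch


class MiniReqToTokenPool:
    """Toy stand-in mirroring the post-commit shape of ReqToTokenPool."""

    def __init__(self, size: int, max_context_len: int, device: str):
        # One row of token locations per request slot, as in the diff above.
        self.size = size
        self.req_to_token = torch.zeros(
            (size, max_context_len), dtype=torch.int32, device=device
        )
        self.free_slots = list(range(size))

    def write(self, indices, values):
        # Direct write; no use_records branch or write_records list anymore.
        self.req_to_token[indices] = values

    def available_size(self):
        return len(self.free_slots)

    def clear(self):
        self.free_slots = list(range(self.size))


# Usage: store the first three token locations for request slot 0.
pool = MiniReqToTokenPool(size=4, max_context_len=8, device="cpu")
pool.write((0, slice(0, 3)), torch.tensor([10, 11, 12], dtype=torch.int32))
print(pool.req_to_token[0, :3].tolist())  # [10, 11, 12]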
python/sglang/srt/model_executor/model_runner.py

@@ -617,7 +617,6 @@ class ModelRunner:
             size=max_num_reqs + 1,
             max_context_len=self.model_config.context_len + 4,
             device=self.device,
-            use_records=False,
             enable_memory_saver=self.server_args.enable_memory_saver,
         )
         if (
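On the caller side, the only change is that ReqToTokenPool is constructed without use_records. Below is a hedged call-site sketch, assuming an sglang checkout that includes this commit is importable; max_num_reqs, context_len, and the CPU device are stand-in values rather than ModelRunner's real configuration.

from sglang.srt.mem_cache.memory_pool import ReqToTokenPool

max_num_reqs = 256   # stand-in for the value ModelRunner derives from available memory
context_len = 4096   # stand-in for self.model_config.context_len

req_to_token_pool = ReqToTokenPool(
    size=max_num_reqs + 1,
    max_context_len=context_len + 4,
    device="cpu",  # a real server would pass the GPU device string
    enable_memory_saver=False,
)
print(req_to_token_pool.available_size())  # size free slots, i.e. max_num_reqs + 1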