gaoqiong / lm-evaluation-harness

Commit e5491709, authored Nov 22, 2023 by baberabb

fix greedy_until

parent 24f4e8d7
Showing 1 changed file with 4 additions and 2 deletions

lm_eval/models/vllm_causallms.py (+4, -2)
@@ -161,7 +161,9 @@ class VLLM(LM):
         # batch tokenize contexts
         context, all_gen_kwargs = zip(*(req.args for req in requests))
         context_encoding = self.tokenizer(context)
-        requests = list(zip((context, context_encoding.input_ids), all_gen_kwargs))
+        requests = [
+            ((a, b), c) for a, b, c in zip(context, context_encoding, all_gen_kwargs)
+        ]

         def _collate_gen(_requests):
             # the negative sign on len(toks) sorts descending - this has a few advantages:
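Why the packing changed (a minimal sketch with hypothetical toy values, not code from the commit): the old expression zips the two-element tuple (context, context_encoding.input_ids) itself against the kwargs, so it yields at most two entries no matter how many requests there are; the comprehension instead builds one ((context, encoding), kwargs) entry per request.

# Illustrative sketch; toy data, assumed shapes
context = ("a", "b", "c")                  # one prompt per request
context_encoding = ([1], [2], [3])         # token ids per prompt
all_gen_kwargs = ({"until": ["\n"]},) * 3  # per-request generation kwargs

# Old packing: zips the *pair* (contexts, encodings) against the kwargs,
# producing only 2 entries regardless of the number of requests.
old = list(zip((context, context_encoding), all_gen_kwargs))
assert len(old) == 2

# New packing: one ((context, encoding), kwargs) entry per request.
new = [((a, b), c) for a, b, c in zip(context, context_encoding, all_gen_kwargs)]
assert len(new) == 3
assert new[0] == (("a", [1]), {"until": ["\n"]})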
@@ -190,7 +192,7 @@ class VLLM(LM):
         )
         for chunk in chunks:
             context_and_encoding, all_gen_kwargs = zip(*chunk)
-            context, context_encoding = context_and_encoding
+            context, context_encoding = zip(*context_and_encoding)
             # we assume all gen kwargs in the batch are the same
             # this is safe to assume because the `grouper` object ensures it.
             gen_kwargs = all_gen_kwargs[0]
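Downstream, each chunk element is now ((context, encoding), kwargs), so after zip(*chunk) the first result is a tuple of (context, encoding) pairs; it has to be transposed with another zip(*...) rather than unpacked directly. A minimal sketch with hypothetical toy values (not code from the commit):

# Illustrative sketch; toy chunk of two packed requests
chunk = [
    (("a", [1]), {"until": ["\n"]}),
    (("b", [2]), {"until": ["\n"]}),
]
context_and_encoding, all_gen_kwargs = zip(*chunk)
# context_and_encoding == (("a", [1]), ("b", [2])) -- pairs, not parallel tuples

# Old unpacking assumed exactly two elements: for batch size 2 it would
# silently mis-split, and for any other size it raises ValueError.
#     context, context_encoding = context_and_encoding

# New unpacking transposes the pairs into parallel tuples.
context, context_encoding = zip(*context_and_encoding)
assert context == ("a", "b")
assert context_encoding == ([1], [2])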