Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
gaoqiong
lm-evaluation-harness
Commits
bf2abb41
"vscode:/vscode.git/clone" did not exist on "79961afa8281f98f380d11db45c8d4b6e66a574f"
Unverified
Commit
bf2abb41
authored
Nov 06, 2024
by
Rob Geada
Committed by
GitHub
Nov 06, 2024
Browse files
Fix 'loglikelihood' typos in the api models file (#2459)
parent
26f607f5
Changes
1
Show whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
5 additions
and
5 deletions
+5
-5
lm_eval/models/api_models.py
lm_eval/models/api_models.py
+5
-5
No files found.
lm_eval/models/api_models.py
View file @
bf2abb41
...
@@ -58,7 +58,7 @@ class TemplateAPI(TemplateLM):
         pretrained: str = None,  # `model` takes precedence over `pretrained` when passed.
         base_url: str = None,
         tokenizer: Optional[str] = None,
-        # Logliklehood tasks require a tokenizer to calculate context lengths,
+        # Loglikelihood tasks require a tokenizer to calculate context lengths,
         # however the requests can be sent as a string if the API doesn't support token inputs.
         # use tokenized_requests=False
         tokenizer_backend: Optional[
...
@@ -196,7 +196,7 @@ class TemplateAPI(TemplateLM):
         if not self.tokenized_requests:
             # if messages are tokenized:
             if isinstance(messages[0][0], int):
-                # assuming decoding is lossless. However, this is only for logliklehood requests
+                # assuming decoding is lossless. However, this is only for loglikelihood requests
                 # as we need to compute the context length. For generations, we don't need to tokenize.
                 messages = self.decode_batch(messages)
         if self._batch_size <= 1:
...
@@ -415,7 +415,7 @@ class TemplateAPI(TemplateLM):
             )
         return None

-    def batch_logliklehood_requests(
+    def batch_loglikelihood_requests(
         self, chunks: Iterable[List[LogLikelihoodInputs]]
     ) -> Tuple[List[List[int]], List[int], List[Tuple[str, str]]]:
         inputs = []
...
@@ -500,7 +500,7 @@ class TemplateAPI(TemplateLM):
         if self._concurrent <= 1:
             pbar = tqdm(desc="Requesting API", total=len(requests))
             for chunk in chunked:
-                inputs, ctxlens, cache_keys = self.batch_logliklehood_requests([chunk])
+                inputs, ctxlens, cache_keys = self.batch_loglikelihood_requests([chunk])
                 outputs = retry(
                     stop=stop_after_attempt(self.max_retries),
...
@@ -524,7 +524,7 @@ class TemplateAPI(TemplateLM):
                 )
                 pbar.update(1)
         else:
-            inputs, ctxlens, cache_keys = self.batch_logliklehood_requests(chunked)
+            inputs, ctxlens, cache_keys = self.batch_loglikelihood_requests(chunked)
             res = itertools.chain.from_iterable(
                 asyncio.run(
                     self.get_batched_requests(
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment