lm-evaluation-harness · Commit 775d5d85
Authored Jul 03, 2024 by Nathan Habib

add doc

Parent: 6a6068f8
Showing 1 changed file with 6 additions and 2 deletions:

lm_eval/models/huggingface.py (+6, -2)
lm_eval/models/huggingface.py:
@@ -683,6 +683,8 @@ class HFLM(TemplateLM):
         SECURITY_MARGIN_FACTOR_GENERATE_UNTIL = 1

         if len(requests[0]) == 3:  # logprob evals
+            # for logprob evals, we use the maximum context length + continuation length
+            # as the default for computing batch size
             _, context_enc, continuation_enc = requests[pos]
             max_length = len(
                 (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1]
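A note on the slice the new comment documents: the concatenated sequence is first right-truncated to self.max_length + 1 tokens, then the final token is dropped, since the last token is predicted rather than fed as input. A minimal sketch of that arithmetic, with hypothetical token ids (model_max_length stands in for self.max_length):

# Sketch of the truncation arithmetic; all values are hypothetical.
model_max_length = 8                    # stands in for self.max_length

context_enc = [101, 2, 3, 4, 5, 6]      # hypothetical context token ids
continuation_enc = [7, 8, 9, 10]        # hypothetical continuation token ids

full = context_enc + continuation_enc   # 10 tokens
# keep the last (max_length + 1) tokens, then drop the final one,
# which the model predicts and never receives as input
inp = full[-(model_max_length + 1):][:-1]

print(len(inp))  # 8 -> the input width the batch-size probe must accommodate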
@@ -691,7 +693,9 @@ class HFLM(TemplateLM):
             max_cont_enc = len(continuation_enc[-(self.max_length + 1) :])
             security_margin_factor = SECURITY_MARGIN_FACTOR_LOG_PROBS
         elif len(requests[0]) == 2:  # generative evals
-            # using rolling window with maximum context
+            # for generative evals, we use either the maximum context length of the model
+            # or the longest context of the requests, whichever is shorter as it will be truncated
+            # + the allowed maximum generation length
             longest_context = max(
                 [
                     len(self.tok_encode(request[0]))
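The rule the new comment describes (the shorter of the model's context window and the longest request context, plus the allowed generation length) is only partially visible in this hunk, so the following is a hedged sketch of what it implies; the name max_gen_toks and all values are illustrative assumptions, not taken from this diff:

# Hypothetical sketch of the rule described in the new comment;
# names and values are illustrative, not from this diff.
model_max_length = 2048                        # stands in for self.max_length
max_gen_toks = 256                             # assumed generation budget

request_context_lengths = [512, 1900, 3000]    # hypothetical tokenized lengths
longest_context = max(request_context_lengths)

# whichever is shorter wins, since over-long contexts get truncated anyway
effective_length = min(longest_context, model_max_length) + max_gen_toks
print(effective_length)  # 2304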
@@ -731,7 +735,7 @@ class HFLM(TemplateLM):
             (batch_size + security_margin, max_length), device=self.device
         ).long()
-        for _ in range(5):
+        for _ in range(5 * security_margin_factor):
             logits = self._model_call(inps=test_batch, **call_kwargs).float()
             scores = F.log_softmax(logits, dim=-1)  # noqa: F841
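The behavioural change in this hunk is the multiplier on the probe loop: automatic batch-size detection now runs 5 * security_margin_factor dry forward passes instead of a fixed five, so paths with a larger factor stress the candidate batch size harder before the real run. A minimal sketch of that probe pattern, where forward_pass is a hypothetical stand-in for the self._model_call + F.log_softmax pair above:

# Hypothetical sketch of the stress loop; forward_pass stands in for
# self._model_call followed by F.log_softmax in the real code.
def probe(forward_pass, security_margin_factor: int) -> bool:
    """Return True if the candidate batch size survives the dry runs."""
    try:
        for _ in range(5 * security_margin_factor):
            forward_pass()  # an over-large batch raises CUDA OOM here
    except RuntimeError:    # torch.cuda.OutOfMemoryError subclasses RuntimeError
        return False
    return True

With SECURITY_MARGIN_FACTOR_GENERATE_UNTIL = 1 (first hunk), generative evals keep the previous five iterations; only the logprob path runs extra probes if SECURITY_MARGIN_FACTOR_LOG_PROBS is larger.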