gaoqiong / lm-evaluation-harness
"vscode:/vscode.git/clone" did not exist on "be36de16903333e994211733d8b50c8d320048c7"
Commit 42401fa2, authored Jul 08, 2024 by lintangsutawika

revert parts back to main

parent eb9f6788
Showing 3 changed files with 11 additions and 7 deletions.
lm_eval/api/metrics.py (+1, -1)
lm_eval/api/task.py (+9, -5)
lm_eval/filters/extraction.py (+1, -1)
lm_eval/api/metrics.py

@@ -256,7 +256,7 @@ def mcc_fn(items):  # This is a passthrough function
 @register_metric(
     metric="f1",
     higher_is_better=True,
-    output_type=["multiple_choice"],
+    output_type="multiple_choice",
     aggregation="f1",
 )
 def f1_fn(items):  # This is a passthrough function
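The only change here reverts output_type for the "f1" metric from a one-element list back to a plain string. As a minimal sketch of how a decorator like register_metric might record that metadata, the toy registry below is an illustration only, not the harness's actual implementation:

METRIC_REGISTRY = {}

def register_metric(metric, higher_is_better, output_type, aggregation):
    # Toy decorator: record the metric's metadata and return the function unchanged.
    def decorate(fn):
        METRIC_REGISTRY[metric] = {
            "fn": fn,
            "higher_is_better": higher_is_better,
            # Accept either spelling and normalise to a list internally.
            "output_type": [output_type] if isinstance(output_type, str) else list(output_type),
            "aggregation": aggregation,
        }
        return fn
    return decorate

@register_metric(
    metric="f1",
    higher_is_better=True,
    output_type="multiple_choice",  # string form, as after this commit
    aggregation="f1",
)
def f1_fn(items):  # passthrough, mirroring the diff
    return items

print(METRIC_REGISTRY["f1"]["output_type"])  # ['multiple_choice']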
lm_eval/api/task.py

@@ -1190,6 +1190,7 @@ class ConfigurableTask(Task):
                 eval_logger.warning("Applied prompt returns empty string")
                 return self.config.fewshot_delimiter
         else:
+            print(type(doc_to_text))
             raise TypeError

     def doc_to_target(self, doc: Mapping) -> Union[int, str, list]:
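For context, the restored print sits at the end of a type dispatch: judging from the surrounding method, doc_to_text may be an int, a string template, a callable, or a Promptsource-style object exposing .apply(), and anything else now gets its type printed before TypeError is raised. A simplified, hypothetical stand-in for that dispatch (not the harness's actual method):

def render_doc_to_text(doc_to_text, doc, fewshot_delimiter="\n\n"):
    # Simplified sketch only; the real ConfigurableTask.doc_to_text does more.
    if isinstance(doc_to_text, (int, str)):
        return doc_to_text
    elif callable(doc_to_text):
        return doc_to_text(doc)
    elif hasattr(doc_to_text, "apply"):
        applied = doc_to_text.apply(doc)
        if len(applied) == 2:
            return applied[0]
        # Empty prompt: fall back to the few-shot delimiter, as in the hunk above.
        return fewshot_delimiter
    else:
        print(type(doc_to_text))  # the debug aid restored by this commit
        raise TypeError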
@@ -1279,6 +1280,7 @@ class ConfigurableTask(Task):
             else:
                 # Otherwise they are placed in the continuation
                 arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices]

             request_list = [
                 Instance(
                     request_type="loglikelihood",
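The arguments list in this hunk pairs the rendered context with each answer choice, prefixing each choice with the target delimiter so it is scored as a continuation. A small illustrative run of that comprehension, with made-up values:

ctx = "Question: 2 + 2 = ?\nAnswer:"   # hypothetical rendered context
choices = ["3", "4", "5"]              # hypothetical doc_to_choice output
target_delimiter = " "                 # assumed delimiter for this example

# Same comprehension as in the diff: one (context, continuation) pair per choice.
arguments = [(ctx, f"{target_delimiter}{cont}") for cont in choices]
print(arguments[1])  # ('Question: 2 + 2 = ?\nAnswer:', ' 4')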
@@ -1432,6 +1434,7 @@ class ConfigurableTask(Task):
                 ]
                 acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
                 result_dict["acc_mutual_info"] = acc_mutual_info

         elif self.OUTPUT_TYPE == "generate_until":
             gold = self.doc_to_target(doc)
             result = results[0]
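The acc_mutual_info line scores the choice whose conditional log-likelihood most exceeds its unconditional log-likelihood, a mutual-information-style criterion. A hedged numeric sketch with made-up scores:

import numpy as np

lls = np.array([-4.2, -3.1, -5.0])                # made-up conditional log-likelihoods
lls_unconditional = np.array([-3.9, -3.8, -4.1])  # made-up unconditional log-likelihoods
gold = 1                                          # index of the correct choice

lls_mutual_info = lls - lls_unconditional         # per-choice gain from seeing the context
acc_mutual_info = 1.0 if np.argmax(lls_mutual_info) == gold else 0.0
print(acc_mutual_info)  # 1.0 here: choice 1 benefits most from the context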
@@ -1455,6 +1458,7 @@ class ConfigurableTask(Task):
                 scores = []
                 if not isinstance(gold, list):
                     # sometimes, a multiple_target dataset has exceptions where one doc has only one string answer
+                    # print(gold)
                     gold = [gold]
                 if metric == "exact_match":
                     result = [result for _ in range(len(gold))]
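In the exact_match branch, the single generated answer is broadcast against every gold reference so that one prediction can be compared to each acceptable target. A hedged sketch of that idea; exact_match_score below is hypothetical, standing in for the registered metric function, and the final aggregation across targets happens elsewhere in the harness:

def exact_match_score(reference: str, prediction: str) -> float:
    # Hypothetical stand-in for the registered exact_match metric.
    return float(reference.strip() == prediction.strip())

gold = ["four", "4"]   # multiple acceptable targets (made up)
result = "4"           # single generated answer (made up)

if not isinstance(gold, list):
    gold = [gold]      # normalise the occasional single-string target

# Broadcast the one prediction across all references, as in the diff.
result = [result for _ in range(len(gold))]
scores = [exact_match_score(g, r) for g, r in zip(gold, result)]
print(max(scores))     # 1.0 with an "any target matches" style aggregation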
@@ -1489,10 +1493,10 @@ class ConfigurableTask(Task):
             else:
                 try:
                     result_score = self._metric_fn_list[metric](
                         references=[gold],
                         predictions=[result],
                         **self._metric_fn_kwargs[metric],
                     )
                 except TypeError:  # needed for now in order to use a different interface between our own metrics and HF Evaluate metrics
                     result_score = self._metric_fn_list[metric]([gold, result])
             if isinstance(result_score, dict):
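The try/except in this hunk bridges two calling conventions: HF Evaluate-style metrics take references= and predictions= keyword lists, while the harness's own passthrough metrics take a single [gold, result] pair and raise TypeError when called with keywords. A rough sketch of that fallback, using hypothetical metric functions:

def hf_style_exact_match(references, predictions):
    # Hypothetical HF Evaluate-style metric: keyword lists in, dict out.
    correct = sum(r == p for r, p in zip(references, predictions))
    return {"exact_match": correct / len(references)}

def harness_style_metric(items):
    # Hypothetical harness-style passthrough: one [gold, result] pair in.
    gold, result = items
    return float(gold == result)

def score(metric_fn, gold, result):
    try:
        # First assume the HF Evaluate interface...
        return metric_fn(references=[gold], predictions=[result])
    except TypeError:
        # ...then fall back to the harness's own [gold, result] interface.
        return metric_fn([gold, result])

print(score(hf_style_exact_match, "4", "4"))  # {'exact_match': 1.0}
print(score(harness_style_metric, "4", "5"))  # 0.0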
@@ -1646,4 +1650,4 @@ class PerplexityTask(Task):
     @classmethod
     def count_words(cls, doc) -> int:
         """Downstream tasks with custom word boundaries should override this!"""
         return len(re.split(r"\s+", doc))
\ No newline at end of file
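count_words approximates word count by splitting the document on runs of whitespace. Note that re.split keeps empty strings at the boundaries, so leading or trailing whitespace inflates the count slightly; a quick illustration:

import re

def count_words(doc: str) -> int:
    # Same one-liner as in the diff.
    return len(re.split(r"\s+", doc))

print(count_words("the quick brown fox"))    # 4
print(count_words("the quick brown fox\n"))  # 5: trailing newline yields an empty trailing piece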
lm_eval/filters/extraction.py

@@ -182,4 +182,4 @@ class MultiChoiceRegexFilter(RegexFilter):
                 filtered.append(match)
             filtered_resps.append(filtered)

         return filtered_resps
\ No newline at end of file