gaoqiong / lm-evaluation-harness / Commits

Commit e37698df
authored Aug 01, 2023 by lintangsutawika

update on metrics and delet files

parent 1bc408ff
Showing 6 changed files with 33 additions and 197,916 deletions.
- lm_eval/api/task.py (+10 −4)
- lm_eval/evaluator.py (+8 −4)
- lm_eval/tasks/benchmarks/t0_eval.yaml (+12 −11)
- lm_eval/tasks/super_glue/cb/t5-prompt.yaml (+2 −2)
- lm_eval/tasks/super_glue/rte/default.yaml (+1 −1)
- trivia_qaterm_frequency-pythia-v1.1-2.8b-deduped-143000-04shot.json (+0 −197894)
lm_eval/api/task.py

```diff
@@ -555,12 +555,17 @@ class ConfigurableTask(Task):
                     if key not in ["metric", "aggregation", "higher_is_better"]
                 }

-                if self._config.process_results is None:
-                    self._metric_fn_list[metric_name] = get_metric(metric_name)
-                    self._metric_fn_kwargs[metric_name] = kwargs
-                else:
+                if self._config.process_results is not None:
                     self._metric_fn_list[metric_name] = None
                     self._metric_fn_kwargs[metric_name] = {}
+                elif callable(metric_name):
+                    metric_fn = metric_name.__call__
+                    metric_name = metric_name.__name__
+                    self._metric_fn_list[metric_name] = metric_fn
+                    self._metric_fn_kwargs[metric_name] = kwargs
+                else:
+                    self._metric_fn_list[metric_name] = get_metric(metric_name)
+                    self._metric_fn_kwargs[metric_name] = kwargs
                 if "aggregation" in metric_config:
                     agg_name = metric_config["aggregation"]
```
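The rewritten block is a three-way dispatch: a task that defines its own `process_results` hook defers metric computation entirely, a callable passed as the metric is registered under its `__name__`, and a plain string is resolved through the metric registry. A minimal standalone sketch of that logic (only `get_metric` and the config fields come from the diff; `resolve_metric` is illustrative, not harness API):

```python
# Sketch of the metric-resolution branch added in this commit.
def resolve_metric(metric_name, kwargs, process_results, get_metric):
    if process_results is not None:
        # A custom process_results hook computes metrics itself,
        # so no metric function is attached at registration time.
        return None, {}
    elif callable(metric_name):
        # A user-supplied callable is registered under its __name__.
        return metric_name.__call__, kwargs
    else:
        # A plain string is resolved through the metric registry.
        return get_metric(metric_name), kwargs
```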
```diff
@@ -987,6 +992,7 @@ class ConfigurableTask(Task):
             choices = self.doc_to_choice(doc)
             gold = choices[gold]

+        print(self._metric_fn_list)
         for key, result in zip(self._metric_fn_list.keys(), results):
             if self.multiple_target:
                 # in the case where we have multiple targets,
```
lm_eval/evaluator.py

```diff
@@ -419,10 +419,14 @@ def evaluate(
             versions[group] = "N/A"

     results_dict = {
-        "results": dict(results),
-        **({"aggregate": dict(aggregate)} if bool(aggregate) else {}),
-        "configs": dict(configs),
-        "versions": dict(versions),
+        "results": dict(sorted(results.items())),
+        **(
+            {"aggregate": dict(sorted(aggregate.items()))}
+            if bool(aggregate)
+            else {}
+        ),
+        "configs": dict(sorted(configs.items())),
+        "versions": dict(sorted(versions.items())),
     }
     if log_samples:
         results_dict["samples"] = dict(samples)
```
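The substance of this change is wrapping each section in `dict(sorted(...))`, so the keys of the emitted results are alphabetized rather than dependent on task execution order. A small sketch of why that matters for the serialized output (the task names below are made up):

```python
# json.dumps emits keys in dict insertion order, so sorting the
# items first makes the results file deterministic across runs.
import json

results = {"rte": 0.61, "anli_r1": 0.33, "cb": 0.48}
print(json.dumps(dict(sorted(results.items()))))
# -> {"anli_r1": 0.33, "cb": 0.48, "rte": 0.61}
```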
lm_eval/tasks/benchmarks/t0_eval.yaml

```diff
@@ -30,6 +30,7 @@ task:
     use_prompt: promptsource:*
     training_split: train
     validation_split: validation
+    output_type: greedy_until
     metric_list:
       - metric: exact_match
         aggregation: mean
```
```diff
@@ -37,17 +38,17 @@ task:
         ignore_case: true
         ignore_punctuation: true
   # Natural Language Inference
-  - dataset_path: super_glue
-    dataset_name: rte
-    use_prompt: promptsource:*
-    training_split: train
-    validation_split: validation
-    metric_list:
-      - metric: exact_match
-        aggregation: mean
-        higher_is_better: true
-        ignore_case: true
-        ignore_punctuation: true
+  # - dataset_path: super_glue
+  #   dataset_name: rte
+  #   use_prompt: promptsource:*
+  #   training_split: train
+  #   validation_split: validation
+  #   metric_list:
+  #     - metric: exact_match
+  #       aggregation: mean
+  #       higher_is_better: true
+  #       ignore_case: true
+  #       ignore_punctuation: true
   # # Natural Language Inference
   # # - dataset_path: anli
   # #   use_prompt: promptsource:*
```
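For the `exact_match` entries, `ignore_case` and `ignore_punctuation` normalize both the prediction and the reference before comparison. A rough approximation of that behavior, as a sketch rather than the harness's actual implementation (which delegates to a registered metric):

```python
# Approximate effect of the ignore_case / ignore_punctuation flags
# set on exact_match in the YAML above.
import string

def exact_match(pred: str, gold: str,
                ignore_case: bool = True,
                ignore_punctuation: bool = True) -> float:
    if ignore_case:
        pred, gold = pred.lower(), gold.lower()
    if ignore_punctuation:
        strip = str.maketrans("", "", string.punctuation)
        pred, gold = pred.translate(strip), gold.translate(strip)
    return float(pred == gold)

assert exact_match("True.", "true") == 1.0
```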
lm_eval/tasks/super_glue/cb/t5-prompt.yaml

```diff
@@ -15,5 +15,5 @@ metric_list:
     higher_is_better: true
     ignore_case: true
     ignore_punctuation: true
-  # - metric: f1
-  #   aggregation: !function "aggregate.cb_multi_fi"
+  - metric: f1
+    aggregation: !function "aggregate.cb_multi_fi"
```
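The change uncomments the f1 metric, whose aggregation is loaded through the `!function` YAML tag. The body of `aggregate.cb_multi_fi` is not shown in this commit; a plausible sketch, assuming the name means multi-class F1 over CB's three labels (hypothetical implementation):

```python
# Hypothetical sketch of aggregate.cb_multi_fi; the real function is
# not part of this diff. CB has three labels (entailment,
# contradiction, neutral), so a macro-averaged F1 is a natural fit.
from sklearn.metrics import f1_score

def cb_multi_fi(items):
    golds, preds = zip(*items)  # items: iterable of (gold, pred) pairs
    return f1_score(golds, preds, average="macro")
```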
lm_eval/tasks/super_glue/rte/default.yaml

```diff
@@ -6,7 +6,7 @@ dataset_name: rte
 output_type: multiple_choice
 training_split: train
 validation_split: validation
-doc_to_text: "{{sentence1}}\nQuestion: {{sentence2}} True or False?\nAnswer:"
+doc_to_text: "{{premise}}\nQuestion: {{hypothesis}} True or False?\nAnswer:"
 doc_to_target: label
 doc_to_choice: ['True', 'False']
 metric_list:
```
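This one-line fix matters because the Hugging Face `super_glue`/`rte` schema names its fields `premise` and `hypothesis`; the `sentence1`/`sentence2` names belong to the GLUE version of RTE, so the old template referenced fields that don't exist in these documents. Rendering the corrected Jinja template against a toy document:

```python
# Render the corrected doc_to_text template against a document shaped
# like the super_glue/rte schema (the example text is made up).
from jinja2 import Template

doc = {
    "premise": "The cat sat on the mat.",
    "hypothesis": "A cat is on a mat.",
}
template = Template("{{premise}}\nQuestion: {{hypothesis}} True or False?\nAnswer:")
print(template.render(**doc))
# The cat sat on the mat.
# Question: A cat is on a mat. True or False?
# Answer:
```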
trivia_qaterm_frequency-pythia-v1.1-2.8b-deduped-143000-04shot.json

deleted (100644 → 0); the 197,894-line diff is collapsed.