gaoqiong / lm-evaluation-harness

Commit 99ce4eff
Authored Dec 28, 2023 by lintangsutawika

process hf evaluate metrics

Parent: 150f11f6
Showing 2 changed files with 19 additions and 18 deletions:

    lm_eval/api/metrics.py     +0  -15
    lm_eval/api/registry.py    +19  -3
lm_eval/api/metrics.py
@@ -159,21 +159,6 @@ def acc_mutual_info_fn(items):
     return mean(items)
 
 
-class HFEvaluateAdaptor:
-    def __init__(self, *metric_args, **kwargs):
-        metric_object = evaluate.load(*metric_args)
-        self.hf_evaluate_fn = partial(metric_object, **kwargs)
-
-    def __call__(self, items):
-        refs = list(zip(*items))[0]
-        preds = list(zip(*items))[1]
-
-        return self.hf_evaluate_fn(references=refs, predictions=preds)
-
-
 exact_match = evaluate.load("exact_match")
 
 
 @register_metric(
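Note: the adaptor removed above wrapped the loaded metric object itself
(partial(metric_object, **kwargs)); the reworked version added to
lm_eval/api/registry.py below wraps metric_object.compute and indexes the
returned dict by metric name. A minimal sketch of the underlying Hugging
Face `evaluate` API the adaptor relies on, assuming the `evaluate` package
is installed and using "exact_match" purely as an example metric:

import evaluate

# evaluate.load() returns an EvaluationModule; its compute() method takes
# parallel `references` and `predictions` sequences and returns a dict
# keyed by the metric name.
metric = evaluate.load("exact_match")
result = metric.compute(
    references=["paris", "london"],
    predictions=["paris", "berlin"],
)
print(result["exact_match"])  # 0.5 -- one of the two predictions matches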
lm_eval/api/registry.py
 import os
+import logging
 import evaluate
+from functools import partial
 
 from lm_eval.api.model import LM
-from lm_eval.api.metrics import HFEvaluateAdaptor
-
-import logging
 
 eval_logger = logging.getLogger("lm-eval")
 
 MODEL_REGISTRY = {}
+
+
+class HFEvaluateAdaptor:
+    def __init__(self, name, **kwargs):
+        self.name = name
+        metric_object = evaluate.load(name)
+        self.hf_evaluate_fn = partial(metric_object.compute, **kwargs)
+
+    def __call__(self, items):
+        refs = list(zip(*items))[0]
+        preds = list(zip(*items))[1]
+
+        return self.hf_evaluate_fn(references=refs, predictions=preds)[self.name]
+
+
 def register_model(*names):
     # either pass a list or a single alias.

@@ -126,7 +142,7 @@ def get_metric(name, hf_evaluate_metric=False, **kwargs):
         )
 
     try:
-        from lm_eval.metrics import HFEvaluateAdaptor
+        # from lm_eval.metrics import HFEvaluateAdaptor
         return HFEvaluateAdaptor(name, **kwargs)
     except Exception:
         eval_logger.error(
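A usage sketch of the reworked adaptor, assuming this commit is checked
out; the items list and the ignore_case kwarg are illustrative (extra
kwargs are forwarded to compute() via functools.partial):

from lm_eval.api.registry import HFEvaluateAdaptor

# Each item is a (reference, prediction) pair; __call__ unzips the pairs
# and forwards them to exact_match's compute(), then returns the bare
# score rather than the {"exact_match": ...} dict.
items = [("paris", "paris"), ("london", "berlin")]

adaptor = HFEvaluateAdaptor("exact_match", ignore_case=True)
print(adaptor(items))  # 0.5

In the second hunk above, get_metric() falls back to constructing this
adaptor inside its try block and logs an error via eval_logger if the
metric cannot be loaded.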