gaoqiong / lm-evaluation-harness · Commits

Commit dfb036b7, authored Jan 02, 2024 by lintangsutawika

    resolved again

Parents: 470fb31c, cda25fef

Showing 2 changed files with 6 additions and 21 deletions:

  lm_eval/api/registry.py  (+0, -21)
  lm_eval/api/task.py      (+6, -0)
lm_eval/api/registry.py  (+0, -21)

-<<<<<<< HEAD
 import os
 import logging
 import evaluate
...
@@ -6,14 +5,6 @@ import collections
 from functools import partial
 from lm_eval.api.model import LM
-=======
-import logging
-import evaluate
-from lm_eval.api.model import LM
->>>>>>> 4d10ad56b1ffe569467eee2297e2317c99313118
 eval_logger = logging.getLogger("lm-eval")
...
@@ -129,7 +120,6 @@ def register_metric(
     return decorate
-<<<<<<< HEAD
 def get_metric(name):
     if name in METRIC_REGISTRY:
...
@@ -139,17 +129,6 @@ def get_metric(name):
 def get_evaluate(name, **kwargs):
-=======
-def get_metric(name, hf_evaluate_metric=False):
-    if not hf_evaluate_metric:
-        if name in METRIC_REGISTRY:
-            return METRIC_REGISTRY[name]
-        else:
-            eval_logger.warning(
-                f"Could not find registered metric '{name}' in lm-eval, searching in HF Evaluate library..."
-            )
->>>>>>> 4d10ad56b1ffe569467eee2297e2317c99313118
     try:
         class HFEvaluateAdaptor:
...
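The conflict in this file pits two shapes of the metric lookup against each other: HEAD keeps a plain get_metric(name) plus a separate get_evaluate(name, **kwargs) helper, while the 4d10ad56 side folded the HF Evaluate fallback into get_metric(name, hf_evaluate_metric=False). A minimal sketch of the shared registry-plus-fallback pattern, assuming evaluate.load as the fallback (the commit's own wiring runs through the elided HFEvaluateAdaptor class, not shown here):

    # Sketch of the registry-plus-fallback pattern visible in the diff above.
    # METRIC_REGISTRY and eval_logger mirror names from registry.py; the
    # evaluate.load fallback is an assumption, not this commit's exact code.
    import logging

    import evaluate  # Hugging Face Evaluate library

    eval_logger = logging.getLogger("lm-eval")

    METRIC_REGISTRY: dict = {}


    def register_metric(name):
        """Register a metric callable under `name` (simplified decorator)."""

        def decorate(fn):
            METRIC_REGISTRY[name] = fn
            return fn

        return decorate


    def get_metric(name):
        """Look up a metric; fall back to HF Evaluate when it is not registered."""
        if name in METRIC_REGISTRY:
            return METRIC_REGISTRY[name]
        eval_logger.warning(
            f"Could not find registered metric '{name}' in lm-eval, "
            "searching in HF Evaluate library..."
        )
        # evaluate.load returns an EvaluationModule exposing .compute(...).
        return evaluate.load(name)

For instance, get_metric("exact_match") on an empty registry would log the warning and hand back the HF Evaluate exact_match module, whose .compute(predictions=..., references=...) returns the score dict.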
lm_eval/api/task.py  (+6, -0)

@@ -19,6 +19,9 @@ from lm_eval.api.metrics import (
     mean,
     weighted_perplexity,
+<<<<<<< HEAD
 <<<<<<< HEAD
+=======
+>>>>>>> cda25fef4e1df2f4bc2dab3ec6668ae9f5bf7296
     bits_per_byte,
 )
 from lm_eval.api.registry import (
...
@@ -27,6 +30,7 @@ from lm_eval.api.registry import (
     get_aggregation,
     METRIC_REGISTRY,
     DEFAULT_METRIC_REGISTRY,
+<<<<<<< HEAD
 =======
 )
 from lm_eval.api.registry import (
...
@@ -37,6 +41,8 @@ from lm_eval.api.registry import (
     get_metric_aggregation,
     is_higher_better,
 >>>>>>> 4d10ad56b1ffe569467eee2297e2317c99313118
+=======
+>>>>>>> cda25fef4e1df2f4bc2dab3ec6668ae9f5bf7296
 )
 from lm_eval.filters import build_filter_ensemble
 from lm_eval.prompts import get_prompt
...
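If the reconstruction above is right, the six lines task.py gains are themselves conflict markers, nested around markers that were already committed in the parent, so the tree still carries conflict residue after this "resolved again" commit. git diff --check flags leftover markers before committing; a standalone sweep in the same spirit (the search root and file glob are illustrative assumptions) might look like:

    # Scan a tree for leftover merge-conflict markers like those committed
    # in task.py above. The "lm_eval" root and *.py glob are assumptions.
    from pathlib import Path

    MARKERS = ("<<<<<<<", "=======", ">>>>>>>")

    for path in Path("lm_eval").rglob("*.py"):
        text = path.read_text(encoding="utf-8")
        for lineno, line in enumerate(text.splitlines(), 1):
            # Git places conflict markers at column 0.
            if line.startswith(MARKERS):
                print(f"{path}:{lineno}: suspected conflict marker: {line.strip()}")

A bare ======= also appears in legitimate contexts such as RST section underlines inside docstrings, so treat matches as candidates to inspect rather than certain errors.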