Commit dfb036b7 authored by lintangsutawika

resolved again

parents 470fb31c cda25fef
<<<<<<< HEAD
import os
import logging
import evaluate
@@ -6,14 +5,6 @@ import collections
from functools import partial
from lm_eval.api.model import LM
=======
import logging
import evaluate
from lm_eval.api.model import LM
>>>>>>> 4d10ad56b1ffe569467eee2297e2317c99313118
eval_logger = logging.getLogger("lm-eval")
@@ -129,7 +120,6 @@ def register_metric(
return decorate
<<<<<<< HEAD
def get_metric(name):
if name in METRIC_REGISTRY:
@@ -139,17 +129,6 @@ def get_metric(name):
def get_evaluate(name, **kwargs):
=======
def get_metric(name, hf_evaluate_metric=False):
if not hf_evaluate_metric:
if name in METRIC_REGISTRY:
return METRIC_REGISTRY[name]
else:
eval_logger.warning(
f"Could not find registered metric '{name}' in lm-eval, searching in HF Evaluate library..."
)
>>>>>>> 4d10ad56b1ffe569467eee2297e2317c99313118
try:
class HFEvaluateAdaptor:
......
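The incoming side's get_metric gains an hf_evaluate_metric flag: it checks the lm-eval registry first and, failing that, falls back to the HF Evaluate library through an HFEvaluateAdaptor whose body is truncated above. Below is a minimal sketch of that lookup-with-fallback pattern, assuming the adaptor simply exposes the loaded module's compute method; the registry stub and the wrapper are assumptions, not the committed implementation.

import logging

import evaluate

eval_logger = logging.getLogger("lm-eval")

METRIC_REGISTRY = {}  # stand-in; the real registry is filled by @register_metric


def get_metric(name, hf_evaluate_metric=False):
    # Prefer metrics registered natively in lm-eval.
    if not hf_evaluate_metric:
        if name in METRIC_REGISTRY:
            return METRIC_REGISTRY[name]
        eval_logger.warning(
            f"Could not find registered metric '{name}' in lm-eval, "
            "searching in HF Evaluate library..."
        )
    # Fall back to the HF Evaluate hub; evaluate.load is the library's
    # public entry point. Returning .compute mimics what an adaptor
    # class like HFEvaluateAdaptor might expose (assumption).
    metric_object = evaluate.load(name)
    return metric_object.compute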
@@ -19,6 +19,9 @@ from lm_eval.api.metrics import (
mean,
weighted_perplexity,
<<<<<<< HEAD
<<<<<<< HEAD
=======
>>>>>>> cda25fef4e1df2f4bc2dab3ec6668ae9f5bf7296
bits_per_byte,
)
from lm_eval.api.registry import (
@@ -27,6 +30,7 @@ from lm_eval.api.registry import (
get_aggregation,
METRIC_REGISTRY,
DEFAULT_METRIC_REGISTRY,
<<<<<<< HEAD
=======
)
from lm_eval.api.registry import (
@@ -37,6 +41,8 @@ from lm_eval.api.registry import (
get_metric_aggregation,
is_higher_better,
>>>>>>> 4d10ad56b1ffe569467eee2297e2317c99313118
=======
>>>>>>> cda25fef4e1df2f4bc2dab3ec6668ae9f5bf7296
)
from lm_eval.filters import build_filter_ensemble
from lm_eval.prompts import get_prompt
......
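Resolving the second file's nested markers by keeping both sides' symbols would yield an import block along these lines. This is a plausible reconstruction inferred from the hunks above, not the committed file; the file name itself is truncated in this diff.

from lm_eval.api.metrics import (
    mean,
    weighted_perplexity,
    bits_per_byte,
)
from lm_eval.api.registry import (
    get_aggregation,
    get_metric_aggregation,
    is_higher_better,
    METRIC_REGISTRY,
    DEFAULT_METRIC_REGISTRY,
)
from lm_eval.filters import build_filter_ensemble
from lm_eval.prompts import get_prompt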