"ml/backend/vscode:/vscode.git/clone" did not exist on "a59f66523561dc195a37b78b454d8cd1b8b1fdd7"
Commit dfb036b7 authored by lintangsutawika

resolved again

parents 470fb31c cda25fef
<<<<<<< HEAD
import os
import logging
import evaluate
@@ -6,14 +5,6 @@ import collections
from functools import partial
from lm_eval.api.model import LM
=======
import logging
import evaluate
from lm_eval.api.model import LM
>>>>>>> 4d10ad56b1ffe569467eee2297e2317c99313118

eval_logger = logging.getLogger("lm-eval")
@@ -129,7 +120,6 @@ def register_metric(
    return decorate

<<<<<<< HEAD
def get_metric(name):
    if name in METRIC_REGISTRY:
@@ -139,17 +129,6 @@ def get_metric(name):
def get_evaluate(name, **kwargs):
=======
def get_metric(name, hf_evaluate_metric=False):
    if not hf_evaluate_metric:
        if name in METRIC_REGISTRY:
            return METRIC_REGISTRY[name]
        else:
            eval_logger.warning(
                f"Could not find registered metric '{name}' in lm-eval, searching in HF Evaluate library..."
            )
>>>>>>> 4d10ad56b1ffe569467eee2297e2317c99313118
    try:
        class HFEvaluateAdaptor:
...
@@ -19,6 +19,9 @@ from lm_eval.api.metrics import (
    mean,
    weighted_perplexity,
<<<<<<< HEAD
<<<<<<< HEAD
=======
>>>>>>> cda25fef4e1df2f4bc2dab3ec6668ae9f5bf7296
    bits_per_byte,
)
from lm_eval.api.registry import (
@@ -27,6 +30,7 @@ from lm_eval.api.registry import (
    get_aggregation,
    METRIC_REGISTRY,
    DEFAULT_METRIC_REGISTRY,
<<<<<<< HEAD
=======
)
from lm_eval.api.registry import (
@@ -37,6 +41,8 @@ from lm_eval.api.registry import (
    get_metric_aggregation,
    is_higher_better,
>>>>>>> 4d10ad56b1ffe569467eee2297e2317c99313118
=======
>>>>>>> cda25fef4e1df2f4bc2dab3ec6668ae9f5bf7296
)
from lm_eval.filters import build_filter_ensemble
from lm_eval.prompts import get_prompt
...
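
For context on the first conflict: the HEAD side keeps a plain get_metric() registry lookup alongside a separate get_evaluate() helper, while the 4d10ad56 side adds an hf_evaluate_metric flag and falls back to the Hugging Face Evaluate library when a name is not found in METRIC_REGISTRY. Below is a minimal sketch of that lookup-with-fallback pattern; it uses the real evaluate.load() API, but the empty registry and the returned compute function are illustrative stand-ins, not the branch's actual HFEvaluateAdaptor.

import logging

import evaluate  # Hugging Face Evaluate library


eval_logger = logging.getLogger("lm-eval")

# Illustrative stand-in for lm-eval's METRIC_REGISTRY.
METRIC_REGISTRY = {}


def get_metric(name, hf_evaluate_metric=False):
    # Prefer a metric registered directly with lm-eval.
    if not hf_evaluate_metric and name in METRIC_REGISTRY:
        return METRIC_REGISTRY[name]

    eval_logger.warning(
        f"Could not find registered metric '{name}' in lm-eval, "
        "searching in HF Evaluate library..."
    )
    # evaluate.load() fetches the metric from the HF hub and returns an
    # object exposing .compute(predictions=..., references=...).
    hf_metric = evaluate.load(name)
    return hf_metric.compute

With an empty registry, get_metric("exact_match") would log the warning and return the compute function of the HF "exact_match" metric, which can then be called as metric(predictions=..., references=...).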