Commit ca8b00cc authored by lintangsutawika's avatar lintangsutawika
Browse files

very rough way of utilizing verbosity

parent ae74b808
from .evaluator import evaluate, simple_evaluate
# from .evaluator import evaluate, simple_evaluate
# from .logger import eval_logger, SPACING
\ No newline at end of file
"""CLI entry-point setup: imports and module-level logging configuration."""
import argparse
import fnmatch
import json
import logging
import os
import re
from pathlib import Path
from typing import Union

import numpy as np

from lm_eval import evaluator, utils
from lm_eval.api.registry import ALL_TASKS
from lm_eval.tasks import include_path

# Configure root logging once for the CLI: timestamped records that include the
# emitting file and line, e.g. "2024-01-01:12:00:00,123 INFO [__main__.py:42] ...".
logging.basicConfig(
    format="%(asctime)s,%(msecs)03d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s",
    datefmt="%Y-%m-%d:%H:%M:%S",
    level=logging.INFO,
)
# Package-wide logger. NOTE(review): this local definition shadows the one in
# lm_eval.logger; the redundant import of eval_logger/SPACING was removed since
# both names were immediately rebound here anyway.
eval_logger = logging.getLogger("lm-eval")
# Pad width aligning wrapped log lines with the message column of the format above.
SPACING = " " * 47
def _handle_non_serializable(o):
if isinstance(o, np.int64) or isinstance(o, np.int32):
......@@ -29,7 +35,7 @@ def parse_eval_args() -> argparse.Namespace:
parser.add_argument(
"--tasks",
default=None,
help="Available Tasks:\n - {}".format("\n - ".join(sorted(ALL_TASKS))),
# help="Available Tasks:\n - {}".format("\n - ".join(sorted(ALL_TASKS))),
)
parser.add_argument(
"--model_args",
......@@ -115,13 +121,19 @@ def parse_eval_args() -> argparse.Namespace:
def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None:
if not args:
# we allow for args to be passed externally, else we parse them ourselves
args = parse_eval_args()
# if not args:
# # we allow for args to be passed externally, else we parse them ourselves
# from lm_eval.logger import eval_logger, SPACING
args = parse_eval_args()
eval_logger.setLevel(getattr(logging, f"{args.verbosity}"))
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from lm_eval import evaluator, utils
from lm_eval.api.registry import ALL_TASKS
from lm_eval.tasks import include_path
if args.limit:
eval_logger.warning(
" --limit SHOULD ONLY BE USED FOR TESTING."
......
......@@ -25,10 +25,6 @@ from lm_eval.utils import (
from lm_eval.logger import eval_logger

# Module-local logger for evaluator.py, separate from the shared eval_logger
# imported above. Forced to INFO and echoed to stdout so evaluation progress is
# visible regardless of the root logging configuration.
# NOTE(review): `sys` and `logging` must be imported earlier in this file
# (outside the visible hunk) — confirm.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stdout))
@positional_deprecated
def simple_evaluate(
......@@ -46,6 +42,7 @@ def simple_evaluate(
decontamination_ngrams_path=None,
write_out: bool = False,
log_samples: bool = True,
verbosity: str = "INFO",
):
"""Instantiate and evaluate a model on a list of tasks.
......
......@@ -16,7 +16,9 @@ from lm_eval.api.registry import (
import logging

# Module-level logger shared with the rest of the package (same "lm-eval" name
# as the logger configured in __main__.py, so it inherits that level/handlers).
# Removed: a leftover debug `print(eval_logger.level)` that wrote to stdout on
# every import of this module, and the commented-out import it replaced.
eval_logger = logging.getLogger("lm-eval")
def register_configurable_task(config: Dict[str, str]) -> int:
SubClass = type(
......@@ -141,8 +143,11 @@ def include_task_folder(task_dir: str, register_task: bool = True) -> None:
else:
if type(config["task"]) == list:
register_configurable_group(config, yaml_path)
# Log this silently and show it only when
# the user defines the appropriate verbosity.
except ModuleNotFoundError as e:
eval_logger.warning(
eval_logger.debug(
f"{yaml_path}: {e}. Config will not be added to registry."
)
except Exception as error:
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment