Commit e33a7d92 authored by lintangsutawika

error set to DEBUG

parent 9894597c
@@ -98,9 +98,9 @@ def parse_eval_args() -> argparse.Namespace:
         help="Additional path to include if there are external tasks to include.",
     )
     parser.add_argument(
-        "--verbose",
-        type=bool,
-        default=False,
+        "--verbosity",
+        type=str,
+        default="INFO",
         help="Log error when tasks are not registered.",
     )
     return parser.parse_args()
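A side note on the old flag (my gloss, not stated in the commit): argparse's type=bool is a classic trap, because bool() applied to any non-empty string is truthy, so "--verbose False" still enabled verbose mode. Switching to a plain string level name sidesteps that. A minimal repro:

import argparse

# bool("False") is True: any non-empty string is truthy, so a
# type=bool flag cannot actually be turned off from the command line.
parser = argparse.ArgumentParser()
parser.add_argument("--verbose", type=bool, default=False)
print(parser.parse_args(["--verbose", "False"]).verbose)  # prints True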
@@ -112,6 +112,7 @@ def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None:
         # we allow for args to be passed externally, else we parse them ourselves
         args = parse_eval_args()
 
+    eval_logger.setLevel(getattr(logging, f"{args.verbosity}"))
     os.environ["TOKENIZERS_PARALLELISM"] = "false"
 
     if args.limit:
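For context on the added line (a standalone sketch, not the harness's own entry point): getattr(logging, args.verbosity) resolves the level name to the logging module's integer constant, e.g. logging.DEBUG == 10, and raises AttributeError for a name the module does not define.

import argparse
import logging

parser = argparse.ArgumentParser()
parser.add_argument("--verbosity", type=str, default="INFO")
args = parser.parse_args(["--verbosity", "DEBUG"])

logging.basicConfig()
eval_logger = logging.getLogger("lm-eval")

# "DEBUG" -> logging.DEBUG (10); an unknown name like "VERBOSE" raises AttributeError.
eval_logger.setLevel(getattr(logging, args.verbosity))
eval_logger.debug("visible only when --verbosity DEBUG is passed")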
@@ -173,7 +174,6 @@ def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None:
         assert args.output_path, "Specify --output_path"
 
     eval_logger.info(f"Selected Tasks: {task_names}")
-    eval_logger.verbose = args.verbose
 
     results = evaluator.simple_evaluate(
         model=args.model,
@@ -4,7 +4,7 @@ from typing import List, Union, Dict
 
 from lm_eval import utils
 from lm_eval import prompts
-from lm_eval.logger import eval_logger
+# from lm_eval.logger import eval_logger
 from lm_eval.api.task import TaskConfig, Task, ConfigurableTask
 from lm_eval.api.registry import (
     register_task,
@@ -14,6 +14,9 @@ from lm_eval.api.registry import (
     ALL_TASKS,
 )
 
+import logging
+eval_logger = logging.getLogger('lm-eval')
+
 
 def register_configurable_task(config: Dict[str, str]) -> int:
     SubClass = type(
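The replacement for the removed import relies on logging.getLogger being memoized per name: every module that asks for "lm-eval" receives the same logger object, so the level set once in cli_evaluate applies here as well. A quick check of that assumption:

import logging

a = logging.getLogger("lm-eval")
b = logging.getLogger("lm-eval")
assert a is b  # getLogger returns one shared instance per name
a.setLevel(logging.DEBUG)
assert b.level == logging.DEBUG  # configuring one configures the other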
@@ -139,18 +142,15 @@ def include_task_folder(task_dir: str, register_task: bool = True) -> None:
                     register_configurable_group(config, yaml_path)
             except Exception as error:
-                if eval_logger.verbose:
-                    import traceback
-
-                    eval_logger.warning(
-                        "Failed to load config in\n"
-                        f" {yaml_path}\n"
-                        " Config will not be added to registry\n"
-                        f" Error: {error}\n"
-                        f" Traceback: {traceback.format_exc()}"
-                    )
-                else:
-                    eval_logger.warning("Yaml failed to register {yaml_path}\n")
+                import traceback
+
+                eval_logger.debug(
+                    "Failed to load config in\n"
+                    f" {yaml_path}\n"
+                    " Config will not be added to registry\n"
+                    f" Error: {error}\n"
+                    f" Traceback: {traceback.format_exc()}"
+                )
 
     return 0
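Net effect of demoting the message from warning to debug (sketched below with stock logging defaults, not the harness itself): failed task registrations are now silent unless the new --verbosity DEBUG flag raises the logger's level.

import logging

logging.basicConfig(format="%(levelname)s:%(name)s: %(message)s")
log = logging.getLogger("lm-eval")

log.debug("Failed to load config ...")  # dropped: effective level is WARNING
log.setLevel(logging.DEBUG)             # what --verbosity DEBUG now does
log.debug("Failed to load config ...")  # emitted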