Unverified commit 0ff6ab99, authored by Hailey Schoelkopf, committed by GitHub

Rename `lm_eval.logging -> lm_eval.loggers` (#1858)

* rename lm_eval.logging module

* fix evaluation tracker args
parent 78a215e0
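
The module rename is a breaking change for any downstream code that does `from lm_eval.logging import ...`. A minimal compatibility shim for consumers who need to span both versions; a sketch only, assuming the exported names (`EvaluationTracker`, `WandbLogger`, as in the diff below) are otherwise unchanged:

# Import shim for downstream code pinned across this rename (sketch; assumes
# only the module path changed, not the exported names).
try:
    from lm_eval.loggers import EvaluationTracker, WandbLogger  # this commit and later
except ImportError:
    from lm_eval.logging import EvaluationTracker, WandbLogger  # releases before the rename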
@@ -8,7 +8,7 @@ from typing import Union
 from lm_eval import evaluator, utils
 from lm_eval.evaluator import request_caching_arg_to_dict
-from lm_eval.logging import EvaluationTracker, WandbLogger
+from lm_eval.loggers import EvaluationTracker, WandbLogger
 from lm_eval.tasks import TaskManager
 from lm_eval.utils import handle_non_serializable, make_table, simple_parse_args_string
@@ -255,7 +255,10 @@ def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None:
     os.environ["TOKENIZERS_PARALLELISM"] = "false"

     # update the evaluation tracker args with the output path and the HF token
-    args.hf_hub_log_args = f"output_path={args.output_path},token={os.environ.get('HF_TOKEN')},{args.hf_hub_log_args}"
+    if args.output_path:
+        args.hf_hub_log_args += f",output_path={args.output_path}"
+    if os.environ.get("HF_TOKEN", None):
+        args.hf_hub_log_args += f",token={os.environ.get('HF_TOKEN')}"
     evaluation_tracker_args = simple_parse_args_string(args.hf_hub_log_args)
     evaluation_tracker = EvaluationTracker(**evaluation_tracker_args)
     evaluation_tracker.general_config_tracker.log_experiment_args(
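
The second hunk above is the "fix evaluation tracker args" part of this commit: the old one-liner baked `output_path` and the `HF_TOKEN` into the comma-joined args string even when they were unset, so `simple_parse_args_string` would hand `EvaluationTracker` the literal string `'None'`. A self-contained sketch of the failure mode; the toy parser and the `MISSING_TOKEN` variable name are assumptions for illustration, not harness code:

import os

def parse_args_string(s: str) -> dict:
    # Toy stand-in for lm_eval.utils.simple_parse_args_string (assumed
    # behavior): split "k1=v1,k2=v2" into a dict, skipping empty segments.
    return dict(kv.split("=", 1) for kv in s.split(",") if kv)

hf_hub_log_args = ""  # user passed no extra logging args
output_path = None    # user passed no --output_path

# Old construction: unset values are stringified into the args anyway.
old = f"output_path={output_path},token={os.environ.get('MISSING_TOKEN')},{hf_hub_log_args}"
print(parse_args_string(old))  # {'output_path': 'None', 'token': 'None'}

# New construction: append only the pieces that are actually set.
new = hf_hub_log_args
if output_path:
    new += f",output_path={output_path}"
if os.environ.get("MISSING_TOKEN"):
    new += f",token={os.environ['MISSING_TOKEN']}"
print(parse_args_string(new))  # {}

The remaining hunks apply the same `lm_eval.logging -> lm_eval.loggers` rename in the other modules that import the logging utilities.
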
@@ -21,7 +21,7 @@ from lm_eval.evaluator_utils import (
     print_writeout,
     run_task_tests,
 )
-from lm_eval.logging.utils import add_env_info, get_git_commit_hash
+from lm_eval.loggers.utils import add_env_info, get_git_commit_hash
 from lm_eval.tasks import TaskManager, get_task_dict
 from lm_eval.utils import (
     eval_logger,
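
This hunk is purely mechanical; the main risk with a rename like this is a stale import surviving elsewhere in the tree. A hypothetical checker (not part of the commit) that flags any remaining references to the old module path:

import pathlib
import re

# Hypothetical helper: report lines still importing the old module path.
OLD_IMPORT = re.compile(r"\b(?:from|import)\s+lm_eval\.logging\b")
for path in pathlib.Path("lm_eval").rglob("*.py"):
    for lineno, line in enumerate(path.read_text().splitlines(), start=1):
        if OLD_IMPORT.search(line):
            print(f"{path}:{lineno}: {line.strip()}")
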
@@ -7,7 +7,7 @@ import numpy as np
 import pandas as pd
 from packaging.version import Version

-from lm_eval.logging.utils import _handle_non_serializable, remove_none_pattern
+from lm_eval.loggers.utils import _handle_non_serializable, remove_none_pattern

 logger = logging.getLogger(__name__)
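
Note that this last file imports from the renamed package and then immediately uses the stdlib `logging` module (`logger = logging.getLogger(__name__)`). Avoiding that kind of name collision is a plausible motivation for the rename, though the commit message does not say so. A small sketch of the hazard, assuming a layout where the package directory itself can land on `sys.path`:

# Sketch of the shadowing hazard (assumed rationale, not stated in the commit):
# if the package directory itself ends up on sys.path, a subpackage named
# `logging` is found before the stdlib module of the same name.
import sys

sys.path.insert(0, "lm_eval")  # hypothetical: lm_eval/ itself on sys.path
import logging                 # could now resolve to lm_eval/logging/, not the stdlib

logger = logging.getLogger(__name__)  # fails if the shadow package lacks getLogger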