Unverified commit cffc1bd3, authored by Baber Abbasi, committed by GitHub

add logging of model args (#1619)

* add logging of model args

* nit

* Add warnings.

* nit

* add warning

* nit
parent 34c9b7e4
@@ -333,7 +333,6 @@ def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None:
     )
     eval_logger.info(f"Selected Tasks: {task_names}")
-    eval_logger.info("Loading selected tasks...")
     request_caching_args = request_caching_arg_to_dict(
         cache_requests=args.cache_requests
...
@@ -148,9 +148,22 @@ def simple_evaluate(
     if isinstance(model, str):
         if model_args is None:
+            eval_logger.warning("model_args not specified. Using defaults.")
             model_args = ""
+        if "pretrained" not in model_args and model in [
+            "hf-auto",
+            "hf",
+            "huggingface",
+            "vllm",
+        ]:
+            eval_logger.warning(
+                "pretrained not specified. Using default pretrained=gpt2."
+            )
         if isinstance(model_args, dict):
+            eval_logger.info(
+                f"Initializing {model} model, with arguments: {model_args}"
+            )
             lm = lm_eval.api.registry.get_model(model).create_from_arg_obj(
                 model_args,
                 {
...
@@ -161,6 +174,9 @@ def simple_evaluate(
             )
         else:
+            eval_logger.info(
+                f"Initializing {model} model, with arguments: {simple_parse_args_string(model_args)}"
+            )
             lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
                 model_args,
                 {
...
@@ -172,6 +188,7 @@ def simple_evaluate(
     else:
         if not isinstance(model, lm_eval.api.model.LM):
             raise TypeError
+        eval_logger.info("Using pre-initialized model")
         lm = model
     if use_cache is not None:
...
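The string path above logs the parsed form of model_args (via simple_parse_args_string) so the exact arguments handed to create_from_arg_string are visible in the run log. Below is a minimal usage sketch of the new behaviour, assuming simple_parse_args_string is importable from lm_eval.utils and simple_evaluate from the package root; the task name and limit are illustrative only and not part of this change.

import logging

import lm_eval
from lm_eval.utils import simple_parse_args_string

logging.basicConfig(level=logging.INFO)

# What the new info line prints for a typical HF arg string:
print(simple_parse_args_string("pretrained=gpt2,dtype=float32"))
# -> {'pretrained': 'gpt2', 'dtype': 'float32'}

# Calling with no model_args now emits both warnings
# ("model_args not specified..." and "pretrained not specified...")
# before falling back to the default pretrained=gpt2.
results = lm_eval.simple_evaluate(
    model="hf",
    tasks=["hellaswag"],  # illustrative task choice
    limit=8,              # small slice, for illustration only
)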
@@ -397,7 +397,8 @@ class WandbLogger:
             self.run.log({f"{group}_eval_results": grouped_df})


-def get_commit_from_path(repo_path: Path) -> Optional[str]:
+def get_commit_from_path(repo_path: Union[Path, str]) -> Optional[str]:
+    try:
         git_folder = Path(repo_path, ".git")
         if git_folder.is_file():
             git_folder = Path(
...
@@ -415,6 +416,11 @@ def get_commit_from_path(repo_path: Path) -> Optional[str]:
             git_hash = head_ref.read_text(encoding="utf-8").replace("\n", "")
         else:
             git_hash = None
+    except Exception as err:
+        logger.debug(
+            f"Failed to retrieve a Git commit hash from path: {str(repo_path)}. Error: {err}"
+        )
+        return None
     return git_hash
...
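The last change widens the accepted path type to Union[Path, str] and makes commit lookup non-fatal: any error while reading .git metadata is logged at debug level and None is returned. A small caller-side sketch, assuming the function lives in lm_eval.logging_utils (the file name is not shown in this diff view).

from pathlib import Path

from lm_eval.logging_utils import get_commit_from_path  # assumed module path

# Inside a git checkout this returns the current HEAD commit hash;
# plain str paths are now accepted alongside pathlib.Path.
print(get_commit_from_path(Path.cwd()))
print(get_commit_from_path("."))

# A directory without a repository still yields None via the else branch,
# while read errors (permissions, malformed .git files) now hit the new
# except branch, log a debug message, and also return None instead of raising.
print(get_commit_from_path("/tmp"))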