gaoqiong / lm-evaluation-harness · Commits

Commit e33a7d92, authored Oct 10, 2023 by lintangsutawika

error set to DEBUG

parent 9894597c
Showing 2 changed files with 17 additions and 17 deletions:

- lm_eval/__main__.py (+4, -4)
- lm_eval/tasks/__init__.py (+13, -13)
lm_eval/__main__.py

```diff
@@ -98,9 +98,9 @@ def parse_eval_args() -> argparse.Namespace:
         help="Additional path to include if there are external tasks to include.",
     )
     parser.add_argument(
-        "--verbose",
-        type=bool,
-        default=False,
+        "--verbosity",
+        type=str,
+        default="INFO",
         help="Log error when tasks are not registered.",
     )
     return parser.parse_args()
@@ -112,6 +112,7 @@ def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None:
         # we allow for args to be passed externally, else we parse them ourselves
         args = parse_eval_args()
 
+    eval_logger.setLevel(getattr(logging, f"{args.verbosity}"))
     os.environ["TOKENIZERS_PARALLELISM"] = "false"
 
     if args.limit:
@@ -173,7 +174,6 @@ def cli_evaluate(args: Union[argparse.Namespace, None] = None) -> None:
         assert args.output_path, "Specify --output_path"
 
     eval_logger.info(f"Selected Tasks: {task_names}")
-    eval_logger.verbose = args.verbose
 
     results = evaluator.simple_evaluate(
         model=args.model,
```
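For context, a minimal runnable sketch of how the new flag behaves (not part of the commit): the parser arguments and the `lm-eval` logger name come from the diffs on this page, while `logging.basicConfig()` and the sample messages are illustrative. `getattr(logging, args.verbosity)` maps the level name `"DEBUG"` to the numeric constant `logging.DEBUG`; an unrecognized name would raise `AttributeError`.

```python
import argparse
import logging

parser = argparse.ArgumentParser()
parser.add_argument(
    "--verbosity",
    type=str,
    default="INFO",
    help="Log error when tasks are not registered.",
)
args = parser.parse_args(["--verbosity", "DEBUG"])

logging.basicConfig()  # illustrative; attaches a handler to the root logger
eval_logger = logging.getLogger("lm-eval")

# The commit's new line: resolve the level name to its numeric value,
# e.g. getattr(logging, "DEBUG") -> 10. Invalid names raise AttributeError.
eval_logger.setLevel(getattr(logging, f"{args.verbosity}"))

eval_logger.debug("visible because the level is DEBUG")
eval_logger.info("visible at INFO and below")
```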
lm_eval/tasks/__init__.py

```diff
@@ -4,7 +4,7 @@ from typing import List, Union, Dict
 from lm_eval import utils
 from lm_eval import prompts
-from lm_eval.logger import eval_logger
+# from lm_eval.logger import eval_logger
 from lm_eval.api.task import TaskConfig, Task, ConfigurableTask
 from lm_eval.api.registry import (
     register_task,
@@ -14,6 +14,9 @@ from lm_eval.api.registry import (
     ALL_TASKS,
 )
 
+import logging
+
+eval_logger = logging.getLogger('lm-eval')
 
 def register_configurable_task(config: Dict[str, str]) -> int:
     SubClass = type(
@@ -139,18 +142,15 @@ def include_task_folder(task_dir: str, register_task: bool = True) -> None:
                     register_configurable_group(config, yaml_path)
             except Exception as error:
-                if eval_logger.verbose:
-                    import traceback
-
-                    eval_logger.warning(
-                        "Failed to load config in\n"
-                        f" {yaml_path}\n"
-                        " Config will not be added to registry\n"
-                        f" Error: {error}\n"
-                        f" Traceback: {traceback.format_exc()}"
-                    )
-                else:
-                    eval_logger.warning("Yaml failed to register {yaml_path}\n")
+                import traceback
+
+                eval_logger.debug(
+                    "Failed to load config in\n"
+                    f" {yaml_path}\n"
+                    " Config will not be added to registry\n"
+                    f" Error: {error}\n"
+                    f" Traceback: {traceback.format_exc()}"
+                )
 
     return 0
```
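The net effect of this second diff: the old code gated the detailed traceback behind an `eval_logger.verbose` attribute and otherwise emitted a terse warning; the new code always logs the full details, but at DEBUG level, so they surface only when the CLI is run with `--verbosity DEBUG`. A hedged sketch of that behavior, not taken from the repo; the `yaml_path` value and the raised error are stand-ins:

```python
import logging
import traceback

logging.basicConfig()  # illustrative handler setup
eval_logger = logging.getLogger("lm-eval")
eval_logger.setLevel(logging.DEBUG)  # what `--verbosity DEBUG` now selects

yaml_path = "lm_eval/tasks/example/task.yaml"  # hypothetical path

try:
    raise ValueError("bad YAML field")  # stand-in for a failing config load
except Exception as error:
    # Mirrors the committed code: one DEBUG record carrying the path,
    # the error, and the full traceback; silent at the default INFO level.
    eval_logger.debug(
        "Failed to load config in\n"
        f" {yaml_path}\n"
        " Config will not be added to registry\n"
        f" Error: {error}\n"
        f" Traceback: {traceback.format_exc()}"
    )
```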