gaoqiong / lm-evaluation-harness · Commits · f701ba7d

Commit f701ba7d, authored Nov 02, 2023 by lintangsutawika

eval_logger is not imported from logger.py anymore

Parent: 7ec82485
Showing 8 changed files with 14 additions and 19 deletions.
lm_eval/api/metrics.py       +2 -0
lm_eval/api/model.py         +3 -1
lm_eval/api/registry.py      +3 -1
lm_eval/api/task.py          +2 -1
lm_eval/evaluator.py         +2 -2
lm_eval/logger.py            +0 -10
lm_eval/tasks/__init__.py    +0 -3
lm_eval/utils.py             +2 -1
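Taken together, the diffs below drop the shared lm_eval.logger module and have each file obtain the "lm-eval" logger directly from the standard library instead of importing eval_logger from lm_eval/logger.py. A minimal sketch of the pattern every touched module adopts (module and variable names are taken from the diffs; nothing else is assumed):

    import logging

    # logging.getLogger returns the same logger object for a given name,
    # so every module that does this logs through the shared "lm-eval" logger.
    eval_logger = logging.getLogger("lm-eval")

    eval_logger.info("message routed through the shared 'lm-eval' logger")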
lm_eval/api/metrics.py
@@ -9,6 +9,8 @@ import evaluate
 from lm_eval.api.registry import register_metric, register_aggregation
+import logging
+eval_logger = logging.getLogger("lm-eval")
 # Register Aggregations First
 @register_aggregation("mean")
lm_eval/api/model.py
@@ -10,7 +10,9 @@ import hashlib
 from tqdm import tqdm
 from lm_eval import utils
-from lm_eval.logger import eval_logger
+import logging
+
+eval_logger = logging.getLogger("lm-eval")
 T = TypeVar("T", bound="LM")
lm_eval/api/registry.py
View file @
f701ba7d
import
os
import
evaluate
from
lm_eval.api.model
import
LM
from
lm_eval.logger
import
eval_logger
import
logging
eval_logger
=
logging
.
getLogger
(
"lm-eval"
)
MODEL_REGISTRY
=
{}
...
...
lm_eval/api/task.py
@@ -21,7 +21,6 @@ from lm_eval.api import samplers
 from lm_eval.api.instance import Instance
 from lm_eval.api.filter import FilterEnsemble
-from lm_eval.logger import eval_logger
 from lm_eval.prompts import get_prompt
 from lm_eval.filters import build_filter_ensemble
 from lm_eval.api.metrics import (
@@ -47,6 +46,8 @@ ALL_OUTPUT_TYPES = [
     "generate_until",
 ]
+import logging
+eval_logger = logging.getLogger("lm-eval")
 @dataclass
 class TaskConfig(dict):
lm_eval/evaluator.py
@@ -23,7 +23,7 @@ from lm_eval.utils import (
     get_git_commit_hash,
 )
-from lm_eval.logger import eval_logger
+eval_logger = logging.getLogger("lm-eval")
 @positional_deprecated
@@ -246,7 +246,7 @@ def evaluate(
         task.build_all_requests(limit=limit, rank=lm.rank, world_size=lm.world_size)
-        eval_logger.info(
+        eval_logger.debug(
             f"Task: {task_name}; number of requests on this rank: {len(task.instances)}"
         )
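Note that the per-task request count above is demoted from info to debug, so at the usual INFO level it would no longer be printed. A caller who still wants to see it could presumably lower the level of the named logger themselves; the snippet below is a sketch using only standard-library calls, not anything added by this commit:

    import logging

    # Enable debug output from the harness's named logger, e.g. the
    # "Task: ...; number of requests on this rank: ..." message above.
    logging.getLogger("lm-eval").setLevel(logging.DEBUG)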
lm_eval/logger.py  (deleted, 100644 → 0)
-import logging
-
-logging.basicConfig(
-    format="%(asctime)s,%(msecs)03d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s",
-    datefmt="%Y-%m-%d:%H:%M:%S",
-    level=logging.INFO,
-)
-eval_logger = logging.getLogger("lm-eval")
-
-SPACING = " " * 47
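With lm_eval/logger.py removed, its module-level logging.basicConfig(...) call no longer runs when the package is imported, so the library itself presumably no longer installs a handler, format, or level for the "lm-eval" logger. A sketch of how a consuming script could reproduce the old behaviour on its own (the format and date strings are copied from the deleted file; the rest is plain standard-library logging, not part of this commit):

    import logging

    # Reinstate the formatting that lm_eval/logger.py used to apply globally.
    logging.basicConfig(
        format="%(asctime)s,%(msecs)03d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s",
        datefmt="%Y-%m-%d:%H:%M:%S",
        level=logging.INFO,
    )

    # The harness modules all log through this named logger.
    eval_logger = logging.getLogger("lm-eval")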
lm_eval/tasks/__init__.py
@@ -16,9 +16,6 @@ from lm_eval.api.registry import (
 import logging
 
 eval_logger = logging.getLogger("lm-eval")
-# from lm_eval.logger import eval_logger
-# print("tasks.py eval_logger.level")
-print(eval_logger.level)
 def register_configurable_task(config: Dict[str, str]) -> int:
     SubClass = type(
lm_eval/utils.py
@@ -19,7 +19,8 @@ import transformers
 from jinja2 import BaseLoader, Environment, StrictUndefined
 from itertools import islice
-from lm_eval.logger import eval_logger
+import logging
+eval_logger = logging.getLogger("lm-eval")
 def escaped_split(text, sep_char, maxsplit=-1):