gaoqiong / lm-evaluation-harness · Commit 9a9c8e3c

Remove unused imports and format imports outside of `lm_eval` module

Authored Mar 27, 2021 by Jonathan Tow
Parent: d5d19219
Showing 6 changed files with 4 additions and 19 deletions:
  main.py                                    +0   -2
  scripts/cost_estimate.py                   +2  -10
  scripts/fewshot_description_experiment.py  +0   -4
  scripts/write_out.py                       +0   -1
  tests/test_models.py                       +1   -1
  tests/test_tasks.py                        +1   -1
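For context only, and not part of this commit: unused top-level imports like the ones removed here are usually found with a static check. The sketch below is a minimal, hypothetical illustration using Python's standard-library ast module; the script and the helper name unused_imports are assumptions, not code from this repository.

# Hypothetical helper, not part of lm-evaluation-harness: flags top-level
# imported names that are never referenced elsewhere in the module.
import ast
import sys


def unused_imports(source):
    tree = ast.parse(source)
    imported = set()
    used = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            # "import numpy as np" binds "np"; "import itertools" binds "itertools"
            imported.update(a.asname or a.name.split(".")[0] for a in node.names)
        elif isinstance(node, ast.ImportFrom):
            # "from lm_eval import models, tasks" binds "models" and "tasks"
            imported.update(a.asname or a.name for a in node.names)
        elif isinstance(node, ast.Name):
            used.add(node.id)
    # Purely syntactic check: misses __all__, string references, re-exports, etc.
    return sorted(imported - used)


if __name__ == "__main__":
    with open(sys.argv[1]) as f:
        print(unused_imports(f.read()))

Run against the pre-commit main.py, a check like this would be expected to report itertools and collections, assuming those names are never referenced in the file, which is what the removal in this commit implies.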
main.py
@@ -2,8 +2,6 @@ import argparse
 import json
 import numpy as np
 import random
-import itertools
-import collections
 import logging
 from lm_eval import models, tasks, evaluator, base
...
scripts/cost_estimate.py
-import argparse
-import json
-import numpy as np
 import random
-import itertools
-import collections
-import logging
-from lm_eval import models, tasks, evaluator, base
-import random
-from lm_eval.base import LM
 import transformers
+from lm_eval import tasks, evaluator
+from lm_eval.base import LM

 class DryrunLM(LM):
...
scripts/fewshot_description_experiment.py
-import argparse
 import json
 import numpy as np
 import random
-import itertools
-import collections
 import logging
 from lm_eval import models, tasks, evaluator, base

 logging.getLogger("openai").setLevel(logging.WARNING)
...
scripts/write_out.py
@@ -2,7 +2,6 @@ import argparse
 import numpy as np
 import os
 import random
 from lm_eval import tasks
 from lm_eval.utils import join_iters
...
tests/test_models.py
 import lm_eval.models as models
-import lm_eval.base as base

 def test_gpt2():
     gpt2 = models.get_model('gpt2').create_from_arg_string("device=cpu")
...
tests/test_tasks.py
 import lm_eval.tasks as tasks
 import lm_eval.base as base
-from itertools import islice
 import pytest
+from itertools import islice

 @pytest.mark.parametrize("taskname,Task", tasks.TASK_REGISTRY.items())
...
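As an aside, not taken from this commit: the parametrize decorator above drives one test invocation per entry in tasks.TASK_REGISTRY. A minimal sketch of how such a registry-driven test is typically written is below; only the decorator line mirrors the diff, while the test name, body, and the assumed lm_eval.base.Task interface check are hypothetical.

# Hypothetical sketch of a registry-driven test; only the decorator
# pattern is taken from tests/test_tasks.py above.
import pytest

import lm_eval.tasks as tasks
import lm_eval.base as base


@pytest.mark.parametrize("taskname,Task", tasks.TASK_REGISTRY.items())
def test_task_registry_entries(taskname, Task):
    # Each registered task class should be constructible and should be
    # a subclass of the shared task abstraction (assumed interface).
    task = Task()
    assert isinstance(task, base.Task)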