Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
chenpangpang
transformers
Commits
1ff5bd38
Unverified
Commit
1ff5bd38
authored
Sep 24, 2020
by
Sylvain Gugger
Committed by
GitHub
Sep 24, 2020
Browse files
Check decorator order (#7326)
* Check decorator order * Adapt for parametrized decorators * Fix typos
parent
0be5f4a0
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
42 additions
and
5 deletions
+42
-5
tests/test_tokenization_bert_generation.py
tests/test_tokenization_bert_generation.py
+1
-1
tests/test_tokenization_common.py
tests/test_tokenization_common.py
+3
-3
tests/test_tokenization_reformer.py
tests/test_tokenization_reformer.py
+1
-1
utils/check_repo.py
utils/check_repo.py
+37
-0
No files found.
tests/test_tokenization_bert_generation.py
View file @
1ff5bd38
...
@@ -185,8 +185,8 @@ class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
...
@@ -185,8 +185,8 @@ class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
self
.
assertListEqual
(
original_tokenizer_encodings
,
self
.
big_tokenizer
.
encode
(
symbols
))
self
.
assertListEqual
(
original_tokenizer_encodings
,
self
.
big_tokenizer
.
encode
(
symbols
))
@
slow
@
require_torch
@
require_torch
@
slow
def
test_torch_encode_plus_sent_to_model
(
self
):
def
test_torch_encode_plus_sent_to_model
(
self
):
import
torch
import
torch
...
...
tests/test_tokenization_common.py
View file @
1ff5bd38
...
@@ -1419,8 +1419,8 @@ class TokenizerTesterMixin:
...
@@ -1419,8 +1419,8 @@ class TokenizerTesterMixin:
# add pad_token_id to pass subsequent tests
# add pad_token_id to pass subsequent tests
tokenizer
.
add_special_tokens
({
"pad_token"
:
"<PAD>"
})
tokenizer
.
add_special_tokens
({
"pad_token"
:
"<PAD>"
})
@
slow
@
require_torch
@
require_torch
@
slow
def
test_torch_encode_plus_sent_to_model
(
self
):
def
test_torch_encode_plus_sent_to_model
(
self
):
import
torch
import
torch
...
@@ -1470,8 +1470,8 @@ class TokenizerTesterMixin:
...
@@ -1470,8 +1470,8 @@ class TokenizerTesterMixin:
# model(**encoded_sequence_fast)
# model(**encoded_sequence_fast)
# model(**batch_encoded_sequence_fast)
# model(**batch_encoded_sequence_fast)
@
slow
@
require_tf
@
require_tf
@
slow
def
test_tf_encode_plus_sent_to_model
(
self
):
def
test_tf_encode_plus_sent_to_model
(
self
):
from
transformers
import
TF_MODEL_MAPPING
,
TOKENIZER_MAPPING
from
transformers
import
TF_MODEL_MAPPING
,
TOKENIZER_MAPPING
...
@@ -1505,8 +1505,8 @@ class TokenizerTesterMixin:
...
@@ -1505,8 +1505,8 @@ class TokenizerTesterMixin:
model
(
batch_encoded_sequence
)
model
(
batch_encoded_sequence
)
# TODO: Check if require_torch is the best to test for numpy here ... Maybe move to require_flax when available
# TODO: Check if require_torch is the best to test for numpy here ... Maybe move to require_flax when available
@
slow
@
require_torch
@
require_torch
@
slow
def
test_np_encode_plus_sent_to_model
(
self
):
def
test_np_encode_plus_sent_to_model
(
self
):
from
transformers
import
MODEL_MAPPING
,
TOKENIZER_MAPPING
from
transformers
import
MODEL_MAPPING
,
TOKENIZER_MAPPING
...
...
tests/test_tokenization_reformer.py
View file @
1ff5bd38
...
@@ -230,8 +230,8 @@ class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
...
@@ -230,8 +230,8 @@ class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
self
.
assertListEqual
(
original_tokenizer_encodings
,
self
.
big_tokenizer
.
encode
(
symbols
))
self
.
assertListEqual
(
original_tokenizer_encodings
,
self
.
big_tokenizer
.
encode
(
symbols
))
@
slow
@
require_torch
@
require_torch
@
slow
def
test_torch_encode_plus_sent_to_model
(
self
):
def
test_torch_encode_plus_sent_to_model
(
self
):
import
torch
import
torch
...
...
utils/check_repo.py
View file @
1ff5bd38
...
@@ -273,9 +273,46 @@ def check_all_models_are_documented():
...
@@ -273,9 +273,46 @@ def check_all_models_are_documented():
raise
Exception
(
f
"There were
{
len
(
failures
)
}
failures:
\n
"
+
"
\n
"
.
join
(
failures
))
raise
Exception
(
f
"There were
{
len
(
failures
)
}
failures:
\n
"
+
"
\n
"
.
join
(
failures
))
# Matches a line that consists solely of a decorator, capturing the decorator
# expression (name plus any parenthesized arguments) in group 1.
_re_decorator = re.compile(r"^\s*@(\S+)\s+$")


def check_decorator_order(filename):
    """Check decorator order in the test file `filename`.

    Any ``parameterized`` decorator (and its variants such as
    ``parameterized.expand``) must be the first decorator in a stack —
    equivalently, ``slow`` / ``require_*`` must come after it — otherwise the
    parameterized tests are not expanded correctly.

    Args:
        filename (str): path of the test file to check.

    Returns:
        list of int: 1-based line numbers at which a ``parameterized``
        decorator appears below another decorator.
    """
    with open(filename, "r", encoding="utf-8") as f:
        lines = f.readlines()
    errors = []
    decorator_before = None
    # start=1 so reported numbers match editor line numbers (the previous
    # 0-based index made every reported line off by one).
    for line_number, line in enumerate(lines, start=1):
        search = _re_decorator.search(line)
        if search is not None:
            decorator_name = search.group(1)
            # A parameterized decorator stacked below another decorator is an error.
            if decorator_before is not None and decorator_name.startswith("parameterized"):
                errors.append(line_number)
            decorator_before = decorator_name
        elif decorator_before is not None:
            # A non-decorator line terminates the current decorator stack.
            decorator_before = None
    return errors
def check_all_decorator_order():
    """ Check decorator order in every test file under `PATH_TO_TESTS`."""
    failures = []
    for entry in os.listdir(PATH_TO_TESTS):
        # Only Python test modules are relevant.
        if not entry.endswith(".py"):
            continue
        path = os.path.join(PATH_TO_TESTS, entry)
        failures.extend(f"- {path}, line {i}" for i in check_decorator_order(path))
    if failures:
        msg = "\n".join(failures)
        raise ValueError(
            f"The parameterized decorator (and its variants) should always be first, but this is not the case in the following files:\n{msg}"
        )
def check_repo_quality():
    """ Check all models are properly tested and documented."""
    # Each entry: a progress message followed by the checks it announces,
    # executed in the exact order below.
    stages = (
        (
            "Checking all models are properly tested.",
            (check_all_decorator_order, check_all_models_are_tested),
        ),
        (
            "Checking all models are properly documented.",
            (check_all_models_are_documented,),
        ),
    )
    for message, checks in stages:
        print(message)
        for check in checks:
            check()
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment