Commit 7e98e211 authored by Aymeric Augustin

Remove unittest.main() in test modules.

This construct isn't used anymore.

Running python tests/test_foo.py puts the tests/ directory on
sys.path, which isn't representative of how we run tests.

Use python -m unittest tests/test_foo.py instead.
parent 6be7cdda
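
The difference between the two invocations is easy to demonstrate. What follows is a minimal sketch, not part of the commit: a hypothetical helper, saved here as tests/show_path.py, that prints the first entry of the import path, i.e. the directory each invocation style prepends.

    # tests/show_path.py -- hypothetical helper, for illustration only.
    # sys.path[0] is the entry Python prepends at startup:
    # - `python tests/show_path.py` prepends the script's directory (tests/),
    # - `python -m tests.show_path` prepends the current working directory.
    import sys

    print(sys.path[0])

Run as python tests/show_path.py, it prints the tests/ directory, so sibling test modules resolve as top-level imports; run as python -m tests.show_path from the repository root (assuming tests/ is a package), it prints the working directory instead, which matches how the test suite is actually invoked.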
tests/test_tokenization_bert.py
@@ -15,7 +15,6 @@
 from __future__ import absolute_import, division, print_function, unicode_literals

 import os
-import unittest
 from io import open

 from transformers.tokenization_bert import (
@@ -146,7 +145,3 @@ class BertTokenizationTest(CommonTestCases.CommonTokenizerTester):

         assert encoded_sentence == [101] + text + [102]
         assert encoded_pair == [101] + text + [102] + text_2 + [102]
-
-
-if __name__ == "__main__":
-    unittest.main()
tests/test_tokenization_ctrl.py
@@ -15,7 +15,6 @@ from __future__ import absolute_import, division, print_function, unicode_literals

 import json
 import os
-import unittest
 from io import open

 from transformers.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
@@ -63,7 +62,3 @@ class CTRLTokenizationTest(CommonTestCases.CommonTokenizerTester):

         input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
         self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
-
-
-if __name__ == "__main__":
-    unittest.main()
tests/test_tokenization_distilbert.py
@@ -14,8 +14,6 @@
 # limitations under the License.
 from __future__ import absolute_import, division, print_function, unicode_literals

-import unittest
-
 from transformers.tokenization_distilbert import DistilBertTokenizer

 from .test_tokenization_bert import BertTokenizationTest
@@ -43,7 +41,3 @@ class DistilBertTokenizationTest(BertTokenizationTest):
         assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
             tokenizer.sep_token_id
         ]
-
-
-if __name__ == "__main__":
-    unittest.main()
tests/test_tokenization_gpt2.py
@@ -16,7 +16,6 @@ from __future__ import absolute_import, division, print_function, unicode_literals

 import json
 import os
-import unittest
 from io import open

 from transformers.tokenization_gpt2 import VOCAB_FILES_NAMES, GPT2Tokenizer
@@ -84,7 +83,3 @@ class GPT2TokenizationTest(CommonTestCases.CommonTokenizerTester):
         input_tokens = tokens + [tokenizer.unk_token]
         input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
         self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
-
-
-if __name__ == "__main__":
-    unittest.main()
tests/test_tokenization_openai.py
@@ -16,7 +16,6 @@ from __future__ import absolute_import, division, print_function, unicode_literals

 import json
 import os
-import unittest

 from transformers.tokenization_openai import VOCAB_FILES_NAMES, OpenAIGPTTokenizer

@@ -83,7 +82,3 @@ class OpenAIGPTTokenizationTest(CommonTestCases.CommonTokenizerTester):
         input_tokens = tokens + ["<unk>"]
         input_bpe_tokens = [14, 15, 20]
         self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
-
-
-if __name__ == "__main__":
-    unittest.main()
tests/test_tokenization_roberta.py
@@ -16,7 +16,6 @@ from __future__ import absolute_import, division, print_function, unicode_literals

 import json
 import os
-import unittest
 from io import open

 from transformers.tokenization_roberta import VOCAB_FILES_NAMES, RobertaTokenizer
@@ -111,7 +110,3 @@ class RobertaTokenizationTest(CommonTestCases.CommonTokenizerTester):

         assert encoded_sentence == encoded_text_from_decode
         assert encoded_pair == encoded_pair_from_decode
-
-
-if __name__ == "__main__":
-    unittest.main()
tests/test_tokenization_t5.py
@@ -15,7 +15,6 @@
 from __future__ import absolute_import, division, print_function, unicode_literals

 import os
-import unittest

 from transformers.tokenization_t5 import T5Tokenizer
 from transformers.tokenization_xlnet import SPIECE_UNDERLINE
@@ -110,7 +109,3 @@ class T5TokenizationTest(CommonTestCases.CommonTokenizerTester):
                 ".",
             ],
         )
-
-
-if __name__ == "__main__":
-    unittest.main()
tests/test_tokenization_transfo_xl.py
@@ -15,7 +15,6 @@
 from __future__ import absolute_import, division, print_function, unicode_literals

 import os
-import unittest
 from io import open

 from transformers import is_torch_available
@@ -83,7 +82,3 @@ class TransfoXLTokenizationTest(CommonTestCases.CommonTokenizerTester):
         self.assertListEqual(
             tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
         )
-
-
-if __name__ == "__main__":
-    unittest.main()
tests/test_tokenization_utils.py
@@ -44,7 +44,3 @@ class TokenizerUtilsTest(unittest.TestCase):
     @slow
     def test_pretrained_tokenizers(self):
        self.check_tokenizer_from_pretrained(GPT2Tokenizer)
-
-
-if __name__ == "__main__":
-    unittest.main()
tests/test_tokenization_xlm.py
@@ -16,7 +16,6 @@ from __future__ import absolute_import, division, print_function, unicode_literals

 import json
 import os
-import unittest

 from transformers.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer

@@ -98,7 +97,3 @@ class XLMTokenizationTest(CommonTestCases.CommonTokenizerTester):

         assert encoded_sentence == [1] + text + [1]
         assert encoded_pair == [1] + text + [1] + text_2 + [1]
-
-
-if __name__ == "__main__":
-    unittest.main()
tests/test_tokenization_xlnet.py
@@ -15,7 +15,6 @@
 from __future__ import absolute_import, division, print_function, unicode_literals

 import os
-import unittest

 from transformers.tokenization_xlnet import SPIECE_UNDERLINE, XLNetTokenizer

@@ -183,7 +182,3 @@ class XLNetTokenizationTest(CommonTestCases.CommonTokenizerTester):

         assert encoded_sentence == text + [4, 3]
         assert encoded_pair == text + [4] + text_2 + [4, 3]
-
-
-if __name__ == "__main__":
-    unittest.main()