Unverified commit 13deb95a authored by Sam Shleifer, committed by GitHub

Move tests/utils.py -> transformers/testing_utils.py (#5350)

parent 9c219305
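
After this commit, the shared test helpers live inside the package itself, so tests import them as transformers.testing_utils instead of the tests-only tests/utils.py module. The diff below only rewrites imports; for context, here is a minimal sketch of what the moved skip decorators do, assuming they keep the unittest.skip-based behavior of the old module (the bodies are illustrative, not the exact library code):

import os
import unittest

from transformers import is_torch_available


def require_torch(test_case):
    # Skip the decorated test (or test class) unless PyTorch is installed.
    if not is_torch_available():
        return unittest.skip("test requires PyTorch")(test_case)
    return test_case


def slow(test_case):
    # Skip the decorated test unless RUN_SLOW=1 is set in the environment.
    if os.environ.get("RUN_SLOW", "0") != "1":
        return unittest.skip("test is slow; set RUN_SLOW=1 to run it")(test_case)
    return test_case

A hypothetical test would then use the new import path directly:

from transformers.testing_utils import require_torch, slow

@require_torch
class ExampleIntegrationTest(unittest.TestCase):  # hypothetical test class
    @slow
    def test_full_pass(self):
        ...
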
@@ -18,9 +18,9 @@ import os
 import unittest
 
 from transformers import is_torch_available
+from transformers.testing_utils import require_torch
 
 from .test_tokenization_common import TokenizerTesterMixin
-from .utils import require_torch
 
 
 if is_torch_available():
@@ -17,10 +17,9 @@ import unittest
 from typing import Callable, Optional
 
 from transformers import BatchEncoding, BertTokenizer, BertTokenizerFast, PreTrainedTokenizer, TensorType
+from transformers.testing_utils import require_tf, require_torch, slow
 from transformers.tokenization_gpt2 import GPT2Tokenizer
 
-from .utils import require_tf, require_torch, slow
-
 
 class TokenizerUtilsTest(unittest.TestCase):
     def check_tokenizer_from_pretrained(self, tokenizer_class):
@@ -18,10 +18,10 @@ import json
 import os
 import unittest
 
+from transformers.testing_utils import slow
 from transformers.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
 
 from .test_tokenization_common import TokenizerTesterMixin
-from .utils import slow
 
 
 class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
@@ -18,10 +18,10 @@ import os
 import unittest
 
 from transformers.file_utils import cached_property
+from transformers.testing_utils import slow
 from transformers.tokenization_xlm_roberta import SPIECE_UNDERLINE, XLMRobertaTokenizer
 
 from .test_tokenization_common import TokenizerTesterMixin
-from .utils import slow
 
 
 SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures/test_sentencepiece.model")
@@ -17,10 +17,10 @@
 import os
 import unittest
 
+from transformers.testing_utils import slow
 from transformers.tokenization_xlnet import SPIECE_UNDERLINE, XLNetTokenizer
 
 from .test_tokenization_common import TokenizerTesterMixin
-from .utils import slow
 
 
 SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures/test_sentencepiece.model")
 import unittest
 
 from transformers import AutoTokenizer, TrainingArguments, is_torch_available
+from transformers.testing_utils import require_torch
 
-from .utils import require_torch
 
 if is_torch_available():