Unverified commit 29c10a41, authored by Lysandre Debut, committed by GitHub

[Test refactor 1/5] Per-folder tests reorganization (#15725)



* Per-folder tests reorganization
Co-authored-by: sgugger <sylvain.gugger@gmail.com>
Co-authored-by: Stas Bekman <stas@stason.org>
parent fecb08c2
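
The pattern repeated in every hunk below: each per-model test file moved one package deeper (from `tests/` into `tests/<model>/`), so single-dot relative imports of the shared helpers gain a dot, and cross-model testers are imported through the sibling model's folder. A minimal sketch of what the interpreter resolves in each layout, using `importlib.util.resolve_name`; the package names mirror the layout implied by the hunks, and the snippet is illustrative, not part of the commit:

```python
from importlib.util import resolve_name

# Old flat layout: tests/test_modeling_prophetnet.py imported siblings directly.
print(resolve_name(".test_modeling_common", package="tests"))
# -> tests.test_modeling_common

# New per-folder layout: tests/prophetnet/test_modeling_prophetnet.py is one
# package deeper, so the shared helper now lives one level up.
print(resolve_name("..test_modeling_common", package="tests.prophetnet"))
# -> tests.test_modeling_common  (same target module)

# Cross-model imports now route through the sibling model's folder instead of
# a flat sibling module.
print(resolve_name("..bart.test_modeling_bart", package="tests.rag"))
# -> tests.bart.test_modeling_bart
```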
tests/prophetnet/test_modeling_prophetnet.py
@@ -20,9 +20,9 @@ import unittest

 from transformers import ProphetNetConfig, is_torch_available
 from transformers.testing_utils import require_torch, slow, torch_device

-from .test_configuration_common import ConfigTester
-from .test_generation_utils import GenerationTesterMixin
-from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
+from ..generation.test_generation_utils import GenerationTesterMixin
+from ..test_configuration_common import ConfigTester
+from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor

 if is_torch_available():
...
tests/prophetnet/test_tokenization_prophetnet.py
@@ -28,7 +28,7 @@ from transformers.models.bert.tokenization_bert import (
 from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
 from transformers.testing_utils import require_torch, slow

-from .test_tokenization_common import TokenizerTesterMixin
+from ..test_tokenization_common import TokenizerTesterMixin

 class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
...
tests/qdqbert/test_modeling_qdqbert.py
@@ -18,12 +18,11 @@
 import unittest

-from tests.test_modeling_common import floats_tensor
 from transformers import QDQBertConfig, is_torch_available
 from transformers.testing_utils import require_pytorch_quantization, require_torch, slow, torch_device

-from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from ..test_configuration_common import ConfigTester
+from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask

 if is_torch_available():
...
tests/rag/test_modeling_rag.py
@@ -19,6 +19,7 @@ import os
 import shutil
 import tempfile
 import unittest
+from os.path import dirname
 from unittest.mock import patch

 import numpy as np
@@ -37,15 +38,14 @@ from transformers.testing_utils import (
     torch_device,
 )

-from .test_modeling_bart import BartModelTester
-from .test_modeling_dpr import DPRModelTester
-from .test_modeling_t5 import T5ModelTester
+from ..bart.test_modeling_bart import BartModelTester
+from ..dpr.test_modeling_dpr import DPRModelTester
+from ..t5.test_modeling_t5 import T5ModelTester

 TOLERANCE = 1e-3

-T5_SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures/test_sentencepiece.model")
+T5_SAMPLE_VOCAB = os.path.join(dirname(dirname(os.path.abspath(__file__))), "fixtures/test_sentencepiece.model")

 if is_torch_available() and is_datasets_available() and is_faiss_available():
     import torch
     from datasets import Dataset
...
tests/rag/test_modeling_tf_rag.py
@@ -34,8 +34,8 @@ if is_tf_available() and is_datasets_available() and is_faiss_available():
 from transformers.modeling_tf_outputs import TFBaseModelOutput

-from .test_modeling_tf_bart import TFBartModelTester
-from .test_modeling_tf_dpr import TFDPRModelTester
+from ..bart.test_modeling_tf_bart import TFBartModelTester
+from ..dpr.test_modeling_tf_dpr import TFDPRModelTester

 TOLERANCE = 1e-3
...
tests/realm/test_modeling_realm.py
@@ -19,12 +19,11 @@ import unittest
 import numpy as np

-from tests.test_modeling_common import floats_tensor
 from transformers import RealmConfig, is_torch_available
 from transformers.testing_utils import require_torch, slow, torch_device

-from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from ..test_configuration_common import ConfigTester
+from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask

 if is_torch_available():
...
tests/realm/test_tokenization_realm.py
@@ -28,7 +28,7 @@ from transformers.models.bert.tokenization_bert import (
 from transformers.models.realm.tokenization_realm import RealmTokenizer
 from transformers.testing_utils import require_tokenizers, slow

-from .test_tokenization_common import TokenizerTesterMixin, filter_non_english
+from ..test_tokenization_common import TokenizerTesterMixin, filter_non_english

 @require_tokenizers
...
tests/reformer/test_modeling_reformer.py
@@ -25,9 +25,9 @@ from transformers.testing_utils import (
     torch_device,
 )

-from .test_configuration_common import ConfigTester
-from .test_generation_utils import GenerationTesterMixin
-from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ..generation.test_generation_utils import GenerationTesterMixin
+from ..test_configuration_common import ConfigTester
+from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask

 if is_torch_available():
...
tests/reformer/test_tokenization_reformer.py
@@ -14,15 +14,16 @@
 import os
 import unittest
+from os.path import dirname

 from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
 from transformers.file_utils import cached_property
 from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

-from .test_tokenization_common import TokenizerTesterMixin
+from ..test_tokenization_common import TokenizerTesterMixin

-SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures/test_sentencepiece.model")
+SAMPLE_VOCAB = os.path.join(dirname(dirname(os.path.abspath(__file__))), "fixtures/test_sentencepiece.model")

 @require_sentencepiece
...
tests/rembert/test_modeling_rembert.py
@@ -17,12 +17,11 @@
 import unittest

-from tests.test_modeling_common import floats_tensor
 from transformers import is_torch_available
 from transformers.testing_utils import require_torch, slow, torch_device

-from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from ..test_configuration_common import ConfigTester
+from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask

 if is_torch_available():
...
tests/rembert/test_modeling_tf_rembert.py
@@ -19,8 +19,8 @@ import unittest
 from transformers import RemBertConfig, is_tf_available
 from transformers.testing_utils import require_tf, slow

-from .test_configuration_common import ConfigTester
-from .test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
+from ..test_configuration_common import ConfigTester
+from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor

 if is_tf_available():
...
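
A side effect visible in the RAG and Reformer hunks: fixture paths built from `__file__` need an extra `dirname`, because each test file is now one directory further from the shared `tests/fixtures/` folder. A runnable sketch with a stand-in path (the `/repo/...` location is hypothetical):

```python
import os
from os.path import dirname

# Stand-in for os.path.abspath(__file__) after the move.
here = "/repo/tests/reformer/test_tokenization_reformer.py"

# One dirname was enough when the file sat directly in tests/:
old = os.path.join(dirname(here), "fixtures/test_sentencepiece.model")
print(old)  # /repo/tests/reformer/fixtures/... -- wrong directory after the move

# Two dirname calls climb back out of the per-model folder to tests/:
new = os.path.join(dirname(dirname(here)), "fixtures/test_sentencepiece.model")
print(new)  # /repo/tests/fixtures/test_sentencepiece.model
```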