"vscode:/vscode.git/clone" did not exist on "19e4ebbe3fcded8a345fed05d9c3644b78312839"
Commit 345c23a6 authored by Aymeric Augustin

Replace (TF)CommonTestCases for modeling with a mixin.

I suspect the wrapper classes were created to prevent the abstract base
class (TF)CommonModelTester from being picked up by test discovery and
run on its own, which would fail.

I solved this by replacing the abstract base class with a mixin.

The remaining code changes are just de-indentation and automatic
reformatting by black to take advantage of the extra line width.
parent 7e98e211
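To make the refactoring concrete, here is a minimal, self-contained sketch of both layouts. Only the class names CommonTestCases, CommonModelTester and ModelTesterMixin and the all_model_classes attribute match the diff below; the test method and the FakeModel/FakeModelTest names are illustrative, not actual transformers code. unittest discovery only collects module-level TestCase subclasses, so the old layout hid the shared tester by nesting it inside a wrapper class, while the new layout turns it into a mixin that is not a TestCase at all. Either way, only the concrete per-model test classes are discovered and run.

import unittest


# Old layout (sketch): the shared tester is a TestCase, but hiding it
# inside a plain wrapper class keeps it out of module-level discovery.
class CommonTestCases:
    class CommonModelTester(unittest.TestCase):
        all_model_classes = ()

        def test_model_classes_are_defined(self):
            # Would fail if it ever ran on the abstract base itself.
            self.assertGreater(len(self.all_model_classes), 0)


# New layout (sketch): the shared checks live in a mixin that is not a
# TestCase, so discovery never collects it on its own.
class ModelTesterMixin:
    all_model_classes = ()

    def test_model_classes_are_defined(self):
        self.assertGreater(len(self.all_model_classes), 0)


class FakeModel:  # hypothetical model class, purely illustrative
    pass


class FakeModelTest(ModelTesterMixin, unittest.TestCase):
    # Concrete test class: mixing ModelTesterMixin with unittest.TestCase
    # makes it discoverable, and it inherits the shared tests above.
    all_model_classes = (FakeModel,)


if __name__ == "__main__":
    unittest.main()

Running this module through unittest (for example, python -m unittest <module>) collects only FakeModelTest; neither the nested CommonModelTester nor the bare mixin is picked up as a standalone test case.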
@@ -14,10 +14,12 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function
import unittest
from transformers import XxxConfig, is_tf_available
from .test_configuration_common import ConfigTester
-from .test_modeling_tf_common import TFCommonTestCases, ids_tensor
+from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from .utils import CACHE_DIR, require_tf, slow
@@ -32,7 +34,7 @@ if is_tf_available():
@require_tf
-class TFXxxModelTest(TFCommonTestCases.TFCommonModelTester):
+class TFXxxModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
......
@@ -14,10 +14,12 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function
import unittest
from transformers import is_torch_available
from .test_configuration_common import ConfigTester
-from .test_modeling_common import CommonTestCases, ids_tensor
+from .test_modeling_common import ModelTesterMixin, ids_tensor
from .utils import CACHE_DIR, require_torch, slow, torch_device
@@ -34,7 +36,7 @@ if is_torch_available():
@require_torch
-class XxxModelTest(CommonTestCases.CommonModelTester):
+class XxxModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(XxxModel, XxxForMaskedLM, XxxForQuestionAnswering, XxxForSequenceClassification, XxxForTokenClassification)
......
@@ -14,10 +14,12 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function
import unittest
from transformers import is_torch_available
from .test_configuration_common import ConfigTester
-from .test_modeling_common import CommonTestCases, ids_tensor
+from .test_modeling_common import ModelTesterMixin, ids_tensor
from .utils import CACHE_DIR, require_torch, slow, torch_device
@@ -33,7 +35,7 @@ if is_torch_available():
@require_torch
-class AlbertModelTest(CommonTestCases.CommonModelTester):
+class AlbertModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (AlbertModel, AlbertForMaskedLM) if is_torch_available() else ()
......
@@ -14,10 +14,12 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function
import unittest
from transformers import is_torch_available
from .test_configuration_common import ConfigTester
-from .test_modeling_common import CommonTestCases, floats_tensor, ids_tensor
+from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from .utils import CACHE_DIR, require_torch, slow, torch_device
@@ -37,7 +39,7 @@ if is_torch_available():
@require_torch
-class BertModelTest(CommonTestCases.CommonModelTester):
+class BertModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
......
This diff is collapsed.
@@ -13,10 +13,12 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function
import unittest
from transformers import is_torch_available
from .test_configuration_common import ConfigTester
-from .test_modeling_common import CommonTestCases, ids_tensor
+from .test_modeling_common import ModelTesterMixin, ids_tensor
from .utils import CACHE_DIR, require_torch, slow, torch_device
@@ -25,7 +27,7 @@ if is_torch_available():
@require_torch
-class CTRLModelTest(CommonTestCases.CommonModelTester):
+class CTRLModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (CTRLModel, CTRLLMHeadModel) if is_torch_available() else ()
test_pruning = False
......
@@ -14,10 +14,12 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function
import unittest
from transformers import is_torch_available
from .test_configuration_common import ConfigTester
-from .test_modeling_common import CommonTestCases, ids_tensor
+from .test_modeling_common import ModelTesterMixin, ids_tensor
from .utils import require_torch, torch_device
@@ -33,7 +35,7 @@ if is_torch_available():
@require_torch
-class DistilBertModelTest(CommonTestCases.CommonModelTester):
+class DistilBertModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(DistilBertModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DistilBertForSequenceClassification)
......
@@ -14,10 +14,12 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function
import unittest
from transformers import is_torch_available
from .test_configuration_common import ConfigTester
-from .test_modeling_common import CommonTestCases, ids_tensor
+from .test_modeling_common import ModelTesterMixin, ids_tensor
from .utils import CACHE_DIR, require_torch, slow, torch_device
@@ -32,7 +34,7 @@ if is_torch_available():
@require_torch
-class GPT2ModelTest(CommonTestCases.CommonModelTester):
+class GPT2ModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (GPT2Model, GPT2LMHeadModel, GPT2DoubleHeadsModel) if is_torch_available() else ()
......
@@ -14,10 +14,12 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function
import unittest
from transformers import is_torch_available
from .test_configuration_common import ConfigTester
-from .test_modeling_common import CommonTestCases, ids_tensor
+from .test_modeling_common import ModelTesterMixin, ids_tensor
from .utils import CACHE_DIR, require_torch, slow, torch_device
@@ -32,7 +34,7 @@ if is_torch_available():
@require_torch
-class OpenAIGPTModelTest(CommonTestCases.CommonModelTester):
+class OpenAIGPTModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel) if is_torch_available() else ()
......
@@ -19,7 +19,7 @@ import unittest
from transformers import is_torch_available
from .test_configuration_common import ConfigTester
-from .test_modeling_common import CommonTestCases, ids_tensor
+from .test_modeling_common import ModelTesterMixin, ids_tensor
from .utils import CACHE_DIR, require_torch, slow, torch_device
@@ -37,7 +37,7 @@ if is_torch_available():
@require_torch
-class RobertaModelTest(CommonTestCases.CommonModelTester):
+class RobertaModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (RobertaForMaskedLM, RobertaModel) if is_torch_available() else ()
......
@@ -14,10 +14,12 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function
import unittest
from transformers import is_torch_available
from .test_configuration_common import ConfigTester
-from .test_modeling_common import CommonTestCases, ids_tensor
+from .test_modeling_common import ModelTesterMixin, ids_tensor
from .utils import CACHE_DIR, require_torch, slow
@@ -27,7 +29,7 @@ if is_torch_available():
@require_torch
-class T5ModelTest(CommonTestCases.CommonModelTester):
+class T5ModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (T5Model, T5WithLMHeadModel) if is_torch_available() else ()
test_pruning = False
......
@@ -14,10 +14,12 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function
import unittest
from transformers import AlbertConfig, is_tf_available
from .test_configuration_common import ConfigTester
-from .test_modeling_tf_common import TFCommonTestCases, ids_tensor
+from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from .utils import CACHE_DIR, require_tf, slow
@@ -31,7 +33,7 @@ if is_tf_available():
@require_tf
-class TFAlbertModelTest(TFCommonTestCases.TFCommonModelTester):
+class TFAlbertModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (
(TFAlbertModel, TFAlbertForMaskedLM, TFAlbertForSequenceClassification) if is_tf_available() else ()
......
@@ -14,10 +14,12 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function
import unittest
from transformers import BertConfig, is_tf_available
from .test_configuration_common import ConfigTester
-from .test_modeling_tf_common import TFCommonTestCases, ids_tensor
+from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from .utils import CACHE_DIR, require_tf, slow
@@ -36,7 +38,7 @@ if is_tf_available():
@require_tf
-class TFBertModelTest(TFCommonTestCases.TFCommonModelTester):
+class TFBertModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
......
This diff is collapsed.
@@ -14,10 +14,12 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function
import unittest
from transformers import CTRLConfig, is_tf_available
from .test_configuration_common import ConfigTester
-from .test_modeling_tf_common import TFCommonTestCases, ids_tensor
+from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from .utils import CACHE_DIR, require_tf, slow
@@ -26,7 +28,7 @@ if is_tf_available():
@require_tf
-class TFCTRLModelTest(TFCommonTestCases.TFCommonModelTester):
+class TFCTRLModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (TFCTRLModel, TFCTRLLMHeadModel) if is_tf_available() else ()
......
@@ -14,10 +14,12 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function
import unittest
from transformers import DistilBertConfig, is_tf_available
from .test_configuration_common import ConfigTester
-from .test_modeling_tf_common import TFCommonTestCases, ids_tensor
+from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from .utils import require_tf
@@ -31,7 +33,7 @@ if is_tf_available():
@require_tf
-class TFDistilBertModelTest(TFCommonTestCases.TFCommonModelTester):
+class TFDistilBertModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
......
@@ -14,10 +14,12 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function
import unittest
from transformers import GPT2Config, is_tf_available
from .test_configuration_common import ConfigTester
-from .test_modeling_tf_common import TFCommonTestCases, ids_tensor
+from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from .utils import CACHE_DIR, require_tf, slow
@@ -32,7 +34,7 @@ if is_tf_available():
@require_tf
-class TFGPT2ModelTest(TFCommonTestCases.TFCommonModelTester):
+class TFGPT2ModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (TFGPT2Model, TFGPT2LMHeadModel, TFGPT2DoubleHeadsModel) if is_tf_available() else ()
# all_model_classes = (TFGPT2Model, TFGPT2LMHeadModel) if is_tf_available() else ()
......
@@ -14,10 +14,12 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function
import unittest
from transformers import OpenAIGPTConfig, is_tf_available
from .test_configuration_common import ConfigTester
-from .test_modeling_tf_common import TFCommonTestCases, ids_tensor
+from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from .utils import CACHE_DIR, require_tf, slow
@@ -32,7 +34,7 @@ if is_tf_available():
@require_tf
-class TFOpenAIGPTModelTest(TFCommonTestCases.TFCommonModelTester):
+class TFOpenAIGPTModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (
(TFOpenAIGPTModel, TFOpenAIGPTLMHeadModel, TFOpenAIGPTDoubleHeadsModel) if is_tf_available() else ()
......
@@ -19,7 +19,7 @@ import unittest
from transformers import RobertaConfig, is_tf_available
from .test_configuration_common import ConfigTester
-from .test_modeling_tf_common import TFCommonTestCases, ids_tensor
+from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from .utils import CACHE_DIR, require_tf, slow
@@ -36,7 +36,7 @@ if is_tf_available():
@require_tf
-class TFRobertaModelTest(TFCommonTestCases.TFCommonModelTester):
+class TFRobertaModelTest(TFModelTesterMixin, unittest.TestCase):
all_model_classes = (
(TFRobertaModel, TFRobertaForMaskedLM, TFRobertaForSequenceClassification) if is_tf_available() else ()
......
@@ -14,10 +14,12 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function
import unittest
from transformers import T5Config, is_tf_available
from .test_configuration_common import ConfigTester
-from .test_modeling_tf_common import TFCommonTestCases, ids_tensor
+from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from .utils import CACHE_DIR, require_tf, slow
@@ -26,7 +28,7 @@ if is_tf_available():
@require_tf
-class TFT5ModelTest(TFCommonTestCases.TFCommonModelTester):
+class TFT5ModelTest(TFModelTesterMixin, unittest.TestCase):
is_encoder_decoder = True
all_model_classes = (TFT5Model, TFT5WithLMHeadModel) if is_tf_available() else ()
......