Unverified Commit 871c31a6 authored by Yih-Dar, committed by GitHub

🔥 Rework pipeline testing by removing `PipelineTestCaseMeta` 🚀 (#21516)



* Add PipelineTesterMixin

* remove class PipelineTestCaseMeta

* move validate_test_components

* Add for ViT

* Add to SPECIAL_MODULE_TO_TEST_MAP

* style and quality

* Add feature-extraction

* update

* raise instead of skip

* add tiny_model_summary.json

* more explicit

* skip tasks not in mapping

* add availability check

* Add Copyright

* A way to disable irrelevant tests

* update with main

* remove disable_irrelevant_tests

* skip tests

* better skip message

* better skip message

* Add all pipeline task tests

* revert

* Import PipelineTesterMixin

* subclass test classes with PipelineTesterMixin

* Add pipeline_model_mapping

* Fix import after adding pipeline_model_mapping

* Fix style and quality after adding pipeline_model_mapping

* Fix one more import after adding pipeline_model_mapping

* Fix style and quality after adding pipeline_model_mapping

* Fix test issues

* Fix import requirements

* Fix mapping for MobileViTModelTest

* Update

* Better skip message

* pipeline_model_mapping cannot be None

* Remove some PipelineTesterMixin

* Fix typo

* revert tests_fetcher.py

* update

* rename

* revert

* Remove PipelineTestCaseMeta from ZeroShotAudioClassificationPipelineTests

* style and quality

* test fetcher for all pipeline/model tests

---------
Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
parent 4cb5ffa9
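
In short, the PR replaces the implicit, metaclass-driven pipeline tests with an explicit opt-in: each model test class subclasses `PipelineTesterMixin` and declares a `pipeline_model_mapping` from pipeline task names to the model class that should back them, as the diffs below show. A minimal sketch of the new pattern, with hypothetical `FooModel` / `FooForSequenceClassification` standing in for a real model's classes:

# Sketch only: FooModel and FooForSequenceClassification are placeholder names;
# see the per-model test files changed below for concrete instances.
import unittest

from transformers import is_torch_available

from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    from transformers import FooForSequenceClassification, FooModel


class FooModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (FooModel, FooForSequenceClassification) if is_torch_available() else ()
    # Tasks absent from this mapping are skipped with an explicit skip message,
    # rather than run against a model that cannot support them.
    pipeline_model_mapping = (
        {
            "feature-extraction": FooModel,
            "text-classification": FooForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

Compared with the old `PipelineTestCaseMeta`, the explicit mapping makes each model's pipeline coverage greppable in its own test file and lets tooling such as `tests_fetcher.py` select the relevant pipeline tests per model.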
@@ -21,6 +21,7 @@ from transformers.testing_utils import require_sentencepiece, require_tf, requir
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_tf_available():
@@ -270,7 +271,7 @@ class TFLongformerModelTester:
 @require_tf
-class TFLongformerModelTest(TFModelTesterMixin, unittest.TestCase):
+class TFLongformerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             TFLongformerModel,
@@ -283,6 +284,18 @@ class TFLongformerModelTest(TFModelTesterMixin, unittest.TestCase):
         if is_tf_available()
         else ()
     )
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": TFLongformerModel,
+            "fill-mask": TFLongformerForMaskedLM,
+            "question-answering": TFLongformerForQuestionAnswering,
+            "text-classification": TFLongformerForSequenceClassification,
+            "token-classification": TFLongformerForTokenClassification,
+            "zero-shot": TFLongformerForSequenceClassification,
+        }
+        if is_tf_available()
+        else {}
+    )
     test_head_masking = False
     test_onnx = False
......
@@ -26,6 +26,7 @@ from transformers.utils import cached_property
 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -500,9 +501,19 @@ class LongT5ModelTester:
 @require_torch
-class LongT5ModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
+class LongT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (LongT5Model, LongT5ForConditionalGeneration) if is_torch_available() else ()
     all_generative_model_classes = (LongT5ForConditionalGeneration,) if is_torch_available() else ()
+    pipeline_model_mapping = (
+        {
+            "conversational": LongT5ForConditionalGeneration,
+            "feature-extraction": LongT5Model,
+            "summarization": LongT5ForConditionalGeneration,
+            "text2text-generation": LongT5ForConditionalGeneration,
+        }
+        if is_torch_available()
+        else {}
+    )
     fx_compatible = False
     test_pruning = False
     test_torchscript = True
......
@@ -20,6 +20,7 @@ from transformers.testing_utils import require_torch, slow, torch_device
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -585,7 +586,7 @@ class LukeModelTester:
 @require_torch
-class LukeModelTest(ModelTesterMixin, unittest.TestCase):
+class LukeModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             LukeModel,
@@ -601,6 +602,18 @@ class LukeModelTest(ModelTesterMixin, unittest.TestCase):
         if is_torch_available()
         else ()
     )
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": LukeModel,
+            "fill-mask": LukeForMaskedLM,
+            "question-answering": LukeForQuestionAnswering,
+            "text-classification": LukeForSequenceClassification,
+            "token-classification": LukeForTokenClassification,
+            "zero-shot": LukeForSequenceClassification,
+        }
+        if is_torch_available()
+        else {}
+    )
     test_pruning = False
     test_torchscript = False
     test_resize_embeddings = True
......
@@ -25,6 +25,7 @@ from transformers.testing_utils import require_torch, slow, torch_device
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -529,8 +530,13 @@ class LxmertModelTester:
 @require_torch
-class LxmertModelTest(ModelTesterMixin, unittest.TestCase):
+class LxmertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (LxmertModel, LxmertForPreTraining, LxmertForQuestionAnswering) if is_torch_available() else ()
+    pipeline_model_mapping = (
+        {"feature-extraction": LxmertModel, "question-answering": LxmertForQuestionAnswering}
+        if is_torch_available()
+        else {}
+    )
     fx_compatible = True
     test_head_masking = False
......
@@ -24,6 +24,7 @@ from transformers.testing_utils import require_tf, slow, tooslow
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_tf_available():
@@ -363,8 +364,9 @@ class TFLxmertModelTester(object):
 @require_tf
-class TFLxmertModelTest(TFModelTesterMixin, unittest.TestCase):
+class TFLxmertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (TFLxmertModel, TFLxmertForPreTraining) if is_tf_available() else ()
+    pipeline_model_mapping = {"feature-extraction": TFLxmertModel} if is_tf_available() else {}
     test_head_masking = False
     test_onnx = False
......
@@ -26,6 +26,7 @@ from transformers.utils import cached_property
 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -220,7 +221,7 @@ class M2M100ModelTester:
 @require_torch
-class M2M100ModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
+class M2M100ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             M2M100Model,
@@ -230,6 +231,16 @@ class M2M100ModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase
         else ()
     )
     all_generative_model_classes = (M2M100ForConditionalGeneration,) if is_torch_available() else ()
+    pipeline_model_mapping = (
+        {
+            "conversational": M2M100ForConditionalGeneration,
+            "feature-extraction": M2M100Model,
+            "summarization": M2M100ForConditionalGeneration,
+            "text2text-generation": M2M100ForConditionalGeneration,
+        }
+        if is_torch_available()
+        else {}
+    )
     is_encoder_decoder = True
     fx_compatible = True
     test_pruning = False
......
@@ -26,6 +26,7 @@ from transformers.utils import cached_property
 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -235,9 +236,20 @@ class MarianModelTester:
 @require_torch
-class MarianModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
+class MarianModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (MarianModel, MarianMTModel) if is_torch_available() else ()
     all_generative_model_classes = (MarianMTModel,) if is_torch_available() else ()
+    pipeline_model_mapping = (
+        {
+            "conversational": MarianMTModel,
+            "feature-extraction": MarianModel,
+            "summarization": MarianMTModel,
+            "text2text-generation": MarianMTModel,
+            "text-generation": MarianForCausalLM,
+        }
+        if is_torch_available()
+        else {}
+    )
     is_encoder_decoder = True
     fx_compatible = True
     test_pruning = False
......
@@ -24,6 +24,7 @@ from transformers.utils import cached_property
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_tf_available():
@@ -177,9 +178,19 @@ def prepare_marian_inputs_dict(
 @require_tf
-class TFMarianModelTest(TFModelTesterMixin, unittest.TestCase):
+class TFMarianModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (TFMarianMTModel, TFMarianModel) if is_tf_available() else ()
     all_generative_model_classes = (TFMarianMTModel,) if is_tf_available() else ()
+    pipeline_model_mapping = (
+        {
+            "conversational": TFMarianMTModel,
+            "feature-extraction": TFMarianModel,
+            "summarization": TFMarianMTModel,
+            "text2text-generation": TFMarianMTModel,
+        }
+        if is_tf_available()
+        else {}
+    )
     is_encoder_decoder = True
     test_pruning = False
     test_onnx = False
......
@@ -22,6 +22,7 @@ from transformers.utils import cached_property
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -275,7 +276,7 @@ class MarkupLMModelTester:
 @require_torch
-class MarkupLMModelTest(ModelTesterMixin, unittest.TestCase):
+class MarkupLMModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             MarkupLMModel,
@@ -286,6 +287,17 @@ class MarkupLMModelTest(ModelTesterMixin, unittest.TestCase):
         if is_torch_available()
         else None
     )
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": MarkupLMModel,
+            "question-answering": MarkupLMForQuestionAnswering,
+            "text-classification": MarkupLMForSequenceClassification,
+            "token-classification": MarkupLMForTokenClassification,
+            "zero-shot": MarkupLMForSequenceClassification,
+        }
+        if is_torch_available()
+        else {}
+    )

     def setUp(self):
         self.model_tester = MarkupLMModelTester(self)
......
@@ -26,6 +26,7 @@ from transformers.utils import cached_property
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -172,8 +173,9 @@ class Mask2FormerModelTester:
 @require_torch
-class Mask2FormerModelTest(ModelTesterMixin, unittest.TestCase):
+class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
+    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}
     is_encoder_decoder = False
     test_pruning = False
......
@@ -26,6 +26,7 @@ from transformers.utils import cached_property
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -172,8 +173,13 @@ class MaskFormerModelTester:
 @require_torch
-class MaskFormerModelTest(ModelTesterMixin, unittest.TestCase):
+class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
+    pipeline_model_mapping = (
+        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
+        if is_torch_available()
+        else {}
+    )
     is_encoder_decoder = False
     test_pruning = False
......
@@ -25,6 +25,7 @@ from transformers.utils import is_torch_available
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -162,7 +163,7 @@ class MaskFormerSwinModelTester:
 @require_torch
-class MaskFormerSwinModelTest(ModelTesterMixin, unittest.TestCase):
+class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             MaskFormerSwinModel,
@@ -171,6 +172,7 @@ class MaskFormerSwinModelTest(ModelTesterMixin, unittest.TestCase):
         if is_torch_available()
         else ()
     )
+    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
     fx_compatible = False
     test_torchscript = False
     test_pruning = False
......
@@ -26,6 +26,7 @@ from transformers.utils import cached_property
 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -224,13 +225,28 @@ class MBartModelTester:
 @require_torch
-class MBartModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
+class MBartModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (MBartModel, MBartForConditionalGeneration, MBartForSequenceClassification, MBartForQuestionAnswering)
         if is_torch_available()
         else ()
     )
     all_generative_model_classes = (MBartForConditionalGeneration,) if is_torch_available() else ()
+    pipeline_model_mapping = (
+        {
+            "conversational": MBartForConditionalGeneration,
+            "feature-extraction": MBartModel,
+            "fill-mask": MBartForConditionalGeneration,
+            "question-answering": MBartForQuestionAnswering,
+            "summarization": MBartForConditionalGeneration,
+            "text2text-generation": MBartForConditionalGeneration,
+            "text-classification": MBartForSequenceClassification,
+            "text-generation": MBartForCausalLM,
+            "zero-shot": MBartForSequenceClassification,
+        }
+        if is_torch_available()
+        else {}
+    )
     is_encoder_decoder = True
     fx_compatible = False  # Fix me Michael
     test_pruning = False
......
@@ -22,6 +22,7 @@ from transformers.utils import cached_property
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_tf_available():
@@ -180,9 +181,19 @@ def prepare_mbart_inputs_dict(
 @require_tf
-class TFMBartModelTest(TFModelTesterMixin, unittest.TestCase):
+class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
     all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
+    pipeline_model_mapping = (
+        {
+            "conversational": TFMBartForConditionalGeneration,
+            "feature-extraction": TFMBartModel,
+            "summarization": TFMBartForConditionalGeneration,
+            "text2text-generation": TFMBartForConditionalGeneration,
+        }
+        if is_tf_available()
+        else {}
+    )
     is_encoder_decoder = True
     test_pruning = False
     test_onnx = False
......
@@ -25,6 +25,7 @@ from transformers.testing_utils import require_soundfile, require_torch, slow, t
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -265,8 +266,11 @@ class MCTCTModelTester:
 @require_torch
 @unittest.skipIf(is_torch_less_than_1_9, "MCTCT is only available in torch v1.9+")
-class MCTCTModelTest(ModelTesterMixin, unittest.TestCase):
+class MCTCTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (MCTCTForCTC, MCTCTModel) if is_torch_available() else ()
+    pipeline_model_mapping = (
+        {"automatic-speech-recognition": MCTCTForCTC, "feature-extraction": MCTCTModel} if is_torch_available() else {}
+    )
     test_pruning = False
     test_headmasking = False
     test_torchscript = False
......
@@ -25,6 +25,7 @@ from transformers.testing_utils import require_sentencepiece, require_tokenizers
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -266,7 +267,7 @@ class MegatronBertModelTester:
 @require_torch
-class MegatronBertModelTest(ModelTesterMixin, unittest.TestCase):
+class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             MegatronBertModel,
@@ -282,6 +283,19 @@ class MegatronBertModelTest(ModelTesterMixin, unittest.TestCase):
         if is_torch_available()
         else ()
     )
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": MegatronBertModel,
+            "fill-mask": MegatronBertForMaskedLM,
+            "question-answering": MegatronBertForQuestionAnswering,
+            "text-classification": MegatronBertForSequenceClassification,
+            "text-generation": MegatronBertForCausalLM,
+            "token-classification": MegatronBertForTokenClassification,
+            "zero-shot": MegatronBertForSequenceClassification,
+        }
+        if is_torch_available()
+        else {}
+    )
     fx_compatible = True
     # test_resize_embeddings = False
     test_head_masking = False
......
@@ -22,6 +22,7 @@ from transformers.testing_utils import require_sentencepiece, require_tokenizers
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -253,7 +254,7 @@ class MobileBertModelTester:
 @require_torch
-class MobileBertModelTest(ModelTesterMixin, unittest.TestCase):
+class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             MobileBertModel,
@@ -268,6 +269,18 @@ class MobileBertModelTest(ModelTesterMixin, unittest.TestCase):
         if is_torch_available()
         else ()
     )
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": MobileBertModel,
+            "fill-mask": MobileBertForMaskedLM,
+            "question-answering": MobileBertForQuestionAnswering,
+            "text-classification": MobileBertForSequenceClassification,
+            "token-classification": MobileBertForTokenClassification,
+            "zero-shot": MobileBertForSequenceClassification,
+        }
+        if is_torch_available()
+        else {}
+    )
     fx_compatible = True

     # special case for ForPreTraining model
......
@@ -22,6 +22,7 @@ from transformers.testing_utils import require_tf, slow, tooslow
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_tf_available():
@@ -41,7 +42,7 @@ if is_tf_available():
 @require_tf
-class TFMobileBertModelTest(TFModelTesterMixin, unittest.TestCase):
+class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             TFMobileBertModel,
@@ -56,6 +57,18 @@ class TFMobileBertModelTest(TFModelTesterMixin, unittest.TestCase):
         if is_tf_available()
         else ()
     )
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": TFMobileBertModel,
+            "fill-mask": TFMobileBertForMaskedLM,
+            "question-answering": TFMobileBertForQuestionAnswering,
+            "text-classification": TFMobileBertForSequenceClassification,
+            "token-classification": TFMobileBertForTokenClassification,
+            "zero-shot": TFMobileBertForSequenceClassification,
+        }
+        if is_tf_available()
+        else {}
+    )
     test_head_masking = False
     test_onnx = False
......
@@ -24,6 +24,7 @@ from transformers.utils import cached_property, is_torch_available, is_vision_av
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -139,13 +140,18 @@ class MobileNetV1ModelTester:
 @require_torch
-class MobileNetV1ModelTest(ModelTesterMixin, unittest.TestCase):
+class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     """
     Here we also overwrite some of the tests of test_modeling_common.py, as MobileNetV1 does not use input_ids, inputs_embeds,
     attention_mask and seq_length.
     """

     all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
+    pipeline_model_mapping = (
+        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
+        if is_torch_available()
+        else {}
+    )
     test_pruning = False
     test_resize_embeddings = False
......
@@ -24,6 +24,7 @@ from transformers.utils import cached_property, is_torch_available, is_vision_av
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -182,7 +183,7 @@ class MobileNetV2ModelTester:
 @require_torch
-class MobileNetV2ModelTest(ModelTesterMixin, unittest.TestCase):
+class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     """
     Here we also overwrite some of the tests of test_modeling_common.py, as MobileNetV2 does not use input_ids, inputs_embeds,
     attention_mask and seq_length.
@@ -193,6 +194,15 @@ class MobileNetV2ModelTest(ModelTesterMixin, unittest.TestCase):
         if is_torch_available()
         else ()
     )
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": MobileNetV2Model,
+            "image-classification": MobileNetV2ForImageClassification,
+            "image-segmentation": MobileNetV2ForSemanticSegmentation,
+        }
+        if is_torch_available()
+        else {}
+    )
     test_pruning = False
     test_resize_embeddings = False
......