Unverified commit 871c31a6, authored by Yih-Dar, committed by GitHub

🔥Rework pipeline testing by removing `PipelineTestCaseMeta` 🚀 (#21516)



* Add PipelineTesterMixin

* remove class PipelineTestCaseMeta

* move validate_test_components

* Add for ViT

* Add to SPECIAL_MODULE_TO_TEST_MAP

* style and quality

* Add feature-extraction

* update

* raise instead of skip

* add tiny_model_summary.json

* more explicit

* skip tasks not in mapping

* add availability check

* Add Copyright

* A way to disable irrelevant tests

* update with main

* remove disable_irrelevant_tests

* skip tests

* better skip message

* better skip message

* Add all pipeline task tests

* revert

* Import PipelineTesterMixin

* subclass test classes with PipelineTesterMixin

* Add pipeline_model_mapping

* Fix import after adding pipeline_model_mapping

* Fix style and quality after adding pipeline_model_mapping

* Fix one more import after adding pipeline_model_mapping

* Fix style and quality after adding pipeline_model_mapping

* Fix test issues

* Fix import requirements

* Fix mapping for MobileViTModelTest

* Update

* Better skip message

* pipeline_model_mapping cannot be None

* Remove some PipelineTesterMixin

* Fix typo

* revert tests_fetcher.py

* update

* rename

* revert

* Remove PipelineTestCaseMeta from ZeroShotAudioClassificationPipelineTests

* style and quality

* test fetcher for all pipeline/model tests

---------
Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
parent 4cb5ffa9
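The change applied throughout the diff below follows one pattern: each model test class additionally subclasses PipelineTesterMixin and declares a pipeline_model_mapping from pipeline task names to the model class (or tuple of classes) used for that task's pipeline tests. Below is a minimal, hedged sketch of that shape, reusing the DistilBERT classes that also appear in this diff; the test class name is a placeholder, and the relative imports assume the file lives under tests/models/ as in this repository.

import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch

from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    from transformers import DistilBertForSequenceClassification, DistilBertModel


@require_torch
class ExamplePipelineModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # Placeholder test class name; real test classes also define a model tester in setUp.
    # Model classes exercised by the shared model tests (ModelTesterMixin).
    all_model_classes = (DistilBertModel, DistilBertForSequenceClassification) if is_torch_available() else ()
    # Added by this PR: maps a pipeline task name to the model class(es) to test it with.
    # A value may also be a tuple of classes, as in the DeiT and EfficientFormer hunks below.
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "text-classification": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )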
@@ -26,6 +26,7 @@ from transformers.testing_utils import require_tf, require_vision, slow
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_tf_available():
@@ -172,7 +173,7 @@ class TFData2VecVisionModelTester:
 @require_tf
-class TFData2VecVisionModelTest(TFModelTesterMixin, unittest.TestCase):
+class TFData2VecVisionModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     """
     Here we also overwrite some of the tests of test_modeling_common.py, as Data2VecVision does not use input_ids, inputs_embeds,
     attention_mask and seq_length.
@@ -183,6 +184,11 @@ class TFData2VecVisionModelTest(TFModelTesterMixin, unittest.TestCase):
         if is_tf_available()
         else ()
     )
+    pipeline_model_mapping = (
+        {"feature-extraction": TFData2VecVisionModel, "image-classification": TFData2VecVisionForImageClassification}
+        if is_tf_available()
+        else {}
+    )
     test_pruning = False
     test_onnx = False
...
@@ -19,6 +19,7 @@ from transformers.testing_utils import require_sentencepiece, require_tokenizers
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -213,7 +214,7 @@ class DebertaModelTester(object):
 @require_torch
-class DebertaModelTest(ModelTesterMixin, unittest.TestCase):
+class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             DebertaModel,
@@ -225,6 +226,18 @@ class DebertaModelTest(ModelTesterMixin, unittest.TestCase):
         if is_torch_available()
         else ()
     )
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": DebertaModel,
+            "fill-mask": DebertaForMaskedLM,
+            "question-answering": DebertaForQuestionAnswering,
+            "text-classification": DebertaForSequenceClassification,
+            "token-classification": DebertaForTokenClassification,
+            "zero-shot": DebertaForSequenceClassification,
+        }
+        if is_torch_available()
+        else {}
+    )
     fx_compatible = True
     test_torchscript = False
...
@@ -21,6 +21,7 @@ from transformers.testing_utils import require_tf, slow
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_tf_available():
@@ -207,7 +208,7 @@ class TFDebertaModelTester:
 @require_tf
-class TFDebertaModelTest(TFModelTesterMixin, unittest.TestCase):
+class TFDebertaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             TFDebertaModel,
@@ -219,6 +220,18 @@ class TFDebertaModelTest(TFModelTesterMixin, unittest.TestCase):
         if is_tf_available()
         else ()
     )
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": TFDebertaModel,
+            "fill-mask": TFDebertaForMaskedLM,
+            "question-answering": TFDebertaForQuestionAnswering,
+            "text-classification": TFDebertaForSequenceClassification,
+            "token-classification": TFDebertaForTokenClassification,
+            "zero-shot": TFDebertaForSequenceClassification,
+        }
+        if is_tf_available()
+        else {}
+    )
     test_head_masking = False
     test_onnx = False
...
@@ -19,6 +19,7 @@ from transformers.testing_utils import require_sentencepiece, require_tokenizers
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -226,7 +227,7 @@ class DebertaV2ModelTester(object):
 @require_torch
-class DebertaV2ModelTest(ModelTesterMixin, unittest.TestCase):
+class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             DebertaV2Model,
@@ -239,6 +240,18 @@ class DebertaV2ModelTest(ModelTesterMixin, unittest.TestCase):
         if is_torch_available()
         else ()
     )
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": DebertaV2Model,
+            "fill-mask": DebertaV2ForMaskedLM,
+            "question-answering": DebertaV2ForQuestionAnswering,
+            "text-classification": DebertaV2ForSequenceClassification,
+            "token-classification": DebertaV2ForTokenClassification,
+            "zero-shot": DebertaV2ForSequenceClassification,
+        }
+        if is_torch_available()
+        else {}
+    )
     fx_compatible = True
     test_torchscript = False
...
@@ -21,6 +21,7 @@ from transformers.testing_utils import require_tf, slow
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_tf_available():
@@ -209,7 +210,7 @@ class TFDebertaV2ModelTester:
 @require_tf
-class TFDebertaModelTest(TFModelTesterMixin, unittest.TestCase):
+class TFDebertaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             TFDebertaV2Model,
@@ -221,6 +222,18 @@ class TFDebertaModelTest(TFModelTesterMixin, unittest.TestCase):
         if is_tf_available()
         else ()
     )
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": TFDebertaV2Model,
+            "fill-mask": TFDebertaV2ForMaskedLM,
+            "question-answering": TFDebertaV2ForQuestionAnswering,
+            "text-classification": TFDebertaV2ForSequenceClassification,
+            "token-classification": TFDebertaV2ForTokenClassification,
+            "zero-shot": TFDebertaV2ForSequenceClassification,
+        }
+        if is_tf_available()
+        else {}
+    )
     test_head_masking = False
     test_onnx = False
...
@@ -24,6 +24,7 @@ from transformers.testing_utils import require_torch, slow, torch_device
 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -131,9 +132,10 @@ class DecisionTransformerModelTester:
 @require_torch
-class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
+class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
     all_generative_model_classes = ()
+    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
     # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
     test_generate_without_input_ids = False
...
@@ -27,6 +27,7 @@ from transformers.testing_utils import require_timm, require_torch_gpu, require_
 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_timm_available():
@@ -185,8 +186,13 @@ class DeformableDetrModelTester:
 @require_timm
-class DeformableDetrModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
+class DeformableDetrModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (DeformableDetrModel, DeformableDetrForObjectDetection) if is_timm_available() else ()
+    pipeline_model_mapping = (
+        {"feature-extraction": DeformableDetrModel, "object-detection": DeformableDetrForObjectDetection}
+        if is_timm_available()
+        else {}
+    )
     is_encoder_decoder = True
     test_torchscript = False
     test_pruning = False
...
@@ -33,6 +33,7 @@ from transformers.utils import cached_property, is_torch_available, is_vision_av
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -187,7 +188,7 @@ class DeiTModelTester:
 @require_torch
-class DeiTModelTest(ModelTesterMixin, unittest.TestCase):
+class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     """
     Here we also overwrite some of the tests of test_modeling_common.py, as DeiT does not use input_ids, inputs_embeds,
     attention_mask and seq_length.
@@ -203,6 +204,14 @@ class DeiTModelTest(ModelTesterMixin, unittest.TestCase):
         if is_torch_available()
         else ()
     )
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": DeiTModel,
+            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
+        }
+        if is_torch_available()
+        else {}
+    )
     test_pruning = False
     test_resize_embeddings = False
...
@@ -26,6 +26,7 @@ from transformers.utils import cached_property, is_tf_available, is_vision_avail
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_tf_available():
@@ -162,7 +163,7 @@ class TFDeiTModelTester:
 @require_tf
-class TFDeiTModelTest(TFModelTesterMixin, unittest.TestCase):
+class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     """
     Here we also overwrite some of the tests of test_modeling_tf_common.py, as DeiT does not use input_ids, inputs_embeds,
     attention_mask and seq_length.
@@ -178,6 +179,14 @@ class TFDeiTModelTest(TFModelTesterMixin, unittest.TestCase):
         if is_tf_available()
         else ()
     )
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": TFDeiTModel,
+            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
+        }
+        if is_tf_available()
+        else {}
+    )
     test_pruning = False
     test_resize_embeddings = False
...
@@ -26,6 +26,7 @@ from transformers.testing_utils import require_torchvision, require_vision, slow
 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -169,8 +170,13 @@ class DetaModelTester:
 @require_torchvision
-class DetaModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
+class DetaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (DetaModel, DetaForObjectDetection) if is_torchvision_available() else ()
+    pipeline_model_mapping = (
+        {"feature-extraction": DetaModel, "object-detection": DetaForObjectDetection}
+        if is_torchvision_available()
+        else {}
+    )
     is_encoder_decoder = True
     test_torchscript = False
     test_pruning = False
...
@@ -26,6 +26,7 @@ from transformers.utils import cached_property
 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_timm_available():
@@ -174,7 +175,7 @@ class DetrModelTester:
 @require_timm
-class DetrModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
+class DetrModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             DetrModel,
@@ -184,6 +185,15 @@ class DetrModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
         if is_timm_available()
         else ()
     )
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": DetrModel,
+            "image-segmentation": DetrForSegmentation,
+            "object-detection": DetrForObjectDetection,
+        }
+        if is_timm_available()
+        else {}
+    )
     is_encoder_decoder = True
     test_torchscript = False
     test_pruning = False
...
@@ -24,6 +24,7 @@ from transformers.utils import cached_property, is_torch_available, is_vision_av
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -192,7 +193,7 @@ class DinatModelTester:
 @require_natten
 @require_torch
-class DinatModelTest(ModelTesterMixin, unittest.TestCase):
+class DinatModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             DinatModel,
@@ -202,6 +203,11 @@ class DinatModelTest(ModelTesterMixin, unittest.TestCase):
         if is_torch_available()
         else ()
     )
+    pipeline_model_mapping = (
+        {"feature-extraction": DinatModel, "image-classification": DinatForImageClassification}
+        if is_torch_available()
+        else {}
+    )
     fx_compatible = False
     test_torchscript = False
...
@@ -21,6 +21,7 @@ from transformers.testing_utils import require_torch, require_torch_gpu, slow, t
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -195,7 +196,7 @@ class DistilBertModelTester(object):
 @require_torch
-class DistilBertModelTest(ModelTesterMixin, unittest.TestCase):
+class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             DistilBertModel,
@@ -208,6 +209,18 @@ class DistilBertModelTest(ModelTesterMixin, unittest.TestCase):
         if is_torch_available()
         else None
     )
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": DistilBertModel,
+            "fill-mask": DistilBertForMaskedLM,
+            "question-answering": DistilBertForQuestionAnswering,
+            "text-classification": DistilBertForSequenceClassification,
+            "token-classification": DistilBertForTokenClassification,
+            "zero-shot": DistilBertForSequenceClassification,
+        }
+        if is_torch_available()
+        else {}
+    )
     fx_compatible = True
     test_pruning = True
     test_resize_embeddings = True
...
@@ -21,6 +21,7 @@ from transformers.testing_utils import require_tf, slow
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_tf_available():
@@ -169,7 +170,7 @@ class TFDistilBertModelTester:
 @require_tf
-class TFDistilBertModelTest(TFModelTesterMixin, unittest.TestCase):
+class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             TFDistilBertModel,
@@ -182,6 +183,18 @@ class TFDistilBertModelTest(TFModelTesterMixin, unittest.TestCase):
         if is_tf_available()
         else None
     )
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": TFDistilBertModel,
+            "fill-mask": TFDistilBertForMaskedLM,
+            "question-answering": TFDistilBertForQuestionAnswering,
+            "text-classification": TFDistilBertForSequenceClassification,
+            "token-classification": TFDistilBertForTokenClassification,
+            "zero-shot": TFDistilBertForSequenceClassification,
+        }
+        if is_tf_available()
+        else {}
+    )
     test_head_masking = False
     test_onnx = False
...
@@ -24,6 +24,7 @@ from transformers.utils import is_torch_available
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -143,8 +144,9 @@ class DonutSwinModelTester:
 @require_torch
-class DonutSwinModelTest(ModelTesterMixin, unittest.TestCase):
+class DonutSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (DonutSwinModel,) if is_torch_available() else ()
+    pipeline_model_mapping = {"feature-extraction": DonutSwinModel} if is_torch_available() else {}
     fx_compatible = True
     test_pruning = False
...
@@ -22,6 +22,7 @@ from transformers.testing_utils import require_torch, slow, torch_device
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -178,7 +179,7 @@ class DPRModelTester:
 @require_torch
-class DPRModelTest(ModelTesterMixin, unittest.TestCase):
+class DPRModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             DPRContextEncoder,
@@ -188,6 +189,7 @@ class DPRModelTest(ModelTesterMixin, unittest.TestCase):
         if is_torch_available()
         else ()
     )
+    pipeline_model_mapping = {"feature-extraction": DPRQuestionEncoder} if is_torch_available() else {}
     test_resize_embeddings = False
     test_missing_keys = False  # why?
...
@@ -20,6 +20,7 @@ from transformers.testing_utils import require_tf, slow
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_tf_available():
@@ -171,7 +172,7 @@ class TFDPRModelTester:
 @require_tf
-class TFDPRModelTest(TFModelTesterMixin, unittest.TestCase):
+class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             TFDPRContextEncoder,
@@ -181,6 +182,7 @@ class TFDPRModelTest(TFModelTesterMixin, unittest.TestCase):
         if is_tf_available()
         else ()
     )
+    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
     test_resize_embeddings = False
     test_missing_keys = False
...
@@ -25,6 +25,7 @@ from transformers.testing_utils import require_torch, require_vision, slow, torc
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -149,13 +150,22 @@ class DPTModelTester:
 @require_torch
-class DPTModelTest(ModelTesterMixin, unittest.TestCase):
+class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     """
     Here we also overwrite some of the tests of test_modeling_common.py, as DPT does not use input_ids, inputs_embeds,
     attention_mask and seq_length.
     """

     all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
+    pipeline_model_mapping = (
+        {
+            "depth-estimation": DPTForDepthEstimation,
+            "feature-extraction": DPTModel,
+            "image-segmentation": DPTForSemanticSegmentation,
+        }
+        if is_torch_available()
+        else {}
+    )
     test_pruning = False
     test_resize_embeddings = False
...
@@ -25,6 +25,7 @@ from transformers.testing_utils import require_torch, require_vision, slow, torc
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -163,13 +164,22 @@ class DPTModelTester:
 @require_torch
-class DPTModelTest(ModelTesterMixin, unittest.TestCase):
+class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     """
     Here we also overwrite some of the tests of test_modeling_common.py, as DPT does not use input_ids, inputs_embeds,
     attention_mask and seq_length.
     """

     all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
+    pipeline_model_mapping = (
+        {
+            "depth-estimation": DPTForDepthEstimation,
+            "feature-extraction": DPTModel,
+            "image-segmentation": DPTForSemanticSegmentation,
+        }
+        if is_torch_available()
+        else {}
+    )
     test_pruning = False
     test_resize_embeddings = False
...
@@ -26,6 +26,7 @@ from transformers.utils import cached_property, is_torch_available, is_vision_av
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -157,7 +158,7 @@ class EfficientFormerModelTester:
 @require_torch
-class EfficientFormerModelTest(ModelTesterMixin, unittest.TestCase):
+class EfficientFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     """
     Here we also overwrite some of the tests of test_modeling_common.py, as EfficientFormer does not use input_ids, inputs_embeds,
     attention_mask and seq_length.
@@ -172,6 +173,17 @@ class EfficientFormerModelTest(ModelTesterMixin, unittest.TestCase):
         if is_torch_available()
         else ()
     )
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": EfficientFormerModel,
+            "image-classification": (
+                EfficientFormerForImageClassification,
+                EfficientFormerForImageClassificationWithTeacher,
+            ),
+        }
+        if is_torch_available()
+        else {}
+    )
     fx_compatible = False
     test_pruning = False
...