Unverified Commit 871c31a6 authored by Yih-Dar's avatar Yih-Dar Committed by GitHub
Browse files

🔥Rework pipeline testing by removing `PipelineTestCaseMeta` 🚀 (#21516)



* Add PipelineTesterMixin

* remove class PipelineTestCaseMeta

* move validate_test_components

* Add for ViT

* Add to SPECIAL_MODULE_TO_TEST_MAP

* style and quality

* Add feature-extraction

* update

* raise instead of skip

* add tiny_model_summary.json

* more explicit

* skip tasks not in mapping

* add availability check

* Add Copyright

* A way to disable irrelevant tests

* update with main

* remove disable_irrelevant_tests

* skip tests

* better skip message

* better skip message

* Add all pipeline task tests

* revert

* Import PipelineTesterMixin

* subclass test classes with PipelineTesterMixin

* Add pipeline_model_mapping

* Fix import after adding pipeline_model_mapping

* Fix style and quality after adding pipeline_model_mapping

* Fix one more import after adding pipeline_model_mapping

* Fix style and quality after adding pipeline_model_mapping

* Fix test issues

* Fix import requirements

* Fix mapping for MobileViTModelTest

* Update

* Better skip message

* pipeline_model_mapping cannot be None

* Remove some PipelineTesterMixin

* Fix typo

* revert tests_fetcher.py

* update

* rename

* revert

* Remove PipelineTestCaseMeta from ZeroShotAudioClassificationPipelineTests

* style and quality

* test fetcher for all pipeline/model tests

---------
Co-authored-by: default avatarydshieh <ydshieh@users.noreply.github.com>
parent 4cb5ffa9
...@@ -29,8 +29,6 @@ from transformers import ( ...@@ -29,8 +29,6 @@ from transformers import (
) )
from transformers.testing_utils import nested_simplify, require_tf, require_torch from transformers.testing_utils import nested_simplify, require_tf, require_torch
from .test_pipelines_common import PipelineTestCaseMeta
if is_torch_available(): if is_torch_available():
import torch import torch
...@@ -39,7 +37,7 @@ if is_tf_available(): ...@@ -39,7 +37,7 @@ if is_tf_available():
import tensorflow as tf import tensorflow as tf
class FeatureExtractionPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): class FeatureExtractionPipelineTests(unittest.TestCase):
model_mapping = MODEL_MAPPING model_mapping = MODEL_MAPPING
tf_model_mapping = TF_MODEL_MAPPING tf_model_mapping = TF_MODEL_MAPPING
......
...@@ -18,10 +18,10 @@ from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAP ...@@ -18,10 +18,10 @@ from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAP
from transformers.pipelines import PipelineException from transformers.pipelines import PipelineException
from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_torch_gpu, slow from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_torch_gpu, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta from .test_pipelines_common import ANY
class FillMaskPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): class FillMaskPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_MASKED_LM_MAPPING model_mapping = MODEL_FOR_MASKED_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
......
...@@ -30,7 +30,7 @@ from transformers.testing_utils import ( ...@@ -30,7 +30,7 @@ from transformers.testing_utils import (
slow, slow,
) )
from .test_pipelines_common import ANY, PipelineTestCaseMeta from .test_pipelines_common import ANY
if is_vision_available(): if is_vision_available():
...@@ -45,7 +45,7 @@ else: ...@@ -45,7 +45,7 @@ else:
@require_torch_or_tf @require_torch_or_tf
@require_vision @require_vision
class ImageClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): class ImageClassificationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
......
...@@ -36,7 +36,7 @@ from transformers import ( ...@@ -36,7 +36,7 @@ from transformers import (
) )
from transformers.testing_utils import nested_simplify, require_tf, require_timm, require_torch, require_vision, slow from transformers.testing_utils import nested_simplify, require_tf, require_timm, require_torch, require_vision, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta from .test_pipelines_common import ANY
if is_vision_available(): if is_vision_available():
...@@ -70,7 +70,7 @@ def mask_to_test_readable_only_shape(mask: Image) -> Dict: ...@@ -70,7 +70,7 @@ def mask_to_test_readable_only_shape(mask: Image) -> Dict:
@require_vision @require_vision
@require_timm @require_timm
@require_torch @require_torch
class ImageSegmentationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): class ImageSegmentationPipelineTests(unittest.TestCase):
model_mapping = { model_mapping = {
k: v k: v
for k, v in ( for k, v in (
......
...@@ -18,7 +18,7 @@ from transformers import MODEL_FOR_VISION_2_SEQ_MAPPING, TF_MODEL_FOR_VISION_2_S ...@@ -18,7 +18,7 @@ from transformers import MODEL_FOR_VISION_2_SEQ_MAPPING, TF_MODEL_FOR_VISION_2_S
from transformers.pipelines import pipeline from transformers.pipelines import pipeline
from transformers.testing_utils import require_tf, require_torch, require_vision, slow from transformers.testing_utils import require_tf, require_torch, require_vision, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta from .test_pipelines_common import ANY
if is_vision_available(): if is_vision_available():
...@@ -32,7 +32,7 @@ else: ...@@ -32,7 +32,7 @@ else:
@require_vision @require_vision
class ImageToTextPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): class ImageToTextPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING
tf_model_mapping = TF_MODEL_FOR_VISION_2_SEQ_MAPPING tf_model_mapping = TF_MODEL_FOR_VISION_2_SEQ_MAPPING
......
...@@ -32,7 +32,7 @@ from transformers.testing_utils import ( ...@@ -32,7 +32,7 @@ from transformers.testing_utils import (
slow, slow,
) )
from .test_pipelines_common import ANY, PipelineTestCaseMeta from .test_pipelines_common import ANY
if is_vision_available(): if is_vision_available():
...@@ -48,7 +48,7 @@ else: ...@@ -48,7 +48,7 @@ else:
@require_vision @require_vision
@require_timm @require_timm
@require_torch @require_torch
class ObjectDetectionPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): class ObjectDetectionPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
def get_test_pipeline(self, model, tokenizer, processor): def get_test_pipeline(self, model, tokenizer, processor):
......
...@@ -24,10 +24,10 @@ from transformers.data.processors.squad import SquadExample ...@@ -24,10 +24,10 @@ from transformers.data.processors.squad import SquadExample
from transformers.pipelines import QuestionAnsweringArgumentHandler, pipeline from transformers.pipelines import QuestionAnsweringArgumentHandler, pipeline
from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_torch_or_tf, slow from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_torch_or_tf, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta from .test_pipelines_common import ANY
class QAPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): class QAPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_QUESTION_ANSWERING_MAPPING model_mapping = MODEL_FOR_QUESTION_ANSWERING_MAPPING
tf_model_mapping = TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING tf_model_mapping = TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING
......
...@@ -24,13 +24,13 @@ from transformers import ( ...@@ -24,13 +24,13 @@ from transformers import (
from transformers.testing_utils import get_gpu_count, require_tf, require_torch, slow, torch_device from transformers.testing_utils import get_gpu_count, require_tf, require_torch, slow, torch_device
from transformers.tokenization_utils import TruncationStrategy from transformers.tokenization_utils import TruncationStrategy
from .test_pipelines_common import ANY, PipelineTestCaseMeta from .test_pipelines_common import ANY
DEFAULT_DEVICE_NUM = -1 if torch_device == "cpu" else 0 DEFAULT_DEVICE_NUM = -1 if torch_device == "cpu" else 0
class SummarizationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): class SummarizationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
......
...@@ -24,10 +24,8 @@ from transformers import ( ...@@ -24,10 +24,8 @@ from transformers import (
) )
from transformers.testing_utils import require_pandas, require_tensorflow_probability, require_tf, require_torch, slow from transformers.testing_utils import require_pandas, require_tensorflow_probability, require_tf, require_torch, slow
from .test_pipelines_common import PipelineTestCaseMeta
class TQAPipelineTests(unittest.TestCase):
class TQAPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
# Putting it there for consistency, but TQA do not have fast tokenizer # Putting it there for consistency, but TQA do not have fast tokenizer
# which are needed to generate automatic tests # which are needed to generate automatic tests
model_mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING model_mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING
......
...@@ -23,14 +23,14 @@ from transformers import ( ...@@ -23,14 +23,14 @@ from transformers import (
from transformers.testing_utils import require_tf, require_torch from transformers.testing_utils import require_tf, require_torch
from transformers.utils import is_torch_available from transformers.utils import is_torch_available
from .test_pipelines_common import ANY, PipelineTestCaseMeta from .test_pipelines_common import ANY
if is_torch_available(): if is_torch_available():
import torch import torch
class Text2TextGenerationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): class Text2TextGenerationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
......
...@@ -22,10 +22,10 @@ from transformers import ( ...@@ -22,10 +22,10 @@ from transformers import (
) )
from transformers.testing_utils import nested_simplify, require_tf, require_torch, slow from transformers.testing_utils import nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta from .test_pipelines_common import ANY
class TextClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): class TextClassificationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
......
...@@ -23,11 +23,11 @@ from transformers.testing_utils import ( ...@@ -23,11 +23,11 @@ from transformers.testing_utils import (
require_torch_or_tf, require_torch_or_tf,
) )
from .test_pipelines_common import ANY, PipelineTestCaseMeta from .test_pipelines_common import ANY
@require_torch_or_tf @require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): class TextGenerationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
......
...@@ -27,13 +27,13 @@ from transformers import ( ...@@ -27,13 +27,13 @@ from transformers import (
from transformers.pipelines import AggregationStrategy, TokenClassificationArgumentHandler from transformers.pipelines import AggregationStrategy, TokenClassificationArgumentHandler
from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_torch_gpu, slow from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_torch_gpu, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta from .test_pipelines_common import ANY
VALID_INPUTS = ["A simple string", ["list of strings", "A simple string that is quite a bit longer"]] VALID_INPUTS = ["A simple string", ["list of strings", "A simple string that is quite a bit longer"]]
class TokenClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): class TokenClassificationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING model_mapping = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
......
...@@ -27,10 +27,10 @@ from transformers import ( ...@@ -27,10 +27,10 @@ from transformers import (
) )
from transformers.testing_utils import require_tf, require_torch, slow from transformers.testing_utils import require_tf, require_torch, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta from .test_pipelines_common import ANY
class TranslationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): class TranslationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
......
...@@ -27,13 +27,13 @@ from transformers.testing_utils import ( ...@@ -27,13 +27,13 @@ from transformers.testing_utils import (
require_vision, require_vision,
) )
from .test_pipelines_common import ANY, PipelineTestCaseMeta from .test_pipelines_common import ANY
@require_torch_or_tf @require_torch_or_tf
@require_vision @require_vision
@require_decord @require_decord
class VideoClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): class VideoClassificationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def get_test_pipeline(self, model, tokenizer, processor): def get_test_pipeline(self, model, tokenizer, processor):
......
...@@ -18,7 +18,7 @@ from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_ ...@@ -18,7 +18,7 @@ from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_
from transformers.pipelines import pipeline from transformers.pipelines import pipeline
from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_vision, slow from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_vision, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta from .test_pipelines_common import ANY
if is_vision_available(): if is_vision_available():
...@@ -33,7 +33,7 @@ else: ...@@ -33,7 +33,7 @@ else:
@require_torch @require_torch
@require_vision @require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def get_test_pipeline(self, model, tokenizer, processor): def get_test_pipeline(self, model, tokenizer, processor):
......
...@@ -23,10 +23,10 @@ from transformers import ( ...@@ -23,10 +23,10 @@ from transformers import (
) )
from transformers.testing_utils import nested_simplify, require_tf, require_torch, slow from transformers.testing_utils import nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta from .test_pipelines_common import ANY
class ZeroShotClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): class ZeroShotClassificationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
......
...@@ -19,11 +19,9 @@ from datasets import load_dataset ...@@ -19,11 +19,9 @@ from datasets import load_dataset
from transformers.pipelines import pipeline from transformers.pipelines import pipeline
from transformers.testing_utils import nested_simplify, require_torch, slow from transformers.testing_utils import nested_simplify, require_torch, slow
from .test_pipelines_common import PipelineTestCaseMeta
@require_torch @require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
# Deactivating auto tests since we don't have a good MODEL_FOR_XX mapping, # Deactivating auto tests since we don't have a good MODEL_FOR_XX mapping,
# and only CLAP would be there for now. # and only CLAP would be there for now.
# model_mapping = {CLAPConfig: CLAPModel} # model_mapping = {CLAPConfig: CLAPModel}
......
...@@ -18,7 +18,7 @@ from transformers import is_vision_available ...@@ -18,7 +18,7 @@ from transformers import is_vision_available
from transformers.pipelines import pipeline from transformers.pipelines import pipeline
from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_vision, slow from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_vision, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta from .test_pipelines_common import ANY
if is_vision_available(): if is_vision_available():
...@@ -32,7 +32,7 @@ else: ...@@ -32,7 +32,7 @@ else:
@require_vision @require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
# Deactivating auto tests since we don't have a good MODEL_FOR_XX mapping, # Deactivating auto tests since we don't have a good MODEL_FOR_XX mapping,
# and only CLIP would be there for now. # and only CLIP would be there for now.
# model_mapping = {CLIPConfig: CLIPModel} # model_mapping = {CLIPConfig: CLIPModel}
......
...@@ -17,7 +17,7 @@ import unittest ...@@ -17,7 +17,7 @@ import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_vision, slow from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_vision, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta from .test_pipelines_common import ANY
if is_vision_available(): if is_vision_available():
...@@ -32,7 +32,7 @@ else: ...@@ -32,7 +32,7 @@ else:
@require_vision @require_vision
@require_torch @require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def get_test_pipeline(self, model, tokenizer, processor): def get_test_pipeline(self, model, tokenizer, processor):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment