Unverified Commit 871c31a6 authored by Yih-Dar, committed by GitHub

🔥Rework pipeline testing by removing `PipelineTestCaseMeta` 🚀 (#21516)



* Add PipelineTesterMixin

* remove class PipelineTestCaseMeta

* move validate_test_components

* Add for ViT

* Add to SPECIAL_MODULE_TO_TEST_MAP

* style and quality

* Add feature-extraction

* update

* raise instead of skip

* add tiny_model_summary.json

* more explicit

* skip tasks not in mapping

* add availability check

* Add Copyright

* A way to disable irrelevant tests

* update with main

* remove disable_irrelevant_tests

* skip tests

* better skip message

* better skip message

* Add all pipeline task tests

* revert

* Import PipelineTesterMixin

* subclass test classes with PipelineTesterMixin

* Add pipeline_model_mapping

* Fix import after adding pipeline_model_mapping

* Fix style and quality after adding pipeline_model_mapping

* Fix one more import after adding pipeline_model_mapping

* Fix style and quality after adding pipeline_model_mapping

* Fix test issues

* Fix import requirements

* Fix mapping for MobileViTModelTest

* Update

* Better skip message

* pipeline_model_mapping cannot be None

* Remove some PipelineTesterMixin

* Fix typo

* revert tests_fetcher.py

* update

* rename

* revert

* Remove PipelineTestCaseMeta from ZeroShotAudioClassificationPipelineTests

* style and quality

* test fetcher for all pipeline/model tests

---------
Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
parent 4cb5ffa9
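
The diff below applies one pattern across every model test file: each `*ModelTest` class gains `PipelineTesterMixin` as an extra base class (replacing the removed `PipelineTestCaseMeta` metaclass) and declares a `pipeline_model_mapping` from pipeline task names to the model classes that back them. Here is a condensed sketch of that pattern, using the ALBERT PyTorch test from this diff as the template — the mapping is shortened to two tasks for brevity, and the relative imports assume the file lives inside the `tests` package:

# Condensed from the ALBERT change below; the full diff maps six tasks.
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch

from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    from transformers import AlbertForMaskedLM, AlbertModel


@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # Guarded with is_torch_available() so test collection still works
    # when torch is not installed; the dict branch is never evaluated then.
    pipeline_model_mapping = (
        {"feature-extraction": AlbertModel, "fill-mask": AlbertForMaskedLM}
        if is_torch_available()
        else {}
    )

Judging from the commit messages above, the mixin runs one test per supported task and skips, with an explicit message, any task that has no entry in the mapping.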
@@ -22,6 +22,7 @@ from transformers.testing_utils import require_torch, slow, torch_device
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -239,7 +240,7 @@ class AlbertModelTester:

 @require_torch
-class AlbertModelTest(ModelTesterMixin, unittest.TestCase):
+class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             AlbertModel,
@@ -253,6 +254,18 @@ class AlbertModelTest(ModelTesterMixin, unittest.TestCase):
         if is_torch_available()
         else ()
     )
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": AlbertModel,
+            "fill-mask": AlbertForMaskedLM,
+            "question-answering": AlbertForQuestionAnswering,
+            "text-classification": AlbertForSequenceClassification,
+            "token-classification": AlbertForTokenClassification,
+            "zero-shot": AlbertForSequenceClassification,
+        }
+        if is_torch_available()
+        else {}
+    )
     fx_compatible = True

     # special case for ForPreTraining model
......
@@ -22,6 +22,7 @@ from transformers.testing_utils import require_tf, slow
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_tf_available():
@@ -227,7 +228,7 @@ class TFAlbertModelTester:

 @require_tf
-class TFAlbertModelTest(TFModelTesterMixin, unittest.TestCase):
+class TFAlbertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             TFAlbertModel,
@@ -241,6 +242,18 @@ class TFAlbertModelTest(TFModelTesterMixin, unittest.TestCase):
         if is_tf_available()
         else ()
     )
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": TFAlbertModel,
+            "fill-mask": TFAlbertForMaskedLM,
+            "question-answering": TFAlbertForQuestionAnswering,
+            "text-classification": TFAlbertForSequenceClassification,
+            "token-classification": TFAlbertForTokenClassification,
+            "zero-shot": TFAlbertForSequenceClassification,
+        }
+        if is_tf_available()
+        else {}
+    )
     test_head_masking = False
     test_onnx = False
......
@@ -35,6 +35,7 @@ from ...test_modeling_common import (
     ids_tensor,
     random_attention_mask,
 )
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -392,8 +393,9 @@ def prepare_img():

 @require_torch
-class AltCLIPModelTest(ModelTesterMixin, unittest.TestCase):
+class AltCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (AltCLIPModel,) if is_torch_available() else ()
+    pipeline_model_mapping = {"feature-extraction": AltCLIPModel} if is_torch_available() else {}
     fx_compatible = True
     test_head_masking = False
     test_pruning = False
......
@@ -25,6 +25,7 @@ from transformers.utils import cached_property, is_torch_available, is_torchaudi
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -140,7 +141,7 @@ class ASTModelTester:

 @require_torch
-class ASTModelTest(ModelTesterMixin, unittest.TestCase):
+class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     """
     Here we also overwrite some of the tests of test_modeling_common.py, as AST does not use input_ids, inputs_embeds,
     attention_mask and seq_length.
@@ -154,6 +155,11 @@ class ASTModelTest(ModelTesterMixin, unittest.TestCase):
         if is_torch_available()
         else ()
     )
+    pipeline_model_mapping = (
+        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
+        if is_torch_available()
+        else {}
+    )
     fx_compatible = False
     test_pruning = False
     test_resize_embeddings = False
......
@@ -28,6 +28,7 @@ from transformers.utils import cached_property
 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -414,13 +415,28 @@ class BartHeadTests(unittest.TestCase):

 @require_torch
-class BartModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
+class BartModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (BartModel, BartForConditionalGeneration, BartForSequenceClassification, BartForQuestionAnswering)
         if is_torch_available()
         else ()
     )
     all_generative_model_classes = (BartForConditionalGeneration,) if is_torch_available() else ()
+    pipeline_model_mapping = (
+        {
+            "conversational": BartForConditionalGeneration,
+            "feature-extraction": BartModel,
+            "fill-mask": BartForConditionalGeneration,
+            "question-answering": BartForQuestionAnswering,
+            "summarization": BartForConditionalGeneration,
+            "text2text-generation": BartForConditionalGeneration,
+            "text-classification": BartForSequenceClassification,
+            "text-generation": BartForCausalLM,
+            "zero-shot": BartForSequenceClassification,
+        }
+        if is_torch_available()
+        else {}
+    )
     is_encoder_decoder = True
     fx_compatible = False  # Fix me Michael
     test_pruning = False
......
@@ -25,6 +25,7 @@ from transformers.utils import cached_property
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin
 from ...utils.test_modeling_tf_core import TFCoreModelTesterMixin
@@ -188,11 +189,23 @@ def prepare_bart_inputs_dict(

 @require_tf
-class TFBartModelTest(TFModelTesterMixin, TFCoreModelTesterMixin, unittest.TestCase):
+class TFBartModelTest(TFModelTesterMixin, TFCoreModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBartModel) if is_tf_available() else ()
     )
     all_generative_model_classes = (TFBartForConditionalGeneration,) if is_tf_available() else ()
+    pipeline_model_mapping = (
+        {
+            "conversational": TFBartForConditionalGeneration,
+            "feature-extraction": TFBartModel,
+            "summarization": TFBartForConditionalGeneration,
+            "text2text-generation": TFBartForConditionalGeneration,
+            "text-classification": TFBartForSequenceClassification,
+            "zero-shot": TFBartForSequenceClassification,
+        }
+        if is_tf_available()
+        else {}
+    )
     is_encoder_decoder = True
     test_pruning = False
     test_onnx = True
......
@@ -28,6 +28,7 @@ from transformers.utils import cached_property, is_torch_available, is_vision_av
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -185,7 +186,7 @@ class BeitModelTester:

 @require_torch
-class BeitModelTest(ModelTesterMixin, unittest.TestCase):
+class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     """
     Here we also overwrite some of the tests of test_modeling_common.py, as BEiT does not use input_ids, inputs_embeds,
     attention_mask and seq_length.
@@ -196,6 +197,15 @@ class BeitModelTest(ModelTesterMixin, unittest.TestCase):
         if is_torch_available()
         else ()
     )
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": BeitModel,
+            "image-classification": BeitForImageClassification,
+            "image-segmentation": BeitForSemanticSegmentation,
+        }
+        if is_torch_available()
+        else {}
+    )
     test_pruning = False
     test_resize_embeddings = False
......
@@ -23,6 +23,7 @@ from transformers.testing_utils import require_torch, require_torch_gpu, slow, t
 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -426,7 +427,7 @@ class BertModelTester:

 @require_torch
-class BertModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
+class BertModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             BertModel,
@@ -443,6 +444,19 @@ class BertModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
         else ()
     )
     all_generative_model_classes = (BertLMHeadModel,) if is_torch_available() else ()
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": BertModel,
+            "fill-mask": BertForMaskedLM,
+            "question-answering": BertForQuestionAnswering,
+            "text-classification": BertForSequenceClassification,
+            "text-generation": BertLMHeadModel,
+            "token-classification": BertForTokenClassification,
+            "zero-shot": BertForSequenceClassification,
+        }
+        if is_torch_available()
+        else {}
+    )
     fx_compatible = True

     # special case for ForPreTraining model
......
@@ -22,6 +22,7 @@ from transformers.testing_utils import require_tf, slow
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin
 from ...utils.test_modeling_tf_core import TFCoreModelTesterMixin
@@ -590,7 +591,7 @@ class TFBertModelTester:

 @require_tf
-class TFBertModelTest(TFModelTesterMixin, TFCoreModelTesterMixin, unittest.TestCase):
+class TFBertModelTest(TFModelTesterMixin, TFCoreModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             TFBertModel,
@@ -606,6 +607,19 @@ class TFBertModelTest(TFModelTesterMixin, TFCoreModelTesterMixin, unittest.TestC
         if is_tf_available()
         else ()
     )
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": TFBertModel,
+            "fill-mask": TFBertForMaskedLM,
+            "question-answering": TFBertForQuestionAnswering,
+            "text-classification": TFBertForSequenceClassification,
+            "text-generation": TFBertLMHeadModel,
+            "token-classification": TFBertForTokenClassification,
+            "zero-shot": TFBertForSequenceClassification,
+        }
+        if is_tf_available()
+        else {}
+    )
     test_head_masking = False
     test_onnx = True
     onnx_min_opset = 10
......
@@ -22,6 +22,7 @@ from transformers.testing_utils import require_torch, slow, torch_device
 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -240,9 +241,14 @@ class BertGenerationEncoderTester:

 @require_torch
-class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
+class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
     all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
+    pipeline_model_mapping = (
+        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
+        if is_torch_available()
+        else {}
+    )

     def setUp(self):
         self.model_tester = BertGenerationEncoderTester(self)
......
@@ -24,6 +24,7 @@ from transformers.testing_utils import require_torch, slow, torch_device
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -429,7 +430,7 @@ class BigBirdModelTester:

 @require_torch
-class BigBirdModelTest(ModelTesterMixin, unittest.TestCase):
+class BigBirdModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     # head masking & pruning is currently not supported for big bird
     test_head_masking = False
     test_pruning = False
@@ -453,6 +454,19 @@ class BigBirdModelTest(ModelTesterMixin, unittest.TestCase):
         else ()
     )
     all_generative_model_classes = (BigBirdForCausalLM,) if is_torch_available() else ()
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": BigBirdModel,
+            "fill-mask": BigBirdForMaskedLM,
+            "question-answering": BigBirdForQuestionAnswering,
+            "text-classification": BigBirdForSequenceClassification,
+            "text-generation": BigBirdForCausalLM,
+            "token-classification": BigBirdForTokenClassification,
+            "zero-shot": BigBirdForSequenceClassification,
+        }
+        if is_torch_available()
+        else {}
+    )

     # special case for ForPreTraining model
     def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
......
@@ -25,6 +25,7 @@ from transformers.testing_utils import require_sentencepiece, require_tokenizers
 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -232,7 +233,7 @@ class BigBirdPegasusModelTester:

 @require_torch
-class BigBirdPegasusModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
+class BigBirdPegasusModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             BigBirdPegasusModel,
@@ -244,6 +245,20 @@ class BigBirdPegasusModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.
         else ()
     )
     all_generative_model_classes = (BigBirdPegasusForConditionalGeneration,) if is_torch_available() else ()
+    pipeline_model_mapping = (
+        {
+            "conversational": BigBirdPegasusForConditionalGeneration,
+            "feature-extraction": BigBirdPegasusModel,
+            "question-answering": BigBirdPegasusForQuestionAnswering,
+            "summarization": BigBirdPegasusForConditionalGeneration,
+            "text2text-generation": BigBirdPegasusForConditionalGeneration,
+            "text-classification": BigBirdPegasusForSequenceClassification,
+            "text-generation": BigBirdPegasusForCausalLM,
+            "zero-shot": BigBirdPegasusForSequenceClassification,
+        }
+        if is_torch_available()
+        else {}
+    )
     is_encoder_decoder = True
     test_missing_keys = False
     test_pruning = False
......
@@ -23,6 +23,7 @@ from transformers.testing_utils import require_torch, slow, torch_device
 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -262,9 +263,12 @@ class BioGptModelTester:

 @require_torch
-class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
+class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (BioGptModel, BioGptForCausalLM) if is_torch_available() else ()
     all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
+    pipeline_model_mapping = (
+        {"feature-extraction": BioGptModel, "text-generation": BioGptForCausalLM} if is_torch_available() else {}
+    )
     test_pruning = False

     def setUp(self):
......
@@ -24,6 +24,7 @@ from transformers.utils import cached_property, is_torch_available, is_vision_av
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -150,13 +151,18 @@ class BitModelTester:

 @require_torch
-class BitModelTest(ModelTesterMixin, unittest.TestCase):
+class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     """
     Here we also overwrite some of the tests of test_modeling_common.py, as Bit does not use input_ids, inputs_embeds,
     attention_mask and seq_length.
     """

     all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
+    pipeline_model_mapping = (
+        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
+        if is_torch_available()
+        else {}
+    )
     fx_compatible = False
     test_pruning = False
......
@@ -24,6 +24,7 @@ from transformers.utils import cached_property
 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -223,9 +224,20 @@ class BlenderbotModelTester:

 @require_torch
-class BlenderbotModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
+class BlenderbotModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (BlenderbotModel, BlenderbotForConditionalGeneration) if is_torch_available() else ()
     all_generative_model_classes = (BlenderbotForConditionalGeneration,) if is_torch_available() else ()
+    pipeline_model_mapping = (
+        {
+            "conversational": BlenderbotForConditionalGeneration,
+            "feature-extraction": BlenderbotModel,
+            "summarization": BlenderbotForConditionalGeneration,
+            "text2text-generation": BlenderbotForConditionalGeneration,
+            "text-generation": BlenderbotForCausalLM,
+        }
+        if is_torch_available()
+        else {}
+    )
     is_encoder_decoder = True
     fx_compatible = True
     test_pruning = False
......
@@ -22,6 +22,7 @@ from transformers.utils import cached_property
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_tf_available():
@@ -175,9 +176,19 @@ def prepare_blenderbot_inputs_dict(

 @require_tf
-class TFBlenderbotModelTest(TFModelTesterMixin, unittest.TestCase):
+class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
     all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
+    pipeline_model_mapping = (
+        {
+            "conversational": TFBlenderbotForConditionalGeneration,
+            "feature-extraction": TFBlenderbotModel,
+            "summarization": TFBlenderbotForConditionalGeneration,
+            "text2text-generation": TFBlenderbotForConditionalGeneration,
+        }
+        if is_tf_available()
+        else {}
+    )
     is_encoder_decoder = True
     test_pruning = False
     test_onnx = False
......
@@ -24,6 +24,7 @@ from transformers.utils import cached_property
 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -217,9 +218,20 @@ class BlenderbotSmallModelTester:

 @require_torch
-class BlenderbotSmallModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
+class BlenderbotSmallModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (BlenderbotSmallModel, BlenderbotSmallForConditionalGeneration) if is_torch_available() else ()
     all_generative_model_classes = (BlenderbotSmallForConditionalGeneration,) if is_torch_available() else ()
+    pipeline_model_mapping = (
+        {
+            "conversational": BlenderbotSmallForConditionalGeneration,
+            "feature-extraction": BlenderbotSmallModel,
+            "summarization": BlenderbotSmallForConditionalGeneration,
+            "text2text-generation": BlenderbotSmallForConditionalGeneration,
+            "text-generation": BlenderbotSmallForCausalLM,
+        }
+        if is_torch_available()
+        else {}
+    )
     is_encoder_decoder = True
     fx_compatible = True
     test_pruning = False
......
@@ -22,6 +22,7 @@ from transformers.utils import cached_property
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_tf_available():
@@ -175,11 +176,21 @@ def prepare_blenderbot_small_inputs_dict(

 @require_tf
-class TFBlenderbotSmallModelTest(TFModelTesterMixin, unittest.TestCase):
+class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
     )
     all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
+    pipeline_model_mapping = (
+        {
+            "conversational": TFBlenderbotSmallForConditionalGeneration,
+            "feature-extraction": TFBlenderbotSmallModel,
+            "summarization": TFBlenderbotSmallForConditionalGeneration,
+            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
+        }
+        if is_tf_available()
+        else {}
+    )
     is_encoder_decoder = True
     test_pruning = False
     test_onnx = False
......
@@ -35,6 +35,7 @@ from ...test_modeling_common import (
     ids_tensor,
     random_attention_mask,
 )
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -391,8 +392,9 @@ class BlipModelTester:

 @require_torch
-class BlipModelTest(ModelTesterMixin, unittest.TestCase):
+class BlipModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (BlipModel,) if is_torch_available() else ()
+    pipeline_model_mapping = {"feature-extraction": BlipModel} if is_torch_available() else {}
     fx_compatible = False
     test_head_masking = False
     test_pruning = False
......
@@ -23,6 +23,7 @@ from transformers.testing_utils import require_torch, require_torch_gpu, slow, t
 from ...generation.test_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin

 if is_torch_available():
@@ -319,7 +320,7 @@ class BloomModelTester:

 @require_torch
-class BloomModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
+class BloomModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             BloomModel,
@@ -333,6 +334,18 @@ class BloomModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase)
     )

     all_generative_model_classes = (BloomForCausalLM,) if is_torch_available() else ()
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": BloomModel,
+            "question-answering": BloomForQuestionAnswering,
+            "text-classification": BloomForSequenceClassification,
+            "text-generation": BloomForCausalLM,
+            "token-classification": BloomForTokenClassification,
+            "zero-shot": BloomForSequenceClassification,
+        }
+        if is_torch_available()
+        else {}
+    )
     fx_compatible = True
     test_missing_keys = False
     test_pruning = False
......
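
For the intuition behind the rework: the old `PipelineTestCaseMeta` generated pipeline tests through a metaclass, while the new design is an ordinary mixin whose task tests consult the class-level `pipeline_model_mapping` at runtime. The sketch below is a toy illustration of that mechanism, not the actual `tests/test_pipeline_mixin.py`; the real mixin builds tiny models per task (see `tiny_model_summary.json` in the commit list above) and covers many more tasks and checks.

# Toy illustration only -- not the real tests/test_pipeline_mixin.py.
# It shows why a plain mixin can replace a test-generating metaclass: each
# task test looks up pipeline_model_mapping on the concrete test class and
# skips itself when no model is declared for that task.
import unittest


class PipelineTesterMixinSketch:
    pipeline_model_mapping = {}  # overridden by concrete test classes

    def _run_pipeline_test(self, task):
        model_class = self.pipeline_model_mapping.get(task)
        if model_class is None:
            self.skipTest(f"`{task}` is not in `pipeline_model_mapping` for {type(self).__name__}.")
        # Here the real mixin would load a tiny checkpoint of `model_class`
        # and run the corresponding pipeline end to end.

    def test_pipeline_feature_extraction(self):
        self._run_pipeline_test("feature-extraction")

    def test_pipeline_fill_mask(self):
        self._run_pipeline_test("fill-mask")


class DummyModel:
    pass


class DummyModelTest(PipelineTesterMixinSketch, unittest.TestCase):
    # "fill-mask" has no entry, so test_pipeline_fill_mask reports as skipped.
    pipeline_model_mapping = {"feature-extraction": DummyModel}


if __name__ == "__main__":
    unittest.main()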