Unverified Commit 1de7dc74 authored by amyeroberts, committed by GitHub

Skip tests properly (#31308)

* Skip tests properly

* [test_all]

* Add 'reason' as kwarg for skipTest

* [test_all] Fix up

* [test_all]
parent 1f9f57ab
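Every change in the diff below follows the same pattern: a test that previously bailed out with a bare `return` (which unittest records as a pass) now calls `self.skipTest(reason=...)` or carries an `@unittest.skip(reason=...)` decorator, so the skip and its reason appear in the test report. A minimal sketch of the variants, using a hypothetical `FEATURE_SUPPORTED` flag in place of the real flags in the diff such as `test_torchscript` or `model_tester.is_training`:

    import unittest

    class ExampleTest(unittest.TestCase):
        FEATURE_SUPPORTED = False  # hypothetical capability flag, not from the diff

        def test_old_style(self):
            if not self.FEATURE_SUPPORTED:
                return  # BAD: silently recorded as a pass
            self.assertTrue(self.FEATURE_SUPPORTED)

        def test_new_style(self):
            if not self.FEATURE_SUPPORTED:
                # raises unittest.SkipTest; recorded as skipped, with the reason
                self.skipTest(reason="FEATURE_SUPPORTED is set to False")
            self.assertTrue(self.FEATURE_SUPPORTED)

        @unittest.skip(reason="feature not supported yet")
        def test_unconditional_skip(self):
            pass

Run under `python -m unittest -v`, this reports `test_new_style` and `test_unconditional_skip` as skipped with their reasons, while `test_old_style` shows up as ok.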
@@ -335,14 +335,15 @@ class BigBirdPegasusModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineT
     def test_generate_without_input_ids(self):
         if self.model_tester.attention_type == "block_sparse":
-            # this test can never pass for BigBird-block-sparse attention since input_ids must be multiple of block_size
-            return
+            self.skipTest(
+                "Cannot pass for BigBird-block-sparse attention since input_ids must be multiple of block_size"
+            )
         super().test_generate_without_input_ids()

     def test_retain_grad_hidden_states_attentions(self):
         if self.model_tester.attention_type == "block_sparse":
             # this test can't pass since attention matrix (which is getting returned) can't have gradients (& just 0 at many locations)
-            return
+            self.skipTest(reason="Cannot pass since returned attention matrix can't have gradients")
         super().test_retain_grad_hidden_states_attentions()

     # BigBirdPegasusForSequenceClassification does not support inputs_embeds
@@ -811,6 +812,6 @@ class BigBirdPegasusStandaloneDecoderModelTest(ModelTesterMixin, GenerationTeste
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)

+    @unittest.skip("Decoder cannot retain gradients")
     def test_retain_grad_hidden_states_attentions(self):
-        # decoder cannot keep gradients
         return
@@ -414,7 +414,7 @@ class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMix
         result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
         self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

-    @unittest.skip("The `input_embeds` when fed don't produce the same results.")
+    @unittest.skip(reason="The `input_embeds` when fed don't produce the same results.")
     def test_beam_sample_generate(self):
         pass
...
@@ -565,6 +565,6 @@ class BlenderbotStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMix
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)

+    @unittest.skip(reason="decoder cannot keep gradients")
     def test_retain_grad_hidden_states_attentions(self):
-        # decoder cannot keep gradients
         return
@@ -564,6 +564,6 @@ class BlenderbotSmallStandaloneDecoderModelTest(ModelTesterMixin, GenerationTest
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)

+    @unittest.skip(reason="decoder cannot keep gradients")
     def test_retain_grad_hidden_states_attentions(self):
-        # decoder cannot keep gradients
         return
@@ -130,18 +130,18 @@ class BlipImageProcessingTestFourChannels(ImageProcessingTestMixin, unittest.Tes
         self.assertTrue(hasattr(image_processor, "image_std"))
         self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

-    @unittest.skip("BlipImageProcessor does not support 4 channels yet")  # FIXME Amy
+    @unittest.skip(reason="BlipImageProcessor does not support 4 channels yet")  # FIXME Amy
     def test_call_numpy(self):
         return super().test_call_numpy()

-    @unittest.skip("BlipImageProcessor does not support 4 channels yet")  # FIXME Amy
+    @unittest.skip(reason="BlipImageProcessor does not support 4 channels yet")  # FIXME Amy
     def test_call_pytorch(self):
         return super().test_call_torch()

-    @unittest.skip("BLIP doesn't treat 4 channel PIL and numpy consistently yet")  # FIXME Amy
+    @unittest.skip(reason="BLIP doesn't treat 4 channel PIL and numpy consistently yet")  # FIXME Amy
     def test_call_pil(self):
         pass

-    @unittest.skip("BLIP doesn't treat 4 channel PIL and numpy consistently yet")  # FIXME Amy
+    @unittest.skip(reason="BLIP doesn't treat 4 channel PIL and numpy consistently yet")  # FIXME Amy
     def test_call_numpy_4_channels(self):
         pass
@@ -193,9 +193,11 @@ class BlipVisionModelTest(ModelTesterMixin, unittest.TestCase):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_model(*config_and_inputs)

+    @unittest.skip
     def test_training(self):
         pass

+    @unittest.skip
     def test_training_gradient_checkpointing(self):
         pass
@@ -335,9 +337,11 @@ class BlipTextModelTest(ModelTesterMixin, unittest.TestCase):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_model(*config_and_inputs)

+    @unittest.skip
     def test_training(self):
         pass

+    @unittest.skip
     def test_training_gradient_checkpointing(self):
         pass
@@ -491,7 +495,7 @@ class BlipModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     def _create_and_check_torchscript(self, config, inputs_dict):
         if not self.test_torchscript:
-            return
+            self.skipTest(reason="test_torchscript is set to False")

         configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
         configs_no_init.torchscript = True
@@ -932,7 +936,7 @@ class BlipTextRetrievalModelTest(ModelTesterMixin, unittest.TestCase):
     def test_training(self):
         if not self.model_tester.is_training:
-            return
+            self.skipTest(reason="ModelTester is not setup for training")

         for model_class in self.all_model_classes[:-1]:
             config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
@@ -951,7 +955,7 @@ class BlipTextRetrievalModelTest(ModelTesterMixin, unittest.TestCase):
     def test_training_gradient_checkpointing(self):
         if not self.model_tester.is_training:
-            return
+            self.skipTest(reason="ModelTester is not setup for training")

         for model_class in self.all_model_classes[:-1]:
             config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
@@ -1008,7 +1012,7 @@ class BlipTextRetrievalModelTest(ModelTesterMixin, unittest.TestCase):
     def _create_and_check_torchscript(self, config, inputs_dict):
         if not self.test_torchscript:
-            return
+            self.skipTest(reason="test_torchscript is set to False")

         configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
         configs_no_init.torchscript = True
@@ -1160,7 +1164,7 @@ class BlipTextImageModelTest(ModelTesterMixin, unittest.TestCase):
     def test_training(self):
         if not self.model_tester.is_training:
-            return
+            self.skipTest(reason="ModelTester is not setup for training")

         for model_class in self.all_model_classes[:-1]:
             config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
@@ -1179,7 +1183,7 @@ class BlipTextImageModelTest(ModelTesterMixin, unittest.TestCase):
     def test_training_gradient_checkpointing(self):
         if not self.model_tester.is_training:
-            return
+            self.skipTest(reason="ModelTester is not setup for training")

         for model_class in self.all_model_classes[:-1]:
             config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
@@ -1224,7 +1228,7 @@ class BlipTextImageModelTest(ModelTesterMixin, unittest.TestCase):
     def _create_and_check_torchscript(self, config, inputs_dict):
         if not self.test_torchscript:
-            return
+            self.skipTest(reason="test_torchscript is set to False")

         configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
         configs_no_init.torchscript = True
...
@@ -141,9 +141,11 @@ class BlipTextModelTest(ModelTesterMixin, unittest.TestCase):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_model(*config_and_inputs)

+    @unittest.skip
     def test_training(self):
         pass

+    @unittest.skip
     def test_training_gradient_checkpointing(self):
         pass
...
@@ -187,9 +187,11 @@ class Blip2VisionModelTest(ModelTesterMixin, unittest.TestCase):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_model(*config_and_inputs)

+    @unittest.skip
     def test_training(self):
         pass

+    @unittest.skip
     def test_training_gradient_checkpointing(self):
         pass
...
@@ -389,7 +389,7 @@ class BloomModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixi
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_bloom_weight_initialization(*config_and_inputs)

-    @unittest.skip("Bloom has a non-standard KV cache format.")
+    @unittest.skip(reason="Bloom has a non-standard KV cache format.")
     def test_past_key_values_format(self):
         pass
...
@@ -43,7 +43,7 @@ class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         kwargs.update(self.special_tokens_map)
         return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

-    @unittest.skip("This needs a slow tokenizer. Bloom does not have one!")
+    @unittest.skip(reason="This needs a slow tokenizer. Bloom does not have one!")
     def test_encode_decode_with_spaces(self):
         return
...
@@ -300,15 +300,15 @@ class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         self.assertTrue(tokenizer.decode([255]) == "")

-    # tokenizer does not have vocabulary
+    @unittest.skip(reason="ByT5Tokenizer does not have a vocabulary")
     def test_get_vocab(self):
         pass

-    # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters
+    @unittest.skip(reason="inputs cannot be pretokenized as ids depend on whole input string")
     def test_pretokenized_inputs(self):
         pass

-    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
+    @unittest.skip(reason="ByT5Tokenizer does not have a vocabulary")
     def test_conversion_reversible(self):
         pass
...
@@ -94,7 +94,7 @@ class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_rust_and_python_full_tokenizers(self):
         if not self.test_rust_tokenizer:
-            return
+            self.skipTest(reason="test_rust_tokenizer is set to False")

         tokenizer = self.get_tokenizer()
         rust_tokenizer = self.get_rust_tokenizer()
...
@@ -441,7 +441,7 @@ class CanineModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     def test_headmasking(self):
         if not self.test_head_masking:
-            return
+            self.skipTest(reason="test_head_masking is set to False")

         global_rng.seed(42)
         config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
@@ -496,7 +496,7 @@ class CanineModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         check_attentions_validity(outputs.attentions)

-    @unittest.skip("CANINE does not have a get_input_embeddings() method.")
+    @unittest.skip(reason="CANINE does not have a get_input_embeddings() method.")
     def test_inputs_embeds(self):
         # ViT does not use inputs_embeds
         pass
@@ -505,7 +505,7 @@ class CanineModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     def test_inputs_embeds_matches_input_ids(self):
         pass

-    @unittest.skip("CANINE does not have a get_input_embeddings() method.")
+    @unittest.skip(reason="CANINE does not have a get_input_embeddings() method.")
     def test_model_get_set_embeddings(self):
         pass
...
@@ -303,31 +303,32 @@ class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
         self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])

-    # tokenizer has a fixed vocab_size (namely all possible unicode code points)
+    @unittest.skip(reason="tokenizer has a fixed vocab_size (namely all possible unicode code points)")
     def test_add_tokens_tokenizer(self):
         pass

     # CanineTokenizer does not support do_lower_case = True, as each character has its own Unicode code point
     # ("b" and "B" for example have different Unicode code points)
+    @unittest.skip(reason="CanineTokenizer does not support do_lower_case = True")
     def test_added_tokens_do_lower_case(self):
         pass

-    # CanineModel does not support the get_input_embeddings nor the get_vocab method
+    @unittest.skip(reason="CanineModel does not support the get_input_embeddings nor the get_vocab method")
     def test_np_encode_plus_sent_to_model(self):
         pass

-    # CanineModel does not support the get_input_embeddings nor the get_vocab method
+    @unittest.skip(reason="CanineModel does not support the get_input_embeddings nor the get_vocab method")
     def test_torch_encode_plus_sent_to_model(self):
         pass

-    # tokenizer does not have vocabulary
+    @unittest.skip(reason="CanineTokenizer does not have vocabulary")
     def test_get_vocab(self):
         pass

-    # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters
+    @unittest.skip(reason="inputs cannot be pretokenized since ids depend on whole input string")
     def test_pretokenized_inputs(self):
         pass

-    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
+    @unittest.skip(reason="CanineTokenizer does not have vocabulary")
     def test_conversion_reversible(self):
         pass
@@ -17,7 +17,7 @@
 import unittest

 from transformers.testing_utils import require_torch, require_vision
-from transformers.utils import is_torch_available, is_vision_available
+from transformers.utils import is_vision_available

 from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
@@ -26,10 +26,6 @@ if is_vision_available():
     from transformers import ChineseCLIPImageProcessor

-
-if is_torch_available():
-    pass
-

 class ChineseCLIPImageProcessingTester(unittest.TestCase):
     def __init__(
         self,
@@ -125,7 +121,9 @@ class ChineseCLIPImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase
         self.assertEqual(image_processor.size, {"shortest_edge": 42})
         self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

-    @unittest.skip("ChineseCLIPImageProcessor doesn't treat 4 channel PIL and numpy consistently yet")  # FIXME Amy
+    @unittest.skip(
+        reason="ChineseCLIPImageProcessor doesn't treat 4 channel PIL and numpy consistently yet"
+    )  # FIXME Amy
     def test_call_numpy_4_channels(self):
         pass
@@ -155,14 +153,16 @@ class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingTestMixin, unitt
         self.assertTrue(hasattr(image_processing, "image_std"))
         self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

-    @unittest.skip("ChineseCLIPImageProcessor does not support 4 channels yet")  # FIXME Amy
+    @unittest.skip(reason="ChineseCLIPImageProcessor does not support 4 channels yet")  # FIXME Amy
     def test_call_numpy(self):
         return super().test_call_numpy()

-    @unittest.skip("ChineseCLIPImageProcessor does not support 4 channels yet")  # FIXME Amy
+    @unittest.skip(reason="ChineseCLIPImageProcessor does not support 4 channels yet")  # FIXME Amy
     def test_call_pytorch(self):
         return super().test_call_torch()

-    @unittest.skip("ChineseCLIPImageProcessor doesn't treat 4 channel PIL and numpy consistently yet")  # FIXME Amy
+    @unittest.skip(
+        reason="ChineseCLIPImageProcessor doesn't treat 4 channel PIL and numpy consistently yet"
+    )  # FIXME Amy
     def test_call_numpy_4_channels(self):
         pass
@@ -388,9 +388,11 @@ class ChineseCLIPTextModelTest(ModelTesterMixin, unittest.TestCase):
         model = ChineseCLIPTextModel.from_pretrained(model_name)
         self.assertIsNotNone(model)

+    @unittest.skip
     def test_training(self):
         pass

+    @unittest.skip
     def test_training_gradient_checkpointing(self):
         pass
@@ -466,9 +468,11 @@ class ChineseCLIPVisionModelTest(ModelTesterMixin, unittest.TestCase):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_model(*config_and_inputs)

+    @unittest.skip
     def test_training(self):
         pass

+    @unittest.skip
     def test_training_gradient_checkpointing(self):
         pass
@@ -621,7 +625,7 @@ class ChineseCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestC
     def _create_and_check_torchscript(self, config, inputs_dict):
         if not self.test_torchscript:
-            return
+            self.skipTest(reason="test_torchscript is set to False")

         configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
         configs_no_init.torchscript = True
...
@@ -562,7 +562,7 @@ class ClapModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     def _create_and_check_torchscript(self, config, inputs_dict):
         if not self.test_torchscript:
-            return
+            self.skipTest(reason="test_torchscript is set to False")

         configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
         configs_no_init.torchscript = True
...
@@ -220,9 +220,11 @@ class CLIPVisionModelTest(ModelTesterMixin, unittest.TestCase):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_model_with_projection(*config_and_inputs)

+    @unittest.skip
     def test_training(self):
         pass

+    @unittest.skip
     def test_training_gradient_checkpointing(self):
         pass
@@ -381,9 +383,11 @@ class CLIPTextModelTest(ModelTesterMixin, unittest.TestCase):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_model_with_projection(*config_and_inputs)

+    @unittest.skip
     def test_training(self):
         pass

+    @unittest.skip
     def test_training_gradient_checkpointing(self):
         pass
@@ -535,7 +539,7 @@ class CLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     def _create_and_check_torchscript(self, config, inputs_dict):
         if not self.test_torchscript:
-            return
+            self.skipTest(reason="test_torchscript is set to False")

         configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
         configs_no_init.torchscript = True
@@ -636,7 +640,7 @@ class CLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         fx_model_class_name = "Flax" + model_class.__name__

         if not hasattr(transformers, fx_model_class_name):
-            return
+            self.skipTest(reason="No Flax model exists for this class")

         fx_model_class = getattr(transformers, fx_model_class_name)
@@ -692,8 +696,7 @@ class CLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         fx_model_class_name = "Flax" + model_class.__name__

         if not hasattr(transformers, fx_model_class_name):
-            # no flax model exists for this class
-            return
+            self.skipTest(reason="No Flax model exists for this class")

         fx_model_class = getattr(transformers, fx_model_class_name)
...
@@ -178,7 +178,6 @@ class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_tokenization_python_rust_equals(self):
         super().test_tokenization_python_rust_equals()

-    # overwrite common test
+    @unittest.skip(reason="CLIP always lower cases letters")
     def test_added_tokens_do_lower_case(self):
-        # CLIP always lower cases letters
         pass
@@ -194,9 +194,11 @@ class CLIPSegVisionModelTest(ModelTesterMixin, unittest.TestCase):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_model(*config_and_inputs)

+    @unittest.skip
     def test_training(self):
         pass

+    @unittest.skip
     def test_training_gradient_checkpointing(self):
         pass
@@ -331,9 +333,11 @@ class CLIPSegTextModelTest(ModelTesterMixin, unittest.TestCase):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_model(*config_and_inputs)

+    @unittest.skip
     def test_training(self):
         pass

+    @unittest.skip
     def test_training_gradient_checkpointing(self):
         pass
@@ -540,7 +544,7 @@ class CLIPSegModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase)
     def _create_and_check_torchscript(self, config, inputs_dict):
         if not self.test_torchscript:
-            return
+            self.skipTest(reason="test_torchscript is set to False")

         configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
         configs_no_init.torchscript = True
@@ -641,7 +645,7 @@ class CLIPSegModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase)
         fx_model_class_name = "Flax" + model_class.__name__

         if not hasattr(transformers, fx_model_class_name):
-            return
+            self.skipTest(reason="No Flax model exists for this class")

         fx_model_class = getattr(transformers, fx_model_class_name)
@@ -697,8 +701,7 @@ class CLIPSegModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase)
         fx_model_class_name = "Flax" + model_class.__name__

         if not hasattr(transformers, fx_model_class_name):
-            # no flax model exists for this class
-            return
+            self.skipTest(reason="No Flax model exists for this class")

         fx_model_class = getattr(transformers, fx_model_class_name)
@@ -744,7 +747,7 @@ class CLIPSegModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase)
     def test_training(self):
         if not self.model_tester.is_training:
-            return
+            self.skipTest(reason="Training test is skipped as the model was not trained")

         for model_class in self.all_model_classes:
             config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
...
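As a usage note: with pytest, which the transformers test suite uses, the effect of changes like these is visible in the short test summary, e.g. `pytest -rs tests/models/bloom` (path illustrative, assuming a repository checkout) now lists each skip with its reason, whereas the old bare `return` counted those tests as passed.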