Unverified Commit 1de7dc74 authored by amyeroberts, committed by GitHub

Skip tests properly (#31308)

* Skip tests properly

* [test_all]

* Add 'reason' as kwarg for skipTest

* [test_all] Fix up

* [test_all]
parent 1f9f57ab
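The diff below applies one pattern throughout the test suite: tests that silently no-op'd (a bare `return` at the top of the body, or a `pass` body with an explanatory comment) are turned into proper skips via `self.skipTest(reason=...)` or `@unittest.skip(reason=...)`, and existing positional skip messages gain an explicit `reason=` kwarg. A minimal sketch of the before/after behaviour, using the standard-library `unittest` API (the class and attribute names here are illustrative, not taken from the diff):

```python
import unittest


class ExampleModelTest(unittest.TestCase):
    # Illustrative stand-in for the ModelTesterMixin-style test classes in the diff.
    is_training = False

    def test_training(self):
        # Before this PR's pattern: `if not self.is_training: return` made the
        # runner count the test as a pass. skipTest() reports it as skipped,
        # with the reason recorded in the test output.
        if not self.is_training:
            self.skipTest(reason="is_training is set to False")
        self.assertTrue(True)  # real training checks would go here

    # Decorator form, with the message passed as an explicit `reason` kwarg
    # instead of a bare positional string or a comment above a `pass` body.
    @unittest.skip(reason="this feature is not supported by the example model")
    def test_unsupported_feature(self):
        pass
```

Running this sketch with `python -m unittest -v` reports both tests as `skipped` with their reasons, rather than `ok`.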
@@ -377,7 +377,7 @@ class OneFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCas
     def test_training(self):
         if not self.model_tester.is_training:
-            return
+            self.skipTest(reason="model_tester.is_training is set to False")

         # only OneFormerForUniversalSegmentation has the loss
         model_class = self.all_model_classes[1]
         (
@@ -209,6 +209,7 @@ class OneFormerProcessingTest(unittest.TestCase):
         self.assertTrue(hasattr(processor, "max_seq_length"))
         self.assertTrue(hasattr(processor, "task_seq_length"))

+    @unittest.skip
     def test_batch_feature(self):
         pass
@@ -397,6 +398,7 @@ class OneFormerProcessingTest(unittest.TestCase):
         return inputs

+    @unittest.skip
     def test_init_without_params(self):
         pass
@@ -131,7 +131,7 @@ class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
             padding="max_length",
         )

-    # tokenizer has no padding token
+    @unittest.skip(reason="tokenizer has no padding token")
     def test_padding_different_model_input_name(self):
         pass
@@ -322,7 +322,7 @@ class OPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin,
         result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
         self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

-    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
+    @unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.")
     def test_model_parallelism(self):
         super().test_model_parallelism()
@@ -168,6 +168,6 @@ class Owlv2ImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
             f"Batch image bounding boxes fail. Expected {expected_boxes}, got {boxes}",
         )

-    @unittest.skip("OWLv2 doesn't treat 4 channel PIL and numpy consistently yet")  # FIXME Amy
+    @unittest.skip(reason="OWLv2 doesn't treat 4 channel PIL and numpy consistently yet")  # FIXME Amy
     def test_call_numpy_4_channels(self):
         pass
@@ -494,7 +494,7 @@ class Owlv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     def _create_and_check_torchscript(self, config, inputs_dict):
         if not self.test_torchscript:
-            return
+            self.skipTest(reason="test_torchscript is set to False")

         configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
         configs_no_init.torchscript = True
@@ -708,7 +708,7 @@ class Owlv2ForObjectDetectionTest(ModelTesterMixin, unittest.TestCase):
     def _create_and_check_torchscript(self, config, inputs_dict):
         if not self.test_torchscript:
-            return
+            self.skipTest(reason="test_torchscript is set to False")

         configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
         configs_no_init.torchscript = True
@@ -489,7 +489,7 @@ class OwlViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     def _create_and_check_torchscript(self, config, inputs_dict):
         if not self.test_torchscript:
-            return
+            self.skipTest(reason="test_torchscript is set to False")

         configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
         configs_no_init.torchscript = True
@@ -701,7 +701,7 @@ class OwlViTForObjectDetectionTest(ModelTesterMixin, unittest.TestCase):
     def _create_and_check_torchscript(self, config, inputs_dict):
         if not self.test_torchscript:
-            return
+            self.skipTest(reason="test_torchscript is set to False")

         configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
         configs_no_init.torchscript = True
@@ -234,7 +234,7 @@ class PaliGemmaForConditionalGenerationModelTest(ModelTesterMixin, unittest.Test
         pass

     # TODO extend valid outputs to include this test @Molbap
-    @unittest.skip("PaliGemma has currently one output format.")
+    @unittest.skip(reason="PaliGemma has currently one output format.")
     def test_model_outputs_equivalence(self):
         pass
@@ -317,7 +317,7 @@ class PatchTSMixerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.Test
         for model_class in self.all_model_classes:
             check_hidden_states_output(inputs_dict, config, model_class)

-    @unittest.skip("No tokens embeddings")
+    @unittest.skip(reason="No tokens embeddings")
     def test_resize_tokens_embeddings(self):
         pass
@@ -592,6 +592,6 @@ class PegasusStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin,
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)

+    @unittest.skip(reason="Decoder cannot keep gradients")
     def test_retain_grad_hidden_states_attentions(self):
-        # decoder cannot keep gradients
         return
@@ -128,10 +128,6 @@ class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
             revision="ba85d0851d708441f91440d509690f1ab6353415",
         )

-    # @unittest.skip("We have to use from_slow")
-    # def test_added_tokens_serialization(self):
-    #     pass
-

 @require_sentencepiece
 @require_tokenizers
@@ -215,7 +211,3 @@ class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
             token_ids,
             [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
         )
-
-    # @unittest.skip("We have to use from_slow")
-    # def test_added_tokens_serialization(self):
-    #     pass
@@ -872,6 +872,6 @@ class PegasusXStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)

+    @unittest.skip(reason="Decoder cannot keep gradients")
     def test_retain_grad_hidden_states_attentions(self):
-        # decoder cannot keep gradients
         return
@@ -387,7 +387,7 @@ class PerceiverModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCas
     def test_training(self):
         if not self.model_tester.is_training:
-            return
+            self.skipTest(reason="model_tester.is_training is set to False")

         for model_class in self.all_model_classes:
             if model_class.__name__ in [
@@ -732,7 +732,7 @@ class PerceiverModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCas
     def test_correct_missing_keys(self):
         if not self.test_missing_keys:
-            return
+            self.skipTest(reason="test_missing_keys is set to False")

         config, _ = self.model_tester.prepare_config_and_inputs_for_common()
         for model_class in self.all_model_classes:
@@ -270,15 +270,16 @@ class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         tokenizer = self.perceiver_tokenizer
         self.assertEqual(tokenizer.decode([178]), "�")

-    # tokenizer does not have vocabulary
+    @unittest.skip(reason="tokenizer does not have vocabulary")
     def test_get_vocab(self):
         pass

-    # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters
+    @unittest.skip(reason="inputs cannot be pretokenized")
     def test_pretokenized_inputs(self):
+        # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters
         pass

-    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
+    @unittest.skip(reason="vocab does not exist")
     def test_conversion_reversible(self):
         pass
@@ -384,7 +384,7 @@ class PersimmonModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTester
             (self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.num_labels),
         )

-    @unittest.skip("Persimmon buffers include complex numbers, which breaks this test")
+    @unittest.skip(reason="Persimmon buffers include complex numbers, which breaks this test")
     # Copied from tests.models.llama.test_modeling_llama.LlamaModelTest.test_save_load_fast_init_from_base
     def test_save_load_fast_init_from_base(self):
         pass
@@ -335,14 +335,16 @@ class Pix2StructImageProcessingTestFourChannels(ImageProcessingTestMixin, unitte
             (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
         )

-    @unittest.skip("Pix2StructImageProcessor does not support 4 channels yet")  # FIXME Amy
+    @unittest.skip(reason="Pix2StructImageProcessor does not support 4 channels yet")  # FIXME Amy
     def test_call_numpy(self):
         return super().test_call_numpy()

-    @unittest.skip("Pix2StructImageProcessor does not support 4 channels yet")  # FIXME Amy
+    @unittest.skip(reason="Pix2StructImageProcessor does not support 4 channels yet")  # FIXME Amy
     def test_call_pytorch(self):
         return super().test_call_torch()

-    @unittest.skip("Pix2StructImageProcessor does treat numpy and PIL 4 channel images consistently")  # FIXME Amy
+    @unittest.skip(
+        reason="Pix2StructImageProcessor does treat numpy and PIL 4 channel images consistently"
+    )  # FIXME Amy
     def test_call_numpy_4_channels(self):
         return super().test_call_torch()
@@ -489,7 +489,7 @@ class Pix2StructModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCa
     def test_training(self):
         if not self.model_tester.is_training:
-            return
+            self.skipTest(reason="model_tester.is_training is set to False")

         for model_class in self.all_model_classes[:-1]:
             config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
@@ -508,7 +508,7 @@ class Pix2StructModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCa
     def test_training_gradient_checkpointing(self):
         if not self.model_tester.is_training:
-            return
+            self.skipTest(reason="model_tester.is_training is set to False")

         for model_class in self.all_model_classes[:-1]:
             config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
@@ -555,7 +555,7 @@ class Pix2StructModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCa
     def test_resize_tokens_embeddings(self):
         original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
         if not self.test_resize_embeddings:
-            return
+            self.skipTest(reason="test_resize_embeddings is set to False")

         for model_class in self.all_model_classes:
             config = copy.deepcopy(original_config)
@@ -602,13 +602,13 @@ class Pix2StructModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCa
     def test_resize_embeddings_untied(self):
         original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
         if not self.test_resize_embeddings:
-            return
+            self.skipTest(reason="test_resize_embeddings is set to False")

         original_config.tie_word_embeddings = False

         # if model cannot untied embeddings -> leave test
         if original_config.tie_word_embeddings:
-            return
+            self.skipTest(reason="Model cannot untie embeddings")

         for model_class in self.all_model_classes:
             config = copy.deepcopy(original_config)
@@ -652,7 +652,7 @@ class Pix2StructModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCa
     def _create_and_check_torchscript(self, config, inputs_dict):
         if not self.test_torchscript:
-            return
+            self.skipTest(reason="test_torchscript is set to False")

         configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
         configs_no_init.torchscript = True
@@ -319,7 +319,7 @@ class PLBartModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMix
         model.generate(input_ids, attention_mask=attention_mask)
         model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)

-    @unittest.skip("Failing since #26752")
+    @unittest.skip(reason="Failing since #26752")
     def test_sample_generate(self):
         pass
@@ -664,6 +664,6 @@ class PLBartStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin,
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)

+    @unittest.skip(reason="Decoder cannot keep gradients")
     def test_retain_grad_hidden_states_attentions(self):
-        # decoder cannot keep gradients
         return
@@ -144,11 +144,11 @@ class PoolFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCa
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_model(*config_and_inputs)

-    @unittest.skip("PoolFormer does not use inputs_embeds")
+    @unittest.skip(reason="PoolFormer does not use inputs_embeds")
     def test_inputs_embeds(self):
         pass

-    @unittest.skip("PoolFormer does not have get_input_embeddings method and get_output_embeddings methods")
+    @unittest.skip(reason="PoolFormer does not have get_input_embeddings method and get_output_embeddings methods")
     def test_model_get_set_embeddings(self):
         pass
@@ -190,7 +190,7 @@ class PoolFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCa
     def test_training(self):
         if not self.model_tester.is_training:
-            return
+            self.skipTest(reason="model_tester.is_training is set to False")

         config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
         config.return_dict = True
@@ -280,14 +280,14 @@ class Pop2PianoFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittes
     def test_padding_from_array(self):
         pass

-    @unittest.skip("Pop2PianoFeatureExtractor does not support truncation")
+    @unittest.skip(reason="Pop2PianoFeatureExtractor does not support truncation")
     def test_attention_mask_with_truncation(self):
         pass

-    @unittest.skip("Pop2PianoFeatureExtractor does not supports truncation")
+    @unittest.skip(reason="Pop2PianoFeatureExtractor does not supports truncation")
     def test_truncation_from_array(self):
         pass

-    @unittest.skip("Pop2PianoFeatureExtractor does not supports truncation")
+    @unittest.skip(reason="Pop2PianoFeatureExtractor does not supports truncation")
     def test_truncation_from_list(self):
         pass