Unverified Commit 1de7dc74 authored by amyeroberts, committed by GitHub

Skip tests properly (#31308)

* Skip tests properly

* [test_all]

* Add 'reason' as kwarg for skipTest

* [test_all] Fix up

* [test_all]
parent 1f9f57ab
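
Every hunk below applies one of two fixes: a `@unittest.skip("...")` decorator or `self.skipTest("...")` call gains an explicit `reason=` keyword, or a test that used to bail out with a bare `return` (which the runner counts as a pass) now calls `self.skipTest(reason=...)` so it is reported as skipped. A minimal sketch of why this matters, using a hypothetical `HAS_CAPABILITY` flag in place of the real checks such as `self.test_seq2seq` or `model_tester.is_training`:

```python
import unittest

HAS_CAPABILITY = False  # hypothetical flag standing in for checks like `self.test_seq2seq`


class SkipStyleDemo(unittest.TestCase):
    def test_silent_pass(self):
        # The anti-pattern this commit removes: an early `return` makes the
        # runner count this test as PASSED even though nothing was asserted.
        if not HAS_CAPABILITY:
            return
        self.fail("never reached")

    def test_explicit_skip(self):
        # The replacement: the test is reported as SKIPPED, with the reason
        # visible in `python -m unittest -v` or `pytest -rs` output.
        if not HAS_CAPABILITY:
            self.skipTest(reason="HAS_CAPABILITY is set to False")
        self.fail("never reached")

    @unittest.skip(reason="decorator form, for tests that can never apply")
    def test_decorator_skip(self):
        self.fail("never reached")


if __name__ == "__main__":
    unittest.main(verbosity=2)
```

Run with `python -m unittest -v`: `test_silent_pass` is reported as `ok` while the other two are reported as `skipped` with their reasons. That reporting difference is what the bare-`return` replacements below recover.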
@@ -51,7 +51,7 @@ class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         output_text = "tester"
         return input_text, output_text

-    @unittest.skip("MGP-STR always lower cases letters.")
+    @unittest.skip(reason="MGP-STR always lower cases letters.")
     def test_added_tokens_do_lower_case(self):
         pass
@@ -86,10 +86,10 @@ class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         self.assertEqual(text_2.replace(" ", ""), output_text)

-    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
+    @unittest.skip(reason="MGP-STR tokenizer only handles one sequence.")
     def test_maximum_encoding_length_pair_input(self):
         pass

-    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
+    @unittest.skip(reason="inputs cannot be pretokenized in MgpstrTokenizer")
     def test_pretokenized_inputs(self):
         pass
@@ -397,11 +397,11 @@ class MistralModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi
             (self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.num_labels),
         )

-    @unittest.skip("Mistral buffers include complex numbers, which breaks this test")
+    @unittest.skip(reason="Mistral buffers include complex numbers, which breaks this test")
     def test_save_load_fast_init_from_base(self):
         pass

-    @unittest.skip("Mistral uses GQA on all models so the KV cache is a non standard format")
+    @unittest.skip(reason="Mistral uses GQA on all models so the KV cache is a non standard format")
     def test_past_key_values_format(self):
         pass
@@ -489,7 +489,7 @@ class MistralModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi
     @pytest.mark.flash_attn_test
     @slow
     def test_flash_attn_2_inference_equivalence_right_padding(self):
-        self.skipTest("Mistral flash attention does not support right padding")
+        self.skipTest(reason="Mistral flash attention does not support right padding")

     @require_torch_gpu
@@ -647,10 +647,10 @@ class MistralIntegrationTest(unittest.TestCase):
         # `torch==2.2` will throw an error on this test (as in other compilation tests), but torch==2.1.2 and torch>2.2
         # work as intended. See https://github.com/pytorch/pytorch/issues/121943
         if version.parse(torch.__version__) < version.parse("2.3.0"):
-            self.skipTest("This test requires torch >= 2.3 to run.")
+            self.skipTest(reason="This test requires torch >= 2.3 to run.")

         if self.cuda_compute_capability_major_version == 7:
-            self.skipTest("This test is failing (`torch.compile` fails) on Nvidia T4 GPU.")
+            self.skipTest(reason="This test is failing (`torch.compile` fails) on Nvidia T4 GPU.")

         NUM_TOKENS_TO_GENERATE = 40
         EXPECTED_TEXT_COMPLETION = {
@@ -398,11 +398,11 @@ class MixtralModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi
             (self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.num_labels),
         )

-    @unittest.skip("Mixtral buffers include complex numbers, which breaks this test")
+    @unittest.skip(reason="Mixtral buffers include complex numbers, which breaks this test")
     def test_save_load_fast_init_from_base(self):
         pass

-    @unittest.skip("Mixtral uses GQA on all models so the KV cache is a non standard format")
+    @unittest.skip(reason="Mixtral uses GQA on all models so the KV cache is a non standard format")
     def test_past_key_values_format(self):
         pass
@@ -490,7 +490,7 @@ class MixtralModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi
     @pytest.mark.flash_attn_test
     @slow
     def test_flash_attn_2_inference_equivalence_right_padding(self):
-        self.skipTest("Mixtral flash attention does not support right padding")
+        self.skipTest(reason="Mixtral flash attention does not support right padding")

     # Ignore copy
     def test_load_balancing_loss(self):
@@ -93,6 +93,7 @@ class MLukeTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
         ids = tokenizer.encode(txt, add_special_tokens=False)
         return txt, ids

+    @unittest.skip
     def test_pretokenized_inputs(self):
         pass
@@ -298,7 +298,7 @@ class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCa
         return inputs_dict

     # TODO (@SunMarc): Fix me
-    @unittest.skip("It's broken.")
+    @unittest.skip(reason="It's broken.")
     def test_resize_tokens_embeddings(self):
         super().test_resize_tokens_embeddings()
@@ -87,7 +87,7 @@ class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     # Copied from tests.models.bert.test_tokenization_bert.BertTokenizationTest.test_rust_and_python_full_tokenizers
     def test_rust_and_python_full_tokenizers(self):
         if not self.test_rust_tokenizer:
-            return
+            self.skipTest(reason="test_rust_tokenizer is set to False")

         tokenizer = self.get_tokenizer()
         rust_tokenizer = self.get_rust_tokenizer()
@@ -246,7 +246,7 @@ class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)

-    @unittest.skip("TFMPNet adds poolers to all models, unlike the PT model class.")
+    @unittest.skip(reason="TFMPNet adds poolers to all models, unlike the PT model class.")
     def test_tf_from_pt_safetensors(self):
         return
@@ -422,7 +422,7 @@ class MptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin,
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_mpt_weight_initialization(*config_and_inputs)

-    @unittest.skip("For backward compatibility the lm_head is not in the model's state dict on the Hub.")
+    @unittest.skip(reason="For backward compatibility the lm_head is not in the model's state dict on the Hub.")
     def test_model_weights_reload_no_missing_tied_weights(self):
         pass
@@ -376,7 +376,9 @@ class MraModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     def test_training_gradient_checkpointing_use_reentrant_false(self):
         pass

-    @unittest.skip("Model has `nan` in hidden_states, see https://github.com/huggingface/transformers/issues/29373.")
+    @unittest.skip(
+        reason="Model has `nan` in hidden_states, see https://github.com/huggingface/transformers/issues/29373."
+    )
     def test_batching_equivalence(self):
         pass
@@ -593,7 +593,7 @@ class MT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin,
     def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False):
         if not is_torch_fx_available() or not self.fx_compatible:
-            return
+            self.skipTest(reason="torch.fx is not available or not compatible with this model")

         configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
         configs_no_init.return_dict = False
@@ -837,7 +837,7 @@ class MT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin,
         model = MT5Model.from_pretrained(model_name)
         self.assertIsNotNone(model)

-    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
+    @unittest.skip(reason="Test has a segmentation fault on torch 1.8.0")
     def test_export_to_onnx(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         model = MT5Model(config_and_inputs[0]).to(torch_device)
@@ -205,7 +205,7 @@ class MusicgenDecoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTeste
     def check_training_gradient_checkpointing(self, gradient_checkpointing_kwargs=None):
         if not self.model_tester.is_training:
-            return
+            self.skipTest(reason="model_tester.is_training is set to False")

         config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
         config.use_cache = False
@@ -270,15 +270,15 @@ class MusicgenDecoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTeste
     def test_inputs_embeds_matches_input_ids(self):
         pass

-    # skip as this model doesn't support all arguments tested
+    @unittest.skip(reason="MusicGen does not support all arguments tested")
     def test_model_outputs_equivalence(self):
         pass

-    # skip as this model has multiple inputs embeds and lm heads that should not be tied
+    @unittest.skip(reason="MusicGen has multiple inputs embeds and lm heads that should not be tied")
     def test_tie_model_weights(self):
         pass

-    # skip as this model has multiple inputs embeds and lm heads that should not be tied
+    @unittest.skip(reason="MusicGen has multiple inputs embeds and lm heads that should not be tied")
     def test_tied_weights_keys(self):
         pass
@@ -624,6 +624,9 @@ class MusicgenDecoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTeste
     @slow
     # Copied from tests.test_modeling_common.ModelTesterMixin.test_eager_matches_sdpa_inference
     def test_eager_matches_sdpa_inference(self, torch_dtype: str):
+        if not self.has_attentions:
+            self.skipTest(reason="Model architecture does not support attentions")
+
         if not self.all_model_classes[0]._supports_sdpa:
             self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA")
@@ -1085,7 +1088,7 @@ class MusicgenTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin,
     def check_training_gradient_checkpointing(self, gradient_checkpointing_kwargs=None):
         if not self.model_tester.is_training:
-            return
+            self.skipTest(reason="model_tester.is_training is set to False")

         for model_class in self.all_model_classes:
             config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
@@ -1262,27 +1265,27 @@ class MusicgenTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin,
             model = model_class(config)
             self.assertTrue(model.is_gradient_checkpointing)

-    # skip as this model has multiple inputs embeds and lm heads that should not be tied
+    @unittest.skip(reason="MusicGen has multiple inputs embeds and lm heads that should not be tied.")
     def test_tie_model_weights(self):
         pass

-    # skip as this model has multiple inputs embeds and lm heads that should not be tied
+    @unittest.skip(reason="MusicGen has multiple inputs embeds and lm heads that should not be tied.")
     def test_tied_model_weights_key_ignore(self):
         pass

-    # skip as this model has multiple inputs embeds and lm heads that should not be tied
+    @unittest.skip(reason="MusicGen has multiple inputs embeds and lm heads that should not be tied.")
     def test_tied_weights_keys(self):
         pass

-    @unittest.skip("No support for low_cpu_mem_usage=True.")
+    @unittest.skip(reason="No support for low_cpu_mem_usage=True.")
     def test_save_load_low_cpu_mem_usage(self):
         pass

-    @unittest.skip("No support for low_cpu_mem_usage=True.")
+    @unittest.skip(reason="No support for low_cpu_mem_usage=True.")
     def test_save_load_low_cpu_mem_usage_checkpoints(self):
         pass

-    @unittest.skip("No support for low_cpu_mem_usage=True.")
+    @unittest.skip(reason="No support for low_cpu_mem_usage=True.")
     def test_save_load_low_cpu_mem_usage_no_safetensors(self):
         pass
@@ -1569,7 +1572,7 @@ class MusicgenTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin,
         # if no bos token id => cannot generate from None
         if config.bos_token_id is None:
-            return
+            self.skipTest(reason="bos_token_id is None")

         for model_class in self.greedy_sample_model_classes:
             model = model_class(config).to(torch_device)
@@ -1615,7 +1618,9 @@ class MusicgenTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin,
         self.assertNotIn(config.pad_token_id, output_generate)

-    @unittest.skip("MusicgenModel is actually not the base of MusicgenForCausalLM as the latter is a composit model")
+    @unittest.skip(
+        reason="MusicgenModel is actually not the base of MusicgenForCausalLM as the latter is a composit model"
+    )
     def test_save_load_fast_init_from_base(self):
         pass
@@ -1934,6 +1939,9 @@ class MusicgenTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin,
     @slow
     # Copied from tests.test_modeling_common.ModelTesterMixin.test_eager_matches_sdpa_inference
     def test_eager_matches_sdpa_inference(self, torch_dtype: str):
+        if not self.has_attentions:
+            self.skipTest(reason="Model architecture does not support attentions")
+
         if not self.all_model_classes[0]._supports_sdpa:
             self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA")
@@ -22,12 +22,9 @@ import numpy as np
 from transformers import T5Tokenizer, T5TokenizerFast
 from transformers.testing_utils import require_sentencepiece, require_torch
-from transformers.utils.import_utils import is_speech_available, is_torch_available
+from transformers.utils.import_utils import is_speech_available

-if is_torch_available():
-    pass
-
 if is_speech_available():
     from transformers import EncodecFeatureExtractor, MusicgenProcessor
@@ -208,7 +208,7 @@ class MusicgenMelodyDecoderTest(ModelTesterMixin, GenerationTesterMixin, unittes
     # Copied from tests.models.musicgen.test_modeling_musicgen.MusicgenDecoderTest.check_training_gradient_checkpointing with Musicgen->MusicgenMelody
     def check_training_gradient_checkpointing(self, gradient_checkpointing_kwargs=None):
         if not self.model_tester.is_training:
-            return
+            self.skipTest(reason="model_tester.is_training is set to False")

         config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
         config.use_cache = False
@@ -273,15 +273,15 @@ class MusicgenMelodyDecoderTest(ModelTesterMixin, GenerationTesterMixin, unittes
     def test_inputs_embeds_matches_input_ids(self):
         pass

-    @unittest.skip("this model doesn't support all arguments tested")
+    @unittest.skip(reason="this model doesn't support all arguments tested")
     def test_model_outputs_equivalence(self):
         pass

-    @unittest.skip("this model has multiple inputs embeds and lm heads that should not be tied")
+    @unittest.skip(reason="this model has multiple inputs embeds and lm heads that should not be tied")
     def test_tie_model_weights(self):
         pass

-    @unittest.skip("this model has multiple inputs embeds and lm heads that should not be tied")
+    @unittest.skip(reason="this model has multiple inputs embeds and lm heads that should not be tied")
     def test_tied_weights_keys(self):
         pass
@@ -626,6 +626,9 @@ class MusicgenMelodyDecoderTest(ModelTesterMixin, GenerationTesterMixin, unittes
     @slow
     # Copied from tests.models.musicgen.test_modeling_musicgen.MusicgenDecoderTest.test_eager_matches_sdpa_inference
     def test_eager_matches_sdpa_inference(self, torch_dtype: str):
+        if not self.has_attentions:
+            self.skipTest(reason="Model architecture does not support attentions")
+
         if not self.all_model_classes[0]._supports_sdpa:
             self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA")
@@ -1089,7 +1092,7 @@ class MusicgenMelodyTest(ModelTesterMixin, GenerationTesterMixin, PipelineTester
     def check_training_gradient_checkpointing(self, gradient_checkpointing_kwargs=None):
         if not self.model_tester.is_training:
-            return
+            self.skipTest(reason="model_tester.is_training is set to False")

         for model_class in self.all_model_classes:
             config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
@@ -1247,27 +1250,27 @@ class MusicgenMelodyTest(ModelTesterMixin, GenerationTesterMixin, PipelineTester
             model = model_class(config)
             self.assertTrue(model.is_gradient_checkpointing)

-    # skip as this model has multiple inputs embeds and lm heads that should not be tied
+    @unittest.skip(reason="MusicGen has multiple inputs embeds and lm heads that should not be tied.")
     def test_tie_model_weights(self):
         pass

-    # skip as this model has multiple inputs embeds and lm heads that should not be tied
+    @unittest.skip(reason="MusicGen has multiple inputs embeds and lm heads that should not be tied.")
     def test_tied_model_weights_key_ignore(self):
         pass

-    # skip as this model has multiple inputs embeds and lm heads that should not be tied
+    @unittest.skip(reason="MusicGen has multiple inputs embeds and lm heads that should not be tied.")
     def test_tied_weights_keys(self):
         pass

-    @unittest.skip("No support for low_cpu_mem_usage=True.")
+    @unittest.skip(reason="No support for low_cpu_mem_usage=True.")
     def test_save_load_low_cpu_mem_usage(self):
         pass

-    @unittest.skip("No support for low_cpu_mem_usage=True.")
+    @unittest.skip(reason="No support for low_cpu_mem_usage=True.")
     def test_save_load_low_cpu_mem_usage_checkpoints(self):
         pass

-    @unittest.skip("No support for low_cpu_mem_usage=True.")
+    @unittest.skip(reason="No support for low_cpu_mem_usage=True.")
     def test_save_load_low_cpu_mem_usage_no_safetensors(self):
         pass
@@ -1553,7 +1556,7 @@ class MusicgenMelodyTest(ModelTesterMixin, GenerationTesterMixin, PipelineTester
         # if no bos token id => cannot generate from None
         if config.bos_token_id is None:
-            return
+            self.skipTest(reason="bos_token_id is None")

         for model_class in self.greedy_sample_model_classes:
             model = model_class(config).to(torch_device)
@@ -1600,7 +1603,7 @@ class MusicgenMelodyTest(ModelTesterMixin, GenerationTesterMixin, PipelineTester
         self.assertNotIn(config.pad_token_id, output_generate)

     @unittest.skip(
-        "MusicgenMelodyModel is actually not the base of MusicgenMelodyForCausalLM as the latter is a composit model"
+        reason="MusicgenMelodyModel is actually not the base of MusicgenMelodyForCausalLM as the latter is a composit model"
     )
     def test_save_load_fast_init_from_base(self):
         pass
@@ -818,6 +818,6 @@ class MvpStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, uni
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)

+    @unittest.skip(reason="Decoder cannot keep gradients")
     def test_retain_grad_hidden_states_attentions(self):
-        # decoder cannot keep gradients
         return
@@ -146,6 +146,7 @@ class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
         self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
         self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

+    @unittest.skip
     def test_pretokenized_inputs(self):
         pass
@@ -207,7 +207,7 @@ class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     @require_torch
     def test_prepare_seq2seq_batch(self):
         if not self.test_seq2seq:
-            return
+            self.skipTest(reason="test_seq2seq is set to False")

         tokenizers = self.get_tokenizers()
         for tokenizer in tokenizers:
@@ -236,7 +236,7 @@ class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
                 tgt_lang="ron_Latn",
             )
         except NotImplementedError:
-            return
+            self.skipTest(reason="Encountered NotImplementedError when calling prepare_seq2seq_batch")
         self.assertEqual(batch.input_ids.shape[1], 3)
         self.assertEqual(batch.labels.shape[1], 10)
         # max_target_length will default to max_length if not specified
@@ -253,7 +253,7 @@ class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
         self.assertNotIn("decoder_input_ids", batch_encoder_only)

-    @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
+    @unittest.skip(reason="Unfortunately way too slow to build a BPE with SentencePiece.")
     def test_save_slow_from_fast_and_reload_fast(self):
         pass
@@ -290,7 +290,7 @@ class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
             self.assertTrue(special_token_id in p_output)
             self.assertTrue(special_token_id in cr_output)

-    @unittest.skip("Need to fix this after #26538")
+    @unittest.skip(reason="Need to fix this after #26538")
     def test_training_new_tokenizer(self):
         pass
@@ -403,7 +403,7 @@ class NllbMoeModelIntegrationTests(unittest.TestCase):
         EXPECTED_LOGTIS = torch.Tensor([-0.3059, 0.0000, 9.3029, 0.6456, -0.9148, 1.7836, 0.6478, 0.9438, -0.5272, -0.6617, -1.2717, 0.4564, 0.1345, -0.2301, -1.0140, 1.1427, -1.5535, 0.1337, 0.2082, -0.8112, -0.3842, -0.3377, 0.1256, 0.6450, -0.0452, 0.0219, 1.4274, -0.4991, -0.2063, -0.4409,])  # fmt: skip
         torch.testing.assert_close(output.logits[1, 0, :30], EXPECTED_LOGTIS, rtol=6e-3, atol=9e-3)

-    @unittest.skip("This requires 300GB of RAM")
+    @unittest.skip(reason="This requires 300GB of RAM")
     def test_large_logits(self):
         model = self.big_model
         with torch.no_grad():
@@ -421,7 +421,7 @@ class NllbMoeModelIntegrationTests(unittest.TestCase):
         torch.testing.assert_close(output.last_hidden_state[1, 0, :30], EXPECTED_DECODER_STATE, rtol=6e-3, atol=9e-3)
         torch.testing.assert_close(output.logits[1, 0, :30], EXPECTED_LOGTIS, rtol=6e-3, atol=9e-3)

-    @unittest.skip("This requires 300GB of RAM")
+    @unittest.skip(reason="This requires 300GB of RAM")
     def test_seq_to_seq_generation(self):
         model = self.big_model
         tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-moe-54b")
@@ -102,19 +102,19 @@ class NougatTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
             padding="max_length",
         )

-    @unittest.skip("NougatTokenizerFast does not have tokenizer_file in its signature")
+    @unittest.skip(reason="NougatTokenizerFast does not have tokenizer_file in its signature")
     def test_rust_tokenizer_signature(self):
         pass

-    @unittest.skip("NougatTokenizerFast does not support pretokenized inputs")
+    @unittest.skip(reason="NougatTokenizerFast does not support pretokenized inputs")
     def test_pretokenized_inputs(self):
         pass

-    @unittest.skip("NougatTokenizerFast directly inherits from PreTrainedTokenizerFast")
+    @unittest.skip(reason="NougatTokenizerFast directly inherits from PreTrainedTokenizerFast")
     def test_prepare_for_model(self):
         pass

-    @unittest.skip("This needs a slow tokenizer. Nougat does not have one!")
+    @unittest.skip(reason="This needs a slow tokenizer. Nougat does not have one!")
     def test_encode_decode_with_spaces(self):
         pass
@@ -301,7 +301,7 @@ class OlmoModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_model(*config_and_inputs)

-    @unittest.skip("OLMo does not support head pruning.")
+    @unittest.skip(reason="OLMo does not support head pruning.")
     def test_headmasking(self):
         pass
@@ -311,7 +311,7 @@ class OlmoModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin
             config_and_inputs[0].position_embedding_type = type
             self.model_tester.create_and_check_model(*config_and_inputs)

-    @unittest.skip("OLMo buffers include complex numbers, which breaks this test")
+    @unittest.skip(reason="OLMo buffers include complex numbers, which breaks this test")
     def test_save_load_fast_init_from_base(self):
         pass
@@ -213,6 +213,7 @@ class OneFormerImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase):
         return inputs

+    @unittest.skip
     def test_init_without_params(self):
         pass
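
For completeness, a short self-contained sketch (the `Demo` class and its reason string are hypothetical) showing where the `reason` strings introduced above end up: `unittest` collects them as `(test, reason)` pairs on `result.skipped`, which is what test reporters and CI summaries read.

```python
import io
import unittest


class Demo(unittest.TestCase):
    @unittest.skip(reason="demonstration: feature not supported")
    def test_unsupported(self):
        pass


# Run the suite quietly, then inspect result.skipped: after this commit every
# skipped transformers test contributes a meaningful reason string to it.
suite = unittest.TestLoader().loadTestsFromTestCase(Demo)
result = unittest.TextTestRunner(stream=io.StringIO()).run(suite)
for test, reason in result.skipped:
    print(f"SKIPPED {test.id()}: {reason}")
```

Printed output has the form `SKIPPED __main__.Demo.test_unsupported: demonstration: feature not supported`, which is exactly the information a bare `return` used to discard.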