Unverified commit 1de7dc74, authored by amyeroberts, committed by GitHub

Skip tests properly (#31308)

* Skip tests properly

* [test_all]

* Add 'reason' as kwarg for skipTest

* [test_all] Fix up

* [test_all]
parent 1f9f57ab
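
The change is mechanical but worth spelling out: tests that were previously disabled with a comment plus `pass`, or cut short with a bare `return`, now skip explicitly, so they are reported as skipped with a visible reason instead of counting as passed. A minimal sketch of the before/after pattern, with a made-up test class and flag (not taken from this diff):

import unittest


class ExampleModelTest(unittest.TestCase):
    # Hypothetical flag standing in for mixin attributes such as `test_resize_embeddings`.
    test_resize_embeddings = False

    # Before: a comment plus `pass`, which the runner counted as a pass.
    # After: an explicit skip whose reason shows up in the test report.
    @unittest.skip(reason="Model has no inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_resize_tokens_embeddings(self):
        # Before: a bare `return` silently ended the test as a pass.
        # After: skipTest reports it as skipped, with the reason attached.
        if not self.test_resize_embeddings:
            self.skipTest(reason="test_resize_embeddings is set to False")
        # ... the actual resizing assertions would follow here ...

Run with `python -m unittest -v` or `pytest -rs` and these cases are listed as skipped with their reasons, rather than silently inflating the pass count.
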
@@ -569,15 +569,15 @@ class Speech2TextBertModelTest(EncoderDecoderMixin, unittest.TestCase):
             "labels": decoder_token_labels,
         }
 
-    # can't save full model for now because Speech2TextModel != Speech2TextEncoder
+    @unittest.skip(reason="Cannot save full model as Speech2TextModel != Speech2TextEncoder")
     def test_encoder_decoder_model_from_pretrained_configs(self):
         pass
 
-    # can't save full model for now because Speech2TextModel != Speech2TextEncoder
+    @unittest.skip(reason="Cannot save full model as Speech2TextModel != Speech2TextEncoder")
     def test_save_and_load_from_pretrained(self):
         pass
 
     @require_deterministic_for_xpu
-    # all published pretrained models are Speech2TextModel != Speech2TextEncoder
+    @unittest.skip(reason="Cannot save full model as Speech2TextModel != Speech2TextEncoder")
     def test_real_model_save_load_from_pretrained(self):
         pass
@@ -326,14 +326,15 @@ class Speech2TextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTest
         config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
         self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
 
-    # not implemented currently
+    @unittest.skip(reason="Not implemented currently")
     def test_inputs_embeds(self):
         pass
 
-    # training is not supported yet
+    @unittest.skip(reason="Training is not supported yet")
     def test_training(self):
         pass
 
+    @unittest.skip
     def test_training_gradient_checkpointing(self):
         pass
 
@@ -536,7 +537,7 @@ class Speech2TextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTest
             inputs_dict,
         ) = self.model_tester.prepare_config_and_inputs_for_common()
         if not self.test_resize_embeddings:
-            return
+            self.skipTest(reason="test_resize_embeddings is set to False")
 
         for model_class in self.all_model_classes:
             config = copy.deepcopy(original_config)
@@ -584,13 +585,13 @@ class Speech2TextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTest
             inputs_dict,
         ) = self.model_tester.prepare_config_and_inputs_for_common()
         if not self.test_resize_embeddings:
-            return
+            self.skipTest(reason="test_resize_embeddings is set to False")
 
         original_config.tie_word_embeddings = False
 
        # if model cannot untied embeddings -> leave test
         if original_config.tie_word_embeddings:
-            return
+            self.skipTest(reason="Model cannot untie embeddings")
 
         for model_class in self.all_model_classes:
             config = copy.deepcopy(original_config)
@@ -627,6 +628,7 @@ class Speech2TextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTest
             # Check that the model can still do a forward pass successfully (every parameter should be resized)
             model(**self._prepare_for_class(inputs_dict, model_class))
 
+    @unittest.skip
     def test_generate_without_input_ids(self):
         pass
 
@@ -695,7 +697,7 @@ class Speech2TextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTest
 
     def _create_and_check_torchscript(self, config, inputs_dict):
         if not self.test_torchscript:
-            return
+            self.skipTest(reason="test_torchscript is set to False")
 
         configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
         configs_no_init.torchscript = True
@@ -773,7 +775,7 @@ class Speech2TextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTest
         # Allow missing keys since TF doesn't cache the sinusoidal embeddings in an attribute
         super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
 
-    @unittest.skip("Test failing, @RocketNight is looking into it")
+    @unittest.skip(reason="Test failing, @RocketNight is looking into it")
     def test_tf_from_pt_safetensors(self):
         pass
@@ -212,31 +212,31 @@ class SpeechT5ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase
         )
         self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
 
-    # this model has no inputs_embeds
+    @unittest.skip(reason="Model has no input_embeds")
     def test_inputs_embeds(self):
         pass
 
-    # this model has no input embeddings
+    @unittest.skip(reason="Model has no input_embeds")
     def test_model_get_set_embeddings(self):
         pass
 
+    @unittest.skip(reason="Decoder cannot keep gradients")
     def test_retain_grad_hidden_states_attentions(self):
-        # decoder cannot keep gradients
         pass
 
     @slow
+    @unittest.skip(reason="Model does not have decoder_input_ids")
     def test_torchscript_output_attentions(self):
-        # disabled because this model doesn't have decoder_input_ids
         pass
 
     @slow
+    @unittest.skip(reason="Model does not have decoder_input_ids")
     def test_torchscript_output_hidden_state(self):
-        # disabled because this model doesn't have decoder_input_ids
         pass
 
     @slow
+    @unittest.skip(reason="Model does not have decoder_input_ids")
     def test_torchscript_simple(self):
-        # disabled because this model doesn't have decoder_input_ids
         pass
@@ -598,19 +598,20 @@ class SpeechT5ForSpeechToTextTest(ModelTesterMixin, unittest.TestCase):
         )
 
-    # this model has no inputs_embeds
+    @unittest.skip(reason="Model has no input_embeds")
     def test_inputs_embeds(self):
         pass
 
     def test_resize_embeddings_untied(self):
         original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
         if not self.test_resize_embeddings:
-            return
+            self.skipTest(reason="test_resize_embeddings is set to False")
 
         original_config.tie_word_embeddings = False
 
         # if model cannot untied embeddings -> leave test
         if original_config.tie_word_embeddings:
-            return
+            self.skipTest(reason="Model cannot untie embeddings")
 
         for model_class in self.all_model_classes:
             config = copy.deepcopy(original_config)
@@ -650,7 +651,7 @@ class SpeechT5ForSpeechToTextTest(ModelTesterMixin, unittest.TestCase):
     def test_resize_tokens_embeddings(self):
         original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
         if not self.test_resize_embeddings:
-            return
+            self.skipTest(reason="test_resize_embeddings is set to False")
 
         for model_class in self.all_model_classes:
             config = copy.deepcopy(original_config)
@@ -692,14 +693,16 @@ class SpeechT5ForSpeechToTextTest(ModelTesterMixin, unittest.TestCase):
         self.assertTrue(models_equal)
 
+    @unittest.skip(reason="Decoder cannot keep gradients")
     def test_retain_grad_hidden_states_attentions(self):
-        # decoder cannot keep gradients
         pass
 
-    # training is not supported yet
+    @unittest.skip(reason="Training is not supported yet")
     def test_training(self):
         pass
 
+    @unittest.skip(reason="Training is not supported yet")
     def test_training_gradient_checkpointing(self):
         pass
@@ -928,15 +931,15 @@ class SpeechT5ForTextToSpeechTest(ModelTesterMixin, unittest.TestCase):
             (self.model_tester.batch_size, self.model_tester.decoder_seq_length, self.model_tester.num_mel_bins),
         )
 
-    # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet
+    @unittest.skip(reason="Dropout is always present in SpeechT5SpeechDecoderPrenet")
     def test_decoder_model_past_with_large_inputs(self):
         pass
 
-    # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet
+    @unittest.skip(reason="Dropout is always present in SpeechT5SpeechDecoderPrenet")
     def test_determinism(self):
         pass
 
-    @unittest.skip("skipped because there is always dropout in SpeechT5SpeechDecoderPrenet")
+    @unittest.skip(reason="skipped because there is always dropout in SpeechT5SpeechDecoderPrenet")
     def test_batching_equivalence(self):
         pass
 
@@ -985,41 +988,43 @@ class SpeechT5ForTextToSpeechTest(ModelTesterMixin, unittest.TestCase):
                     msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                 )
 
-    # this model has no inputs_embeds
+    @unittest.skip(reason="Model has no inputs_embeds")
     def test_inputs_embeds(self):
         pass
 
-    # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet
+    @unittest.skip(reason="Dropout is always present in SpeechT5SpeechDecoderPrenet")
     def test_model_outputs_equivalence(self):
         pass
 
-    # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet
+    @unittest.skip(reason="Dropout is always present in SpeechT5SpeechDecoderPrenet")
     def test_save_load(self):
         pass
 
+    @unittest.skip(reason="Decoder cannot keep gradients")
     def test_retain_grad_hidden_states_attentions(self):
-        # decoder cannot keep gradients
         pass
 
     @slow
+    @unittest.skip(reason="Model doesn't have decoder_input_ids")
     def test_torchscript_output_attentions(self):
-        # disabled because this model doesn't have decoder_input_ids
         pass
 
     @slow
+    @unittest.skip(reason="Model doesn't have decoder_input_ids")
     def test_torchscript_output_hidden_state(self):
-        # disabled because this model doesn't have decoder_input_ids
         pass
 
     @slow
+    @unittest.skip(reason="Model doesn't have decoder_input_ids")
     def test_torchscript_simple(self):
-        # disabled because this model doesn't have decoder_input_ids
         pass
 
-    # training is not supported yet
+    @unittest.skip(reason="training is not supported yet")
     def test_training(self):
         pass
 
+    @unittest.skip(reason="training is not supported yet")
     def test_training_gradient_checkpointing(self):
         pass
@@ -1472,15 +1477,15 @@ class SpeechT5ForSpeechToSpeechTest(ModelTesterMixin, unittest.TestCase):
             (self.model_tester.batch_size, self.model_tester.decoder_seq_length, self.model_tester.num_mel_bins),
         )
 
-    # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet
+    @unittest.skip(reason="There is always dropout in SpeechT5SpeechDecoderPrenet")
    def test_decoder_model_past_with_large_inputs(self):
         pass
 
-    # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet
+    @unittest.skip(reason="There is always dropout in SpeechT5SpeechDecoderPrenet")
     def test_determinism(self):
         pass
 
-    @unittest.skip("skipped because there is always dropout in SpeechT5SpeechDecoderPrenet")
+    @unittest.skip(reason="skipped because there is always dropout in SpeechT5SpeechDecoderPrenet")
     def test_batching_equivalence(self):
         pass
 
@@ -1685,45 +1690,46 @@ class SpeechT5ForSpeechToSpeechTest(ModelTesterMixin, unittest.TestCase):
                     msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                 )
 
-    # this model has no inputs_embeds
+    @unittest.skip(reason="Model has no input_embeds")
     def test_inputs_embeds(self):
         pass
 
-    # this model has no input embeddings
+    @unittest.skip(reason="Model has no input_embeds")
     def test_model_get_set_embeddings(self):
         pass
 
-    # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet
+    @unittest.skip(reason="Dropout is always present in SpeechT5SpeechDecoderPrenet")
     def test_model_outputs_equivalence(self):
         pass
 
+    @unittest.skip(reason="Decoder cannot keep gradients")
     def test_retain_grad_hidden_states_attentions(self):
-        # decoder cannot keep gradients
         pass
 
-    # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet
+    @unittest.skip(reason="Dropout is always present in SpeechT5SpeechDecoderPrenet")
     def test_save_load(self):
         pass
 
     @slow
+    @unittest.skip(reason="Model doesn't have decoder_input_ids")
     def test_torchscript_output_attentions(self):
-        # disabled because this model doesn't have decoder_input_ids
         pass
 
     @slow
+    @unittest.skip(reason="Model doesn't have decoder_input_ids")
     def test_torchscript_output_hidden_state(self):
-        # disabled because this model doesn't have decoder_input_ids
         pass
 
     @slow
+    @unittest.skip(reason="Model doesn't have decoder_input_ids")
     def test_torchscript_simple(self):
-        # disabled because this model doesn't have decoder_input_ids
         pass
 
-    # training is not supported yet
+    @unittest.skip(reason="Training is not supported yet")
     def test_training(self):
         pass
 
+    @unittest.skip(reason="Training is not supported yet")
     def test_training_gradient_checkpointing(self):
         pass
@@ -1873,35 +1879,35 @@ class SpeechT5HifiGanTest(ModelTesterMixin, unittest.TestCase):
         ]
         self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
 
-    # this model does not output hidden states
+    @unittest.skip(reason="Model does not output hidden states")
     def test_hidden_states_output(self):
         pass
 
-    # skip
+    @unittest.skip
     def test_initialization(self):
         pass
 
-    # this model has no inputs_embeds
+    @unittest.skip(reason="Model has no input_embeds")
     def test_inputs_embeds(self):
         pass
 
-    # this model has no input embeddings
+    @unittest.skip(reason="Model has no input_embeds")
     def test_model_get_set_embeddings(self):
         pass
 
-    # skip as this model doesn't support all arguments tested
+    @unittest.skip(reason="Model does not support all arguments tested")
     def test_model_outputs_equivalence(self):
         pass
 
-    # this model does not output hidden states
+    @unittest.skip(reason="Model does not output hidden states")
     def test_retain_grad_hidden_states_attentions(self):
         pass
 
-    # skip because it fails on automapping of SpeechT5HifiGanConfig
+    @unittest.skip(reason="Fails on automapping of SpeechT5HifiGanConfig")
     def test_save_load_fast_init_from_base(self):
         pass
 
-    # skip because it fails on automapping of SpeechT5HifiGanConfig
+    @unittest.skip(reason="Fails on automapping of SpeechT5HifiGanConfig")
     def test_save_load_fast_init_to_base(self):
         pass
 
@@ -143,9 +143,11 @@ class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
         self.assertEqual(tokens[0], tokenizer.eos_token_id)
         self.assertEqual(tokens[-3], tokenizer.pad_token_id)
 
+    @unittest.skip
     def test_pickle_subword_regularization_tokenizer(self):
         pass
 
+    @unittest.skip
     def test_subword_regularization_tokenizer(self):
         pass
 
@@ -389,11 +389,11 @@ class Starcoder2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTeste
             (self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.num_labels),
         )
 
-    @unittest.skip("Starcoder2 buffers include complex numbers, which breaks this test")
+    @unittest.skip(reason="Starcoder2 buffers include complex numbers, which breaks this test")
     def test_save_load_fast_init_from_base(self):
         pass
 
-    @unittest.skip("Starcoder2 uses GQA on all models so the KV cache is a non standard format")
+    @unittest.skip(reason="Starcoder2 uses GQA on all models so the KV cache is a non standard format")
     def test_past_key_values_format(self):
         pass
 
@@ -481,7 +481,7 @@ class Starcoder2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTeste
     @pytest.mark.flash_attn_test
     @slow
     def test_flash_attn_2_inference_equivalence_right_padding(self):
-        self.skipTest("Starcoder2 flash attention does not support right padding")
+        self.skipTest(reason="Starcoder2 flash attention does not support right padding")
 
     @slow
@@ -672,7 +672,7 @@ class SwitchTransformersModelTest(ModelTesterMixin, GenerationTesterMixin, Pipel
         model = SwitchTransformersModel.from_pretrained(model_name)
         self.assertIsNotNone(model)
 
-    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
+    @unittest.skip(reason="Test has a segmentation fault on torch 1.8.0")
     def test_export_to_onnx(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         model = SwitchTransformersModel(config_and_inputs[0]).to(torch_device)
@@ -596,7 +596,7 @@ class T5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin,
     def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False):
         if not is_torch_fx_available() or not self.fx_compatible:
-            return
+            self.skipTest(reason="torch.fx is not available or not compatible with this model")
 
         configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
         configs_no_init.return_dict = False
@@ -840,7 +840,7 @@ class T5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin,
         model = T5Model.from_pretrained(model_name)
         self.assertIsNotNone(model)
 
-    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
+    @unittest.skip(reason="Test has a segmentation fault on torch 1.8.0")
     def test_export_to_onnx(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         model = T5Model(config_and_inputs[0]).to(torch_device)
@@ -153,7 +153,7 @@ class T5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_rust_and_python_full_tokenizers(self):
         if not self.test_rust_tokenizer:
-            return
+            self.skipTest(reason="test_rust_tokenizer is set to False")
 
         tokenizer = self.get_tokenizer()
         rust_tokenizer = self.get_rust_tokenizer()
@@ -277,8 +277,8 @@ class TableTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, Pipelin
         pass
 
     @slow
+    @unittest.skip(reason="TODO Niels: fix me!")
     def test_model_outputs_equivalence(self):
-        # TODO Niels: fix me!
         pass
 
     def test_attention_outputs(self):
@@ -520,11 +520,11 @@ class TapasModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
 
     @require_tensorflow_probability
-    @unittest.skip("tfp is not defined even if installed. FIXME @Arthur in a followup PR!")
+    @unittest.skip(reason="tfp is not defined even if installed. FIXME @Arthur in a followup PR!")
     def test_pt_tf_model_equivalence(self):
         pass
 
-    @unittest.skip("tfp is not defined even if installed. FIXME @Arthur in a followup PR!")
+    @unittest.skip(reason="tfp is not defined even if installed. FIXME @Arthur in a followup PR!")
     def test_tf_from_pt_safetensors(self):
         pass
 
@@ -158,13 +158,13 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         for tokenizer in tokenizers:
             with self.subTest(f"{tokenizer.__class__.__name__}"):
                 if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING:
-                    return
+                    self.skipTest(f"{tokenizer.__class__} is not in the MODEL_TOKENIZER_MAPPING")
 
                 config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
                 config = config_class()
 
                 if config.is_encoder_decoder or config.pad_token_id is None:
-                    return
+                    self.skipTest(reason="Model is an encoder-decoder or does not have a pad token id set")
 
                 model = model_class(config)
@@ -184,7 +184,7 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_rust_and_python_full_tokenizers(self):
         if not self.test_rust_tokenizer:
-            return
+            self.skipTest(reason="test_rust_tokenizer is set to False")
 
         tokenizer = self.get_tokenizer()
         rust_tokenizer = self.get_rust_tokenizer()
@@ -223,7 +223,7 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         rust_ids = rust_tokenizer.encode(sequence)
         self.assertListEqual(ids, rust_ids)
 
-    @unittest.skip("Chat template tests don't play well with table/layout models.")
+    @unittest.skip(reason="Chat template tests don't play well with table/layout models.")
     def test_chat_template_batched(self):
         pass
@@ -633,11 +633,11 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         sequences, mask = information["input_ids"], information["token_type_ids"]
         self.assertEqual(len(sequences), len(mask))
 
-    @unittest.skip("TAPAS tokenizer only handles two sequences.")
+    @unittest.skip(reason="TAPAS tokenizer only handles two sequences.")
     def test_maximum_encoding_length_pair_input(self):
         pass
 
-    @unittest.skip("TAPAS tokenizer only handles two sequences.")
+    @unittest.skip(reason="TAPAS tokenizer only handles two sequences.")
     def test_maximum_encoding_length_single_input(self):
         pass
 
@@ -779,7 +779,7 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
                 encoded_sequences_batch_padded_2[key],
             )
 
-    @unittest.skip("batch_encode_plus does not handle overflowing tokens.")
+    @unittest.skip(reason="batch_encode_plus does not handle overflowing tokens.")
     def test_batch_encode_plus_overflowing_tokens(self):
         pass
@@ -846,7 +846,7 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
             with self.subTest(f"{tokenizer.__class__.__name__}"):
                 table = self.get_table(tokenizer, length=0)
                 if tokenizer.pad_token is None:
-                    self.skipTest("No padding token.")
+                    self.skipTest(reason="No padding token.")
                 else:
                     empty_tokens = tokenizer(table, padding=True, pad_to_multiple_of=8)
                     normal_tokens = tokenizer(table, "This is a sample input", padding=True, pad_to_multiple_of=8)
@@ -864,7 +864,9 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         for key, value in normal_tokens.items():
             self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
 
-    @unittest.skip("TAPAS cannot handle `prepare_for_model` without passing by `encode_plus` or `batch_encode_plus`")
+    @unittest.skip(
+        reason="TAPAS cannot handle `prepare_for_model` without passing by `encode_plus` or `batch_encode_plus`"
+    )
     def test_prepare_for_model(self):
         pass
@@ -948,7 +950,7 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
             shutil.rmtree(tmpdirname)
 
-    @unittest.skip("Not implemented")
+    @unittest.skip(reason="Not implemented")
     def test_right_and_left_truncation(self):
         pass
@@ -1051,13 +1053,13 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         for tokenizer in tokenizers:
             with self.subTest(f"{tokenizer.__class__.__name__}"):
                 if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING:
-                    return
+                    self.skipTest(f"{tokenizer.__class__} is not in the MODEL_TOKENIZER_MAPPING")
 
                 config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
                 config = config_class()
 
                 if config.is_encoder_decoder or config.pad_token_id is None:
-                    return
+                    self.skipTest(reason="Model is an encoder-decoder or has no padding token set.")
 
                 model = model_class(config)
@@ -1081,7 +1083,7 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
                 model(**encoded_sequence)
                 model(**batch_encoded_sequence)
 
-    @unittest.skip("TAPAS doesn't handle pre-tokenized inputs.")
+    @unittest.skip(reason="TAPAS doesn't handle pre-tokenized inputs.")
     def test_pretokenized_inputs(self):
         pass
@@ -1268,10 +1270,10 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         self.assertListEqual(column_ids.tolist(), expected_results["column_ids"])
         self.assertListEqual(row_ids.tolist(), expected_results["row_ids"])
 
-    @unittest.skip("Doesn't support another framework than PyTorch")
+    @unittest.skip(reason="Doesn't support another framework than PyTorch")
     def test_np_encode_plus_sent_to_model(self):
         pass
 
-    @unittest.skip("Chat is not supported")
+    @unittest.skip(reason="Chat is not supported")
     def test_chat_template(self):
         pass
@@ -214,7 +214,7 @@ class TimeSeriesTransformerModelTest(ModelTesterMixin, PipelineTesterMixin, unit
         config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
         self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
 
-    # Ignore since we have no tokens embeddings
+    @unittest.skip(reason="Model has no tokens embeddings")
     def test_resize_tokens_embeddings(self):
         pass
 
@@ -217,7 +217,7 @@ class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestC
     def test_attention_outputs(self):
         if not self.has_attentions:
-            pass
+            self.skipTest(reason="Model has no attentions")
         else:
             config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
@@ -138,75 +138,75 @@ class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTeste
         self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
         self.assertEqual(timm_model.channels, transformers_model.channels)
 
-    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
+    @unittest.skip(reason="TimmBackbone doesn't support feed forward chunking")
     def test_feed_forward_chunking(self):
         pass
 
-    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
+    @unittest.skip(reason="TimmBackbone doesn't have num_hidden_layers attribute")
     def test_hidden_states_output(self):
         pass
 
-    @unittest.skip("TimmBackbone initialization is managed on the timm side")
+    @unittest.skip(reason="TimmBackbone initialization is managed on the timm side")
     def test_initialization(self):
         pass
 
-    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
+    @unittest.skip(reason="TimmBackbone models doesn't have inputs_embeds")
     def test_inputs_embeds(self):
         pass
 
-    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
+    @unittest.skip(reason="TimmBackbone models doesn't have inputs_embeds")
     def test_model_get_set_embeddings(self):
         pass
 
-    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
+    @unittest.skip(reason="TimmBackbone model cannot be created without specifying a backbone checkpoint")
     def test_from_pretrained_no_checkpoint(self):
         pass
 
-    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
+    @unittest.skip(reason="Only checkpoints on timm can be loaded into TimmBackbone")
     def test_save_load(self):
         pass
 
-    @unittest.skip("No support for low_cpu_mem_usage=True.")
+    @unittest.skip(reason="No support for low_cpu_mem_usage=True.")
     def test_save_load_low_cpu_mem_usage(self):
         pass
 
-    @unittest.skip("No support for low_cpu_mem_usage=True.")
+    @unittest.skip(reason="No support for low_cpu_mem_usage=True.")
     def test_save_load_low_cpu_mem_usage_checkpoints(self):
         pass
 
-    @unittest.skip("No support for low_cpu_mem_usage=True.")
+    @unittest.skip(reason="No support for low_cpu_mem_usage=True.")
     def test_save_load_low_cpu_mem_usage_no_safetensors(self):
         pass
 
-    @unittest.skip("model weights aren't tied in TimmBackbone.")
+    @unittest.skip(reason="model weights aren't tied in TimmBackbone.")
     def test_tie_model_weights(self):
         pass
 
-    @unittest.skip("model weights aren't tied in TimmBackbone.")
+    @unittest.skip(reason="model weights aren't tied in TimmBackbone.")
     def test_tied_model_weights_key_ignore(self):
         pass
 
-    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
+    @unittest.skip(reason="Only checkpoints on timm can be loaded into TimmBackbone")
     def test_load_save_without_tied_weights(self):
         pass
 
-    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
+    @unittest.skip(reason="Only checkpoints on timm can be loaded into TimmBackbone")
     def test_model_weights_reload_no_missing_tied_weights(self):
         pass
 
-    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
+    @unittest.skip(reason="TimmBackbone doesn't have hidden size info in its configuration.")
     def test_channels(self):
         pass
 
-    @unittest.skip("TimmBackbone doesn't support output_attentions.")
+    @unittest.skip(reason="TimmBackbone doesn't support output_attentions.")
     def test_torchscript_output_attentions(self):
         pass
 
-    @unittest.skip("Safetensors is not supported by timm.")
+    @unittest.skip(reason="Safetensors is not supported by timm.")
     def test_can_use_safetensors(self):
         pass
 
-    @unittest.skip("Need to use a timm backbone and there is no tiny model available.")
+    @unittest.skip(reason="Need to use a timm backbone and there is no tiny model available.")
     def test_model_is_small(self):
         pass
 
@@ -170,15 +170,15 @@ class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, P
         self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
         self.config_tester = ConfigTester(self, config_class=TrOCRConfig)
 
-    # not implemented currently
+    @unittest.skip(reason="Not yet implemented")
     def test_inputs_embeds(self):
         pass
 
-    # trocr has no base model
+    @unittest.skip(reason="trocr has no base model")
     def test_save_load_fast_init_from_base(self):
         pass
 
-    # trocr has no base model
+    @unittest.skip(reason="trocr has no base model")
     def test_save_load_fast_init_to_base(self):
         pass
 
@@ -189,10 +189,10 @@ class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, P
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
 
-    # decoder cannot keep gradients
+    @unittest.skip(reason="Decoder cannot keep gradients")
     def test_retain_grad_hidden_states_attentions(self):
         return
 
-    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
+    @unittest.skip(reason="The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
     def test_left_padding_compatibility(self):
         pass
@@ -320,7 +320,7 @@ class UdopModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
 
-    @unittest.skip("Gradient checkpointing is not supported by this model")
+    @unittest.skip(reason="Gradient checkpointing is not supported by this model")
     def test_training_gradient_checkpointing(self):
         pass
@@ -110,7 +110,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    # this tokenizer
     def test_save_sentencepiece_tokenizer(self) -> None:
         if not self.test_sentencepiece or not self.test_slow_tokenizer:
-            return
+            self.skipTest(reason="test_sentencepiece or test_slow_tokenizer is set to False")
 
         # We want to verify that we will be able to save the tokenizer even if the original files that were used to
         # build the tokenizer have been deleted in the meantime.
         words, boxes = self.get_words_and_boxes()
@@ -687,7 +687,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_padding_warning_message_fast_tokenizer(self):
         if not self.test_rust_tokenizer:
-            return
+            self.skipTest(reason="test_rust_tokenizer is set to False")
 
         words, boxes = self.get_words_and_boxes_batch()
@@ -708,7 +708,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         )
 
         if not self.test_slow_tokenizer:
-            return
+            self.skipTest(reason="test_slow_tokenizer is set to False")
 
         tokenizer_slow = self.get_tokenizer()
@@ -817,7 +817,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
                 encoded_sequences_batch_padded_2[key],
             )
 
-    @unittest.skip("batch_encode_plus does not handle overflowing tokens.")
+    @unittest.skip(reason="batch_encode_plus does not handle overflowing tokens.")
     def test_batch_encode_plus_overflowing_tokens(self):
         pass
@@ -878,7 +878,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         for tokenizer in tokenizers:
             with self.subTest(f"{tokenizer.__class__.__name__}"):
                 if tokenizer.pad_token is None:
-                    self.skipTest("No padding token.")
+                    self.skipTest(reason="No padding token.")
                 else:
                     words, boxes = self.get_words_and_boxes()
@@ -919,7 +919,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_build_inputs_with_special_tokens(self):
         if not self.test_slow_tokenizer:
             # as we don't have a slow version, we can't compare the outputs between slow and fast versions
-            return
+            self.skipTest(reason="test_slow_tokenizer is set to False")
 
         for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
             with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
@@ -1008,7 +1008,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
             shutil.rmtree(tmpdirname)
 
-    @unittest.skip("Not implemented")
+    @unittest.skip(reason="Not implemented")
     def test_right_and_left_truncation(self):
         pass
@@ -1153,11 +1153,11 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         # Assert there is online added_tokens special_tokens
         self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)
 
-    @unittest.skip("Chat template tests don't play well with table/layout models.")
+    @unittest.skip(reason="Chat template tests don't play well with table/layout models.")
     def test_chat_template(self):
         pass
 
-    @unittest.skip("Chat template tests don't play well with table/layout models.")
+    @unittest.skip(reason="Chat template tests don't play well with table/layout models.")
     def test_chat_template_batched(self):
         pass
 
@@ -1174,13 +1174,13 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         for tokenizer in tokenizers:
             with self.subTest(f"{tokenizer.__class__.__name__}"):
                 if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING:
-                    return
+                    self.skipTest(f"{tokenizer.__class__} not in MODEL_TOKENIZER_MAPPING")
 
                 config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
                 config = config_class()
 
                 if config.is_encoder_decoder or config.pad_token_id is None:
-                    return
+                    self.skipTest(reason="Model is an encoder-decoder or has no padding token set.")
 
                 model = model_class(config)
@@ -1206,11 +1206,11 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_rust_and_python_full_tokenizers(self):
         if not self.test_rust_tokenizer:
-            return
+            self.skipTest(reason="test_rust_tokenizer is set to False")
 
         if not self.test_slow_tokenizer:
             # as we don't have a slow version, we can't compare the outputs between slow and fast versions
-            return
+            self.skipTest(reason="test_slow_tokenizer is set to False")
 
         tokenizer = self.get_tokenizer()
         rust_tokenizer = self.get_rust_tokenizer()
@@ -1228,7 +1228,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_tokenization_python_rust_equals(self):
         if not self.test_slow_tokenizer:
             # as we don't have a slow version, we can't compare the outputs between slow and fast versions
-            return
+            self.skipTest(reason="test_slow_tokenizer is set to False")
 
         for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
             with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
@@ -1282,7 +1282,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_embeded_special_tokens(self):
         if not self.test_slow_tokenizer:
             # as we don't have a slow version, we can't compare the outputs between slow and fast versions
-            return
+            self.skipTest(reason="test_slow_tokenizer is set to False")
 
         for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
             with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
@@ -1478,7 +1478,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_training_new_tokenizer(self):
         # This feature only exists for fast tokenizers
         if not self.test_rust_tokenizer:
-            return
+            self.skipTest(reason="test_rust_tokenizer is set to False")
 
         tokenizer = self.get_rust_tokenizer()
         new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100)
@@ -1515,7 +1515,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_training_new_tokenizer_with_special_tokens_change(self):
         # This feature only exists for fast tokenizers
         if not self.test_rust_tokenizer:
-            return
+            self.skipTest(reason="test_rust_tokenizer is set to False")
 
         tokenizer = self.get_rust_tokenizer()
         # Test with a special tokens map
@@ -1628,7 +1628,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_padding_different_model_input_name(self):
         if not self.test_slow_tokenizer:
             # as we don't have a slow version, we can't compare the outputs between slow and fast versions
-            return
+            self.skipTest(reason="test_slow_tokenizer is set to False")
 
         for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
             with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
@@ -1722,27 +1722,27 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
             self.assertEqual(len(tokens[key].shape), 3)
             self.assertEqual(tokens[key].shape[-1], 4)
 
-    @unittest.skip("TO DO: overwrite this very extensive test.")
+    @unittest.skip(reason="TO DO: overwrite this very extensive test.")
     def test_alignement_methods(self):
         pass
 
-    @unittest.skip("UDOP tokenizer requires boxes besides sequences.")
+    @unittest.skip(reason="UDOP tokenizer requires boxes besides sequences.")
     def test_maximum_encoding_length_pair_input(self):
         pass
 
-    @unittest.skip("UDOP tokenizer requires boxes besides sequences.")
+    @unittest.skip(reason="UDOP tokenizer requires boxes besides sequences.")
     def test_maximum_encoding_length_single_input(self):
         pass
 
-    @unittest.skip("UDOP tokenizer requires boxes besides sequences.")
+    @unittest.skip(reason="UDOP tokenizer requires boxes besides sequences.")
     def test_pretokenized_inputs(self):
         pass
 
-    @unittest.skip("UDOP tokenizer always expects pretokenized inputs.")
+    @unittest.skip(reason="UDOP tokenizer always expects pretokenized inputs.")
     def test_compare_pretokenized_inputs(self):
         pass
 
-    @unittest.skip("UDOP fast tokenizer does not support prepare_for_model")
+    @unittest.skip(reason="UDOP fast tokenizer does not support prepare_for_model")
     def test_compare_prepare_for_model(self):
         pass
 
@@ -1863,15 +1863,15 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         self.assertDictEqual(dict(encoding_p), expected_results)
         self.assertDictEqual(dict(encoding_r), expected_results)
 
-    @unittest.skip("Doesn't support another framework than PyTorch")
+    @unittest.skip(reason="Doesn't support another framework than PyTorch")
     def test_np_encode_plus_sent_to_model(self):
         pass
 
-    @unittest.skip("Doesn't use SentencePiece")
+    @unittest.skip(reason="Doesn't use SentencePiece")
     def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
         pass
 
-    @unittest.skip("Doesn't use SentencePiece")
+    @unittest.skip(reason="Doesn't use SentencePiece")
     def test_sentencepiece_tokenize_and_decode(self):
         pass
 
@@ -331,7 +331,7 @@ class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin
     def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False):
         if not is_torch_fx_available() or not self.fx_compatible:
-            return
+            self.skipTest(reason="torch fx is not available or not compatible with this model")
 
         configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
         configs_no_init.return_dict = False
@@ -483,7 +483,7 @@ class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_with_sequence_classification_head(*config_and_inputs)
 
-    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
+    @unittest.skip(reason="Test has a segmentation fault on torch 1.8.0")
     def test_export_to_onnx(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         model = UMT5Model(config_and_inputs[0]).to(torch_device)
@@ -354,21 +354,22 @@ class UniSpeechRobustModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.T
         self.model_tester.check_labels_out_of_vocab(*config_and_inputs)
 
-    # UniSpeech has no inputs_embeds
+    @unittest.skip(reason="UniSpeech has no inputs_embeds")
     def test_inputs_embeds(self):
         pass
 
-    # `input_ids` is renamed to `input_values`
+    @unittest.skip(reason="UniSpeech has no inputs_embeds")
     def test_forward_signature(self):
         pass
 
-    # UniSpeech cannot resize token embeddings
-    # since it has no tokens embeddings
+    @unittest.skip(reason="UniSpeech has no tokens embeds")
     def test_resize_tokens_embeddings(self):
         pass
 
-    # UniSpeech has no inputs_embeds
-    # and thus the `get_input_embeddings` fn
-    # is not implemented
+    @unittest.skip(reason="UniSpeech has no inputs_embeds")
     def test_model_get_set_embeddings(self):
         pass
 
@@ -403,22 +403,19 @@ class UniSpeechSatModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.Test
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.check_labels_out_of_vocab(*config_and_inputs)
 
-    # UniSpeechSat has no inputs_embeds
+    @unittest.skip(reason="Model has no input_embeds")
     def test_inputs_embeds(self):
         pass
 
-    # `input_ids` is renamed to `input_values`
+    @unittest.skip(reason="Model has input_values instead of input_ids")
     def test_forward_signature(self):
         pass
 
-    # UniSpeechSat cannot resize token embeddings
-    # since it has no tokens embeddings
+    @unittest.skip(reason="Model has no tokens embeddings")
     def test_resize_tokens_embeddings(self):
         pass
 
-    # UniSpeechSat has no inputs_embeds
-    # and thus the `get_input_embeddings` fn
-    # is not implemented
+    @unittest.skip(reason="Model has no input_embeds")
     def test_model_get_set_embeddings(self):
         pass
@@ -615,22 +612,19 @@ class UniSpeechSatRobustModelTest(ModelTesterMixin, unittest.TestCase):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.check_labels_out_of_vocab(*config_and_inputs)
 
-    # UniSpeechSat has no inputs_embeds
+    @unittest.skip(reason="Model has no input_embeds")
     def test_inputs_embeds(self):
         pass
 
-    # `input_ids` is renamed to `input_values`
+    @unittest.skip(reason="Model has input_values instead of input_ids")
     def test_forward_signature(self):
         pass
 
-    # UniSpeechSat cannot resize token embeddings
-    # since it has no tokens embeddings
+    @unittest.skip(reason="Model has no tokens embeddings")
     def test_resize_tokens_embeddings(self):
         pass
 
-    # UniSpeechSat has no inputs_embeds
-    # and thus the `get_input_embeddings` fn
-    # is not implemented
+    @unittest.skip(reason="Model has no input_embeds")
     def test_model_get_set_embeddings(self):
         pass