"test/vscode:/vscode.git/clone" did not exist on "baf277a9bfd17669a1b1956d3bdbc24c3678a65b"
Unverified Commit 1de7dc74 authored by amyeroberts, committed by GitHub

Skip tests properly (#31308)

* Skip tests properly

* [test_all]

* Add 'reason' as kwarg for skipTest

* [test_all] Fix up

* [test_all]
parent 1f9f57ab
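The whole change applies one pattern, repeated across every file in the diff below: a test that was disabled with a bare comment plus `pass` (or a silent `return` inside the body) becomes an explicit skip, so the reason is recorded in the test report instead of the test counting as a pass. A minimal sketch of the two idioms; the class name and the `test_resize_embeddings` flag are hypothetical stand-ins for the mixin-style switches used in these suites, not code from this commit:

import unittest


class ExampleModelTest(unittest.TestCase):
    # Hypothetical flag standing in for the ModelTesterMixin-style switches.
    test_resize_embeddings = False

    # Decorator form: the runner never calls the test and reports the reason.
    @unittest.skip(reason="Model has no inputs_embeds")
    def test_inputs_embeds(self):
        pass

    # In-body form: skipTest raises unittest.SkipTest, so execution stops here
    # and the test is reported as skipped rather than silently passing.
    def test_resize_tokens_embeddings(self):
        if not self.test_resize_embeddings:
            self.skipTest(reason="test_resize_embeddings is set to False")
        # real assertions would follow here


if __name__ == "__main__":
    unittest.main()

Because `skipTest` raises, no `return` is needed before it; the bare `return` variants in the old code exited early but were still counted as passes.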
@@ -569,15 +569,15 @@ class Speech2TextBertModelTest(EncoderDecoderMixin, unittest.TestCase):
             "labels": decoder_token_labels,
         }

-    # can't save full model for now because Speech2TextModel != Speech2TextEncoder
+    @unittest.skip(reason="Cannot save full model as Speech2TextModel != Speech2TextEncoder")
     def test_encoder_decoder_model_from_pretrained_configs(self):
         pass

-    # can't save full model for now because Speech2TextModel != Speech2TextEncoder
+    @unittest.skip(reason="Cannot save full model as Speech2TextModel != Speech2TextEncoder")
     def test_save_and_load_from_pretrained(self):
         pass

     @require_deterministic_for_xpu
-    # all published pretrained models are Speech2TextModel != Speech2TextEncoder
+    @unittest.skip(reason="Cannot save full model as Speech2TextModel != Speech2TextEncoder")
     def test_real_model_save_load_from_pretrained(self):
         pass
@@ -326,14 +326,15 @@ class Speech2TextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTest
         config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
         self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

-    # not implemented currently
+    @unittest.skip(reason="Not implemented currently")
     def test_inputs_embeds(self):
         pass

-    # training is not supported yet
+    @unittest.skip(reason="Training is not supported yet")
     def test_training(self):
         pass

+    @unittest.skip
     def test_training_gradient_checkpointing(self):
         pass
@@ -536,7 +537,7 @@ class Speech2TextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTest
             inputs_dict,
         ) = self.model_tester.prepare_config_and_inputs_for_common()
         if not self.test_resize_embeddings:
-            return
+            self.skipTest(reason="test_resize_embeddings is set to False")

         for model_class in self.all_model_classes:
             config = copy.deepcopy(original_config)
@@ -584,13 +585,13 @@ class Speech2TextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTest
             inputs_dict,
         ) = self.model_tester.prepare_config_and_inputs_for_common()
         if not self.test_resize_embeddings:
-            return
+            self.skipTest(reason="test_resize_embeddings is set to False")

         original_config.tie_word_embeddings = False

         # if model cannot untied embeddings -> leave test
         if original_config.tie_word_embeddings:
-            return
+            self.skipTest(reason="Model cannot untie embeddings")

         for model_class in self.all_model_classes:
             config = copy.deepcopy(original_config)
@@ -627,6 +628,7 @@ class Speech2TextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTest
             # Check that the model can still do a forward pass successfully (every parameter should be resized)
             model(**self._prepare_for_class(inputs_dict, model_class))

+    @unittest.skip
     def test_generate_without_input_ids(self):
         pass
@@ -695,7 +697,7 @@ class Speech2TextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTest
     def _create_and_check_torchscript(self, config, inputs_dict):
         if not self.test_torchscript:
-            return
+            self.skipTest(reason="test_torchscript is set to False")

         configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
         configs_no_init.torchscript = True
@@ -773,7 +775,7 @@ class Speech2TextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTest
         # Allow missing keys since TF doesn't cache the sinusoidal embeddings in an attribute
         super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)

-    @unittest.skip("Test failing, @RocketNight is looking into it")
+    @unittest.skip(reason="Test failing, @RocketNight is looking into it")
     def test_tf_from_pt_safetensors(self):
         pass
...
@@ -212,31 +212,31 @@ class SpeechT5ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase
         )
         self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

-    # this model has no inputs_embeds
+    @unittest.skip(reason="Model has no input_embeds")
     def test_inputs_embeds(self):
         pass

-    # this model has no input embeddings
+    @unittest.skip(reason="Model has no input_embeds")
     def test_model_get_set_embeddings(self):
         pass

+    @unittest.skip(reason="Decoder cannot keep gradients")
     def test_retain_grad_hidden_states_attentions(self):
-        # decoder cannot keep gradients
         pass

     @slow
+    @unittest.skip(reason="Model does not have decoder_input_ids")
     def test_torchscript_output_attentions(self):
-        # disabled because this model doesn't have decoder_input_ids
         pass

     @slow
+    @unittest.skip(reason="Model does not have decoder_input_ids")
     def test_torchscript_output_hidden_state(self):
-        # disabled because this model doesn't have decoder_input_ids
         pass

     @slow
+    @unittest.skip(reason="Model does not have decoder_input_ids")
     def test_torchscript_simple(self):
-        # disabled because this model doesn't have decoder_input_ids
         pass
@@ -598,19 +598,20 @@ class SpeechT5ForSpeechToTextTest(ModelTesterMixin, unittest.TestCase):
         )

     # this model has no inputs_embeds
+    @unittest.skip(reason="Model has no input_embeds")
     def test_inputs_embeds(self):
         pass

     def test_resize_embeddings_untied(self):
         original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
         if not self.test_resize_embeddings:
-            return
+            self.skipTest(reason="test_resize_embeddings is set to False")

         original_config.tie_word_embeddings = False

         # if model cannot untied embeddings -> leave test
         if original_config.tie_word_embeddings:
-            return
+            self.skipTest(reason="Model cannot untie embeddings")

         for model_class in self.all_model_classes:
             config = copy.deepcopy(original_config)
@@ -650,7 +651,7 @@ class SpeechT5ForSpeechToTextTest(ModelTesterMixin, unittest.TestCase):
     def test_resize_tokens_embeddings(self):
         original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
         if not self.test_resize_embeddings:
-            return
+            self.skipTest(reason="test_resize_embeddings is set to False")

         for model_class in self.all_model_classes:
             config = copy.deepcopy(original_config)
@@ -692,14 +693,16 @@ class SpeechT5ForSpeechToTextTest(ModelTesterMixin, unittest.TestCase):
         self.assertTrue(models_equal)

+    @unittest.skip(reason="Decoder cannot keep gradients")
     def test_retain_grad_hidden_states_attentions(self):
         # decoder cannot keep gradients
         pass

-    # training is not supported yet
+    @unittest.skip(reason="Training is not supported yet")
     def test_training(self):
         pass

+    @unittest.skip(reason="Training is not supported yet")
     def test_training_gradient_checkpointing(self):
         pass
@@ -928,15 +931,15 @@ class SpeechT5ForTextToSpeechTest(ModelTesterMixin, unittest.TestCase):
             (self.model_tester.batch_size, self.model_tester.decoder_seq_length, self.model_tester.num_mel_bins),
         )

-    # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet
+    @unittest.skip(reason="Dropout is always present in SpeechT5SpeechDecoderPrenet")
     def test_decoder_model_past_with_large_inputs(self):
         pass

-    # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet
+    @unittest.skip(reason="Dropout is always present in SpeechT5SpeechDecoderPrenet")
     def test_determinism(self):
         pass

-    @unittest.skip("skipped because there is always dropout in SpeechT5SpeechDecoderPrenet")
+    @unittest.skip(reason="skipped because there is always dropout in SpeechT5SpeechDecoderPrenet")
     def test_batching_equivalence(self):
         pass
@@ -985,41 +988,43 @@ class SpeechT5ForTextToSpeechTest(ModelTesterMixin, unittest.TestCase):
                     msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                 )

-    # this model has no inputs_embeds
+    @unittest.skip(reason="Model has no inputs_embeds")
     def test_inputs_embeds(self):
         pass

-    # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet
+    @unittest.skip(reason="Dropout is always present in SpeechT5SpeechDecoderPrenet")
     def test_model_outputs_equivalence(self):
         pass

-    # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet
+    @unittest.skip(reason="Dropout is always present in SpeechT5SpeechDecoderPrenet")
     def test_save_load(self):
         pass

+    @unittest.skip(reason="Decoder cannot keep gradients")
     def test_retain_grad_hidden_states_attentions(self):
-        # decoder cannot keep gradients
         pass

     @slow
+    @unittest.skip(reason="Model doesn't have decoder_input_ids")
     def test_torchscript_output_attentions(self):
-        # disabled because this model doesn't have decoder_input_ids
         pass

     @slow
+    @unittest.skip(reason="Model doesn't have decoder_input_ids")
     def test_torchscript_output_hidden_state(self):
-        # disabled because this model doesn't have decoder_input_ids
         pass

     @slow
+    @unittest.skip(reason="Model doesn't have decoder_input_ids")
     def test_torchscript_simple(self):
         # disabled because this model doesn't have decoder_input_ids
         pass

-    # training is not supported yet
+    @unittest.skip(reason="training is not supported yet")
     def test_training(self):
         pass

+    @unittest.skip(reason="training is not supported yet")
     def test_training_gradient_checkpointing(self):
         pass
@@ -1472,15 +1477,15 @@ class SpeechT5ForSpeechToSpeechTest(ModelTesterMixin, unittest.TestCase):
             (self.model_tester.batch_size, self.model_tester.decoder_seq_length, self.model_tester.num_mel_bins),
         )

-    # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet
+    @unittest.skip(reason="There is always dropout in SpeechT5SpeechDecoderPrenet")
     def test_decoder_model_past_with_large_inputs(self):
         pass

-    # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet
+    @unittest.skip(reason="There is always dropout in SpeechT5SpeechDecoderPrenet")
     def test_determinism(self):
         pass

-    @unittest.skip("skipped because there is always dropout in SpeechT5SpeechDecoderPrenet")
+    @unittest.skip(reason="skipped because there is always dropout in SpeechT5SpeechDecoderPrenet")
     def test_batching_equivalence(self):
         pass
@@ -1685,45 +1690,46 @@ class SpeechT5ForSpeechToSpeechTest(ModelTesterMixin, unittest.TestCase):
                     msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                 )

-    # this model has no inputs_embeds
+    @unittest.skip(reason="Model has no input_embeds")
     def test_inputs_embeds(self):
         pass

-    # this model has no input embeddings
+    @unittest.skip(reason="Model has no input_embeds")
     def test_model_get_set_embeddings(self):
         pass

-    # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet
+    @unittest.skip(reason="Dropout is always present in SpeechT5SpeechDecoderPrenet")
     def test_model_outputs_equivalence(self):
         pass

+    @unittest.skip(reason="Decoder cannot keep gradients")
     def test_retain_grad_hidden_states_attentions(self):
-        # decoder cannot keep gradients
         pass

-    # skipped because there is always dropout in SpeechT5SpeechDecoderPrenet
+    @unittest.skip(reason="Dropout is always present in SpeechT5SpeechDecoderPrenet")
     def test_save_load(self):
         pass

     @slow
+    @unittest.skip(reason="Model doesn't have decoder_input_ids")
     def test_torchscript_output_attentions(self):
-        # disabled because this model doesn't have decoder_input_ids
         pass

     @slow
+    @unittest.skip(reason="Model doesn't have decoder_input_ids")
     def test_torchscript_output_hidden_state(self):
-        # disabled because this model doesn't have decoder_input_ids
         pass

     @slow
+    @unittest.skip(reason="Model doesn't have decoder_input_ids")
     def test_torchscript_simple(self):
-        # disabled because this model doesn't have decoder_input_ids
         pass

-    # training is not supported yet
+    @unittest.skip(reason="Training is not supported yet")
     def test_training(self):
         pass

+    @unittest.skip(reason="Training is not supported yet")
     def test_training_gradient_checkpointing(self):
         pass
@@ -1873,35 +1879,35 @@ class SpeechT5HifiGanTest(ModelTesterMixin, unittest.TestCase):
         ]
         self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

-    # this model does not output hidden states
+    @unittest.skip(reason="Model does not output hidden states")
     def test_hidden_states_output(self):
         pass

-    # skip
+    @unittest.skip
     def test_initialization(self):
         pass

-    # this model has no inputs_embeds
+    @unittest.skip(reason="Model has no input_embeds")
     def test_inputs_embeds(self):
         pass

-    # this model has no input embeddings
+    @unittest.skip(reason="Model has no input_embeds")
     def test_model_get_set_embeddings(self):
         pass

-    # skip as this model doesn't support all arguments tested
+    @unittest.skip(reason="Model does not support all arguments tested")
     def test_model_outputs_equivalence(self):
         pass

-    # this model does not output hidden states
+    @unittest.skip(reason="Model does not output hidden states")
     def test_retain_grad_hidden_states_attentions(self):
         pass

-    # skip because it fails on automapping of SpeechT5HifiGanConfig
+    @unittest.skip(reason="Fails on automapping of SpeechT5HifiGanConfig")
     def test_save_load_fast_init_from_base(self):
         pass

-    # skip because it fails on automapping of SpeechT5HifiGanConfig
+    @unittest.skip(reason="Fails on automapping of SpeechT5HifiGanConfig")
     def test_save_load_fast_init_to_base(self):
         pass
...
@@ -143,9 +143,11 @@ class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
         self.assertEqual(tokens[0], tokenizer.eos_token_id)
         self.assertEqual(tokens[-3], tokenizer.pad_token_id)

+    @unittest.skip
     def test_pickle_subword_regularization_tokenizer(self):
         pass

+    @unittest.skip
     def test_subword_regularization_tokenizer(self):
         pass
...
@@ -389,11 +389,11 @@ class Starcoder2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTeste
             (self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.num_labels),
         )

-    @unittest.skip("Starcoder2 buffers include complex numbers, which breaks this test")
+    @unittest.skip(reason="Starcoder2 buffers include complex numbers, which breaks this test")
     def test_save_load_fast_init_from_base(self):
         pass

-    @unittest.skip("Starcoder2 uses GQA on all models so the KV cache is a non standard format")
+    @unittest.skip(reason="Starcoder2 uses GQA on all models so the KV cache is a non standard format")
     def test_past_key_values_format(self):
         pass
@@ -481,7 +481,7 @@ class Starcoder2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTeste
     @pytest.mark.flash_attn_test
     @slow
     def test_flash_attn_2_inference_equivalence_right_padding(self):
-        self.skipTest("Starcoder2 flash attention does not support right padding")
+        self.skipTest(reason="Starcoder2 flash attention does not support right padding")

     @slow
...
@@ -672,7 +672,7 @@ class SwitchTransformersModelTest(ModelTesterMixin, GenerationTesterMixin, Pipel
         model = SwitchTransformersModel.from_pretrained(model_name)
         self.assertIsNotNone(model)

-    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
+    @unittest.skip(reason="Test has a segmentation fault on torch 1.8.0")
     def test_export_to_onnx(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         model = SwitchTransformersModel(config_and_inputs[0]).to(torch_device)
...
@@ -596,7 +596,7 @@ class T5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin,
     def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False):
         if not is_torch_fx_available() or not self.fx_compatible:
-            return
+            self.skipTest(reason="torch.fx is not available or not compatible with this model")

         configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
         configs_no_init.return_dict = False
@@ -840,7 +840,7 @@ class T5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin,
         model = T5Model.from_pretrained(model_name)
         self.assertIsNotNone(model)

-    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
+    @unittest.skip(reason="Test has a segmentation fault on torch 1.8.0")
     def test_export_to_onnx(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         model = T5Model(config_and_inputs[0]).to(torch_device)
...
@@ -153,7 +153,7 @@ class T5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_rust_and_python_full_tokenizers(self):
         if not self.test_rust_tokenizer:
-            return
+            self.skipTest(reason="test_rust_tokenizer is set to False")

         tokenizer = self.get_tokenizer()
         rust_tokenizer = self.get_rust_tokenizer()
...
@@ -277,8 +277,8 @@ class TableTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, Pipelin
         pass

     @slow
+    @unittest.skip(reason="TODO Niels: fix me!")
     def test_model_outputs_equivalence(self):
-        # TODO Niels: fix me!
         pass

     def test_attention_outputs(self):
...
@@ -520,11 +520,11 @@ class TapasModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

     @require_tensorflow_probability
-    @unittest.skip("tfp is not defined even if installed. FIXME @Arthur in a followup PR!")
+    @unittest.skip(reason="tfp is not defined even if installed. FIXME @Arthur in a followup PR!")
     def test_pt_tf_model_equivalence(self):
         pass

-    @unittest.skip("tfp is not defined even if installed. FIXME @Arthur in a followup PR!")
+    @unittest.skip(reason="tfp is not defined even if installed. FIXME @Arthur in a followup PR!")
     def test_tf_from_pt_safetensors(self):
         pass
...
@@ -158,13 +158,13 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         for tokenizer in tokenizers:
             with self.subTest(f"{tokenizer.__class__.__name__}"):
                 if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING:
-                    return
+                    self.skipTest(f"{tokenizer.__class__} is not in the MODEL_TOKENIZER_MAPPING")

                 config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
                 config = config_class()

                 if config.is_encoder_decoder or config.pad_token_id is None:
-                    return
+                    self.skipTest(reason="Model is an encoder-decoder or does not have a pad token id set")

                 model = model_class(config)
@@ -184,7 +184,7 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_rust_and_python_full_tokenizers(self):
         if not self.test_rust_tokenizer:
-            return
+            self.skipTest(reason="test_rust_tokenizer is set to False")

         tokenizer = self.get_tokenizer()
         rust_tokenizer = self.get_rust_tokenizer()
@@ -223,7 +223,7 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         rust_ids = rust_tokenizer.encode(sequence)
         self.assertListEqual(ids, rust_ids)

-    @unittest.skip("Chat template tests don't play well with table/layout models.")
+    @unittest.skip(reason="Chat template tests don't play well with table/layout models.")
     def test_chat_template_batched(self):
         pass
@@ -633,11 +633,11 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
             sequences, mask = information["input_ids"], information["token_type_ids"]
             self.assertEqual(len(sequences), len(mask))

-    @unittest.skip("TAPAS tokenizer only handles two sequences.")
+    @unittest.skip(reason="TAPAS tokenizer only handles two sequences.")
     def test_maximum_encoding_length_pair_input(self):
         pass

-    @unittest.skip("TAPAS tokenizer only handles two sequences.")
+    @unittest.skip(reason="TAPAS tokenizer only handles two sequences.")
     def test_maximum_encoding_length_single_input(self):
         pass
@@ -779,7 +779,7 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
                     encoded_sequences_batch_padded_2[key],
                 )

-    @unittest.skip("batch_encode_plus does not handle overflowing tokens.")
+    @unittest.skip(reason="batch_encode_plus does not handle overflowing tokens.")
     def test_batch_encode_plus_overflowing_tokens(self):
         pass
@@ -846,7 +846,7 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
             with self.subTest(f"{tokenizer.__class__.__name__}"):
                 table = self.get_table(tokenizer, length=0)
                 if tokenizer.pad_token is None:
-                    self.skipTest("No padding token.")
+                    self.skipTest(reason="No padding token.")
                 else:
                     empty_tokens = tokenizer(table, padding=True, pad_to_multiple_of=8)
                     normal_tokens = tokenizer(table, "This is a sample input", padding=True, pad_to_multiple_of=8)
@@ -864,7 +864,9 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
                 for key, value in normal_tokens.items():
                     self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")

-    @unittest.skip("TAPAS cannot handle `prepare_for_model` without passing by `encode_plus` or `batch_encode_plus`")
+    @unittest.skip(
+        reason="TAPAS cannot handle `prepare_for_model` without passing by `encode_plus` or `batch_encode_plus`"
+    )
     def test_prepare_for_model(self):
         pass
@@ -948,7 +950,7 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         shutil.rmtree(tmpdirname)

-    @unittest.skip("Not implemented")
+    @unittest.skip(reason="Not implemented")
     def test_right_and_left_truncation(self):
         pass
@@ -1051,13 +1053,13 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         for tokenizer in tokenizers:
             with self.subTest(f"{tokenizer.__class__.__name__}"):
                 if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING:
-                    return
+                    self.skipTest(f"{tokenizer.__class__} is not in the MODEL_TOKENIZER_MAPPING")

                 config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
                 config = config_class()

                 if config.is_encoder_decoder or config.pad_token_id is None:
-                    return
+                    self.skipTest(reason="Model is an encoder-decoder or has no padding token set.")

                 model = model_class(config)
@@ -1081,7 +1083,7 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
                 model(**encoded_sequence)
                 model(**batch_encoded_sequence)

-    @unittest.skip("TAPAS doesn't handle pre-tokenized inputs.")
+    @unittest.skip(reason="TAPAS doesn't handle pre-tokenized inputs.")
     def test_pretokenized_inputs(self):
         pass
@@ -1268,10 +1270,10 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         self.assertListEqual(column_ids.tolist(), expected_results["column_ids"])
         self.assertListEqual(row_ids.tolist(), expected_results["row_ids"])

-    @unittest.skip("Doesn't support another framework than PyTorch")
+    @unittest.skip(reason="Doesn't support another framework than PyTorch")
     def test_np_encode_plus_sent_to_model(self):
         pass

-    @unittest.skip("Chat is not supported")
+    @unittest.skip(reason="Chat is not supported")
     def test_chat_template(self):
         pass
@@ -214,7 +214,7 @@ class TimeSeriesTransformerModelTest(ModelTesterMixin, PipelineTesterMixin, unit
         config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
         self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

-    # Ignore since we have no tokens embeddings
+    @unittest.skip(reason="Model has no tokens embeddings")
     def test_resize_tokens_embeddings(self):
         pass
...
@@ -217,7 +217,7 @@ class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestC
     def test_attention_outputs(self):
         if not self.has_attentions:
-            pass
+            self.skipTest(reason="Model has no attentions")
         else:
             config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
...
@@ -138,75 +138,75 @@ class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTeste
         self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
         self.assertEqual(timm_model.channels, transformers_model.channels)

-    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
+    @unittest.skip(reason="TimmBackbone doesn't support feed forward chunking")
     def test_feed_forward_chunking(self):
         pass

-    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
+    @unittest.skip(reason="TimmBackbone doesn't have num_hidden_layers attribute")
     def test_hidden_states_output(self):
         pass

-    @unittest.skip("TimmBackbone initialization is managed on the timm side")
+    @unittest.skip(reason="TimmBackbone initialization is managed on the timm side")
     def test_initialization(self):
         pass

-    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
+    @unittest.skip(reason="TimmBackbone models doesn't have inputs_embeds")
     def test_inputs_embeds(self):
         pass

-    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
+    @unittest.skip(reason="TimmBackbone models doesn't have inputs_embeds")
     def test_model_get_set_embeddings(self):
         pass

-    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
+    @unittest.skip(reason="TimmBackbone model cannot be created without specifying a backbone checkpoint")
     def test_from_pretrained_no_checkpoint(self):
         pass

-    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
+    @unittest.skip(reason="Only checkpoints on timm can be loaded into TimmBackbone")
     def test_save_load(self):
         pass

-    @unittest.skip("No support for low_cpu_mem_usage=True.")
+    @unittest.skip(reason="No support for low_cpu_mem_usage=True.")
     def test_save_load_low_cpu_mem_usage(self):
         pass

-    @unittest.skip("No support for low_cpu_mem_usage=True.")
+    @unittest.skip(reason="No support for low_cpu_mem_usage=True.")
     def test_save_load_low_cpu_mem_usage_checkpoints(self):
         pass

-    @unittest.skip("No support for low_cpu_mem_usage=True.")
+    @unittest.skip(reason="No support for low_cpu_mem_usage=True.")
     def test_save_load_low_cpu_mem_usage_no_safetensors(self):
         pass

-    @unittest.skip("model weights aren't tied in TimmBackbone.")
+    @unittest.skip(reason="model weights aren't tied in TimmBackbone.")
     def test_tie_model_weights(self):
         pass

-    @unittest.skip("model weights aren't tied in TimmBackbone.")
+    @unittest.skip(reason="model weights aren't tied in TimmBackbone.")
     def test_tied_model_weights_key_ignore(self):
         pass

-    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
+    @unittest.skip(reason="Only checkpoints on timm can be loaded into TimmBackbone")
     def test_load_save_without_tied_weights(self):
         pass

-    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
+    @unittest.skip(reason="Only checkpoints on timm can be loaded into TimmBackbone")
     def test_model_weights_reload_no_missing_tied_weights(self):
         pass

-    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
+    @unittest.skip(reason="TimmBackbone doesn't have hidden size info in its configuration.")
     def test_channels(self):
         pass

-    @unittest.skip("TimmBackbone doesn't support output_attentions.")
+    @unittest.skip(reason="TimmBackbone doesn't support output_attentions.")
     def test_torchscript_output_attentions(self):
         pass

-    @unittest.skip("Safetensors is not supported by timm.")
+    @unittest.skip(reason="Safetensors is not supported by timm.")
     def test_can_use_safetensors(self):
         pass

-    @unittest.skip("Need to use a timm backbone and there is no tiny model available.")
+    @unittest.skip(reason="Need to use a timm backbone and there is no tiny model available.")
     def test_model_is_small(self):
         pass
...
@@ -170,15 +170,15 @@ class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, P
         self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
         self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

-    # not implemented currently
+    @unittest.skip(reason="Not yet implemented")
     def test_inputs_embeds(self):
         pass

-    # trocr has no base model
+    @unittest.skip(reason="trocr has no base model")
     def test_save_load_fast_init_from_base(self):
         pass

-    # trocr has no base model
+    @unittest.skip(reason="trocr has no base model")
     def test_save_load_fast_init_to_base(self):
         pass
@@ -189,10 +189,10 @@ class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, P
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

-    # decoder cannot keep gradients
+    @unittest.skip(reason="Decoder cannot keep gradients")
     def test_retain_grad_hidden_states_attentions(self):
         return

-    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
+    @unittest.skip(reason="The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
     def test_left_padding_compatibility(self):
         pass
@@ -320,7 +320,7 @@ class UdopModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

-    @unittest.skip("Gradient checkpointing is not supported by this model")
+    @unittest.skip(reason="Gradient checkpointing is not supported by this model")
     def test_training_gradient_checkpointing(self):
         pass
...
@@ -110,7 +110,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     # this tokenizer
     def test_save_sentencepiece_tokenizer(self) -> None:
         if not self.test_sentencepiece or not self.test_slow_tokenizer:
-            return
+            self.skipTest(reason="test_sentencepiece or test_slow_tokenizer is set to False")
         # We want to verify that we will be able to save the tokenizer even if the original files that were used to
         # build the tokenizer have been deleted in the meantime.
         words, boxes = self.get_words_and_boxes()
@@ -687,7 +687,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_padding_warning_message_fast_tokenizer(self):
         if not self.test_rust_tokenizer:
-            return
+            self.skipTest(reason="test_rust_tokenizer is set to False")

         words, boxes = self.get_words_and_boxes_batch()
@@ -708,7 +708,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         )

         if not self.test_slow_tokenizer:
-            return
+            self.skipTest(reason="test_slow_tokenizer is set to False")

         tokenizer_slow = self.get_tokenizer()
@@ -817,7 +817,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
                     encoded_sequences_batch_padded_2[key],
                 )

-    @unittest.skip("batch_encode_plus does not handle overflowing tokens.")
+    @unittest.skip(reason="batch_encode_plus does not handle overflowing tokens.")
     def test_batch_encode_plus_overflowing_tokens(self):
         pass
@@ -878,7 +878,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         for tokenizer in tokenizers:
             with self.subTest(f"{tokenizer.__class__.__name__}"):
                 if tokenizer.pad_token is None:
-                    self.skipTest("No padding token.")
+                    self.skipTest(reason="No padding token.")
                 else:
                     words, boxes = self.get_words_and_boxes()
@@ -919,7 +919,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_build_inputs_with_special_tokens(self):
         if not self.test_slow_tokenizer:
             # as we don't have a slow version, we can't compare the outputs between slow and fast versions
-            return
+            self.skipTest(reason="test_slow_tokenizer is set to False")

         for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
             with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
@@ -1008,7 +1008,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         shutil.rmtree(tmpdirname)

-    @unittest.skip("Not implemented")
+    @unittest.skip(reason="Not implemented")
     def test_right_and_left_truncation(self):
         pass
@@ -1153,11 +1153,11 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
             # Assert there is online added_tokens special_tokens
             self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)

-    @unittest.skip("Chat template tests don't play well with table/layout models.")
+    @unittest.skip(reason="Chat template tests don't play well with table/layout models.")
     def test_chat_template(self):
         pass

-    @unittest.skip("Chat template tests don't play well with table/layout models.")
+    @unittest.skip(reason="Chat template tests don't play well with table/layout models.")
     def test_chat_template_batched(self):
         pass
@@ -1174,13 +1174,13 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         for tokenizer in tokenizers:
             with self.subTest(f"{tokenizer.__class__.__name__}"):
                 if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING:
-                    return
+                    self.skipTest(f"{tokenizer.__class__} not in MODEL_TOKENIZER_MAPPING")

                 config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
                 config = config_class()

                 if config.is_encoder_decoder or config.pad_token_id is None:
-                    return
+                    self.skipTest(reason="Model is an encoder-decoder or has no padding token set.")

                 model = model_class(config)
@@ -1206,11 +1206,11 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_rust_and_python_full_tokenizers(self):
         if not self.test_rust_tokenizer:
-            return
+            self.skipTest(reason="test_rust_tokenizer is set to False")

         if not self.test_slow_tokenizer:
             # as we don't have a slow version, we can't compare the outputs between slow and fast versions
-            return
+            self.skipTest(reason="test_slow_tokenizer is set to False")

         tokenizer = self.get_tokenizer()
         rust_tokenizer = self.get_rust_tokenizer()
@@ -1228,7 +1228,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_tokenization_python_rust_equals(self):
         if not self.test_slow_tokenizer:
             # as we don't have a slow version, we can't compare the outputs between slow and fast versions
-            return
+            self.skipTest(reason="test_slow_tokenizer is set to False")

         for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
             with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
@@ -1282,7 +1282,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_embeded_special_tokens(self):
         if not self.test_slow_tokenizer:
             # as we don't have a slow version, we can't compare the outputs between slow and fast versions
-            return
+            self.skipTest(reason="test_slow_tokenizer is set to False")

         for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
             with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
@@ -1478,7 +1478,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_training_new_tokenizer(self):
         # This feature only exists for fast tokenizers
         if not self.test_rust_tokenizer:
-            return
+            self.skipTest(reason="test_rust_tokenizer is set to False")

         tokenizer = self.get_rust_tokenizer()
         new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100)
@@ -1515,7 +1515,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_training_new_tokenizer_with_special_tokens_change(self):
         # This feature only exists for fast tokenizers
         if not self.test_rust_tokenizer:
-            return
+            self.skipTest(reason="test_rust_tokenizer is set to False")

         tokenizer = self.get_rust_tokenizer()
         # Test with a special tokens map
@@ -1628,7 +1628,7 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_padding_different_model_input_name(self):
         if not self.test_slow_tokenizer:
             # as we don't have a slow version, we can't compare the outputs between slow and fast versions
-            return
+            self.skipTest(reason="test_slow_tokenizer is set to False")

         for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
             with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
@@ -1722,27 +1722,27 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
             self.assertEqual(len(tokens[key].shape), 3)
             self.assertEqual(tokens[key].shape[-1], 4)

-    @unittest.skip("TO DO: overwrite this very extensive test.")
+    @unittest.skip(reason="TO DO: overwrite this very extensive test.")
     def test_alignement_methods(self):
         pass

-    @unittest.skip("UDOP tokenizer requires boxes besides sequences.")
+    @unittest.skip(reason="UDOP tokenizer requires boxes besides sequences.")
     def test_maximum_encoding_length_pair_input(self):
         pass

-    @unittest.skip("UDOP tokenizer requires boxes besides sequences.")
+    @unittest.skip(reason="UDOP tokenizer requires boxes besides sequences.")
     def test_maximum_encoding_length_single_input(self):
         pass

-    @unittest.skip("UDOP tokenizer requires boxes besides sequences.")
+    @unittest.skip(reason="UDOP tokenizer requires boxes besides sequences.")
     def test_pretokenized_inputs(self):
         pass

-    @unittest.skip("UDOP tokenizer always expects pretokenized inputs.")
+    @unittest.skip(reason="UDOP tokenizer always expects pretokenized inputs.")
     def test_compare_pretokenized_inputs(self):
         pass

-    @unittest.skip("UDOP fast tokenizer does not support prepare_for_model")
+    @unittest.skip(reason="UDOP fast tokenizer does not support prepare_for_model")
     def test_compare_prepare_for_model(self):
         pass
@@ -1863,15 +1863,15 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         self.assertDictEqual(dict(encoding_p), expected_results)
         self.assertDictEqual(dict(encoding_r), expected_results)

-    @unittest.skip("Doesn't support another framework than PyTorch")
+    @unittest.skip(reason="Doesn't support another framework than PyTorch")
     def test_np_encode_plus_sent_to_model(self):
         pass

-    @unittest.skip("Doesn't use SentencePiece")
+    @unittest.skip(reason="Doesn't use SentencePiece")
     def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
         pass

-    @unittest.skip("Doesn't use SentencePiece")
+    @unittest.skip(reason="Doesn't use SentencePiece")
     def test_sentencepiece_tokenize_and_decode(self):
         pass
...
@@ -331,7 +331,7 @@ class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin
     def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False):
         if not is_torch_fx_available() or not self.fx_compatible:
-            return
+            self.skipTest(reason="torch fx is not available or not compatible with this model")

         configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
         configs_no_init.return_dict = False
@@ -483,7 +483,7 @@ class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_with_sequence_classification_head(*config_and_inputs)

-    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
+    @unittest.skip(reason="Test has a segmentation fault on torch 1.8.0")
     def test_export_to_onnx(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         model = UMT5Model(config_and_inputs[0]).to(torch_device)
...
@@ -354,21 +354,22 @@ class UniSpeechRobustModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.T
         self.model_tester.check_labels_out_of_vocab(*config_and_inputs)

     # UniSpeech has no inputs_embeds
+    @unittest.skip(reason="UniSpeech has no inputs_embeds")
     def test_inputs_embeds(self):
         pass

     # `input_ids` is renamed to `input_values`
+    @unittest.skip(reason="UniSpeech has no inputs_embeds")
     def test_forward_signature(self):
         pass

     # UniSpeech cannot resize token embeddings
     # since it has no tokens embeddings
+    @unittest.skip(reason="UniSpeech has no tokens embeds")
     def test_resize_tokens_embeddings(self):
         pass

-    # UniSpeech has no inputs_embeds
-    # and thus the `get_input_embeddings` fn
-    # is not implemented
+    @unittest.skip(reason="UniSpeech has no inputs_embeds")
     def test_model_get_set_embeddings(self):
         pass
...
@@ -403,22 +403,19 @@ class UniSpeechSatModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.Test
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.check_labels_out_of_vocab(*config_and_inputs)

-    # UniSpeechSat has no inputs_embeds
+    @unittest.skip(reason="Model has no input_embeds")
     def test_inputs_embeds(self):
         pass

-    # `input_ids` is renamed to `input_values`
+    @unittest.skip(reason="Model has input_values instead of input_ids")
     def test_forward_signature(self):
         pass

-    # UniSpeechSat cannot resize token embeddings
-    # since it has no tokens embeddings
+    @unittest.skip(reason="Model has no tokens embeddings")
     def test_resize_tokens_embeddings(self):
         pass

-    # UniSpeechSat has no inputs_embeds
-    # and thus the `get_input_embeddings` fn
-    # is not implemented
+    @unittest.skip(reason="Model has no input_embeds")
     def test_model_get_set_embeddings(self):
         pass
@@ -615,22 +612,19 @@ class UniSpeechSatRobustModelTest(ModelTesterMixin, unittest.TestCase):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.check_labels_out_of_vocab(*config_and_inputs)

-    # UniSpeechSat has no inputs_embeds
+    @unittest.skip(reason="Model has no input_embeds")
     def test_inputs_embeds(self):
         pass

-    # `input_ids` is renamed to `input_values`
+    @unittest.skip(reason="Model has input_values instead of input_ids")
     def test_forward_signature(self):
         pass

-    # UniSpeechSat cannot resize token embeddings
-    # since it has no tokens embeddings
+    @unittest.skip(reason="Model has no tokens embeddings")
     def test_resize_tokens_embeddings(self):
         pass

-    # UniSpeechSat has no inputs_embeds
-    # and thus the `get_input_embeddings` fn
-    # is not implemented
+    @unittest.skip(reason="Model has no input_embeds")
     def test_model_get_set_embeddings(self):
         pass
...
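A quick way to see the effect of the change: with pytest, which the transformers test suite uses as its runner, passing `-rs` prints a summary line per skipped test including the reason string, e.g. `pytest tests/models/speech_to_text -rs` (the path is illustrative). Skipped tests then show up as `SKIPPED` entries with their reasons in CI logs, instead of the old `return`-based variants being silently counted as passes.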