Unverified Commit 1de7dc74 authored by amyeroberts, committed by GitHub

Skip tests properly (#31308)

* Skip tests properly

* [test_all]

* Add 'reason' as kwarg for skipTest

* [test_all] Fix up

* [test_all]
parent 1f9f57ab
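
The pattern applied throughout this diff: a test that bails out with a bare `return` (or whose skip lives only in a comment) is reported as *passed*, hiding the fact that nothing was checked, whereas `@unittest.skip(reason=...)` and `self.skipTest(reason=...)` report it as *skipped*, with the reason visible in the runner output. A minimal sketch of both forms, with hypothetical test names and reasons (not taken from this PR):

import unittest


class ExampleModelTest(unittest.TestCase):
    # Unconditional skip: the decorator marks the test as "skipped" and the
    # reason string is shown by the test runner.
    @unittest.skip(reason="Example model does not support embedding resizing")
    def test_resize_embeddings(self):
        pass

    def test_torchscript_variant(self):
        # Conditional skip inside the test body. Replacing a bare `return`
        # with skipTest makes the outcome "skipped" instead of a silent "pass".
        # `supports_torchscript` is an illustrative attribute, not a real one.
        if not getattr(self, "supports_torchscript", False):
            self.skipTest(reason="supports_torchscript is set to False")
        self.assertIsNotNone(object())  # real assertions would go here


if __name__ == "__main__":
    unittest.main()

Running `python -m unittest -v` on this sketch reports both tests as skipped, each with its reason, rather than silently passing.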
@@ -1005,6 +1005,7 @@ class GPT2EncoderDecoderModelTest(EncoderDecoderMixin, unittest.TestCase):
            "google-bert/bert-base-cased", "openai-community/gpt2"
        )

+    @unittest.skip
    def test_encoder_decoder_model_shared_weights(self):
        pass
@@ -1079,6 +1080,7 @@ class ProphetNetEncoderDecoderModelTest(EncoderDecoderMixin, unittest.TestCase):
            "google-bert/bert-large-uncased", "microsoft/prophetnet-large-uncased"
        )

+    @unittest.skip
    def test_encoder_decoder_model_shared_weights(self):
        pass
@@ -1135,6 +1137,7 @@ class BartEncoderDecoderModelTest(EncoderDecoderMixin, unittest.TestCase):
            "google-bert/bert-large-uncased", "facebook/bart-large"
        )

+    @unittest.skip
    def test_encoder_decoder_model_shared_weights(self):
        pass
...
@@ -577,9 +577,8 @@ class ErnieModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
-            # ErnieForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == ErnieForMultipleChoice:
-                return
+                self.skipTest(reason="ErnieForMultipleChoice behaves incorrectly in JIT environments.")

            config.torchscript = True
            model = model_class(config=config)
...
@@ -290,11 +290,11 @@ class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

-    @unittest.skip("Esm does not support embedding resizing")
+    @unittest.skip(reason="Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

-    @unittest.skip("Esm does not support embedding resizing")
+    @unittest.skip(reason="Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass
...
@@ -184,7 +184,7 @@ class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

-    @unittest.skip("Does not support attention outputs")
+    @unittest.skip(reason="Does not support attention outputs")
    def test_attention_outputs(self):
        pass
@@ -192,75 +192,77 @@ class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    def test_correct_missing_keys(self):
        pass

-    @unittest.skip("Esm does not support embedding resizing")
+    @unittest.skip(reason="Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

-    @unittest.skip("Esm does not support embedding resizing")
+    @unittest.skip(reason="Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

-    @unittest.skip("ESMFold does not support passing input embeds!")
+    @unittest.skip(reason="ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

-    @unittest.skip("ESMFold does not support head pruning.")
+    @unittest.skip(reason="ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

-    @unittest.skip("ESMFold does not support head pruning.")
+    @unittest.skip(reason="ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

-    @unittest.skip("ESMFold does not support head pruning.")
+    @unittest.skip(reason="ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

-    @unittest.skip("ESMFold does not support head pruning.")
+    @unittest.skip(reason="ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

-    @unittest.skip("ESMFold does not support head pruning.")
+    @unittest.skip(reason="ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

-    @unittest.skip("ESMFold does not output hidden states in the normal way.")
+    @unittest.skip(reason="ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

-    @unittest.skip("ESMfold does not output hidden states in the normal way.")
+    @unittest.skip(reason="ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

-    @unittest.skip("ESMFold only has one output format.")
+    @unittest.skip(reason="ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

-    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
+    @unittest.skip(reason="This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

-    @unittest.skip("ESMFold does not support input chunking.")
+    @unittest.skip(reason="ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

-    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
+    @unittest.skip(
+        reason="ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments."
+    )
    def test_initialization(self):
        pass

-    @unittest.skip("ESMFold doesn't support torchscript compilation.")
+    @unittest.skip(reason="ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

-    @unittest.skip("ESMFold doesn't support torchscript compilation.")
+    @unittest.skip(reason="ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

-    @unittest.skip("ESMFold doesn't support torchscript compilation.")
+    @unittest.skip(reason="ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

-    @unittest.skip("ESMFold doesn't support data parallel.")
+    @unittest.skip(reason="ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass
...
@@ -381,7 +381,7 @@ class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
-                return
+                self.skipTest(reason="Model does not support cache")

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
@@ -390,7 +390,7 @@ class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
-                return
+                self.skipTest(reason="Model does not return past_key_values")

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
...
@@ -174,7 +174,7 @@ class FastSpeech2ConformerTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    def test_convert_tokens_to_string_format(self):
        pass

-    @unittest.skip("FastSpeech2Conformer tokenizer does not support pairs.")
+    @unittest.skip(reason="FastSpeech2Conformer tokenizer does not support pairs.")
    def test_maximum_encoding_length_pair_input(self):
        pass
...
@@ -477,7 +477,7 @@ class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
-                return
+                self.skipTest(reason="FlauBertForMultipleChoice behaves incorrectly in JIT environments.")

            config.torchscript = True
            model = model_class(config=config)
...
@@ -176,8 +176,8 @@ class FlavaImageModelTest(ModelTesterMixin, unittest.TestCase):
    def test_config(self):
        self.config_tester.run_common_tests()

+    @unittest.skip("Flava does not use input_ids")
    def test_inputs_embeds(self):
-        # FLAVA does not use inputs_embeds
        pass

    def test_model_get_set_embeddings(self):
@@ -300,9 +300,11 @@ class FlavaImageModelTest(ModelTesterMixin, unittest.TestCase):
        check_hidden_states_output(inputs_dict, config, model_class)

+    @unittest.skip
    def test_training(self):
        pass

+    @unittest.skip
    def test_training_gradient_checkpointing(self):
        pass
@@ -318,13 +320,13 @@ class FlavaImageModelTest(ModelTesterMixin, unittest.TestCase):
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

-    # skip this test as FlavaImageModel has no base class and is
-    # not available in MODEL_MAPPING
+    @unittest.skip(reason="FlavaImageModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    # skip this test as FlavaImageModel has no base class and is
    # not available in MODEL_MAPPING
+    @unittest.skip(reason="FlavaImageModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass
@@ -459,9 +461,11 @@ class FlavaTextModelTest(ModelTesterMixin, unittest.TestCase):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

+    @unittest.skip
    def test_training(self):
        pass

+    @unittest.skip
    def test_training_gradient_checkpointing(self):
        pass
@@ -477,17 +481,16 @@ class FlavaTextModelTest(ModelTesterMixin, unittest.TestCase):
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

+    @unittest.skip(reason="FLAVA does not use input_embeds")
    def test_inputs_embeds(self):
        # FLAVA does not use inputs_embeds
        pass

-    # skip this test as FlavaTextModel has no base class and is
-    # not available in MODEL_MAPPING
+    @unittest.skip(reason="FlavaTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

-    # skip this test as FlavaTextModel has no base class and is
-    # not available in MODEL_MAPPING
+    @unittest.skip(reason="FlavaTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass
@@ -619,13 +622,15 @@ class FlavaMultimodalModelTest(ModelTesterMixin, unittest.TestCase):
        expected_arg_names = ["hidden_states"]
        self.assertListEqual(arg_names[:1], expected_arg_names)

+    @unittest.skip("FLAVA does not have input embeddings")
    def test_model_get_set_embeddings(self):
-        # No embedding in multimodal model
        pass

+    @unittest.skip
    def test_training(self):
        pass

+    @unittest.skip
    def test_training_gradient_checkpointing(self):
        pass
@@ -641,17 +646,15 @@ class FlavaMultimodalModelTest(ModelTesterMixin, unittest.TestCase):
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

+    @unittest.skip(reason="FLAVA does not use input_embeds")
    def test_inputs_embeds(self):
-        # FLAVA does not use inputs_embeds
        pass

-    # skip this test as FlavaMultimodalModel has no base class and is
-    # not available in MODEL_MAPPING
+    @unittest.skip(reason="FlavaMultimodalModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

-    # skip this test as FlavaMultimodalModel has no base class and is
-    # not available in MODEL_MAPPING
+    @unittest.skip(reason="FlavaMultimodalModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass
@@ -742,20 +745,23 @@ class FlavaImageCodebookTest(ModelTesterMixin, unittest.TestCase):
    def test_attention_outputs(self):
        pass

+    @unittest.skip(reason="No embedding in multimodal model")
    def test_model_get_set_embeddings(self):
-        # No embedding in multimodal model
        pass

+    @unittest.skip
    def test_training(self):
        pass

+    @unittest.skip
    def test_hidden_states_output(self):
        pass

+    @unittest.skip(reason="FlavaImageCodebook has no attentions")
    def test_retain_grad_hidden_states_attentions(self):
-        # no attentions
        pass

+    @unittest.skip
    def test_training_gradient_checkpointing(self):
        pass
@@ -771,20 +777,19 @@ class FlavaImageCodebookTest(ModelTesterMixin, unittest.TestCase):
    def test_training_gradient_checkpointing_use_reentrant_false(self):
        pass

+    @unittest.skip(reason="FLAVA does not use input_embeds")
    def test_inputs_embeds(self):
-        # FLAVA does not use inputs_embeds
        pass

+    @unittest.skip
    def test_model_outputs_equivalence(self):
        pass

-    # skip this test as FlavaImageCodebook has no base class and is
-    # not available in MODEL_MAPPING
+    @unittest.skip(reason="FlavaImageCodebook has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

-    # skip this test as FlavaImageCodebook has no base class and is
-    # not available in MODEL_MAPPING
+    @unittest.skip(reason="FlavaImageCodebook has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass
@@ -931,19 +936,19 @@ class FlavaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_model(*config_and_inputs)

-    # hidden_states are tested in individual model tests
+    @unittest.skip(reason="tested in individual model tests")
    def test_hidden_states_output(self):
        pass

-    # input_embeds are tested in individual model tests
+    @unittest.skip(reason="tested in individual model tests")
    def test_inputs_embeds(self):
        pass

-    # tested in individual model tests
+    @unittest.skip(reason="tested in individual model tests")
    def test_retain_grad_hidden_states_attentions(self):
        pass

-    # FlavaModel does not have input/output embeddings
+    @unittest.skip(reason="FlavaModel does not have input/output embeddings")
    def test_model_get_set_embeddings(self):
        pass
@@ -973,7 +978,7 @@ class FlavaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    def _create_and_check_torchscript(self, config, inputs_dict):
        if not self.test_torchscript:
-            return
+            self.skipTest(reason="test_torchscript is set to False")

        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        configs_no_init.torchscript = True
...
@@ -321,6 +321,7 @@ class FNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
        return inputs_dict

    # Overriden Tests
+    @unittest.skip
    def test_attention_outputs(self):
        pass
...
@@ -69,7 +69,7 @@ class FNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
-            return
+            self.skipTest(reason="test_rust_tokenizer is set to False")

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
@@ -194,7 +194,7 @@ class FNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    def test_padding(self, max_length=50):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
-            return
+            self.skipTest(reason="test_slow_tokenizer is set to False")

        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
...
@@ -263,7 +263,7 @@ class FSMTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
            model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
        self.assertEqual(info["missing_keys"], [])

-    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
+    @unittest.skip(reason="Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        model = FSMTModel(config).to(torch_device)
@@ -312,23 +312,23 @@ class FSMTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
            2,
        )

-    @unittest.skip("can't be implemented for FSMT due to dual vocab.")
+    @unittest.skip(reason="can't be implemented for FSMT due to dual vocab.")
    def test_resize_tokens_embeddings(self):
        pass

-    @unittest.skip("Passing inputs_embeds not implemented for FSMT.")
+    @unittest.skip(reason="Passing inputs_embeds not implemented for FSMT.")
    def test_inputs_embeds(self):
        pass

-    @unittest.skip("Input ids is required for FSMT.")
+    @unittest.skip(reason="Input ids is required for FSMT.")
    def test_inputs_embeds_matches_input_ids(self):
        pass

-    @unittest.skip("model weights aren't tied in FSMT.")
+    @unittest.skip(reason="model weights aren't tied in FSMT.")
    def test_tie_model_weights(self):
        pass

-    @unittest.skip("TODO: Decoder embeddings cannot be resized at the moment")
+    @unittest.skip(reason="TODO: Decoder embeddings cannot be resized at the moment")
    def test_resize_embeddings_untied(self):
        pass
@@ -582,7 +582,7 @@ class TestSinusoidalPositionalEmbeddings(unittest.TestCase):
        # odd num_embeddings is allowed
        SinusoidalPositionalEmbedding(num_positions=5, embedding_dim=4, padding_idx=self.padding_idx).to(torch_device)

-    @unittest.skip("different from marian (needs more research)")
+    @unittest.skip(reason="different from marian (needs more research)")
    def test_positional_emb_weights_against_marian(self):
        desired_weights = torch.tensor(
            [
...
@@ -160,10 +160,10 @@ class FSMTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
        expected = ["us", "a</w>", "is</w>", "un", "i", "ted</w>", "st", "ates</w>", "of</w>", "am", "er", "ica</w>"]
        self.assertListEqual(tokens, expected)

-    @unittest.skip("FSMTConfig.__init__ requires non-optional args")
+    @unittest.skip(reason="FSMTConfig.__init__ requires non-optional args")
    def test_torch_encode_plus_sent_to_model(self):
        pass

-    @unittest.skip("FSMTConfig.__init__ requires non-optional args")
+    @unittest.skip(reason="FSMTConfig.__init__ requires non-optional args")
    def test_np_encode_plus_sent_to_model(self):
        pass
@@ -295,17 +295,17 @@ class FuyuModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
        pass

    # TODO: Fix me (once this model gets more usage)
-    @unittest.skip("Does not work on the tiny model.")
+    @unittest.skip(reason="Does not work on the tiny model.")
    def test_disk_offload_bin(self):
        super().test_disk_offload()

    # TODO: Fix me (once this model gets more usage)
-    @unittest.skip("Does not work on the tiny model.")
+    @unittest.skip(reason="Does not work on the tiny model.")
    def test_disk_offload_safetensors(self):
        super().test_disk_offload()

    # TODO: Fix me (once this model gets more usage)
-    @unittest.skip("Does not work on the tiny model.")
+    @unittest.skip(reason="Does not work on the tiny model.")
    def test_model_parallelism(self):
        super().test_model_parallelism()
...
@@ -398,11 +398,11 @@ class GemmaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
            (self.model_tester.batch_size, self.model_tester.seq_length, self.model_tester.num_labels),
        )

-    @unittest.skip("Gemma buffers include complex numbers, which breaks this test")
+    @unittest.skip(reason="Gemma buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

-    @unittest.skip("Gemma uses GQA on all models so the KV cache is a non standard format")
+    @unittest.skip(reason="Gemma uses GQA on all models so the KV cache is a non standard format")
    def test_past_key_values_format(self):
        pass
@@ -456,7 +456,7 @@ class GemmaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    @pytest.mark.flash_attn_test
    @slow
    def test_flash_attn_2_inference_equivalence_right_padding(self):
-        self.skipTest("Gemma flash attention does not support right padding")
+        self.skipTest(reason="Gemma flash attention does not support right padding")

    @require_torch_sdpa
    @require_torch_gpu
@@ -464,7 +464,7 @@ class GemmaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    def test_sdpa_equivalence(self):
        for model_class in self.all_model_classes:
            if not model_class._supports_sdpa:
-                return
+                self.skipTest(reason="Model does not support SDPA")

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)
@@ -498,7 +498,7 @@ class GemmaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    def test_flash_attn_2_equivalence(self):
        for model_class in self.all_model_classes:
            if not model_class._supports_flash_attn_2:
-                return
+                self.skipTest(reason="Model does not support Flash Attention 2")

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)
@@ -749,7 +749,7 @@ class GemmaIntegrationTest(unittest.TestCase):
        self.assertEqual(output_text, EXPECTED_TEXTS)

-    @unittest.skip("The test will not fit our CI runners")
+    @unittest.skip(reason="The test will not fit our CI runners")
    @require_read_token
    def test_model_7b_fp32(self):
        model_id = "google/gemma-7b"
@@ -877,7 +877,7 @@ class GemmaIntegrationTest(unittest.TestCase):
        # `torch==2.2` will throw an error on this test (as in other compilation tests), but torch==2.1.2 and torch>2.2
        # work as intended. See https://github.com/pytorch/pytorch/issues/121943
        if version.parse(torch.__version__) < version.parse("2.3.0"):
-            self.skipTest("This test requires torch >= 2.3 to run.")
+            self.skipTest(reason="This test requires torch >= 2.3 to run.")

        NUM_TOKENS_TO_GENERATE = 40
        # Note on `EXPECTED_TEXT_COMPLETION`'s diff: the current value matches the original test if the original test
...
@@ -23,7 +23,6 @@ from transformers import (
    AddedToken,
    GemmaTokenizer,
    GemmaTokenizerFast,
-    is_torch_available,
)

from transformers.convert_slow_tokenizer import convert_slow_tokenizer
from transformers.testing_utils import (
@@ -43,10 +42,6 @@ from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

-if is_torch_available():
-    pass
-

@require_sentencepiece
@require_tokenizers
class GemmaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
@@ -68,7 +63,7 @@ class GemmaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    @require_torch
    def test_batch_tokenization(self):
        if not self.test_seq2seq:
-            return
+            self.skipTest(reason="test_seq2seq is set to False")

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
@@ -88,7 +83,7 @@ class GemmaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
                    return_tensors="pt",
                )
            except NotImplementedError:
-                return
+                self.skipTest(reason="Encountered NotImplementedError when calling tokenizer")

            self.assertEqual(batch.input_ids.shape[1], 3)
            # max_target_length will default to max_length if not specified
            batch = tokenizer(text, max_length=3, return_tensors="pt")
@@ -99,7 +94,7 @@ class GemmaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
            self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
            self.assertNotIn("decoder_input_ids", batch_encoder_only)

-    @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
+    @unittest.skip(reason="Unfortunately way too slow to build a BPE with SentencePiece.")
    def test_save_slow_from_fast_and_reload_fast(self):
        pass
@@ -147,15 +142,15 @@ class GemmaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
            padding=False,
        )

-    @unittest.skip("worker 'gw4' crashed on CI, passing locally.")
+    @unittest.skip(reason="worker 'gw4' crashed on CI, passing locally.")
    def test_pickle_subword_regularization_tokenizer(self):
        pass

-    @unittest.skip("worker 'gw4' crashed on CI, passing locally.")
+    @unittest.skip(reason="worker 'gw4' crashed on CI, passing locally.")
    def test_subword_regularization_tokenizer(self):
        pass

-    @unittest.skip("Skipping")
+    @unittest.skip(reason="Skipping")
    def test_torch_encode_plus_sent_to_model(self):
        pass
@@ -227,7 +222,7 @@ class GemmaIntegrationTest(unittest.TestCase):
        self.tokenizer.add_eos_token = False
        self.rust_tokenizer.add_eos_token = False

-    @unittest.skip("Not super important and always failing. Let's skip it")
+    @unittest.skip(reason="Not super important and always failing. Let's skip it")
    @slow
    def test_conversion(self):
        # This is excruciatingly slow since it has to recreate the entire merge
...
@@ -167,9 +167,11 @@ class GitVisionModelTest(ModelTesterMixin, unittest.TestCase):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

+    @unittest.skip
    def test_training(self):
        pass

+    @unittest.skip
    def test_training_gradient_checkpointing(self):
        pass
...
@@ -168,11 +168,11 @@ class GLPNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

-    @unittest.skip("GLPN does not use inputs_embeds")
+    @unittest.skip(reason="GLPN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

-    @unittest.skip("GLPN does not have get_input_embeddings method and get_output_embeddings methods")
+    @unittest.skip(reason="GLPN does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_get_set_embeddings(self):
        pass
@@ -283,7 +283,7 @@ class GLPNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    def test_training(self):
        if not self.model_tester.is_training:
-            return
+            self.skipTest(reason="model_tester.is_training is set to False")

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
...
@@ -98,7 +98,7 @@ class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
-            return
+            self.skipTest(reason="test_rust_tokenizer is set to False")

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
@@ -126,6 +126,7 @@ class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

+    @unittest.skip
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
@@ -247,7 +248,7 @@ class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
        self.assertTrue(decode_s.startswith(bos_token))
        self.assertTrue(all(d.startswith(bos_token) for d in decode_s2))

-    # tokenizer has no padding token
+    @unittest.skip(reason="tokenizer has no padding token")
    def test_padding_different_model_input_name(self):
        pass
@@ -331,7 +332,7 @@ class OPTTokenizationTest(unittest.TestCase):
        # Same as above
        self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758])

-    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
+    @unittest.skip(reason="This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
...
@@ -458,27 +458,27 @@ class GPTBigCodeModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    def test_config(self):
        self.config_tester.run_common_tests()

-    @unittest.skip("MQA models does not support retain_grad")
+    @unittest.skip(reason="MQA models does not support retain_grad")
    def test_retain_grad_hidden_states_attentions(self):
        pass

-    @unittest.skip("Contrastive search not supported due to non-standard caching mechanism")
+    @unittest.skip(reason="Contrastive search not supported due to non-standard caching mechanism")
    def test_contrastive_generate(self):
        pass

-    @unittest.skip("Contrastive search not supported due to non-standard caching mechanism")
+    @unittest.skip(reason="Contrastive search not supported due to non-standard caching mechanism")
    def test_contrastive_generate_dict_outputs_use_cache(self):
        pass

-    @unittest.skip("CPU offload seems to be broken for some reason - tiny models keep hitting corner cases")
+    @unittest.skip(reason="CPU offload seems to be broken for some reason - tiny models keep hitting corner cases")
    def test_cpu_offload(self):
        pass

-    @unittest.skip("Disk offload seems to be broken for some reason - tiny models keep hitting corner cases")
+    @unittest.skip(reason="Disk offload seems to be broken for some reason - tiny models keep hitting corner cases")
    def test_disk_offload(self):
        pass

-    @unittest.skip("BigCodeGPT has a non-standard KV cache format.")
+    @unittest.skip(reason="BigCodeGPT has a non-standard KV cache format.")
    def test_past_key_values_format(self):
        pass
...
@@ -128,10 +128,11 @@ class GPTNeoXJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
        assert encoded_sentence == ids_1
        assert encoded_pair == ids_1 + ids_2

+    @unittest.skip
    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

+    @unittest.skip(reason="tokenizer has no padding token")
    def test_padding_different_model_input_name(self):
-        # tokenizer has no padding token
        pass