Unverified Commit 1de7dc74 authored by amyeroberts, committed by GitHub

Skip tests properly (#31308)

* Skip tests properly

* [test_all]

* Add 'reason' as kwarg for skipTest

* [test_all] Fix up

* [test_all]
parent 1f9f57ab
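The pattern this commit applies throughout: an early `return` or `pass` in a test body makes unittest report the test as passed even though nothing was exercised, whereas `self.skipTest(reason=...)` raises `unittest.SkipTest` so the runner records an explicit skip together with its reason. A minimal sketch of the difference (the class and the `supports_feature` flag are illustrative, not from this diff):

import unittest


class ExampleModelTest(unittest.TestCase):
    # Hypothetical flag standing in for checks such as
    # `self.has_attentions` or `self.model_tester.is_training`.
    supports_feature = False

    def test_silently_passes(self):
        if not self.supports_feature:
            return  # reported as "ok": nothing was asserted, nothing was flagged
        self.assertTrue(self.supports_feature)

    def test_skips_properly(self):
        if not self.supports_feature:
            self.skipTest(reason="supports_feature is set to False")  # reported as skipped
        self.assertTrue(self.supports_feature)


if __name__ == "__main__":
    # verbosity=2 prints "ok" for the first test and
    # "skipped 'supports_feature is set to False'" for the second
    unittest.main(verbosity=2)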
......@@ -245,7 +245,7 @@ class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase
def test_attention_outputs(self):
if not self.has_attentions:
pass
self.skipTest(reason="Model does not have attentions")
else:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
......
......@@ -272,7 +272,7 @@ class ViltModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
def test_training(self):
if not self.model_tester.is_training:
return
self.skipTest(reason="model_tester.is_training is set to False.")
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
......@@ -296,7 +296,7 @@ class ViltModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
def test_training_gradient_checkpointing(self):
if not self.model_tester.is_training:
return
self.skipTest(reason="model_tester.is_training is set to False.")
for model_class in self.all_model_classes:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
......@@ -547,11 +547,11 @@ class ViltForImagesAndTextClassificationModelTest(ViltModelTest, unittest.TestCa
self.model_tester = ViltModelTester(self, modality_type_vocab_size=3, add_multiple_images=True, num_images=2)
self.config_tester = ConfigTester(self, config_class=ViltConfig, hidden_size=37)
@unittest.skip("We only test the model that takes in multiple images")
@unittest.skip(reason="We only test the model that takes in multiple images")
def test_model(self):
pass
@unittest.skip("We only test the model that takes in multiple images")
@unittest.skip(reason="We only test the model that takes in multiple images")
def test_for_token_classification(self):
pass
......
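For tests that should never run, the diff moves the skip message into the explicit `reason` keyword of `unittest.skip`. The decorator already accepts the string positionally, so behaviour is unchanged and only readability improves; it can also be applied bare, as several Whisper and X-CLIP tests later in this diff do, in which case no reason is reported. A short sketch (the class and test names are illustrative):

import unittest


class DecoratorSkipExample(unittest.TestCase):
    @unittest.skip(reason="We only test the model that takes in multiple images")
    def test_with_reason_kwarg(self):
        pass  # never runs; the reason string appears in the test report

    @unittest.skip  # bare form: the test is skipped with an empty reason
    def test_bare_skip(self):
        pass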
......@@ -642,7 +642,7 @@ class Swin2BartModelTest(EncoderDecoderMixin, unittest.TestCase):
(decoder_config.num_attention_heads, cross_attention_input_seq_len, encoder_seq_len),
)
# there are no published pretrained BART-causal checkpoints for now
@unittest.skip(reason="There are no published pretrained BART-causal checkpoints for now")
def test_real_model_save_load_from_pretrained(self):
pass
......@@ -677,7 +677,7 @@ class ViT2TrOCR(EncoderDecoderMixin, unittest.TestCase):
"labels": decoder_input_ids,
}
# there are no published pretrained TrOCR checkpoints for now
@unittest.skip(reason="There are no published pretrained TrOCR checkpoints for now")
def test_real_model_save_load_from_pretrained(self):
pass
......@@ -799,7 +799,7 @@ class LayoutLMv32TrOCR(EncoderDecoderMixin, unittest.TestCase):
)
self.assertEqual(generated_output.shape, (pixel_values.shape[0],) + (decoder_config.max_length,))
@unittest.skip("There are no published pretrained TrOCR checkpoints for now")
@unittest.skip(reason="There are no published pretrained TrOCR checkpoints for now")
def test_real_model_save_load_from_pretrained(self):
pass
......
......@@ -429,7 +429,7 @@ class DeiTRobertaModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
"text_choice_labels": choice_labels,
}
# skip as DeiT is not available in Flax
@unittest.skip(reason="DeiT is not available in Flax")
def test_pt_flax_equivalence(self):
pass
......
......@@ -179,21 +179,21 @@ class VitDetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
super().test_initialization()
# TODO: Fix me (once this model gets more usage)
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
@unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.")
def test_cpu_offload(self):
super().test_cpu_offload()
# TODO: Fix me (once this model gets more usage)
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
@unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.")
def test_disk_offload_bin(self):
super().test_disk_offload()
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
@unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.")
def test_disk_offload_safetensors(self):
super().test_disk_offload()
# TODO: Fix me (once this model gets more usage)
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
@unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.")
def test_model_parallelism(self):
super().test_model_parallelism()
......
......@@ -181,7 +181,7 @@ class VitsModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
def test_pipeline_feature_extraction(self):
super().test_pipeline_feature_extraction()
@unittest.skip("Need to fix this after #26538")
@unittest.skip(reason="Need to fix this after #26538")
def test_model_forward(self):
set_seed(12345)
global_rng.seed(12345)
......@@ -212,11 +212,11 @@ class VitsModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
with torch.no_grad():
_ = model(**self._prepare_for_class(inputs_dict, model_class)).waveform
@unittest.skip("VITS is not deterministic")
@unittest.skip(reason="VITS is not deterministic")
def test_determinism(self):
pass
@unittest.skip("VITS is not deterministic")
@unittest.skip(reason="VITS is not deterministic")
def test_batching_equivalence(self):
pass
......@@ -260,11 +260,11 @@ class VitsModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
msg=f"Parameter {name} of model {model_class} seems not properly initialized",
)
@unittest.skip("VITS has no inputs_embeds")
@unittest.skip(reason="VITS has no inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip("VITS has no input embeddings")
@unittest.skip(reason="VITS has no input embeddings")
def test_model_get_set_embeddings(self):
pass
......
......@@ -62,15 +62,15 @@ class VitsTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
ids = tokenizer.encode(txt, add_special_tokens=False)
return txt, ids
@unittest.skip("Adding multicharacter tokens does not work with the VITS tokenizer")
@unittest.skip(reason="Adding multicharacter tokens does not work with the VITS tokenizer")
def test_add_tokens_tokenizer(self):
pass
@unittest.skip("Adding multicharacter tokens does not work with the VITS tokenizer")
@unittest.skip(reason="Adding multicharacter tokens does not work with the VITS tokenizer")
def test_encode_decode_with_spaces(self):
pass
@unittest.skip("The VITS tokenizer does not support `is_split_into_words`")
@unittest.skip(reason="The VITS tokenizer does not support `is_split_into_words`")
def test_pretokenized_inputs(self):
pass
......@@ -101,7 +101,7 @@ class VitsTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
shutil.rmtree(tmpdirname)
@unittest.skip("Adding multicharacter tokens does not work with the VITS tokenizer")
@unittest.skip(reason="Adding multicharacter tokens does not work with the VITS tokenizer")
def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
pass
......
......@@ -553,32 +553,29 @@ class Wav2Vec2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_labels_out_of_vocab(*config_and_inputs)
# Wav2Vec2 has no inputs_embeds
@unittest.skip(reason="Model has no inputs_embeds")
def test_inputs_embeds(self):
pass
# `input_ids` is renamed to `input_values`
@unittest.skip(reason="Model has input_values instead of input_ids")
def test_forward_signature(self):
pass
# Wav2Vec2 cannot resize token embeddings
# since it has no token embeddings
@unittest.skip(reason="Model has no token embeddings")
def test_resize_tokens_embeddings(self):
pass
# Wav2Vec2 has no inputs_embeds
# and thus the `get_input_embeddings` fn
# is not implemented
@unittest.skip(reason="Model has no inputs_embeds")
def test_model_get_set_embeddings(self):
pass
@is_pt_flax_cross_test
# non-robust architecture does not exist in Flax
@unittest.skip(reason="Non-robust architecture does not exist in Flax")
def test_equivalence_flax_to_pt(self):
pass
@is_pt_flax_cross_test
# non-robust architecture does not exist in Flax
@unittest.skip(reason="Non-robust architecture does not exist in Flax")
def test_equivalence_pt_to_flax(self):
pass
......@@ -729,10 +726,10 @@ class Wav2Vec2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase
# Wav2Vec2 cannot be torchscripted because of group norm.
def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False):
# TODO: fix it
self.skipTest("torch 2.1 breaks torch fx tests for wav2vec2/hubert.")
self.skipTest(reason="torch 2.1 breaks torch fx tests for wav2vec2/hubert.")
if not is_torch_fx_available() or not self.fx_compatible:
return
self.skipTest(reason="torch fx not available or not compatible with this model")
configs_no_init = _config_zero_init(config) # To be sure we have no Nan
configs_no_init.return_dict = False
......@@ -907,22 +904,19 @@ class Wav2Vec2RobustModelTest(ModelTesterMixin, unittest.TestCase):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_labels_out_of_vocab(*config_and_inputs)
# Wav2Vec2 has no inputs_embeds
@unittest.skip(reason="Model has no inputs_embeds")
def test_inputs_embeds(self):
pass
# `input_ids` is renamed to `input_values`
@unittest.skip(reason="Model has input_values instead of input_ids")
def test_forward_signature(self):
pass
# Wav2Vec2 cannot resize token embeddings
# since it has no token embeddings
@unittest.skip(reason="Model has no token embeddings")
def test_resize_tokens_embeddings(self):
pass
# Wav2Vec2 has no inputs_embeds
# and thus the `get_input_embeddings` fn
# is not implemented
@unittest.skip(reason="Model has no inputs_embeds")
def test_model_get_set_embeddings(self):
pass
......
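One consequence of `skipTest` raising `unittest.SkipTest` is that nothing after the call executes: in `_create_and_check_torch_fx_tracing` above, the unconditional skip at the top of the method means the `is_torch_fx_available()` check and the tracing body below it are unreachable until the TODO is resolved. A tiny sketch of the mechanism:

import unittest


class SkipMechanicsExample(unittest.TestCase):
    def test_skiptest_raises(self):
        # skipTest is implemented as `raise unittest.SkipTest(reason)`, so no
        # line after an unguarded call is ever reached.
        with self.assertRaises(unittest.SkipTest):
            self.skipTest(reason="demonstration only")
        # Catching the exception here keeps this test running; normally the
        # runner catches it and records the test as skipped.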
......@@ -762,11 +762,11 @@ class Wav2Vec2CTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
self.assertEqual(tokens[0], tokenizer.eos_token_id)
self.assertEqual(tokens[-3], tokenizer.pad_token_id)
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
@unittest.skip(reason="The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
def test_tf_encode_plus_sent_to_model(self):
pass
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
@unittest.skip(reason="The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
def test_torch_encode_plus_sent_to_model(self):
pass
......
......@@ -512,32 +512,29 @@ class Wav2Vec2ConformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_labels_out_of_vocab(*config_and_inputs)
# Wav2Vec2Conformer has no inputs_embeds
@unittest.skip(reason="Wav2Vec2Conformer has no inputs_embeds")
def test_inputs_embeds(self):
pass
# `input_ids` is renamed to `input_values`
@unittest.skip(reason="Wav2Vec2Conformer has input_values instead of input_ids")
def test_forward_signature(self):
pass
# Wav2Vec2Conformer cannot resize token embeddings
# since it has no token embeddings
@unittest.skip(reason="Wav2Vec2Conformer has no token embeddings")
def test_resize_tokens_embeddings(self):
pass
# Wav2Vec2Conformer has no inputs_embeds
# and thus the `get_input_embeddings` fn
# is not implemented
@unittest.skip(reason="Wav2Vec2Conformer has no inputs_embeds")
def test_model_get_set_embeddings(self):
pass
@is_pt_flax_cross_test
# non-robust architecture does not exist in Flax
@unittest.skip(reason="Non-robust architecture does not exist in Flax")
def test_equivalence_flax_to_pt(self):
pass
@is_pt_flax_cross_test
# non-robust architecture does not exist in Flax
@unittest.skip(reason="Non-robust architecture does not exist in Flax")
def test_equivalence_pt_to_flax(self):
pass
......
......@@ -325,19 +325,21 @@ class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
check_list_tuples_equal(outputs_char_batch, outputs_char)
@unittest.skip("Wav2Vec2PhonemeTokenizer always lowercases letters to correctly map to phonemes")
@unittest.skip(reason="Wav2Vec2PhonemeTokenizer always lowercases letters to correctly map to phonemes")
def test_added_tokens_do_lower_case(self):
pass
@unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
@unittest.skip(reason="Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
def test_encode_decode_with_spaces(self):
pass
@unittest.skip("encodes text to ids, but decodes ids to phonemes -> not possible to have internal consistency")
@unittest.skip(
reason="encodes text to ids, but decodes ids to phonemes -> not possible to have internal consistency"
)
def test_internal_consistency(self):
pass
@unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing")
@unittest.skip(reason="Wav2Vec2PhonemeModel has no max model length => no testing")
def test_add_tokens_tokenizer(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
......@@ -389,11 +391,11 @@ class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
self.assertEqual(tokens[0], tokenizer.eos_token_id)
self.assertEqual(tokens[-3], tokenizer.pad_token_id)
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
@unittest.skip(reason="The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
def test_tf_encode_plus_sent_to_model(self):
pass
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
@unittest.skip(reason="The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
def test_torch_encode_plus_sent_to_model(self):
pass
......
......@@ -371,22 +371,19 @@ class WavLMModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_labels_out_of_vocab(*config_and_inputs)
# WavLM has no inputs_embeds
@unittest.skip(reason="WavLM has no inputs_embeds")
def test_inputs_embeds(self):
pass
# `input_ids` is renamed to `input_values`
@unittest.skip(reason="WavLM has no input_ids")
def test_forward_signature(self):
pass
# WavLM cannot resize token embeddings
# since it has no token embeddings
@unittest.skip(reason="WavLM has no token embeddings")
def test_resize_tokens_embeddings(self):
pass
# WavLM has no inputs_embeds
# and thus the `get_input_embeddings` fn
# is not implemented
@unittest.skip(reason="WavLM has no inputs_embeds")
def test_model_get_set_embeddings(self):
pass
......
......@@ -504,9 +504,11 @@ class WhisperModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi
model(**inputs)[0]
# training is not supported yet
@unittest.skip(reason="Training is not supported yet")
def test_training(self):
pass
@unittest.skip(reason="Training is not supported yet")
def test_training_gradient_checkpointing(self):
pass
......@@ -522,6 +524,7 @@ class WhisperModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip
def test_generate_with_head_masking(self):
pass
......@@ -736,7 +739,7 @@ class WhisperModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi
inputs_dict,
) = self.model_tester.prepare_config_and_inputs_for_common()
if not self.test_resize_embeddings:
return
self.skipTest(reason="test_resize_embeddings is False")
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
......@@ -784,13 +787,13 @@ class WhisperModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi
inputs_dict,
) = self.model_tester.prepare_config_and_inputs_for_common()
if not self.test_resize_embeddings:
return
self.skipTest(reason="test_resize_embeddings is False")
original_config.tie_word_embeddings = False
# if the model cannot untie embeddings -> skip the test
if original_config.tie_word_embeddings:
return
self.skipTest(reason="Model cannot untie embeddings")
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
......@@ -827,6 +830,7 @@ class WhisperModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
@unittest.skip
def test_generate_without_input_ids(self):
pass
......@@ -901,7 +905,7 @@ class WhisperModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi
for model_class in self.all_model_classes:
if not model_class._supports_flash_attn_2:
return
self.skipTest(reason="Model does not support Flash Attention 2")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
......@@ -947,7 +951,7 @@ class WhisperModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi
for model_class in self.all_model_classes:
if not model_class._supports_flash_attn_2:
return
self.skipTest(reason="Model does not support flash_attention_2")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = model_class(config)
......@@ -996,7 +1000,7 @@ class WhisperModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi
def _create_and_check_torchscript(self, config, inputs_dict):
if not self.test_torchscript:
return
self.skipTest(reason="test_torchscript is set to False")
configs_no_init = _config_zero_init(config) # To be sure we have no Nan
configs_no_init.torchscript = True
......@@ -1096,8 +1100,7 @@ class WhisperModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi
fx_model_class_name = "Flax" + model_class.__name__
if not hasattr(transformers, fx_model_class_name):
# no flax model exists for this class
return
self.skipTest(reason="No Flax model exists for this class")
# Output all for aggressive testing
config.output_hidden_states = True
......@@ -1169,8 +1172,7 @@ class WhisperModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi
fx_model_class_name = "Flax" + model_class.__name__
if not hasattr(transformers, fx_model_class_name):
# no flax model exists for this class
return
self.skipTest(reason="No Flax model exists for this class")
# Output all for aggressive testing
config.output_hidden_states = True
......@@ -3133,8 +3135,9 @@ class WhisperEncoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.
def test_model_parallelism(self):
pass
# input embeds is meaningless for an encoder-only acoustic model
@unittest.skip(reason="Not applicable for an encoder-only acoustic model")
def test_inputs_embeds(self):
# input embeds is meaningless for an encoder-only acoustic model
pass
# the equivalent test is passing the encoder outputs directly to the model
......@@ -3181,6 +3184,7 @@ class WhisperEncoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.
self.assertTrue(x is None or isinstance(x, torch.nn.Conv1d))
# WhisperEncoder cannot resize token embeddings since it has no token embeddings
@unittest.skip(reason="Model has no token embeddings")
def test_resize_tokens_embeddings(self):
pass
......@@ -3194,8 +3198,7 @@ class WhisperEncoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.
fx_model_class_name = "Flax" + model_class.__name__
if not hasattr(transformers, fx_model_class_name):
# no flax model exists for this class
return
self.skipTest(reason="Flax model does not exist")
# Output all for aggressive testing
config.output_hidden_states = True
......@@ -3267,8 +3270,7 @@ class WhisperEncoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.
fx_model_class_name = "Flax" + model_class.__name__
if not hasattr(transformers, fx_model_class_name):
# no flax model exists for this class
return
self.skipTest(reason="Flax model does not exist")
# Output all for aggressive testing
config.output_hidden_states = True
......@@ -3562,17 +3564,16 @@ class WhisperStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin,
config=config, input_ids=inputs_dict["input_ids"]
)
@unittest.skip("Generate needs input ids")
@unittest.skip(reason="Generate needs input ids")
def test_generate_without_input_ids(self):
# generate only works with input ids for whisper
pass
@unittest.skip("Decoder can't keep attention grads")
@unittest.skip(reason="Decoder can't keep attention grads")
def test_retain_grad_hidden_states_attentions(self):
# decoder cannot keep gradients
return
@unittest.skip("The model doesn't support fast init from base")
@unittest.skip(reason="The model doesn't support fast init from base")
def test_save_load_fast_init_from_base(self):
pass
......
......@@ -89,12 +89,15 @@ class WhisperTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
["I", "Ġwas", "Ġborn", "Ġin", "Ġ9", "2000", ",", "Ġand", "Ġthis", "Ġis", "Ġfals", "é", "."], # fmt: skip
)
@unittest.skip
def test_tokenizer_slow_store_full_signature(self):
pass
@unittest.skip
def test_tokenizer_fast_store_full_signature(self):
pass
@unittest.skip
def test_special_tokens_initialization(self):
# Whisper relies on specific additional special tokens, so we skip this
# general test. In particular, this test loads fast tokenizer from slow
......
......@@ -186,9 +186,11 @@ class XCLIPVisionModelTest(ModelTesterMixin, unittest.TestCase):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip
def test_training(self):
pass
@unittest.skip
def test_training_gradient_checkpointing(self):
pass
......@@ -420,9 +422,11 @@ class XCLIPTextModelTest(ModelTesterMixin, unittest.TestCase):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip
def test_training(self):
pass
@unittest.skip
def test_training_gradient_checkpointing(self):
pass
......@@ -596,7 +600,7 @@ class XCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
def _create_and_check_torchscript(self, config, inputs_dict):
if not self.test_torchscript:
return
self.skipTest(reason="test_torchscript is set to False")
configs_no_init = _config_zero_init(config) # To be sure we have no Nan
configs_no_init.torchscript = True
......
......@@ -353,7 +353,7 @@ class XGLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin
model = XGLMModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
@unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.")
def test_model_parallelism(self):
super().test_model_parallelism()
......
......@@ -150,7 +150,7 @@ class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
self.skipTest(reason="test_rust_tokenizer is set to False")
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
......
......@@ -143,7 +143,7 @@ class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
def test_save_pretrained(self):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
self.skipTest(reason="test_slow_tokenizer is set to False")
self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
......@@ -224,7 +224,7 @@ class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
self.skipTest(reason="test_rust_tokenizer is set to False")
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
......
......@@ -612,8 +612,8 @@ class XLNetModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixi
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlnet_qa(*config_and_inputs)
@unittest.skip(reason="xlnet cannot keep gradients in attentions or hidden states")
def test_retain_grad_hidden_states_attentions(self):
# xlnet cannot keep gradients in attentions or hidden states
return
# overwrite from test_modeling_common
......
......@@ -206,8 +206,8 @@ class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="YOLOS does not use inputs_embeds")
def test_inputs_embeds(self):
# YOLOS does not use inputs_embeds
pass
def test_model_get_set_embeddings(self):
......
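With explicit reasons attached, a run such as `pytest -rs tests/` (the `-r s` report flag summarizes skipped tests) lists every skipped test together with its reason instead of letting it count as a silent pass; the `[test_all]` markers in the commit message above are used to trigger the full test suite on CI so all of the touched test files are re-run.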