"tests/t5/test_modeling_tf_t5.py" did not exist on "c8d3fa0dfd191c0272f8de5027430e2fc789b22c"
Unverified Commit 1de7dc74 authored by amyeroberts, committed by GitHub

Skip tests properly (#31308)

* Skip tests properly

* [test_all]

* Add 'reason' as kwarg for skipTest

* [test_all] Fix up

* [test_all]
parent 1f9f57ab
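In short, the commit replaces two anti-patterns. A bare `return` inside a test silently reports the test as passed even though nothing was checked, so it becomes `self.skipTest(reason=...)`, which raises `unittest.SkipTest` and makes the runner report the test as skipped together with its reason. Likewise, skip messages move from comments (or bare positional strings) into the explicit `reason` kwarg of `@unittest.skip`. A minimal sketch of the pattern — the `ExampleTest` class and its `test_rust_tokenizer` flag are hypothetical stand-ins for the tester mixins touched below:

import unittest


class ExampleTest(unittest.TestCase):
    # Hypothetical flag, mirroring attributes like `test_rust_tokenizer`
    # on the real tester mixins.
    test_rust_tokenizer = False

    def test_before(self):
        if not self.test_rust_tokenizer:
            return  # anti-pattern: counted as PASSED although nothing ran

    def test_after(self):
        if not self.test_rust_tokenizer:
            self.skipTest(reason="test_rust_tokenizer is set to False")  # reported as SKIPPED

    @unittest.skip(reason="explicit `reason` kwarg instead of a bare positional string")
    def test_decorator_style(self):
        pass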
@@ -102,7 +102,7 @@ class ClvpTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     # Copied from transformers.tests.models.gpt2.test_tokenization_gpt2.GPT2TokenizationTest.test_rust_and_python_full_tokenizers
     def test_rust_and_python_full_tokenizers(self):
         if not self.test_rust_tokenizer:
-            return
+            self.skipTest(reason="test_rust_tokenizer is set to False")

         tokenizer = self.get_tokenizer()
         rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
...
@@ -26,7 +26,6 @@ from transformers import (
     AddedToken,
     CodeLlamaTokenizer,
     CodeLlamaTokenizerFast,
-    is_torch_available,
 )
 from transformers.convert_slow_tokenizer import convert_slow_tokenizer
 from transformers.testing_utils import (
@@ -44,10 +43,6 @@ from ...test_tokenization_common import TokenizerTesterMixin
 SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

-if is_torch_available():
-    pass
-

 @require_sentencepiece
 @require_tokenizers
 class CodeLlamaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
@@ -220,7 +215,7 @@ class CodeLlamaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     @require_torch
     def test_batch_tokenization(self):
         if not self.test_seq2seq:
-            return
+            self.skipTest(reason="test_seq2seq is False")

         tokenizers = self.get_tokenizers()
         for tokenizer in tokenizers:
@@ -240,7 +235,7 @@ class CodeLlamaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
                         return_tensors="pt",
                     )
                 except NotImplementedError:
-                    return
+                    self.skipTest(reason="Encountered NotImplementedError when calling tokenizer")
                 self.assertEqual(batch.input_ids.shape[1], 3)
                 # max_target_length will default to max_length if not specified
                 batch = tokenizer(text, max_length=3, return_tensors="pt")
@@ -251,7 +246,7 @@ class CodeLlamaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
                 self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                 self.assertNotIn("decoder_input_ids", batch_encoder_only)

-    @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
+    @unittest.skip(reason="Unfortunately way too slow to build a BPE with SentencePiece.")
     def test_save_slow_from_fast_and_reload_fast(self):
         pass
@@ -306,11 +301,11 @@ class CodeLlamaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         pickled_tokenizer = pickle.dumps(tokenizer)
         pickle.loads(pickled_tokenizer)

-    @unittest.skip("worker 'gw4' crashed on CI, passing locally.")
+    @unittest.skip(reason="worker 'gw4' crashed on CI, passing locally.")
     def test_pickle_subword_regularization_tokenizer(self):
         pass

-    @unittest.skip("worker 'gw4' crashed on CI, passing locally.")
+    @unittest.skip(reason="worker 'gw4' crashed on CI, passing locally.")
     def test_subword_regularization_tokenizer(self):
         pass
...
@@ -99,7 +99,7 @@ class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_rust_and_python_full_tokenizers(self):
         if not self.test_rust_tokenizer:
-            return
+            self.skipTest(reason="test_rust_tokenizer is set to False")

         tokenizer = self.get_tokenizer()
         rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
@@ -127,6 +127,7 @@ class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
         self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

+    @unittest.skip
     def test_pretokenized_inputs(self, *args, **kwargs):
         # It's very difficult to mix/test pretokenization with byte-level
         # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
@@ -262,6 +263,7 @@ class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     # TODO @ArthurZ outputs of the fast tokenizer are different in this case, un-related to the PR
     # tokenizer has no padding token
+    @unittest.skip(reason="tokenizer has no padding token")
     def test_padding_different_model_input_name(self):
         pass
...
@@ -51,7 +51,7 @@ class CohereTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_torch_encode_plus_sent_to_model(self):
         super().test_torch_encode_plus_sent_to_model()

-    @unittest.skip("This needs a slow tokenizer. Cohere does not have one!")
+    @unittest.skip(reason="This needs a slow tokenizer. Cohere does not have one!")
     def test_encode_decode_with_spaces(self):
         return
...
@@ -263,8 +263,8 @@ class ConditionalDetrModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
         pass

     @slow
+    @unittest.skip(reason="TODO Niels: fix me!")
     def test_model_outputs_equivalence(self):
-        # TODO Niels: fix me!
         pass

     def test_attention_outputs(self):
...
@@ -433,7 +433,7 @@ class ConvBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         for model_class in self.all_model_classes:
             # ConvBertForMultipleChoice behaves incorrectly in JIT environments.
             if model_class == ConvBertForMultipleChoice:
-                return
+                self.skipTest(reason="ConvBertForMultipleChoice behaves incorrectly in JIT environments.")

             config.torchscript = True
             model = model_class(config=config)
...
@@ -216,7 +216,7 @@ class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     def test_training(self):
         if not self.model_tester.is_training:
-            return
+            self.skipTest(reason="ModelTester is not set to test training")

         for model_class in self.all_model_classes:
             config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
@@ -237,7 +237,7 @@ class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     def test_training_gradient_checkpointing(self):
         if not self.model_tester.is_training:
-            return
+            self.skipTest(reason="ModelTester is not set to test training")

         for model_class in self.all_model_classes:
             config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
...
@@ -154,7 +154,7 @@ class CpmAntModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         self.config_tester.run_common_tests()

     def test_inputs_embeds(self):
-        unittest.skip("CPMAnt doesn't support input_embeds.")(self.test_inputs_embeds)
+        unittest.skip(reason="CPMAnt doesn't support input_embeds.")(self.test_inputs_embeds)

     def test_retain_grad_hidden_states_attentions(self):
         unittest.skip(
...
@@ -426,22 +426,19 @@ class Data2VecAudioModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.check_labels_out_of_vocab(*config_and_inputs)

-    # Data2VecAudio has no inputs_embeds
+    @unittest.skip(reason="Data2VecAudio has no inputs_embeds")
     def test_inputs_embeds(self):
         pass

-    # `input_ids` is renamed to `input_values`
+    @unittest.skip(reason="`input_ids` is renamed to `input_values`")
     def test_forward_signature(self):
         pass

-    # Data2VecAudio cannot resize token embeddings
-    # since it has no tokens embeddings
+    @unittest.skip(reason="Data2VecAudio has no tokens embeddings")
     def test_resize_tokens_embeddings(self):
         pass

-    # Data2VecAudio has no inputs_embeds
-    # and thus the `get_input_embeddings` fn
-    # is not implemented
+    @unittest.skip(reason="Data2VecAudio has no inputs_embeds")
     def test_model_get_set_embeddings(self):
         pass
...
@@ -196,8 +196,8 @@ class Data2VecVisionModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     def test_config(self):
         self.config_tester.run_common_tests()

+    @unittest.skip(reason="Data2VecVision does not use inputs_embeds")
     def test_inputs_embeds(self):
-        # Data2VecVision does not use inputs_embeds
         pass

     @require_torch_multi_gpu
@@ -226,7 +226,7 @@ class Data2VecVisionModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     def test_training(self):
         if not self.model_tester.is_training:
-            return
+            self.skipTest(reason="model_tester.is_training is set to False")

         config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
         config.return_dict = True
@@ -245,7 +245,7 @@ class Data2VecVisionModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     def test_training_gradient_checkpointing(self):
         config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
         if not self.model_tester.is_training:
-            return
+            self.skipTest(reason="model_tester.is_training is set to False")

         config.use_cache = False
         config.return_dict = True
...
@@ -350,21 +350,21 @@ class DbrxModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
         model = DbrxModel.from_pretrained(model_name)
         self.assertIsNotNone(model)

-    @unittest.skip("Dbrx models have weight tying disabled.")
+    @unittest.skip(reason="Dbrx models have weight tying disabled.")
     def test_tied_weights_keys(self):
         pass

     # Offload does not work with Dbrx models because of the forward of DbrxExperts where we chunk the experts.
     # The issue is that the offloaded weights of the mlp layer are still on meta device (w1_chunked, v1_chunked, w2_chunked)
-    @unittest.skip("Dbrx models do not work with offload")
+    @unittest.skip(reason="Dbrx models do not work with offload")
     def test_cpu_offload(self):
         pass

-    @unittest.skip("Dbrx models do not work with offload")
+    @unittest.skip(reason="Dbrx models do not work with offload")
     def test_disk_offload_safetensors(self):
         pass

-    @unittest.skip("Dbrx models do not work with offload")
+    @unittest.skip(reason="Dbrx models do not work with offload")
     def test_disk_offload_bin(self):
         pass
...
@@ -79,11 +79,11 @@ class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         self.assertListEqual(rust_tokens, tokens_target)

-    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
+    @unittest.skip(reason="There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
     def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
         pass

-    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
+    @unittest.skip(reason="There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
     def test_sentencepiece_tokenize_and_decode(self):
         pass
...
@@ -606,15 +606,15 @@ class DeformableDetrModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
                     msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                 )

-    @unittest.skip("No support for low_cpu_mem_usage=True.")
+    @unittest.skip(reason="No support for low_cpu_mem_usage=True.")
     def test_save_load_low_cpu_mem_usage(self):
         pass

-    @unittest.skip("No support for low_cpu_mem_usage=True.")
+    @unittest.skip(reason="No support for low_cpu_mem_usage=True.")
     def test_save_load_low_cpu_mem_usage_checkpoints(self):
         pass

-    @unittest.skip("No support for low_cpu_mem_usage=True.")
+    @unittest.skip(reason="No support for low_cpu_mem_usage=True.")
     def test_save_load_low_cpu_mem_usage_no_safetensors(self):
         pass
...
@@ -274,7 +274,7 @@ class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     def test_training(self):
         if not self.model_tester.is_training:
-            return
+            self.skipTest(reason="model_tester.is_training is set to False")

         config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
         config.return_dict = True
@@ -296,7 +296,7 @@ class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     def test_training_gradient_checkpointing(self):
         config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
         if not self.model_tester.is_training:
-            return
+            self.skipTest(reason="model_tester.is_training is set to False")

         config.use_cache = False
         config.return_dict = True
...
@@ -263,8 +263,8 @@ class DetrModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
         pass

     @slow
+    @unittest.skip(reason="TODO Niels: fix me!")
     def test_model_outputs_equivalence(self):
-        # TODO Niels: fix me!
         pass

     def test_attention_outputs(self):
...
@@ -256,7 +256,7 @@ class DinatModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
             self.assertTrue(x is None or isinstance(x, nn.Linear))

     def test_attention_outputs(self):
-        self.skipTest("Dinat's attention operation is handled entirely by NATTEN.")
+        self.skipTest(reason="Dinat's attention operation is handled entirely by NATTEN.")

     def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
         model = model_class(config)
...
@@ -281,7 +281,7 @@ class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         for model_class in self.all_model_classes:
             # BertForMultipleChoice behaves incorrectly in JIT environments.
             if model_class == DistilBertForMultipleChoice:
-                return
+                self.skipTest(reason="DistilBertForMultipleChoice behaves incorrectly in JIT environments.")

             config.torchscript = True
             model = model_class(config=config)
...
@@ -168,8 +168,8 @@ class DonutSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_model(*config_and_inputs)

+    @unittest.skip(reason="DonutSwin does not use inputs_embeds")
     def test_inputs_embeds(self):
-        # DonutSwin does not use inputs_embeds
         pass

     def test_model_get_set_embeddings(self):
...
@@ -78,7 +78,7 @@ class ElectraTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
     def test_rust_and_python_full_tokenizers(self):
         if not self.test_rust_tokenizer:
-            return
+            self.skipTest(reason="test_rust_tokenizer is set to False")

         tokenizer = self.get_tokenizer()
         rust_tokenizer = self.get_rust_tokenizer()
...
@@ -178,29 +178,35 @@ class EncodecModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         expected_arg_names = ["input_values", "padding_mask", "bandwidth"]
         self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

-    @unittest.skip("The EncodecModel is not transformers based, thus it does not have `inputs_embeds` logics")
+    @unittest.skip(reason="The EncodecModel is not transformers based, thus it does not have `inputs_embeds` logics")
     def test_inputs_embeds(self):
         pass

-    @unittest.skip("The EncodecModel is not transformers based, thus it does not have `inputs_embeds` logics")
+    @unittest.skip(reason="The EncodecModel is not transformers based, thus it does not have `inputs_embeds` logics")
     def test_model_get_set_embeddings(self):
         pass

-    @unittest.skip("The EncodecModel is not transformers based, thus it does not have the usual `attention` logic")
+    @unittest.skip(
+        reason="The EncodecModel is not transformers based, thus it does not have the usual `attention` logic"
+    )
     def test_retain_grad_hidden_states_attentions(self):
         pass

-    @unittest.skip("The EncodecModel is not transformers based, thus it does not have the usual `attention` logic")
+    @unittest.skip(
+        reason="The EncodecModel is not transformers based, thus it does not have the usual `attention` logic"
+    )
     def test_torchscript_output_attentions(self):
         pass

-    @unittest.skip("The EncodecModel is not transformers based, thus it does not have the usual `hidden_states` logic")
+    @unittest.skip(
+        reason="The EncodecModel is not transformers based, thus it does not have the usual `hidden_states` logic"
+    )
     def test_torchscript_output_hidden_state(self):
         pass

     def _create_and_check_torchscript(self, config, inputs_dict):
         if not self.test_torchscript:
-            return
+            self.skipTest(reason="test_torchscript is set to False")

         configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
         configs_no_init.torchscript = True
@@ -288,7 +294,9 @@ class EncodecModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         # (Even with this call, there are still memory leak by ~0.04MB)
         self.clear_torch_jit_class_registry()

-    @unittest.skip("The EncodecModel is not transformers based, thus it does not have the usual `attention` logic")
+    @unittest.skip(
+        reason="The EncodecModel is not transformers based, thus it does not have the usual `attention` logic"
+    )
     def test_attention_outputs(self):
         pass
@@ -321,19 +329,21 @@ class EncodecModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
             hidden_states_with_chunk = model(**inputs)[0]
             self.assertTrue(torch.allclose(hidden_states_no_chunk, hidden_states_with_chunk, atol=1e-3))

-    @unittest.skip("The EncodecModel is not transformers based, thus it does not have the usual `hidden_states` logic")
+    @unittest.skip(
+        reason="The EncodecModel is not transformers based, thus it does not have the usual `hidden_states` logic"
+    )
     def test_hidden_states_output(self):
         pass

-    @unittest.skip("No support for low_cpu_mem_usage=True.")
+    @unittest.skip(reason="No support for low_cpu_mem_usage=True.")
     def test_save_load_low_cpu_mem_usage(self):
         pass

-    @unittest.skip("No support for low_cpu_mem_usage=True.")
+    @unittest.skip(reason="No support for low_cpu_mem_usage=True.")
     def test_save_load_low_cpu_mem_usage_checkpoints(self):
         pass

-    @unittest.skip("No support for low_cpu_mem_usage=True.")
+    @unittest.skip(reason="No support for low_cpu_mem_usage=True.")
     def test_save_load_low_cpu_mem_usage_no_safetensors(self):
         pass
...
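For completeness, a small self-contained demonstration (standard library only; the `SkipDemo` class is hypothetical) of why the replacement behaves differently: `skipTest` raises `unittest.SkipTest`, so execution stops at that call and the runner records the reason, whereas a bare `return` is indistinguishable from a pass.

import unittest


class SkipDemo(unittest.TestCase):  # hypothetical demo class, not from the commit
    def test_skipped(self):
        self.skipTest(reason="demonstrating the recorded skip message")
        self.fail("never reached: skipTest raises unittest.SkipTest")


if __name__ == "__main__":
    # With verbosity=2 the runner prints something like:
    #   test_skipped (__main__.SkipDemo) ... skipped 'demonstrating the recorded skip message'
    unittest.main(verbosity=2)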