"git@developer.sourcefind.cn:chenpangpang/transformers.git" did not exist on "491a33d138491492d9481f08c1e4ce710289072e"
Unverified commit 34037129, authored by Matt, committed by GitHub
Browse files

Big TF test cleanup (#24282)

* Fix one BLIP arg not being optional, remove misspelled arg

* Remove the lxmert test overrides and just use the base test_saved_model_creation

* saved_model_creation fixes and re-enabling tests across the board

* Remove unnecessary skip

* Stop caching sinusoidal embeddings in speech_to_text

* Fix transfo_xl compilation

* Fix transfo_xl compilation

* Fix the conditionals in xglm

* Set the save spec only when building

* Clarify comment

* Move comment correctly

* Correct embeddings generation for speech2text

* Mark RAG generation tests as @slow

* Remove redundant else:

* Add comment to clarify the save_spec line in build()

* Fix size tests for XGLM at last!

* make fixup

* Remove one band_part operation

* Mark test_keras_fit as @slow
parent 896a58de
...@@ -20,7 +20,7 @@ import unittest ...@@ -20,7 +20,7 @@ import unittest
from transformers import MobileBertConfig, is_tf_available from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow, tooslow from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
...@@ -311,15 +311,6 @@ class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.Te ...@@ -311,15 +311,6 @@ class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.Te
config_and_inputs = self.model_tester.prepare_config_and_inputs() config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs) self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
@slow
def test_keras_fit(self):
# Override as it is a slow test on this model
super().test_keras_fit()
@tooslow
def test_saved_model_creation(self):
pass
@slow @slow
def test_model_from_pretrained(self): def test_model_from_pretrained(self):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
......
...@@ -20,7 +20,7 @@ import unittest ...@@ -20,7 +20,7 @@ import unittest
import numpy as np import numpy as np
from transformers import OPTConfig, is_tf_available from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow, tooslow from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
...@@ -219,10 +219,6 @@ class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase) ...@@ -219,10 +219,6 @@ class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase)
models_equal = False models_equal = False
self.assertTrue(models_equal) self.assertTrue(models_equal)
@tooslow
def test_saved_model_creation(self):
pass
def _long_tensor(tok_lst): def _long_tensor(tok_lst):
return tf.constant(tok_lst, dtype=tf.int32) return tf.constant(tok_lst, dtype=tf.int32)
......
...@@ -18,7 +18,7 @@ from __future__ import annotations ...@@ -18,7 +18,7 @@ from __future__ import annotations
import unittest import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow, tooslow from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester from ...test_configuration_common import ConfigTester
...@@ -206,10 +206,6 @@ class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestC ...@@ -206,10 +206,6 @@ class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestC
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs) self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@tooslow
def test_saved_model_creation(self):
pass
@require_sentencepiece @require_sentencepiece
@require_tokenizers @require_tokenizers
......
...@@ -490,6 +490,7 @@ class TFRagTestMixin: ...@@ -490,6 +490,7 @@ class TFRagTestMixin:
inputs_dict = self.config_and_inputs inputs_dict = self.config_and_inputs
self.check_model_without_retriever(**inputs_dict) self.check_model_without_retriever(**inputs_dict)
@slow
def test_model_generate_from_context_input_ids(self): def test_model_generate_from_context_input_ids(self):
inputs_dict = self.config_and_inputs inputs_dict = self.config_and_inputs
self.check_model_generate_from_context_input_ids(**inputs_dict) self.check_model_generate_from_context_input_ids(**inputs_dict)
...@@ -498,6 +499,7 @@ class TFRagTestMixin: ...@@ -498,6 +499,7 @@ class TFRagTestMixin:
inputs_dict = self.config_and_inputs inputs_dict = self.config_and_inputs
self.check_model_with_encoder_outputs(**inputs_dict) self.check_model_with_encoder_outputs(**inputs_dict)
@slow
def test_model_generate(self): def test_model_generate(self):
inputs_dict = self.config_and_inputs inputs_dict = self.config_and_inputs
self.check_model_generate(**inputs_dict) self.check_model_generate(**inputs_dict)
......
...@@ -148,6 +148,7 @@ class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCa ...@@ -148,6 +148,7 @@ class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCa
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
reason="TF does not support backprop for grouped convolutions on CPU.", reason="TF does not support backprop for grouped convolutions on CPU.",
) )
@slow
def test_keras_fit(self): def test_keras_fit(self):
super().test_keras_fit() super().test_keras_fit()
......
...@@ -347,6 +347,7 @@ class TFSegformerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.Tes ...@@ -347,6 +347,7 @@ class TFSegformerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.Tes
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
reason="TF does not support backprop for grouped convolutions on CPU.", reason="TF does not support backprop for grouped convolutions on CPU.",
) )
@slow
def test_keras_fit(self): def test_keras_fit(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common() config, _ = self.model_tester.prepare_config_and_inputs_for_common()
......
...@@ -722,6 +722,10 @@ class Speech2TextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTest ...@@ -722,6 +722,10 @@ class Speech2TextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTest
self.assertTrue(models_equal) self.assertTrue(models_equal)
def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
# Allow missing keys since TF doesn't cache the sinusoidal embeddings in an attribute
super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
@require_torch @require_torch
@require_torchaudio @require_torchaudio
......
...@@ -558,6 +558,10 @@ class TFSpeech2TextModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.T ...@@ -558,6 +558,10 @@ class TFSpeech2TextModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.T
] ]
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
# Allow missing keys since TF doesn't cache the sinusoidal embeddings in an attribute
super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
@require_tf @require_tf
@require_sentencepiece @require_sentencepiece
......
...@@ -23,7 +23,7 @@ import unittest ...@@ -23,7 +23,7 @@ import unittest
import numpy as np import numpy as np
from transformers import SwinConfig from transformers import SwinConfig
from transformers.testing_utils import require_tf, require_vision, slow, to_2tuple, tooslow from transformers.testing_utils import require_tf, require_vision, slow, to_2tuple
from transformers.utils import cached_property, is_tf_available, is_vision_available from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester from ...test_configuration_common import ConfigTester
...@@ -232,10 +232,6 @@ class TFSwinModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ...@@ -232,10 +232,6 @@ class TFSwinModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase
def test_inputs_embeds(self): def test_inputs_embeds(self):
pass pass
@tooslow
def test_saved_model_creation(self):
pass
def test_model_common_attributes(self): def test_model_common_attributes(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common() config, _ = self.model_tester.prepare_config_and_inputs_for_common()
......
...@@ -18,7 +18,7 @@ from __future__ import annotations ...@@ -18,7 +18,7 @@ from __future__ import annotations
import unittest import unittest
from transformers import T5Config, is_tf_available from transformers import T5Config, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow, tooslow from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester from ...test_configuration_common import ConfigTester
...@@ -300,10 +300,6 @@ class TFT5ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): ...@@ -300,10 +300,6 @@ class TFT5ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
self.model_tester.create_and_check_t5_decoder_model_past_large_inputs(*config_and_inputs) self.model_tester.create_and_check_t5_decoder_model_past_large_inputs(*config_and_inputs)
@tooslow
def test_saved_model_creation(self):
pass
@slow @slow
def test_model_from_pretrained(self): def test_model_from_pretrained(self):
model = TFT5Model.from_pretrained("t5-small") model = TFT5Model.from_pretrained("t5-small")
......
...@@ -1415,6 +1415,7 @@ class TFModelTesterMixin: ...@@ -1415,6 +1415,7 @@ class TFModelTesterMixin:
def check_keras_fit_results(self, val_loss1, val_loss2, atol=1e-2, rtol=1e-3): def check_keras_fit_results(self, val_loss1, val_loss2, atol=1e-2, rtol=1e-3):
self.assertTrue(np.allclose(val_loss1, val_loss2, atol=atol, rtol=rtol)) self.assertTrue(np.allclose(val_loss1, val_loss2, atol=atol, rtol=rtol))
@slow
def test_keras_fit(self): def test_keras_fit(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes: for model_class in self.all_model_classes:
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment