"tests/vscode:/vscode.git/clone" did not exist on "d0b3797a3be095f74659341ed396cc8bccff96f6"
Unverified commit 19420fd9, authored by Yih-Dar, committed by GitHub

Move test model folders (#17034)



* move test model folders (TODO: fix imports and others)

* fix (potentially partially) imports (in model test modules)

* fix (potentially partially) imports (in tokenization test modules)

* fix (potentially partially) imports (in feature extraction test modules)

* fix import of utils.test_modeling_tf_core

* fix path ../fixtures/

* fix imports of generation.test_generation_flax_utils

* fix more imports

* fix fixture path

* fix get_test_dir

* update module_to_test_file

* fix get_tests_dir from wrong transformers.utils

* update config.yml (CircleCI)

* fix style

* remove missing imports

* update new model script

* update check_repo

* update SPECIAL_MODULE_TO_TEST_MAP

* fix style

* add __init__

* update self-scheduled

* fix add_new_model scripts

* check one way to get location back

* python setup.py build install

* fix import in test auto

* update self-scheduled.yml

* update slack notification script

* Add comments about artifact names

* fix for yolos
Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
parent cd9274d0
@@ -21,10 +21,10 @@ import numpy as np
 from transformers import is_flax_available, is_torch_available
 from transformers.testing_utils import is_pt_flax_cross_test, require_flax, slow, torch_device
+from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
 from ..bart.test_modeling_flax_bart import FlaxBartStandaloneDecoderModelTester
 from ..bert.test_modeling_flax_bert import FlaxBertModelTester
 from ..gpt2.test_modeling_flax_gpt2 import FlaxGPT2ModelTester
-from ..test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
 from ..wav2vec2.test_modeling_flax_wav2vec2 import FlaxWav2Vec2ModelTester
...
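Aside (not part of the diff): the same one-dot change recurs in nearly every hunk below. The commit moves per-model test modules one package deeper, presumably from tests/<model>/ to tests/models/<model>/, while shared helpers such as test_modeling_flax_common stay at the tests/ root, so reaching them now takes three leading dots instead of two. A minimal, purely illustrative Python sketch follows; the paths are assumptions, since the extracted diff does not show file names.

# Assumed layout after the move (illustrative only):
#
#   tests/test_modeling_flax_common.py                       <- shared helpers, still at the tests root
#   tests/models/encoder_decoder/test_modeling_flax_*.py     <- moved test module, one level deeper
#
# Relative imports count packages upward from the importing module, so from the
# moved module the tests root is now three levels up instead of two:
from ...test_modeling_flax_common import floats_tensor  # three dots climb models/ -> tests/ (was ".." before the move)
from ..bert.test_modeling_flax_bert import FlaxBertModelTester  # two dots still reach sibling folders under tests/models/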
@@ -20,10 +20,10 @@ import unittest
 from transformers import is_torch_available
 from transformers.testing_utils import require_torch, slow, torch_device
+from ...test_modeling_common import floats_tensor, ids_tensor, random_attention_mask
 from ..bert.test_modeling_bert import BertModelTester
 from ..speech_to_text.test_modeling_speech_to_text import Speech2TextModelTester
 from ..speech_to_text_2.test_modeling_speech_to_text_2 import Speech2Text2StandaloneDecoderModelTester
-from ..test_modeling_common import floats_tensor, ids_tensor, random_attention_mask
 from ..wav2vec2.test_modeling_wav2vec2 import Wav2Vec2ModelTester
...
@@ -23,7 +23,7 @@ import numpy as np
 from transformers import is_speech_available
 from transformers.testing_utils import require_torch, require_torchaudio
-from ..test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
+from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
 if is_speech_available():
...
@@ -32,9 +32,9 @@ from transformers.testing_utils import (
 )
 from transformers.utils import cached_property
-from ..generation.test_generation_utils import GenerationTesterMixin
-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
+from ...generation.test_generation_utils import GenerationTesterMixin
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
 if is_torch_available():
...
@@ -21,8 +21,8 @@ from transformers import Speech2TextConfig
 from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
 from transformers.utils import cached_property, is_tf_available
-from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
 if is_tf_available():
...
@@ -12,17 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import os
 import shutil
 import tempfile
 import unittest
-from os.path import dirname
 from pathlib import Path
 from shutil import copyfile
 from transformers import Speech2TextTokenizer, is_speech_available
 from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
-from transformers.testing_utils import require_sentencepiece, require_torch, require_torchaudio
+from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, require_torchaudio
 from transformers.utils import FEATURE_EXTRACTOR_NAME
 from .test_feature_extraction_speech_to_text import floats_list
@@ -32,7 +30,7 @@ if is_speech_available():
 from transformers import Speech2TextFeatureExtractor, Speech2TextProcessor
-SAMPLE_SP = os.path.join(dirname(dirname(os.path.abspath(__file__))), "fixtures/test_sentencepiece.model")
+SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
 @require_torch
...
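The get_tests_dir switch above (and in the next file) replaces a hand-rolled os.path.join(dirname(dirname(__file__)), ...) lookup that assumed the test file sat directly under tests/; after the move the file is one level deeper, so that computation would point at the wrong directory. The sketch below shows roughly how such a helper can resolve the tests root from the caller's location; it is an approximation, not necessarily the exact transformers.testing_utils implementation.

import inspect
import os


def get_tests_dir(append_path=None):
    """Return the tests/ root (optionally joined with append_path), resolved from the
    caller's file so the lookup works at any nesting depth.
    Sketch only; assumes the caller lives somewhere under a tests/ directory."""
    caller_file = inspect.stack()[1][1]            # file of the module that called get_tests_dir()
    tests_dir = os.path.abspath(os.path.dirname(caller_file))
    while not tests_dir.endswith("tests"):         # climb parent directories until the tests/ root
        tests_dir = os.path.dirname(tests_dir)
    return os.path.join(tests_dir, append_path) if append_path else tests_dir


# e.g. from a test module under tests/models/speech_to_text/ (path assumed):
# SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
# -> <repo>/tests/fixtures/test_sentencepiece.model, regardless of how deep the test file sits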
@@ -12,21 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import os
 import unittest
-from os.path import dirname
 from pathlib import Path
 from shutil import copyfile
 from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
 from transformers.models.speech_to_text import Speech2TextTokenizer
 from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
-from transformers.testing_utils import require_sentencepiece, require_tokenizers, slow
+from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
-from ..test_tokenization_common import TokenizerTesterMixin
+from ...test_tokenization_common import TokenizerTesterMixin
-SAMPLE_SP = os.path.join(dirname(dirname(os.path.abspath(__file__))), "fixtures/test_sentencepiece.model")
+SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
 if is_sentencepiece_available():
     import sentencepiece as sp
...
@@ -19,9 +19,9 @@ import unittest
 from transformers import Speech2Text2Config
 from transformers.testing_utils import is_torch_available, require_torch, torch_device
-from ..generation.test_generation_utils import GenerationTesterMixin
-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin, ids_tensor
+from ...generation.test_generation_utils import GenerationTesterMixin
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, ids_tensor
 if is_torch_available():
...
@@ -21,7 +21,7 @@ import unittest
 from transformers.models.speech_to_text_2 import Speech2Text2Tokenizer
 from transformers.models.speech_to_text_2.tokenization_speech_to_text_2 import VOCAB_FILES_NAMES
-from ..test_tokenization_common import TokenizerTesterMixin
+from ...test_tokenization_common import TokenizerTesterMixin
 class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
...
@@ -20,8 +20,8 @@ import unittest
 from transformers import is_torch_available
 from transformers.testing_utils import require_torch, slow, torch_device
-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
 if is_torch_available():
...
@@ -19,8 +19,8 @@ import unittest
 from transformers import SqueezeBertConfig, is_torch_available
 from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
 if is_torch_available():
...
@@ -22,8 +22,8 @@ from transformers import SwinConfig
 from transformers.testing_utils import require_torch, require_vision, slow, torch_device
 from transformers.utils import cached_property, is_torch_available, is_vision_available
-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
 if is_torch_available():
...