Unverified Commit 19420fd9 authored by Yih-Dar, committed by GitHub

Move test model folders (#17034)



* move test model folders (TODO: fix imports and others)

* fix (potentially partially) imports (in model test modules)

* fix (potentially partially) imports (in tokenization test modules)

* fix (potentially partially) imports (in feature extraction test modules)

* fix import utils.test_modeling_tf_core

* fix path ../fixtures/

* fix imports about generation.test_generation_flax_utils

* fix more imports

* fix fixture path

* fix get_test_dir

* update module_to_test_file

* fix get_tests_dir from wrong transformers.utils

* update config.yml (CircleCI)

* fix style

* remove missing imports

* update new model script

* update check_repo

* update SPECIAL_MODULE_TO_TEST_MAP

* fix style

* add __init__

* update self-scheduled

* fix add_new_model scripts

* check one way to get location back

* python setup.py build install

* fix import in test auto

* update self-scheduled.yml

* update slack notification script

* Add comments about artifact names

* fix for yolos
Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
parent cd9274d0
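The recurring change in the hunks below is the relative-import depth: moving each model's test modules one directory deeper (e.g. from `tests/<model>/` to `tests/models/<model>/`) puts the shared test helpers one more package level up, so `..` imports become `...`. A minimal sketch of that pattern, with illustrative file paths that are not taken verbatim from the commit:

```python
# Illustrative sketch of the layout change driving the '..' -> '...' edits below.
# These import lines belong inside a model test module; the exact paths here are
# examples of the old and new layout, not quotes from the diff.

# Old location: tests/clip/test_modeling_flax_clip.py
# The shared helper module tests/test_modeling_flax_common.py is one package up:
#   from ..test_modeling_flax_common import FlaxModelTesterMixin

# New location: tests/models/clip/test_modeling_flax_clip.py
# The same helper module is now two packages up:
#   from ...test_modeling_flax_common import FlaxModelTesterMixin
```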
@@ -8,7 +8,7 @@ import transformers
 from transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig, is_flax_available, is_torch_available
 from transformers.testing_utils import is_pt_flax_cross_test, require_flax, slow
-from ..test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
 if is_flax_available():
...

@@ -26,8 +26,8 @@ from transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig
 from transformers.testing_utils import require_tf, require_vision, slow
 from transformers.utils import is_tf_available, is_vision_available
-from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
 if is_tf_available():
...

@@ -22,7 +22,7 @@ from transformers import CLIPTokenizer, CLIPTokenizerFast
 from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
 from transformers.testing_utils import require_ftfy, require_tokenizers
-from ..test_tokenization_common import TokenizerTesterMixin
+from ...test_tokenization_common import TokenizerTesterMixin
 @require_tokenizers
...

@@ -21,8 +21,8 @@ from transformers import ConvBertConfig, is_torch_available
 from transformers.models.auto import get_values
 from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
 if is_torch_available():
...

@@ -19,8 +19,8 @@ import unittest
 from transformers import ConvBertConfig, is_tf_available
 from transformers.testing_utils import require_tf, slow
-from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
 if is_tf_available():
...

@@ -21,7 +21,7 @@ import numpy as np
 from transformers.testing_utils import require_torch, require_vision
 from transformers.utils import is_torch_available, is_vision_available
-from ..test_feature_extraction_common import FeatureExtractionSavingTestMixin, prepare_image_inputs
+from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin, prepare_image_inputs
 if is_torch_available():
...

@@ -22,8 +22,8 @@ from transformers import ConvNextConfig
 from transformers.testing_utils import require_torch, require_vision, slow, torch_device
 from transformers.utils import cached_property, is_torch_available, is_vision_available
-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
 if is_torch_available():
...

@@ -22,8 +22,8 @@ from transformers import ConvNextConfig
 from transformers.testing_utils import require_tf, require_vision, slow
 from transformers.utils import cached_property, is_tf_available, is_vision_available
-from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
 if is_tf_available():
...

@@ -18,9 +18,9 @@ import unittest
 from transformers import CTRLConfig, is_torch_available
 from transformers.testing_utils import require_torch, slow, torch_device
-from ..generation.test_generation_utils import GenerationTesterMixin
-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
+from ...generation.test_generation_utils import GenerationTesterMixin
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
 if is_torch_available():
...

@@ -19,8 +19,8 @@ import unittest
 from transformers import CTRLConfig, is_tf_available
 from transformers.testing_utils import require_tf, slow
-from ..test_configuration_common import ConfigTester
-from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
 if is_tf_available():
...

@@ -19,7 +19,7 @@ import unittest
 from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
-from ..test_tokenization_common import TokenizerTesterMixin
+from ...test_tokenization_common import TokenizerTesterMixin
 class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
...

@@ -24,8 +24,8 @@ from tests.test_modeling_common import floats_tensor, ids_tensor, random_attention_mask
 from transformers import Data2VecAudioConfig, is_torch_available
 from transformers.testing_utils import is_pt_flax_cross_test, require_soundfile, require_torch, slow, torch_device
-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin, _config_zero_init
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin, _config_zero_init
 if is_torch_available():
...

@@ -20,9 +20,9 @@ from tests.test_modeling_common import floats_tensor, ids_tensor, random_attention_mask
 from transformers import Data2VecTextConfig, is_torch_available
 from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
-from ..generation.test_generation_utils import GenerationTesterMixin
-from ..test_configuration_common import ConfigTester
-from ..test_modeling_common import ModelTesterMixin
+from ...generation.test_generation_utils import GenerationTesterMixin
+from ...test_configuration_common import ConfigTester
+from ...test_modeling_common import ModelTesterMixin
 if is_torch_available():
...