Unverified Commit 29c10a41 authored by Lysandre Debut, committed by GitHub

[Test refactor 1/5] Per-folder tests reorganization (#15725)



* Per-folder tests reorganization
Co-authored-by: sgugger <sylvain.gugger@gmail.com>
Co-authored-by: Stas Bekman <stas@stason.org>
parent fecb08c2
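The hunks below all follow the same pattern: each model's test modules move from the flat tests/ directory into their own folder (with the generation tests under tests/generation/ and the TF core tester under tests/utils/), so the shared helpers now sit one level above each test module and are imported with a `..` relative path instead of `.`. A minimal sketch of the before/after pattern; the tests/gpt2/ path is inferred from the hunks and not spelled out in this diff:

```python
# Before: tests/test_modeling_gpt2.py (flat layout) imported sibling helpers directly.
from .test_configuration_common import ConfigTester
from .test_generation_utils import GenerationTesterMixin
from .test_modeling_common import ModelTesterMixin

# After: tests/gpt2/test_modeling_gpt2.py (per-folder layout, path assumed) reaches one
# level up for the shared mixins and into tests/generation/ for the generation mixin.
from ..generation.test_generation_utils import GenerationTesterMixin
from ..test_configuration_common import ConfigTester
from ..test_modeling_common import ModelTesterMixin
```

Presumably each new folder also carries an `__init__.py` so these relative imports resolve when the test suite is collected as a package.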
@@ -23,9 +23,9 @@ from transformers import FSMTConfig, is_torch_available
from transformers.file_utils import cached_property
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
-from .test_configuration_common import ConfigTester
-from .test_generation_utils import GenerationTesterMixin
-from .test_modeling_common import ModelTesterMixin, ids_tensor
+from ..generation.test_generation_utils import GenerationTesterMixin
+from ..test_configuration_common import ConfigTester
+from ..test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
......
@@ -22,7 +22,7 @@ from transformers.file_utils import cached_property
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES, FSMTTokenizer
from transformers.testing_utils import slow
-from .test_tokenization_common import TokenizerTesterMixin
+from ..test_tokenization_common import TokenizerTesterMixin
# using a different tiny model than the one used for default params defined in init to ensure proper testing
......
@@ -20,8 +20,8 @@ from transformers import FunnelConfig, FunnelTokenizer, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
-from .test_configuration_common import ConfigTester
-from .test_modeling_common import ModelTesterMixin, ids_tensor
+from ..test_configuration_common import ConfigTester
+from ..test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
......
@@ -19,8 +19,8 @@ import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
-from .test_configuration_common import ConfigTester
-from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor
+from ..test_configuration_common import ConfigTester
+from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
......
@@ -21,7 +21,7 @@ from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
-from .test_tokenization_common import TokenizerTesterMixin
+from ..test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
......
@@ -19,7 +19,7 @@ import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
-from .test_modeling_common import floats_tensor, ids_tensor
+from ..test_modeling_common import floats_tensor, ids_tensor
if is_torch_available():
......
@@ -21,7 +21,7 @@ import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
-from .test_modeling_flax_common import ids_tensor
+from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
......
@@ -19,7 +19,7 @@ import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
-from .test_modeling_common import ids_tensor
+from ..test_modeling_common import ids_tensor
if is_torch_available():
......
# coding=utf-8
# Copyright 2020 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
-from .test_modeling_common import ids_tensor
+from ..test_modeling_common import ids_tensor
if is_torch_available():
......
@@ -32,7 +32,7 @@ if is_tf_available():
)
from transformers.tf_utils import set_tensor_by_indices_to_value
-from .test_modeling_tf_common import ids_tensor
+from ..test_modeling_tf_common import ids_tensor
@require_tf
......
@@ -20,7 +20,7 @@ import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
-from .test_modeling_common import floats_tensor, ids_tensor
+from ..test_modeling_common import floats_tensor, ids_tensor
if is_torch_available():
@@ -2312,8 +2312,8 @@ class GenerationIntegrationTests(unittest.TestCase):
    @slow
    def test_constrained_beam_search(self):
-        model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device)
-        tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
+        model = GPT2LMHeadModel.from_pretrained("../gpt2").to(torch_device)
+        tokenizer = GPT2Tokenizer.from_pretrained("../gpt2")
        force_tokens = tokenizer.encode(" scared", return_tensors="pt").to(torch_device)[0]
        force_tokens_2 = tokenizer.encode(" big weapons", return_tensors="pt").to(torch_device)[0]
......
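The test_constrained_beam_search hunk above exercises constrained generation: decoding that forces given token sequences to appear in the output. As a hedged usage sketch only (the force_words_ids argument is taken from the transformers generate() API and is not part of this diff):

```python
# Hypothetical sketch: force the phrase " scared" to appear in the continuation
# via constrained beam search. Not part of this commit.
from transformers import GPT2LMHeadModel, GPT2Tokenizer

model = GPT2LMHeadModel.from_pretrained("gpt2")
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

# Token ids of the phrase that must appear in every returned hypothesis.
force_words_ids = [tokenizer(" scared", add_special_tokens=False).input_ids]

inputs = tokenizer("The soldiers were", return_tensors="pt")
outputs = model.generate(
    **inputs,
    force_words_ids=force_words_ids,  # the constraint; requires num_beams > 1
    num_beams=5,
    max_new_tokens=15,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```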
@@ -22,8 +22,8 @@ import transformers
from transformers import GPT2Config, GPT2Tokenizer, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, slow
-from .test_generation_flax_utils import FlaxGenerationTesterMixin
-from .test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ..generation.test_generation_flax_utils import FlaxGenerationTesterMixin
+from ..test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
......
@@ -21,9 +21,9 @@ import unittest
from transformers import GPT2Config, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
-from .test_configuration_common import ConfigTester
-from .test_generation_utils import GenerationTesterMixin
-from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ..generation.test_generation_utils import GenerationTesterMixin
+from ..test_configuration_common import ConfigTester
+from ..test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
......
@@ -18,9 +18,9 @@ import unittest
from transformers import GPT2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
-from .test_configuration_common import ConfigTester
-from .test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
-from .test_modeling_tf_core import TFCoreModelTesterMixin
+from ..test_configuration_common import ConfigTester
+from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
+from ..utils.test_modeling_tf_core import TFCoreModelTesterMixin
if is_tf_available():
......
@@ -22,7 +22,7 @@ from transformers import GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
-from .test_tokenization_common import TokenizerTesterMixin
+from ..test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
......