Commit 158e82e0 authored by Aymeric Augustin

Sort imports with isort.

This is the result of:

    $ isort --recursive examples templates transformers utils hubconf.py setup.py
parent bc1715c1
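As a rough illustration of the command above, the sketch below shows the kinds of changes isort makes that recur throughout the hunks that follow. The file name is hypothetical and the repository's exact isort options are not reproduced here; with isort's defaults, duplicate "from __future__" imports are merged into one statement, plain imports are alphabetized, and names inside a single "from" import are ordered constants first, then classes, then lowercase functions.

    # example.py -- hypothetical file, before running isort
    from __future__ import absolute_import
    from __future__ import division
    from __future__ import print_function

    import unittest
    import shutil
    import logging

    from transformers.tokenization_bert import BertTokenizer, VOCAB_FILES_NAMES

    # the same imports after "isort example.py" with default settings
    from __future__ import absolute_import, division, print_function

    import logging
    import shutil
    import unittest

    from transformers.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer

The multi-line parenthesized imports that appear in several hunks are how isort wraps a combined import once it exceeds the configured line length; the exact wrap style configured for this repository is not visible in this diff.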
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import, division, print_function
import unittest
@@ -8,6 +6,7 @@ from transformers import is_tf_available
from .utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
......
import unittest
from typing import Iterable
from transformers import pipeline
from transformers.tests.utils import require_tf, require_torch
QA_FINETUNED_MODELS = {
("bert-base-uncased", "bert-large-uncased-whole-word-masking-finetuned-squad", None),
("bert-base-cased", "bert-large-cased-whole-word-masking-finetuned-squad", None),
......
@@ -17,10 +17,11 @@ from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
from transformers.tokenization_albert import AlbertTokenizer, SPIECE_UNDERLINE
from transformers.tokenization_albert import SPIECE_UNDERLINE, AlbertTokenizer
from .tokenization_tests_commons import CommonTestCases
SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures/spiece.model")
......
@@ -12,18 +12,21 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import, division, print_function
import unittest
import shutil
import logging
import shutil
import unittest
from transformers import AutoTokenizer, BertTokenizer, AutoTokenizer, GPT2Tokenizer
from transformers import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertTokenizer,
GPT2Tokenizer,
)
from .utils import slow, SMALL_MODEL_IDENTIFIER
from .utils import SMALL_MODEL_IDENTIFIER, slow
class AutoTokenizerTest(unittest.TestCase):
......
@@ -20,14 +20,14 @@ from io import open
from transformers.tokenization_bert import WordpieceTokenizer
from transformers.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
MecabTokenizer,
CharacterTokenizer,
VOCAB_FILES_NAMES,
MecabTokenizer,
)
from .tokenization_tests_commons import CommonTestCases
from .utils import slow, custom_tokenizers
from .utils import custom_tokenizers, slow
@custom_tokenizers
......
@@ -19,13 +19,13 @@ import unittest
from io import open
from transformers.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
VOCAB_FILES_NAMES,
)
from .tokenization_tests_commons import CommonTestCases
......
@@ -13,12 +13,12 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
import unittest
import json
from io import open
from transformers.tokenization_ctrl import CTRLTokenizer, VOCAB_FILES_NAMES
from transformers.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from .tokenization_tests_commons import CommonTestCases
......
@@ -20,8 +20,8 @@ from io import open
from transformers.tokenization_distilbert import DistilBertTokenizer
from .tokenization_tests_commons import CommonTestCases
from .tokenization_bert_test import BertTokenizationTest
from .tokenization_tests_commons import CommonTestCases
from .utils import slow
......
@@ -14,12 +14,12 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
import unittest
import json
from io import open
from transformers.tokenization_gpt2 import GPT2Tokenizer, VOCAB_FILES_NAMES
from transformers.tokenization_gpt2 import VOCAB_FILES_NAMES, GPT2Tokenizer
from .tokenization_tests_commons import CommonTestCases
......
@@ -14,11 +14,11 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
import unittest
import json
from transformers.tokenization_openai import OpenAIGPTTokenizer, VOCAB_FILES_NAMES
from transformers.tokenization_openai import VOCAB_FILES_NAMES, OpenAIGPTTokenizer
from .tokenization_tests_commons import CommonTestCases
......
@@ -14,12 +14,13 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import json
import os
import unittest
from io import open
from transformers.tokenization_roberta import RobertaTokenizer, VOCAB_FILES_NAMES
from transformers.tokenization_roberta import VOCAB_FILES_NAMES, RobertaTokenizer
from .tokenization_tests_commons import CommonTestCases
from .utils import slow
......
@@ -22,6 +22,7 @@ from transformers.tokenization_xlnet import SPIECE_UNDERLINE
from .tokenization_tests_commons import CommonTestCases
SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures/test_sentencepiece.model")
......
@@ -15,11 +15,12 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import shutil
import sys
from io import open
import tempfile
import shutil
import unittest
from io import open
if sys.version_info[0] == 2:
import cPickle as pickle
......
@@ -20,13 +20,14 @@ from io import open
from transformers import is_torch_available
from .tokenization_tests_commons import CommonTestCases
from .utils import require_torch
if is_torch_available():
import torch
from transformers.tokenization_transfo_xl import TransfoXLTokenizer, VOCAB_FILES_NAMES
from .tokenization_tests_commons import CommonTestCases
from .utils import require_torch
@require_torch
class TransfoXLTokenizationTest(CommonTestCases.CommonTokenizerTester):
......
@@ -12,11 +12,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import, division, print_function
import unittest
import six
from transformers import PreTrainedTokenizer
......
@@ -14,11 +14,11 @@
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
import unittest
import json
from transformers.tokenization_xlm import XLMTokenizer, VOCAB_FILES_NAMES
from transformers.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from .tokenization_tests_commons import CommonTestCases
from .utils import slow
......
@@ -17,11 +17,12 @@ from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
from transformers.tokenization_xlnet import XLNetTokenizer, SPIECE_UNDERLINE
from transformers.tokenization_xlnet import SPIECE_UNDERLINE, XLNetTokenizer
from .tokenization_tests_commons import CommonTestCases
from .utils import slow
SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures/test_sentencepiece.model")
......
import os
import unittest
import tempfile
import unittest
from distutils.util import strtobool
from transformers.file_utils import _tf_available, _torch_available
......
@@ -15,13 +15,16 @@
""" Tokenization classes for ALBERT model."""
from __future__ import absolute_import, division, print_function, unicode_literals
from .tokenization_utils import PreTrainedTokenizer
import logging
import unicodedata
import six
import os
import unicodedata
from shutil import copyfile
import six
from .tokenization_utils import PreTrainedTokenizer
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
......
@@ -18,20 +18,21 @@ from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from .tokenization_albert import AlbertTokenizer
from .tokenization_bert import BertTokenizer
from .tokenization_bert_japanese import BertJapaneseTokenizer
from .tokenization_openai import OpenAIGPTTokenizer
from .tokenization_gpt2 import GPT2Tokenizer
from .tokenization_camembert import CamembertTokenizer
from .tokenization_ctrl import CTRLTokenizer
from .tokenization_transfo_xl import TransfoXLTokenizer
from .tokenization_xlnet import XLNetTokenizer
from .tokenization_xlm import XLMTokenizer
from .tokenization_roberta import RobertaTokenizer
from .tokenization_distilbert import DistilBertTokenizer
from .tokenization_camembert import CamembertTokenizer
from .tokenization_albert import AlbertTokenizer
from .tokenization_gpt2 import GPT2Tokenizer
from .tokenization_openai import OpenAIGPTTokenizer
from .tokenization_roberta import RobertaTokenizer
from .tokenization_t5 import T5Tokenizer
from .tokenization_transfo_xl import TransfoXLTokenizer
from .tokenization_xlm import XLMTokenizer
from .tokenization_xlm_roberta import XLMRobertaTokenizer
from .tokenization_xlnet import XLNetTokenizer
logger = logging.getLogger(__name__)
......