Commit 158e82e0 authored by Aymeric Augustin

Sort imports with isort.

This is the result of:

    $ isort --recursive examples templates transformers utils hubconf.py setup.py
parent bc1715c1
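
The hunks below all follow from isort's default rules: within each import section, plain `import` statements are listed before `from ... import` statements, and modules are alphabetized. As a minimal sketch (not part of this commit), the same transformation through isort's Python API; `isort.code` is the isort 5.x entry point, whereas the commit itself ran the 4.x command line shown above:

    # Minimal sketch: reproduce the reordering in the first hunk below with
    # isort's programmatic API (isort >= 5; the commit used the 4.x CLI).
    import isort

    messy = (
        "import os\n"
        "import json\n"
        "import math\n"
        "import logging\n"
        "import collections\n"
        "import sys\n"
        "from io import open\n"
    )

    print(isort.code(messy))
    # Default settings alphabetize the plain imports and keep the from-import
    # after them: collections, json, logging, math, os, sys, then io.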
@@ -20,11 +20,11 @@
 from __future__ import absolute_import, division, print_function, unicode_literals

-import os
+import collections
 import json
-import math
 import logging
-import collections
+import math
+import os
 import sys
 from io import open
@@ -34,10 +34,11 @@ import torch.nn.functional as F
 from torch.nn import CrossEntropyLoss
 from torch.nn.parameter import Parameter

-from .modeling_utils import PreTrainedModel, Conv1D, prune_conv1d_layer, SequenceSummary
 from .configuration_transfo_xl import TransfoXLConfig
-from .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax, sample_logits, LogUniformSampler
 from .file_utils import add_start_docstrings
+from .modeling_transfo_xl_utilities import LogUniformSampler, ProjectedAdaptiveLogSoftmax, sample_logits
+from .modeling_utils import Conv1D, PreTrainedModel, SequenceSummary, prune_conv1d_layer

 logger = logging.getLogger(__name__)
...
@@ -20,11 +20,11 @@
 from collections import defaultdict

 import numpy as np
 import torch
 import torch.nn as nn
 import torch.nn.functional as F

 # CUDA_MAJOR = int(torch.version.cuda.split('.')[0])
 # CUDA_MINOR = int(torch.version.cuda.split('.')[1])
...
@@ -31,15 +31,16 @@ from torch.nn import functional as F
 from .configuration_utils import PretrainedConfig
 from .file_utils import (
+    DUMMY_INPUTS,
     TF2_WEIGHTS_NAME,
     TF_WEIGHTS_NAME,
     WEIGHTS_NAME,
-    DUMMY_INPUTS,
     cached_path,
     hf_bucket_url,
     is_remote_url,
 )

 logger = logging.getLogger(__name__)

 try:
...
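
Note how the wrapped `from .file_utils import (...)` block above keeps one name per line with a trailing comma after sorting. As a hedged sketch, that style corresponds to isort's vertical-hanging-indent mode, which the `black` profile bundles; both the profile and the 119-character line length here are assumptions about the project's settings, not values recorded in this commit:

    # Hedged sketch: wrap a long from-import in the one-name-per-line style
    # seen in this diff. profile="black" enables vertical hanging indent and
    # trailing commas; it and line_length=119 are assumptions, not the
    # repository's recorded configuration.
    import isort

    long_import = (
        "from .file_utils import (TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, "
        "WEIGHTS_NAME, DUMMY_INPUTS, cached_path, hf_bucket_url, is_remote_url)"
    )

    print(isort.code(long_import, profile="black", line_length=119))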
@@ -16,20 +16,20 @@
 """
 from __future__ import absolute_import, division, print_function, unicode_literals

+import itertools
 import logging
 import math
-import itertools

 import numpy as np
 import torch
 from torch import nn
-from torch.nn import functional as F
 from torch.nn import CrossEntropyLoss, MSELoss
+from torch.nn import functional as F

-from .modeling_utils import PreTrainedModel, prune_linear_layer, SequenceSummary, SQuADHead
 from .configuration_xlm import XLMConfig
 from .file_utils import add_start_docstrings
+from .modeling_utils import PreTrainedModel, SequenceSummary, SQuADHead, prune_linear_layer

 logger = logging.getLogger(__name__)
...
@@ -19,15 +19,16 @@ from __future__ import absolute_import, division, print_function, unicode_literals
 import logging

+from .configuration_xlm_roberta import XLMRobertaConfig
+from .file_utils import add_start_docstrings
 from .modeling_roberta import (
-    RobertaModel,
     RobertaForMaskedLM,
-    RobertaForSequenceClassification,
     RobertaForMultipleChoice,
+    RobertaForSequenceClassification,
     RobertaForTokenClassification,
+    RobertaModel,
 )
-from .configuration_xlm_roberta import XLMRobertaConfig
-from .file_utils import add_start_docstrings

 logger = logging.getLogger(__name__)
...
@@ -26,19 +26,19 @@ from io import open
 import torch
 from torch import nn
-from torch.nn import functional as F
 from torch.nn import CrossEntropyLoss, MSELoss
+from torch.nn import functional as F

+from .configuration_xlnet import XLNetConfig
+from .file_utils import add_start_docstrings
 from .modeling_utils import (
-    PreTrainedModel,
-    prune_linear_layer,
-    SequenceSummary,
     PoolerAnswerClass,
     PoolerEndLogits,
     PoolerStartLogits,
+    PreTrainedModel,
+    SequenceSummary,
+    prune_linear_layer,
 )
-from .configuration_xlnet import XLNetConfig
-from .file_utils import add_start_docstrings

 logger = logging.getLogger(__name__)
...
@@ -21,6 +21,7 @@ import torch
 from torch.optim import Optimizer
 from torch.optim.lr_scheduler import LambdaLR

 logger = logging.getLogger(__name__)
...
@@ -14,9 +14,7 @@
 # ==============================================================================
 """Functions and classes related to optimization (weight updates)."""

-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
+from __future__ import absolute_import, division, print_function

 import re
...
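
As the hunk above shows, isort also collapses repeated from-imports of the same module into a single statement, which is how the three `from __future__` lines become one. The same behavior through the API (a sketch, using the isort 5.x `isort.code` entry point):

    # Sketch: isort merges multiple from-imports of one module into a single
    # statement, exactly the __future__ consolidation shown above.
    import isort

    print(isort.code(
        "from __future__ import absolute_import\n"
        "from __future__ import division\n"
        "from __future__ import print_function\n"
    ))
    # -> from __future__ import absolute_import, division, print_function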
@@ -14,36 +14,36 @@
 # limitations under the License.
 from __future__ import absolute_import, division, print_function, unicode_literals

-import sys
 import csv
 import json
+import logging
 import os
 import pickle
-import logging
-import six
+import sys

 from abc import ABC, abstractmethod
 from contextlib import contextmanager
 from itertools import groupby
 from os.path import abspath, exists
-from typing import Union, Optional, Tuple, List, Dict
+from typing import Dict, List, Optional, Tuple, Union

 import numpy as np
+import six

 from transformers import (
+    ALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
     AutoConfig,
     AutoTokenizer,
-    PreTrainedTokenizer,
-    PretrainedConfig,
+    BasicTokenizer,
     ModelCard,
+    PretrainedConfig,
+    PreTrainedTokenizer,
     SquadExample,
-    squad_convert_examples_to_features,
     is_tf_available,
     is_torch_available,
-    BasicTokenizer,
-    ALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
+    squad_convert_examples_to_features,
 )

 if is_tf_available():
     import tensorflow as tf
     from transformers import (
...
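
One detail visible in the `from transformers import (...)` hunk above: with isort's default `order_by_type = True`, names inside a single from-import are grouped by kind, CONSTANT_CASE first, then CamelCase classes, then lowercase functions, alphabetized case-insensitively within each group. That is why `ALL_PRETRAINED_CONFIG_ARCHIVE_MAP` now leads the list and `squad_convert_examples_to_features` closes it. A small sketch:

    # Sketch: order_by_type groups constants, then classes, then functions
    # within one from-import (isort default settings).
    import isort

    print(isort.code(
        "from transformers import squad_convert_examples_to_features, "
        "AutoConfig, ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, is_tf_available\n"
    ))
    # The names come back as ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoConfig,
    # is_tf_available, squad_convert_examples_to_features (wrapped to the
    # active line length).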
@@ -12,15 +12,13 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
+from __future__ import absolute_import, division, print_function

-import os
 import json
+import os
 import tempfile
 import unittest

 from .tokenization_tests_commons import TemporaryDirectory
...
@@ -23,6 +23,7 @@ import six
 from transformers.hf_api import HfApi, HfFolder, HTTPError, PresignedUrl, S3Obj

 USER = "__DUMMY_TRANSFORMERS_USER__"
 PASS = "__DUMMY_TRANSFORMERS_PASS__"

 FILES = [
...
@@ -14,11 +14,12 @@
 # limitations under the License.
 from __future__ import absolute_import, division, print_function, unicode_literals

-import os
 import json
+import os
 import unittest

 from transformers.modelcard import ModelCard

 from .tokenization_tests_commons import TemporaryDirectory
...
@@ -12,18 +12,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
+from __future__ import absolute_import, division, print_function

 import unittest

 from transformers import is_torch_available

-from .modeling_common_test import CommonTestCases, ids_tensor
 from .configuration_common_test import ConfigTester
+from .modeling_common_test import CommonTestCases, ids_tensor
 from .utils import CACHE_DIR, require_torch, slow, torch_device

 if is_torch_available():
     from transformers import (
         AlbertConfig,
...
@@ -12,17 +12,16 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
+from __future__ import absolute_import, division, print_function

-import unittest
-import shutil
 import logging
+import shutil
+import unittest

 from transformers import is_torch_available

-from .utils import require_torch, slow, SMALL_MODEL_IDENTIFIER
+from .utils import SMALL_MODEL_IDENTIFIER, require_torch, slow

 if is_torch_available():
     from transformers import (
...
@@ -12,18 +12,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
+from __future__ import absolute_import, division, print_function

 import unittest

 from transformers import is_torch_available

-from .modeling_common_test import CommonTestCases, ids_tensor, floats_tensor
 from .configuration_common_test import ConfigTester
+from .modeling_common_test import CommonTestCases, floats_tensor, ids_tensor
 from .utils import CACHE_DIR, require_torch, slow, torch_device

 if is_torch_available():
     from transformers import (
         BertConfig,
...
@@ -12,26 +12,24 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
+from __future__ import absolute_import, division, print_function

 import copy
-import sys
+import json
+import logging
 import os.path
+import random
 import shutil
+import sys
 import tempfile
-import json
-import random
-import uuid
 import unittest
-import logging
+import uuid

 from transformers import is_torch_available

 from .utils import CACHE_DIR, require_torch, slow, torch_device

 if is_torch_available():
     import torch
     import numpy as np
...
@@ -11,23 +11,22 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
+from __future__ import absolute_import, division, print_function

-import unittest
 import pdb
+import unittest

 from transformers import is_torch_available

-if is_torch_available():
-    from transformers import CTRLConfig, CTRLModel, CTRL_PRETRAINED_MODEL_ARCHIVE_MAP, CTRLLMHeadModel
-
-from .modeling_common_test import CommonTestCases, ids_tensor
 from .configuration_common_test import ConfigTester
+from .modeling_common_test import CommonTestCases, ids_tensor
 from .utils import CACHE_DIR, require_torch, slow, torch_device

+if is_torch_available():
+    from transformers import CTRLConfig, CTRLModel, CTRL_PRETRAINED_MODEL_ARCHIVE_MAP, CTRLLMHeadModel
+

 @require_torch
 class CTRLModelTest(CommonTestCases.CommonModelTester):
...
@@ -12,14 +12,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
+from __future__ import absolute_import, division, print_function

 import unittest

 from transformers import is_torch_available

+from .configuration_common_test import ConfigTester
+from .modeling_common_test import CommonTestCases, ids_tensor
+from .utils import CACHE_DIR, require_torch, slow, torch_device
+
 if is_torch_available():
     from transformers import (
         DistilBertConfig,
@@ -30,10 +33,6 @@ if is_torch_available():
         DistilBertForSequenceClassification,
     )

-from .modeling_common_test import CommonTestCases, ids_tensor
-from .configuration_common_test import ConfigTester
-from .utils import CACHE_DIR, require_torch, slow, torch_device
-

 @require_torch
 class DistilBertModelTest(CommonTestCases.CommonModelTester):
...
@@ -17,8 +17,10 @@ import logging
 import unittest

 from transformers import is_torch_available

 from .utils import require_torch, slow

 if is_torch_available():
     from transformers import BertModel, BertForMaskedLM, Model2Model
     from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_MAP
...
@@ -12,14 +12,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
+from __future__ import absolute_import, division, print_function

 import unittest

 from transformers import is_torch_available

+from .configuration_common_test import ConfigTester
+from .modeling_common_test import CommonTestCases, ids_tensor
+from .utils import CACHE_DIR, require_torch, slow, torch_device
+
 if is_torch_available():
     from transformers import (
         GPT2Config,
@@ -29,10 +32,6 @@ if is_torch_available():
         GPT2DoubleHeadsModel,
     )

-from .modeling_common_test import CommonTestCases, ids_tensor
-from .configuration_common_test import ConfigTester
-from .utils import CACHE_DIR, require_torch, slow, torch_device
-

 @require_torch
 class GPT2ModelTest(CommonTestCases.CommonModelTester):
...