Commit 158e82e0 authored by Aymeric Augustin

Sort imports with isort.

This is the result of:

    $ isort --recursive examples templates transformers utils hubconf.py setup.py
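Most of the hunks below follow from isort's default ordering rules: within each section, plain "import x" statements come before "from x import y" statements, each group is sorted alphabetically, and standard-library, third-party, and first-party imports are split into sections separated by blank lines. A minimal sketch of the straight-before-from rule, modeled on the first hunk below (the "before" order is illustrative, not taken verbatim from any one file):

    # Before: stdlib imports in whatever order they accumulated.
    import sys
    import json
    import logging
    from io import open

    # After isort: plain imports first, alphabetical; "from" imports after.
    import json
    import logging
    import sys
    from io import open

isort also pulls code that sits in the middle of an import block down below the imports, which is why the fairseq version check moves in the RoBERTa conversion script.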
parent bc1715c1
@@ -15,13 +15,14 @@
""" DistilBERT model configuration """
from __future__ import absolute_import, division, print_function, unicode_literals
-import sys
import json
import logging
+import sys
from io import open
from .configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
......
@@ -24,6 +24,7 @@ from io import open
from .configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
......
@@ -19,6 +19,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals
import logging
logger = logging.getLogger(__name__)
......
@@ -24,6 +24,7 @@ from io import open
from .configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
......
@@ -21,6 +21,7 @@ import logging
from .configuration_bert import BertConfig
logger = logging.getLogger(__name__)
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
......
@@ -19,11 +19,13 @@ from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import sys
-import six
from io import open
+import six
from .configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
......
@@ -24,6 +24,7 @@ from io import open
from .configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
......
@@ -23,7 +23,8 @@ import logging
import os
from io import open
-from .file_utils import CONFIG_NAME, cached_path, is_remote_url, hf_bucket_url
+from .file_utils import CONFIG_NAME, cached_path, hf_bucket_url, is_remote_url
logger = logging.getLogger(__name__)
......
@@ -22,6 +22,7 @@ from io import open
from .configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
......
@@ -21,6 +21,7 @@ import logging
from .configuration_roberta import RobertaConfig
logger = logging.getLogger(__name__)
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
......
@@ -23,6 +23,7 @@ from io import open
from .configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
......
@@ -14,16 +14,15 @@
# limitations under the License.
"""Convert ALBERT checkpoint."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
+from __future__ import absolute_import, division, print_function
import argparse
+import logging
import torch
from transformers import AlbertConfig, AlbertForMaskedLM, load_tf_weights_in_albert
-import logging
logging.basicConfig(level=logging.INFO)
......
@@ -14,16 +14,15 @@
# limitations under the License.
"""Convert BERT checkpoint."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
+from __future__ import absolute_import, division, print_function
import argparse
+import logging
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
-import logging
logging.basicConfig(level=logging.INFO)
......
@@ -15,11 +15,13 @@
"""Convert Huggingface Pytorch checkpoint to Tensorflow checkpoint."""
-import os
import argparse
-import torch
+import os
import numpy as np
import tensorflow as tf
+import torch
from transformers import BertModel
......
@@ -17,13 +17,13 @@
from __future__ import absolute_import, division, print_function
import argparse
+import logging
from io import open
import torch
from transformers import CONFIG_NAME, WEIGHTS_NAME, GPT2Config, GPT2Model, load_tf_weights_in_gpt2
-import logging
logging.basicConfig(level=logging.INFO)
......
@@ -17,13 +17,13 @@
from __future__ import absolute_import, division, print_function
import argparse
+import logging
from io import open
import torch
from transformers import CONFIG_NAME, WEIGHTS_NAME, OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
-import logging
logging.basicConfig(level=logging.INFO)
......
@@ -14,58 +14,59 @@
# limitations under the License.
""" Convert pytorch checkpoints to TensorFlow """
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
+from __future__ import absolute_import, division, print_function
-import os
import argparse
-import tensorflow as tf
import logging
+import os
-from transformers import is_torch_available, cached_path
+import tensorflow as tf
from transformers import (
-    load_pytorch_checkpoint_in_tf2_model,
-    BertConfig,
-    TFBertForPreTraining,
-    TFBertForQuestionAnswering,
-    TFBertForSequenceClassification,
+    ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
-    GPT2Config,
-    TFGPT2LMHeadModel,
+    CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
+    DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
-    XLNetConfig,
-    TFXLNetLMHeadModel,
-    XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
-    XLMConfig,
-    TFXLMWithLMHeadModel,
-    XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
-    TransfoXLConfig,
-    TFTransfoXLLMHeadModel,
-    TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
-    OpenAIGPTConfig,
-    TFOpenAIGPTLMHeadModel,
    OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
-    RobertaConfig,
-    TFRobertaForMaskedLM,
-    TFRobertaForSequenceClassification,
    ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
+    T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
+    TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
+    XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
+    XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
+    AlbertConfig,
+    BertConfig,
+    CTRLConfig,
    DistilBertConfig,
+    GPT2Config,
+    OpenAIGPTConfig,
+    RobertaConfig,
+    T5Config,
+    TFAlbertForMaskedLM,
+    TFBertForPreTraining,
+    TFBertForQuestionAnswering,
+    TFBertForSequenceClassification,
+    TFCTRLLMHeadModel,
    TFDistilBertForMaskedLM,
    TFDistilBertForQuestionAnswering,
    TFDistilBertForSequenceClassification,
-    DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
-    CTRLConfig,
-    TFCTRLLMHeadModel,
-    CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
-    AlbertConfig,
-    TFAlbertForMaskedLM,
-    ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
-    T5Config,
+    TFGPT2LMHeadModel,
+    TFOpenAIGPTLMHeadModel,
+    TFRobertaForMaskedLM,
+    TFRobertaForSequenceClassification,
    TFT5WithLMHeadModel,
-    T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
+    TFTransfoXLLMHeadModel,
+    TFXLMWithLMHeadModel,
+    TFXLNetLMHeadModel,
+    TransfoXLConfig,
+    XLMConfig,
+    XLNetConfig,
+    cached_path,
+    is_torch_available,
+    load_pytorch_checkpoint_in_tf2_model,
)
if is_torch_available():
    import torch
    import numpy as np
@@ -158,8 +159,6 @@ else:
)
-import logging
logging.basicConfig(level=logging.INFO)
MODEL_CLASSES = {
......
@@ -18,16 +18,13 @@ from __future__ import absolute_import, division, print_function
import argparse
import logging
-import numpy as np
-import torch
import pathlib
-import fairseq
+import numpy as np
+import torch
from packaging import version
-if version.parse(fairseq.__version__) < version.parse("0.9.0"):
-    raise Exception("requires fairseq >= 0.9.0")
+import fairseq
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from transformers.modeling_bert import (
@@ -47,6 +44,11 @@ from transformers.modeling_roberta import (
    RobertaModel,
)
+if version.parse(fairseq.__version__) < version.parse("0.9.0"):
+    raise Exception("requires fairseq >= 0.9.0")
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
......
@@ -14,16 +14,15 @@
# limitations under the License.
"""Convert T5 checkpoint."""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
+from __future__ import absolute_import, division, print_function
import argparse
+import logging
import torch
from transformers import T5Config, T5Model, load_tf_weights_in_t5
-import logging
logging.basicConfig(level=logging.INFO)
......
@@ -17,6 +17,7 @@
from __future__ import absolute_import, division, print_function
import argparse
+import logging
import os
import sys
from io import open
@@ -24,17 +25,21 @@ from io import open
import torch
import transformers.tokenization_transfo_xl as data_utils
-from transformers import CONFIG_NAME, WEIGHTS_NAME
-from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
+from transformers import (
+    CONFIG_NAME,
+    WEIGHTS_NAME,
+    TransfoXLConfig,
+    TransfoXLLMHeadModel,
+    load_tf_weights_in_transfo_xl,
+)
from transformers.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
if sys.version_info[0] == 2:
    import cPickle as pickle
else:
    import pickle
-import logging
logging.basicConfig(level=logging.INFO)
......