Commit 939148b0 authored by Aymeric Augustin

Fix F401 flake8 warning (x28).

Do manually what autoflake couldn't manage.
Parent: 783a6169
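For context, F401 is flake8's code for "module imported but unused" (surfaced with, e.g., flake8 --select=F401). Most such imports can be deleted automatically with autoflake --in-place --remove-all-unused-imports -r .; the 28 warnings fixed in this commit are the leftovers that needed hand editing. A minimal sketch of the warning and its fix, using a hypothetical file rather than code from this repository:

# lint_demo.py -- hypothetical example, not part of this commit.
import json  # flake8: "lint_demo.py:2:1: F401 'json' imported but unused"
import sys

print(sys.version)  # sys is referenced, so its import stays; json's goes.

# The fix is the same across the hunks below: drop the unused names and,
# where a multi-line "from transformers import (...)" block shrinks to a
# few names, reflow it onto a single line.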
@@ -31,18 +31,6 @@ from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
from run_glue import ALL_MODELS, MODEL_CLASSES, load_and_cache_examples, set_seed
from transformers import (
WEIGHTS_NAME,
BertConfig,
BertForSequenceClassification,
BertTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMTokenizer,
XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
)
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_output_modes as output_modes
from transformers import glue_processors as processors
......
@@ -30,7 +30,6 @@ if is_tf_available():
TFXxxForSequenceClassification,
TFXxxForTokenClassification,
TFXxxForQuestionAnswering,
TF_XXX_PRETRAINED_MODEL_ARCHIVE_MAP,
)
......
@@ -28,12 +28,9 @@ if is_torch_available():
XxxConfig,
XxxModel,
XxxForMaskedLM,
XxxForNextSentencePrediction,
XxxForPreTraining,
XxxForQuestionAnswering,
XxxForSequenceClassification,
XxxForTokenClassification,
XxxForMultipleChoice,
)
from transformers.modeling_xxx import XXX_PRETRAINED_MODEL_ARCHIVE_MAP
......
@@ -47,7 +47,6 @@ from transformers import (
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFGPT2LMHeadModel,
TFOpenAIGPTLMHeadModel,
TFRobertaForMaskedLM,
......
@@ -28,20 +28,13 @@ from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from transformers.modeling_bert import (
BertConfig,
BertEncoder,
BertIntermediate,
BertLayer,
BertModel,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.modeling_roberta import (
RobertaEmbeddings,
RobertaForMaskedLM,
RobertaForSequenceClassification,
RobertaModel,
)
from transformers.modeling_roberta import RobertaForMaskedLM, RobertaForSequenceClassification
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
......
@@ -50,7 +50,6 @@ from .modeling_bert import (
from .modeling_camembert import (
CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
CamembertForMaskedLM,
CamembertForMultipleChoice,
CamembertForSequenceClassification,
CamembertForTokenClassification,
CamembertModel,
@@ -85,7 +84,6 @@ from .modeling_xlm import (
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
......
@@ -25,15 +25,7 @@ import tensorflow as tf
from tensorflow.python.keras.saving import hdf5_format
from .configuration_utils import PretrainedConfig
from .file_utils import (
DUMMY_INPUTS,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
WEIGHTS_NAME,
cached_path,
hf_bucket_url,
is_remote_url,
)
from .file_utils import DUMMY_INPUTS, TF2_WEIGHTS_NAME, WEIGHTS_NAME, cached_path, hf_bucket_url, is_remote_url
from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model
......
@@ -25,14 +25,7 @@ import tensorflow as tf
from .configuration_xlm import XLMConfig
from .file_utils import add_start_docstrings
from .modeling_tf_utils import (
DUMMY_INPUTS,
TFPreTrainedModel,
TFSequenceSummary,
TFSharedEmbeddings,
get_initializer,
shape_list,
)
from .modeling_tf_utils import TFPreTrainedModel, TFSequenceSummary, TFSharedEmbeddings, get_initializer, shape_list
logger = logging.getLogger(__name__)
......
@@ -28,14 +28,7 @@ from torch.nn import functional as F
from .configuration_xlnet import XLNetConfig
from .file_utils import add_start_docstrings
from .modeling_utils import (
PoolerAnswerClass,
PoolerEndLogits,
PoolerStartLogits,
PreTrainedModel,
SequenceSummary,
prune_linear_layer,
)
from .modeling_utils import PoolerAnswerClass, PoolerEndLogits, PoolerStartLogits, PreTrainedModel, SequenceSummary
logger = logging.getLogger(__name__)
......
@@ -41,9 +41,6 @@ if is_torch_available():
BertModel,
BertConfig,
BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
GPT2LMHeadModel,
GPT2Config,
GPT2_PRETRAINED_MODEL_ARCHIVE_MAP,
)
if sys.version_info[0] == 2:
......
@@ -34,7 +34,6 @@ if is_tf_available():
TFBertForMultipleChoice,
TFBertForTokenClassification,
TFBertForQuestionAnswering,
TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
)
......