Commit 939148b0 authored by Aymeric Augustin

Fix F401 flake8 warning (x28).

Do manually what autoflake couldn't manage.
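
For reference, the workflow implied by the message above would look roughly like the following; these are standard flake8/autoflake invocations, not commands recorded in this commit:

    flake8 --select=F401 .                                           # report unused-import warnings
    autoflake --in-place --recursive --remove-all-unused-imports .   # remove the ones autoflake can fix

The 28 imports deleted in the hunks below are the ones autoflake left behind and had to be removed by hand.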
parent 783a6169
@@ -31,18 +31,6 @@ from torch.utils.data.distributed import DistributedSampler
 from tqdm import tqdm
 from run_glue import ALL_MODELS, MODEL_CLASSES, load_and_cache_examples, set_seed
-from transformers import (
-    WEIGHTS_NAME,
-    BertConfig,
-    BertForSequenceClassification,
-    BertTokenizer,
-    XLMConfig,
-    XLMForSequenceClassification,
-    XLMTokenizer,
-    XLNetConfig,
-    XLNetForSequenceClassification,
-    XLNetTokenizer,
-)
 from transformers import glue_compute_metrics as compute_metrics
 from transformers import glue_output_modes as output_modes
 from transformers import glue_processors as processors
...
@@ -30,7 +30,6 @@ if is_tf_available():
         TFXxxForSequenceClassification,
         TFXxxForTokenClassification,
         TFXxxForQuestionAnswering,
-        TF_XXX_PRETRAINED_MODEL_ARCHIVE_MAP,
     )
...
@@ -28,12 +28,9 @@ if is_torch_available():
         XxxConfig,
         XxxModel,
         XxxForMaskedLM,
-        XxxForNextSentencePrediction,
-        XxxForPreTraining,
         XxxForQuestionAnswering,
         XxxForSequenceClassification,
         XxxForTokenClassification,
-        XxxForMultipleChoice,
     )
     from transformers.modeling_xxx import XXX_PRETRAINED_MODEL_ARCHIVE_MAP
...
@@ -47,7 +47,6 @@ from transformers import (
     TFCTRLLMHeadModel,
     TFDistilBertForMaskedLM,
     TFDistilBertForQuestionAnswering,
-    TFDistilBertForSequenceClassification,
     TFGPT2LMHeadModel,
     TFOpenAIGPTLMHeadModel,
     TFRobertaForMaskedLM,
...
@@ -28,20 +28,13 @@ from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
 from fairseq.modules import TransformerSentenceEncoderLayer
 from transformers.modeling_bert import (
     BertConfig,
-    BertEncoder,
     BertIntermediate,
     BertLayer,
-    BertModel,
     BertOutput,
     BertSelfAttention,
     BertSelfOutput,
 )
-from transformers.modeling_roberta import (
-    RobertaEmbeddings,
-    RobertaForMaskedLM,
-    RobertaForSequenceClassification,
-    RobertaModel,
-)
+from transformers.modeling_roberta import RobertaForMaskedLM, RobertaForSequenceClassification
 if version.parse(fairseq.__version__) < version.parse("0.9.0"):
...
@@ -50,7 +50,6 @@ from .modeling_bert import (
 from .modeling_camembert import (
     CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
     CamembertForMaskedLM,
-    CamembertForMultipleChoice,
     CamembertForSequenceClassification,
     CamembertForTokenClassification,
     CamembertModel,
@@ -85,7 +84,6 @@ from .modeling_xlm import (
 from .modeling_xlm_roberta import (
     XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
     XLMRobertaForMaskedLM,
-    XLMRobertaForMultipleChoice,
     XLMRobertaForSequenceClassification,
     XLMRobertaForTokenClassification,
     XLMRobertaModel,
...
@@ -25,15 +25,7 @@ import tensorflow as tf
 from tensorflow.python.keras.saving import hdf5_format
 from .configuration_utils import PretrainedConfig
-from .file_utils import (
-    DUMMY_INPUTS,
-    TF2_WEIGHTS_NAME,
-    TF_WEIGHTS_NAME,
-    WEIGHTS_NAME,
-    cached_path,
-    hf_bucket_url,
-    is_remote_url,
-)
+from .file_utils import DUMMY_INPUTS, TF2_WEIGHTS_NAME, WEIGHTS_NAME, cached_path, hf_bucket_url, is_remote_url
 from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model
...
@@ -25,14 +25,7 @@ import tensorflow as tf
 from .configuration_xlm import XLMConfig
 from .file_utils import add_start_docstrings
-from .modeling_tf_utils import (
-    DUMMY_INPUTS,
-    TFPreTrainedModel,
-    TFSequenceSummary,
-    TFSharedEmbeddings,
-    get_initializer,
-    shape_list,
-)
+from .modeling_tf_utils import TFPreTrainedModel, TFSequenceSummary, TFSharedEmbeddings, get_initializer, shape_list
 logger = logging.getLogger(__name__)
...
@@ -28,14 +28,7 @@ from torch.nn import functional as F
 from .configuration_xlnet import XLNetConfig
 from .file_utils import add_start_docstrings
-from .modeling_utils import (
-    PoolerAnswerClass,
-    PoolerEndLogits,
-    PoolerStartLogits,
-    PreTrainedModel,
-    SequenceSummary,
-    prune_linear_layer,
-)
+from .modeling_utils import PoolerAnswerClass, PoolerEndLogits, PoolerStartLogits, PreTrainedModel, SequenceSummary
 logger = logging.getLogger(__name__)
...
@@ -41,9 +41,6 @@ if is_torch_available():
         BertModel,
         BertConfig,
         BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
-        GPT2LMHeadModel,
-        GPT2Config,
-        GPT2_PRETRAINED_MODEL_ARCHIVE_MAP,
     )
 if sys.version_info[0] == 2:
...
@@ -34,7 +34,6 @@ if is_tf_available():
         TFBertForMultipleChoice,
         TFBertForTokenClassification,
         TFBertForQuestionAnswering,
-        TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
     )
...