Commit 783a6169 authored by Aymeric Augustin

Fix F401 flake8 warning (unused imports; 88 of 116 occurrences).

This change was mostly autogenerated with:

    $ python -m autoflake --in-place --recursive --remove-all-unused-imports --ignore-init-module-imports examples templates transformers utils hubconf.py setup.py

I made minor changes to the generated diff.
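For reference, the remaining F401 occurrences can be listed with flake8's --select filter; a minimal sketch, assuming flake8 is installed and run over the same paths as the autoflake command above:

    $ python -m flake8 --select F401 examples templates transformers utils hubconf.py setup.py

The --ignore-init-module-imports flag is why manual review still matters: imports in __init__.py files are usually deliberate re-exports that define the package's public API (e.g. the from transformers import ... lines in the diffs below rely on them), so autoflake is told to leave those alone even though flake8 reports them as unused.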
parent 80327a13
@@ -19,14 +19,13 @@ import math
import os
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import AdamW
from torch.utils.data import BatchSampler, DataLoader, RandomSampler
from torch.utils.data.distributed import DistributedSampler
-from tqdm import tqdm, trange
+from tqdm import tqdm
import psutil
from grouped_batch_sampler import GroupedBatchSampler, create_lengths_groups
......
@@ -20,7 +20,7 @@ import argparse
import torch
-from transformers import BertForMaskedLM, GPT2LMHeadModel, RobertaForMaskedLM
+from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
......
@@ -20,7 +20,7 @@ import argparse
import torch
-from transformers import BertForMaskedLM, RobertaForMaskedLM
+from transformers import BertForMaskedLM
if __name__ == "__main__":
......
@@ -26,8 +26,7 @@ from datetime import datetime
import numpy as np
import torch
from torch.nn import CrossEntropyLoss, MSELoss
-from torch.utils.data import DataLoader, SequentialSampler, Subset, TensorDataset
+from torch.utils.data import DataLoader, SequentialSampler, Subset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
......
@@ -26,7 +26,7 @@ import timeit
import numpy as np
import torch
-from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
+from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
......
# coding=utf-8
import _pickle as pickle
import collections
import datetime
import glob
......
@@ -18,8 +18,6 @@ from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import six
from .configuration_utils import PretrainedConfig
......
@@ -21,10 +21,8 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import itertools
import logging
import numpy as np
import tensorflow as tf
from .configuration_xxx import XxxConfig
......
@@ -20,7 +20,6 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import itertools
import logging
import os
@@ -30,7 +29,7 @@ from torch.nn import CrossEntropyLoss, MSELoss
from .configuration_xxx import XxxConfig
from .file_utils import add_start_docstrings
-from .modeling_utils import PreTrainedModel, prune_linear_layer
+from .modeling_utils import PreTrainedModel
logger = logging.getLogger(__name__)
......
@@ -24,7 +24,6 @@ from .utils import CACHE_DIR, require_tf, slow
if is_tf_available():
import tensorflow as tf
from transformers.modeling_tf_xxx import (
TFXxxModel,
TFXxxForMaskedLM,
......
@@ -19,7 +19,6 @@ from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from .tokenization_utils import PreTrainedTokenizer
......
from argparse import ArgumentParser, Namespace
from logging import getLogger
from transformers import AutoModel, AutoTokenizer
from transformers.commands import BaseTransformersCLICommand
......
@@ -18,8 +18,6 @@ from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import six
from .configuration_utils import PretrainedConfig
......
@@ -20,8 +20,6 @@ import argparse
import logging
import os
import tensorflow as tf
from transformers import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
......
@@ -20,7 +20,6 @@ import argparse
import logging
import pathlib
import numpy as np
import torch
from packaging import version
......
@@ -16,9 +16,7 @@ import re
import string
from io import open
from tqdm import tqdm
-from transformers.tokenization_bert import BasicTokenizer, whitespace_tokenize
+from transformers.tokenization_bert import BasicTokenizer
logger = logging.getLogger(__name__)
......
@@ -8,8 +8,8 @@ import numpy as np
from tqdm import tqdm
from ...file_utils import is_tf_available, is_torch_available
-from ...tokenization_bert import BasicTokenizer, whitespace_tokenize
-from .utils import DataProcessor, InputExample, InputFeatures
+from ...tokenization_bert import whitespace_tokenize
+from .utils import DataProcessor
if is_torch_available():
......
@@ -21,7 +21,6 @@ from typing import List
import requests
import six
from requests.exceptions import HTTPError
from tqdm import tqdm
......
@@ -32,7 +32,6 @@ from .configuration_auto import (
XLMRobertaConfig,
XLNetConfig,
)
from .file_utils import add_start_docstrings
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
AlbertForMaskedLM,
@@ -76,7 +75,6 @@ from .modeling_roberta import (
)
from .modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_MAP, T5Model, T5WithLMHeadModel
from .modeling_transfo_xl import TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP, TransfoXLLMHeadModel, TransfoXLModel
from .modeling_utils import PreTrainedModel, SequenceSummary
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_MAP,
XLMForQuestionAnswering,
......
@@ -23,11 +23,10 @@ import numpy as np
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .configuration_ctrl import CTRLConfig
from .file_utils import add_start_docstrings
-from .modeling_utils import Conv1D, PreTrainedModel, SequenceSummary, prune_conv1d_layer
+from .modeling_utils import Conv1D, PreTrainedModel
logger = logging.getLogger(__name__)
......