Commit 783a6169 authored by Aymeric Augustin's avatar Aymeric Augustin
Browse files

Fix F401 flake8 warnings (88 of 116).

This change is mostly autogenerated with:

    $ python -m autoflake --in-place --recursive --remove-all-unused-imports --ignore-init-module-imports examples templates transformers utils hubconf.py setup.py

I made minor changes in the generated diff.
parent 80327a13
......@@ -19,7 +19,6 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import itertools
import logging
import math
......
......@@ -21,7 +21,6 @@ import os
import torch
from torch import nn
from tqdm import trange
from .modeling_auto import AutoModel, AutoModelWithLMHead
......
......@@ -24,7 +24,6 @@ import os
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .configuration_gpt2 import GPT2Config
from .file_utils import add_start_docstrings
......@@ -47,7 +46,6 @@ def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
......
......@@ -26,7 +26,6 @@ from io import open
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .configuration_openai import OpenAIGPTConfig
from .file_utils import add_start_docstrings
......
......@@ -25,7 +25,7 @@ import os
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from torch.nn import CrossEntropyLoss
from .configuration_t5 import T5Config
from .file_utils import DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings
......
......@@ -29,7 +29,6 @@ from .configuration_auto import (
XLMConfig,
XLNetConfig,
)
from .file_utils import add_start_docstrings
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
TFAlbertForMaskedLM,
......
......@@ -24,7 +24,7 @@ import tensorflow as tf
from .configuration_ctrl import CTRLConfig
from .file_utils import add_start_docstrings
from .modeling_tf_utils import TFPreTrainedModel, TFSharedEmbeddings, get_initializer, shape_list
from .modeling_tf_utils import TFPreTrainedModel, TFSharedEmbeddings, shape_list
logger = logging.getLogger(__name__)
......
......@@ -16,7 +16,6 @@
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import itertools
import logging
import math
......
......@@ -75,8 +75,8 @@ def load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path, tf_i
""" Load pytorch checkpoints in a TF 2.0 model
"""
try:
import tensorflow as tf
import torch
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError as e:
logger.error(
"Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
......@@ -109,8 +109,8 @@ def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, a
""" Load pytorch state_dict in a TF 2.0 model.
"""
try:
import torch
import tensorflow as tf
import torch # noqa: F401
import tensorflow as tf # noqa: F401
from tensorflow.python.keras import backend as K
except ImportError as e:
logger.error(
......@@ -208,8 +208,8 @@ def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs
(see https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357).
"""
try:
import tensorflow as tf
import torch
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError as e:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
......@@ -250,8 +250,8 @@ def load_tf2_weights_in_pytorch_model(pt_model, tf_weights, allow_missing_keys=F
""" Load TF2.0 symbolic weights in a PyTorch model
"""
try:
import tensorflow as tf
import torch
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError as e:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
......
......@@ -23,7 +23,7 @@ import tensorflow as tf
from .configuration_roberta import RobertaConfig
from .file_utils import add_start_docstrings
from .modeling_tf_bert import TFBertEmbeddings, TFBertMainLayer, gelu, gelu_new
from .modeling_tf_bert import TFBertEmbeddings, TFBertMainLayer, gelu
from .modeling_tf_utils import TFPreTrainedModel, get_initializer, shape_list
......
......@@ -20,13 +20,12 @@ from __future__ import absolute_import, division, print_function, unicode_litera
import logging
import numpy as np
import tensorflow as tf
from .configuration_transfo_xl import TransfoXLConfig
from .file_utils import add_start_docstrings
from .modeling_tf_transfo_xl_utilities import TFAdaptiveSoftmaxMask
from .modeling_tf_utils import TFConv1D, TFPreTrainedModel, TFSequenceSummary, get_initializer, shape_list
from .modeling_tf_utils import TFPreTrainedModel, get_initializer, shape_list
logger = logging.getLogger(__name__)
......
......@@ -17,7 +17,6 @@
"""
import numpy as np
import tensorflow as tf
from .modeling_tf_utils import shape_list
......
......@@ -25,13 +25,11 @@ import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .configuration_transfo_xl import TransfoXLConfig
from .file_utils import add_start_docstrings
from .modeling_transfo_xl_utilities import LogUniformSampler, ProjectedAdaptiveLogSoftmax, sample_logits
from .modeling_utils import Conv1D, PreTrainedModel, SequenceSummary, prune_conv1d_layer
from .modeling_utils import PreTrainedModel
logger = logging.getLogger(__name__)
......
......@@ -18,7 +18,6 @@
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
......
......@@ -20,7 +20,6 @@ from __future__ import absolute_import, division, print_function, unicode_litera
import logging
import os
import six
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
......
......@@ -22,7 +22,6 @@ import pickle
import sys
from abc import ABC, abstractmethod
from contextlib import contextmanager
from itertools import groupby
from os.path import abspath, exists
from typing import Dict, List, Optional, Tuple, Union
......
......@@ -37,9 +37,6 @@ if is_torch_available():
)
from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_MAP
from .modeling_common_test import CommonTestCases, ids_tensor
from .configuration_common_test import ConfigTester
@require_torch
class AutoModelTest(unittest.TestCase):
......
......@@ -20,7 +20,7 @@ from transformers import is_torch_available
from .configuration_common_test import ConfigTester
from .modeling_common_test import CommonTestCases, ids_tensor
from .utils import CACHE_DIR, require_torch, slow, torch_device
from .utils import require_torch, torch_device
if is_torch_available():
......
......@@ -19,8 +19,8 @@ import unittest
from transformers import is_torch_available
from .configuration_common_test import ConfigTester
from .modeling_common_test import CommonTestCases, floats_tensor, ids_tensor
from .utils import CACHE_DIR, require_torch, slow, torch_device
from .modeling_common_test import CommonTestCases, ids_tensor
from .utils import CACHE_DIR, require_torch, slow
if is_torch_available():
......
......@@ -24,7 +24,6 @@ from .utils import CACHE_DIR, require_tf, slow
if is_tf_available():
import tensorflow as tf
from transformers.modeling_tf_albert import (
TFAlbertModel,
TFAlbertForMaskedLM,
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment