"web/vscode:/vscode.git/clone" did not exist on "16a493a19042227baadd939fc095305716ae58db"
Commit 783a6169 authored by Aymeric Augustin

Fix F401 flake8 warning (x88 / 116).

This change is mostly autogenerated with:

    $ python -m autoflake --in-place --recursive --remove-all-unused-imports --ignore-init-module-imports examples templates transformers utils hubconf.py setup.py

I made minor changes in the generated diff.
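For context, here is a minimal sketch of what that command does (the module and all names below are hypothetical, not taken from this diff). flake8's F401 check flags imports that nothing in the file references; autoflake deletes those lines, and --ignore-init-module-imports skips __init__.py files, where seemingly unused imports are usually deliberate re-exports of the package's public API:

    # example_tokenization.py -- a hypothetical module for illustration.
    # flake8 flags the unicodedata import below as
    # "F401 'unicodedata' imported but unused", and
    # autoflake --remove-all-unused-imports simply deletes that line;
    # the two imports that are actually referenced are left alone.
    import logging
    import os
    import unicodedata

    logger = logging.getLogger(__name__)
    VOCAB_PATH = os.path.join("data", "vocab.txt")

Imports that look unused but are genuinely part of a public interface outside __init__.py still need hand review after an automated pass like this, which is presumably where the minor manual changes to the generated diff came in.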
parent 80327a13
@@ -35,10 +35,6 @@ if is_tf_available():
         TFAutoModelForQuestionAnswering,
         TFBertForQuestionAnswering,
     )
-    from transformers.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP
-from .modeling_common_test import CommonTestCases, ids_tensor
-from .configuration_common_test import ConfigTester

 @require_tf
...
@@ -24,13 +24,12 @@ import unittest
 from transformers import is_tf_available, is_torch_available
-from .utils import require_tf, slow
+from .utils import require_tf

 if is_tf_available():
     import tensorflow as tf
     import numpy as np
-    from transformers import TFPreTrainedModel
     # from transformers.modeling_bert import BertModel, BertConfig, BERT_PRETRAINED_MODEL_ARCHIVE_MAP
...
@@ -24,7 +24,6 @@ from .utils import CACHE_DIR, require_tf, slow
 if is_tf_available():
-    import tensorflow as tf
     from transformers.modeling_tf_ctrl import TFCTRLModel, TFCTRLLMHeadModel, TF_CTRL_PRETRAINED_MODEL_ARCHIVE_MAP
...
@@ -20,11 +20,10 @@ from transformers import DistilBertConfig, is_tf_available
 from .configuration_common_test import ConfigTester
 from .modeling_tf_common_test import TFCommonTestCases, ids_tensor
-from .utils import CACHE_DIR, require_tf, slow
+from .utils import require_tf

 if is_tf_available():
-    import tensorflow as tf
     from transformers.modeling_tf_distilbert import (
         TFDistilBertModel,
         TFDistilBertForMaskedLM,
...
@@ -24,8 +24,7 @@ from .utils import CACHE_DIR, require_tf, slow
 if is_tf_available():
-    import tensorflow as tf
-    from transformers.modeling_tf_t5 import TFT5Model, TFT5WithLMHeadModel, TF_T5_PRETRAINED_MODEL_ARCHIVE_MAP
+    from transformers.modeling_tf_t5 import TFT5Model, TFT5WithLMHeadModel

 @require_tf
...
@@ -17,7 +17,7 @@ from __future__ import absolute_import, division, print_function, unicode_literals
 import os
 import unittest

-from transformers.tokenization_albert import SPIECE_UNDERLINE, AlbertTokenizer
+from transformers.tokenization_albert import AlbertTokenizer

 from .tokenization_tests_commons import CommonTestCases
...
@@ -19,7 +19,6 @@ import unittest
 from transformers.tokenization_distilbert import DistilBertTokenizer

 from .tokenization_bert_test import BertTokenizationTest
-from .tokenization_tests_commons import CommonTestCases
 from .utils import slow
...
@@ -25,7 +25,6 @@ from .utils import require_torch
 if is_torch_available():
-    import torch
     from transformers.tokenization_transfo_xl import TransfoXLTokenizer, VOCAB_FILES_NAMES
...
@@ -24,7 +24,6 @@ import unicodedata
 import six

 from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer, load_vocab
-from .tokenization_utils import PreTrainedTokenizer

 logger = logging.getLogger(__name__)
...
@@ -17,7 +17,6 @@
 from __future__ import absolute_import, division, print_function, unicode_literals

 import logging
-import unicodedata

 from .tokenization_bert import BertTokenizer
...
@@ -17,8 +17,6 @@ from __future__ import absolute_import, division, print_function, unicode_literals
 import logging

-import regex as re
-
 from .tokenization_gpt2 import GPT2Tokenizer
...
@@ -25,7 +25,6 @@ from io import open
 import sacremoses as sm

-from .tokenization_bert import BasicTokenizer
 from .tokenization_utils import PreTrainedTokenizer
...