Unverified commit a5737779 authored by Sylvain Gugger, committed by GitHub

Update repo to isort v5 (#6686)

* Run new isort

* More changes

* Update CI, CONTRIBUTING and benchmarks
parent d329c9b0
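
Every hunk below is a mechanical re-sort produced by isort v5: within a from-import it orders uppercase constants before classes and functions, sorts names alphabetically, and collapses a parenthesized block back onto one line when it fits. As a rough sketch of that behavior (not part of this commit; it assumes isort>=5.0, which added the isort.code() Python API):

    import isort  # isort v5 ships a Python API alongside the CLI

    # The unsorted import block from the first hunk below.
    messy = (
        "from transformers import (\n"
        "    PyTorchBenchmarkArguments,\n"
        "    PyTorchBenchmark,\n"
        ")\n"
    )

    # isort.code() returns the source with imports sorted; at the default
    # line length the two names collapse onto a single line.
    print(isort.code(messy))
    # from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

Running the new isort over the whole repository produces exactly these kinds of reorderings; no behavior changes are involved, since only import order and spacing move.
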
@@ -8,10 +8,7 @@ from transformers.testing_utils import require_torch, torch_device
 if is_torch_available():
-    from transformers import (
-        PyTorchBenchmarkArguments,
-        PyTorchBenchmark,
-    )
+    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments


 @require_torch
@@ -9,6 +9,7 @@ from transformers.testing_utils import require_tf
 if is_tf_available():
     import tensorflow as tf
+
     from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@@ -20,7 +20,6 @@ import unittest
 import requests
 from requests.exceptions import HTTPError
-
 from transformers.hf_api import HfApi, HfFolder, ModelInfo, PresignedUrl, S3Obj
@@ -26,13 +26,13 @@ from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
 if is_torch_available():
     from transformers import (
         AlbertConfig,
-        AlbertModel,
-        AlbertForPreTraining,
         AlbertForMaskedLM,
         AlbertForMultipleChoice,
+        AlbertForPreTraining,
+        AlbertForQuestionAnswering,
         AlbertForSequenceClassification,
         AlbertForTokenClassification,
-        AlbertForQuestionAnswering,
+        AlbertModel,
     )
     from transformers.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
@@ -23,42 +23,42 @@ from transformers.testing_utils import DUMMY_UNKWOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER
 if is_torch_available():
     from transformers import (
         AutoConfig,
-        BertConfig,
-        GPT2Config,
-        T5Config,
         AutoModel,
-        BertModel,
-        AutoModelForPreTraining,
-        BertForPreTraining,
         AutoModelForCausalLM,
-        GPT2LMHeadModel,
-        AutoModelWithLMHead,
         AutoModelForMaskedLM,
-        BertForMaskedLM,
-        RobertaForMaskedLM,
+        AutoModelForPreTraining,
+        AutoModelForQuestionAnswering,
         AutoModelForSeq2SeqLM,
-        T5ForConditionalGeneration,
         AutoModelForSequenceClassification,
-        BertForSequenceClassification,
-        AutoModelForQuestionAnswering,
-        BertForQuestionAnswering,
         AutoModelForTokenClassification,
+        AutoModelWithLMHead,
+        BertConfig,
+        BertForMaskedLM,
+        BertForPreTraining,
+        BertForQuestionAnswering,
+        BertForSequenceClassification,
         BertForTokenClassification,
+        BertModel,
+        GPT2Config,
+        GPT2LMHeadModel,
+        RobertaForMaskedLM,
+        T5Config,
+        T5ForConditionalGeneration,
     )
-    from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST
-    from transformers.modeling_gpt2 import GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
-    from transformers.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_LIST
     from transformers.modeling_auto import (
-        MODEL_MAPPING,
+        MODEL_FOR_CAUSAL_LM_MAPPING,
+        MODEL_FOR_MASKED_LM_MAPPING,
         MODEL_FOR_PRETRAINING_MAPPING,
         MODEL_FOR_QUESTION_ANSWERING_MAPPING,
+        MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
         MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
         MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
+        MODEL_MAPPING,
         MODEL_WITH_LM_HEAD_MAPPING,
-        MODEL_FOR_CAUSAL_LM_MAPPING,
-        MODEL_FOR_MASKED_LM_MAPPING,
-        MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
     )
+    from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST
+    from transformers.modeling_gpt2 import GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
+    from transformers.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_LIST


 @require_torch
@@ -28,24 +28,25 @@ from .test_modeling_common import ModelTesterMixin, ids_tensor
 if is_torch_available():
     import torch
+
     from transformers import (
         AutoModel,
         AutoModelForSequenceClassification,
         AutoTokenizer,
-        BartModel,
+        BartConfig,
         BartForConditionalGeneration,
-        BartForSequenceClassification,
         BartForQuestionAnswering,
-        BartConfig,
+        BartForSequenceClassification,
+        BartModel,
         BartTokenizer,
         BartTokenizerFast,
         pipeline,
     )
     from transformers.modeling_bart import (
-        shift_tokens_right,
-        invert_mask,
-        _prepare_bart_decoder_inputs,
         SinusoidalPositionalEmbedding,
+        _prepare_bart_decoder_inputs,
+        invert_mask,
+        shift_tokens_right,
     )


 PGE_ARTICLE = """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."""
@@ -21,6 +21,7 @@ from transformers.testing_utils import require_torch, slow, torch_device
 if is_torch_available():
     import torch
+
     from transformers import CamembertModel
@@ -29,19 +29,19 @@ if is_torch_available():
     import torch
     from transformers import (
-        AdaptiveEmbedding,
-        PretrainedConfig,
-        PreTrainedModel,
-        BertConfig,
-        BertModel,
         BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
-        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
-        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
         MODEL_FOR_CAUSAL_LM_MAPPING,
         MODEL_FOR_MASKED_LM_MAPPING,
+        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
+        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
         MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
         MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
         MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
+        AdaptiveEmbedding,
+        BertConfig,
+        BertModel,
+        PretrainedConfig,
+        PreTrainedModel,
         top_k_top_p_filtering,
     )
@@ -24,7 +24,8 @@ from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
 if is_torch_available():
     import torch
-    from transformers import CTRLConfig, CTRLModel, CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLLMHeadModel
+
+    from transformers import CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLConfig, CTRLLMHeadModel, CTRLModel


 class CTRLModelTester:
@@ -25,14 +25,14 @@ from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
 if is_torch_available():
     from transformers import (
+        DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
         DistilBertConfig,
-        DistilBertModel,
         DistilBertForMaskedLM,
         DistilBertForMultipleChoice,
-        DistilBertForTokenClassification,
         DistilBertForQuestionAnswering,
         DistilBertForSequenceClassification,
-        DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
+        DistilBertForTokenClassification,
+        DistilBertModel,
     )


 class DistilBertModelTester(object):
@@ -26,13 +26,13 @@ from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
 if is_torch_available():
     from transformers import (
         ElectraConfig,
-        ElectraModel,
         ElectraForMaskedLM,
-        ElectraForTokenClassification,
-        ElectraForPreTraining,
         ElectraForMultipleChoice,
-        ElectraForSequenceClassification,
+        ElectraForPreTraining,
         ElectraForQuestionAnswering,
+        ElectraForSequenceClassification,
+        ElectraForTokenClassification,
+        ElectraModel,
     )
     from transformers.modeling_electra import ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST
@@ -27,17 +27,18 @@ from .test_modeling_roberta import RobertaModelTester
 if is_torch_available():
+    import numpy as np
+    import torch
+
     from transformers import (
-        BertModel,
         BertLMHeadModel,
+        BertModel,
+        EncoderDecoderConfig,
+        EncoderDecoderModel,
         GPT2LMHeadModel,
-        RobertaModel,
         RobertaForCausalLM,
-        EncoderDecoderModel,
-        EncoderDecoderConfig,
+        RobertaModel,
     )
-    import numpy as np
-    import torch


 @require_torch
@@ -26,13 +26,13 @@ from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
 if is_torch_available():
     from transformers import (
         FlaubertConfig,
-        FlaubertModel,
-        FlaubertWithLMHeadModel,
+        FlaubertForMultipleChoice,
         FlaubertForQuestionAnswering,
         FlaubertForQuestionAnsweringSimple,
         FlaubertForSequenceClassification,
         FlaubertForTokenClassification,
-        FlaubertForMultipleChoice,
+        FlaubertModel,
+        FlaubertWithLMHeadModel,
     )
     from transformers.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
@@ -25,12 +25,13 @@ from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
 if is_torch_available():
     import torch
+
     from transformers import (
-        GPT2Config,
-        GPT2Model,
         GPT2_PRETRAINED_MODEL_ARCHIVE_LIST,
-        GPT2LMHeadModel,
+        GPT2Config,
         GPT2DoubleHeadsModel,
+        GPT2LMHeadModel,
+        GPT2Model,
     )
@@ -25,14 +25,15 @@ from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
 if is_torch_available():
     import torch
+
     from transformers import (
         LongformerConfig,
-        LongformerModel,
         LongformerForMaskedLM,
+        LongformerForMultipleChoice,
+        LongformerForQuestionAnswering,
         LongformerForSequenceClassification,
         LongformerForTokenClassification,
-        LongformerForQuestionAnswering,
-        LongformerForMultipleChoice,
+        LongformerModel,
         LongformerSelfAttention,
     )
@@ -24,18 +24,19 @@ from transformers.testing_utils import require_torch, slow, torch_device
 if is_torch_available():
     import torch
+
     from transformers import (
-        AutoTokenizer,
-        MarianConfig,
         AutoConfig,
         AutoModelWithLMHead,
-        MarianTokenizer,
+        AutoTokenizer,
+        MarianConfig,
         MarianMTModel,
+        MarianTokenizer,
     )
     from transformers.convert_marian_to_pytorch import (
+        ORG_NAME,
         convert_hf_name_to_opus_name,
         convert_opus_name_to_hf_name,
-        ORG_NAME,
     )
     from transformers.pipelines import TranslationPipeline
@@ -9,12 +9,13 @@ from .test_modeling_bart import TOLERANCE, _assert_tensors_equal, _long_tensor
 if is_torch_available():
     import torch
+
     from transformers import (
         AutoModelForSeq2SeqLM,
+        AutoTokenizer,
+        BatchEncoding,
         MBartConfig,
         MBartForConditionalGeneration,
-        BatchEncoding,
-        AutoTokenizer,
     )
@@ -25,16 +25,17 @@ from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
 if is_torch_available():
     import torch
+
     from transformers import (
         MobileBertConfig,
-        MobileBertModel,
         MobileBertForMaskedLM,
+        MobileBertForMultipleChoice,
         MobileBertForNextSentencePrediction,
         MobileBertForPreTraining,
         MobileBertForQuestionAnswering,
         MobileBertForSequenceClassification,
         MobileBertForTokenClassification,
-        MobileBertForMultipleChoice,
+        MobileBertModel,
     )
@@ -25,12 +25,13 @@ from .test_modeling_common import ModelTesterMixin, ids_tensor
 if is_torch_available():
     import torch
+
     from transformers import (
-        OpenAIGPTConfig,
-        OpenAIGPTModel,
         OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
-        OpenAIGPTLMHeadModel,
+        OpenAIGPTConfig,
         OpenAIGPTDoubleHeadsModel,
+        OpenAIGPTLMHeadModel,
+        OpenAIGPTModel,
     )
@@ -23,18 +23,19 @@ from .test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
 if is_torch_available():
+    import torch
+
     from transformers import (
+        REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
         ReformerConfig,
         ReformerForMaskedLM,
+        ReformerForQuestionAnswering,
+        ReformerForSequenceClassification,
+        ReformerLayer,
         ReformerModel,
         ReformerModelWithLMHead,
-        ReformerForSequenceClassification,
         ReformerTokenizer,
-        ReformerLayer,
-        ReformerForQuestionAnswering,
-        REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
     )
-    import torch


 class ReformerModelTester: