Unverified Commit a5737779 authored by Sylvain Gugger, committed by GitHub

Update repo to isort v5 (#6686)

* Run new isort

* More changes

* Update CI, CONTRIBUTING and benchmarks
parent d329c9b0
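
For illustration, a minimal sketch of the import ordering the new isort produces, built from packages that appear in the hunks below. The exact grouping depends on the isort settings in this repo's setup.cfg (assumed here), so treat this as a sketch rather than the definitive rule:

    # Before: straight imports and from-imports interleaved
    import numpy as np
    from tqdm.auto import tqdm
    import requests
    from filelock import FileLock

    # After isort v5: within the third-party block, straight imports come first,
    # then from-imports, each alphabetized (elsewhere in the diff the names
    # inside a from-import are alphabetized as well)
    import numpy as np
    import requests
    from filelock import FileLock
    from tqdm.auto import tqdm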
@@ -6,9 +6,10 @@ from enum import Enum
 from typing import List, Optional, Union
 import torch
+from filelock import FileLock
 from torch.utils.data.dataset import Dataset
-from filelock import FileLock
 from ...tokenization_bart import BartTokenizer, BartTokenizerFast
 from ...tokenization_roberta import RobertaTokenizer, RobertaTokenizerFast
 from ...tokenization_utils import PreTrainedTokenizer
...
@@ -4,9 +4,10 @@ import pickle
 import time
 import torch
+from filelock import FileLock
 from torch.utils.data.dataset import Dataset
-from filelock import FileLock
 from ...tokenization_utils import PreTrainedTokenizer
...
@@ -6,9 +6,10 @@ from enum import Enum
 from typing import Dict, List, Optional, Union
 import torch
+from filelock import FileLock
 from torch.utils.data.dataset import Dataset
-from filelock import FileLock
 from ...modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
 from ...tokenization_utils import PreTrainedTokenizer
 from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
...
@@ -15,8 +15,9 @@
 # limitations under the License.
 try:
+    from sklearn.metrics import f1_score, matthews_corrcoef
+
     from scipy.stats import pearsonr, spearmanr
-    from sklearn.metrics import matthews_corrcoef, f1_score
     _has_sklearn = True
 except (AttributeError, ImportError):
...
@@ -11,10 +11,7 @@ from transformers.testing_utils import require_torch
 if is_torch_available():
     import torch
-    from transformers import (
-        MarianConfig,
-        MarianMTModel,
-    )
+    from transformers import MarianConfig, MarianMTModel
 @require_torch
...
@@ -24,9 +24,10 @@ from urllib.parse import urlparse
 from zipfile import ZipFile, is_zipfile
 import numpy as np
-from tqdm.auto import tqdm
 import requests
 from filelock import FileLock
+from tqdm.auto import tqdm
 from . import __version__
...
@@ -19,9 +19,10 @@ import os
 from os.path import expanduser
 from typing import Dict, List, Optional, Tuple
+import requests
 from tqdm import tqdm
-import requests
 ENDPOINT = "https://huggingface.co"
...
@@ -50,6 +50,7 @@ def load_tf_weights_in_electra(model, config, tf_checkpoint_path, discriminator_
     """
     try:
         import re
+
         import numpy as np
         import tensorflow as tf
     except ImportError:
...
@@ -65,6 +65,7 @@ def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
     """
     try:
         import re
+
         import tensorflow as tf
     except ImportError:
         logger.error(
...
@@ -68,6 +68,7 @@ def load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path):
     """
     try:
         import re
+
         import numpy as np
         import tensorflow as tf
     except ImportError:
...
@@ -62,6 +62,7 @@ def load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path):
     """ Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
     """
     import re
+
     import numpy as np
     if ".ckpt" in openai_checkpoint_folder_path:
...
@@ -66,6 +66,7 @@ def load_tf_weights_in_t5(model, config, tf_checkpoint_path):
     """
     try:
         import re
+
         import numpy as np
         import tensorflow as tf
     except ImportError:
...
@@ -108,8 +108,8 @@ def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, a
     """ Load pytorch state_dict in a TF 2.0 model.
     """
     try:
-        import torch  # noqa: F401
         import tensorflow as tf  # noqa: F401
+        import torch  # noqa: F401
         from tensorflow.python.keras import backend as K
     except ImportError:
         logger.error(
...
@@ -43,39 +43,41 @@ from .tokenization_utils_base import BatchEncoding, PaddingStrategy
 if is_tf_available():
     import tensorflow as tf
     from .modeling_tf_auto import (
+        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
+        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
+        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
+        TF_MODEL_WITH_LM_HEAD_MAPPING,
         TFAutoModel,
-        TFAutoModelForSequenceClassification,
+        TFAutoModelForCausalLM,
         TFAutoModelForQuestionAnswering,
+        TFAutoModelForSequenceClassification,
         TFAutoModelForTokenClassification,
         TFAutoModelWithLMHead,
-        TF_MODEL_WITH_LM_HEAD_MAPPING,
-        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
-        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
-        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
-        TFAutoModelForCausalLM,
     )
 if is_torch_available():
     import torch
     from .modeling_auto import (
+        MODEL_FOR_MASKED_LM_MAPPING,
+        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
+        MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
+        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
+        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
         AutoModel,
-        AutoModelForSequenceClassification,
-        AutoModelForQuestionAnswering,
-        AutoModelForTokenClassification,
-        AutoModelForSeq2SeqLM,
         AutoModelForCausalLM,
         AutoModelForMaskedLM,
-        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
-        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
-        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
-        MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
-        MODEL_FOR_MASKED_LM_MAPPING,
+        AutoModelForQuestionAnswering,
+        AutoModelForSeq2SeqLM,
+        AutoModelForSequenceClassification,
+        AutoModelForTokenClassification,
     )
 if TYPE_CHECKING:
-    from .modeling_utils import PreTrainedModel
     from .modeling_tf_utils import TFPreTrainedModel
+    from .modeling_utils import PreTrainedModel
 logger = logging.getLogger(__name__)
...
@@ -27,6 +27,7 @@ from collections import Counter, OrderedDict
 from typing import Optional
 import numpy as np
+
 from tokenizers import Tokenizer
 from tokenizers.implementations import BaseTokenizer
 from tokenizers.models import WordLevel
...
@@ -28,6 +28,7 @@ from enum import Enum
 from typing import Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union
 import numpy as np
+
 from tokenizers import AddedToken
 from tokenizers import Encoding as EncodingFast
...
@@ -63,6 +63,7 @@ def load_tf_weights_in_xxx(model, config, tf_checkpoint_path):
     """
     try:
         import re
+
         import numpy as np
         import tensorflow as tf
     except ImportError:
...
@@ -25,13 +25,14 @@ from .utils import CACHE_DIR, require_tf, slow
 if is_tf_available():
     import tensorflow as tf
     from transformers.modeling_tf_xxx import (
-        TFXxxModel,
         TFXxxForMaskedLM,
         TFXxxForMultipleChoice,
+        TFXxxForQuestionAnswering,
         TFXxxForSequenceClassification,
         TFXxxForTokenClassification,
-        TFXxxForQuestionAnswering,
+        TFXxxModel,
     )
...
@@ -25,14 +25,14 @@ from .utils import require_torch, require_torch_and_cuda, slow, torch_device
 if is_torch_available():
     from transformers import (
+        AutoModelForMaskedLM,
+        AutoTokenizer,
         XxxConfig,
-        XxxModel,
         XxxForMaskedLM,
         XxxForQuestionAnswering,
         XxxForSequenceClassification,
         XxxForTokenClassification,
-        AutoModelForMaskedLM,
-        AutoTokenizer,
+        XxxModel,
     )
     from transformers.file_utils import cached_property
...
@@ -5,9 +5,10 @@ from transformers.testing_utils import require_torch
 if is_torch_available():
-    from transformers.activations import _gelu_python, get_activation, gelu_new
     import torch
+
+    from transformers.activations import _gelu_python, gelu_new, get_activation
 @require_torch
 class TestActivations(unittest.TestCase):
...