Unverified Commit a5737779 authored by Sylvain Gugger's avatar Sylvain Gugger Committed by GitHub
Browse files

Update repo to isort v5 (#6686)

* Run new isort

* More changes

* Update CI, CONTRIBUTING and benchmarks
parent d329c9b0
......@@ -235,8 +235,7 @@ jobs:
- v0.3-code_quality-{{ checksum "setup.py" }}
- v0.3-{{ checksum "setup.py" }}
- run: pip install --upgrade pip
# we need a version of isort with https://github.com/timothycrosley/isort/pull/1000
- run: pip install git+git://github.com/timothycrosley/isort.git@e63ae06ec7d70b06df9e528357650281a3d3ec22#egg=isort
- run: pip install isort
- run: pip install .[tf,torch,quality]
- save_cache:
key: v0.3-code_quality-{{ checksum "setup.py" }}
......
......@@ -134,12 +134,6 @@ Follow these steps to start contributing:
it with `pip uninstall transformers` before reinstalling it in editable
mode with the `-e` flag.)
Right now, we need an unreleased version of `isort` to avoid a
[bug](https://github.com/timothycrosley/isort/pull/1000):
```bash
$ pip install -U git+git://github.com/timothycrosley/isort.git@e63ae06ec7d70b06df9e528357650281a3d3ec22#egg=isort
```
5. Develop the features on your branch.
As you work on the features, you should make sure that the test suite
......
......@@ -4,7 +4,7 @@
quality:
black --check --line-length 119 --target-version py35 examples templates tests src utils
isort --check-only --recursive examples templates tests src utils
isort --check-only examples templates tests src utils
flake8 examples templates tests src utils
python utils/check_repo.py
......@@ -12,7 +12,7 @@ quality:
style:
black --line-length 119 --target-version py35 examples templates tests src utils
isort --recursive examples templates tests src utils
isort examples templates tests src utils
# Run tests for the library
......
......@@ -20,8 +20,8 @@ from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
......
......@@ -26,8 +26,8 @@ from enum import Enum
from typing import List, Optional
import tqdm
from filelock import FileLock
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
......
......@@ -44,9 +44,10 @@ def evaluate(args):
reference_summaries = []
generated_summaries = []
import rouge
import nltk
import rouge
nltk.download("punkt")
rouge_evaluator = rouge.Rouge(
metrics=["rouge-n", "rouge-l"],
......
......@@ -15,27 +15,27 @@ from transformers import BartConfig, BartForConditionalGeneration, MBartTokenize
try:
from .finetune import SummarizationModule, TranslationModule
from .initialization_utils import init_student, copy_layers
from .finetune import main as ft_main
from .initialization_utils import copy_layers, init_student
from .utils import (
use_task_specific_params,
pickle_load,
freeze_params,
assert_all_frozen,
any_requires_grad,
assert_all_frozen,
calculate_bleu_score,
freeze_params,
pickle_load,
use_task_specific_params,
)
from .finetune import main as ft_main
except ImportError:
from finetune import SummarizationModule, TranslationModule
from finetune import main as ft_main
from initialization_utils import init_student, copy_layers
from initialization_utils import copy_layers, init_student
from utils import (
use_task_specific_params,
pickle_load,
freeze_params,
assert_all_frozen,
any_requires_grad,
assert_all_frozen,
calculate_bleu_score,
freeze_params,
pickle_load,
use_task_specific_params,
)
......
......@@ -17,44 +17,43 @@ from transformers import MarianTokenizer, MBartTokenizer, T5ForConditionalGenera
try:
from .callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from .utils import (
ROUGE_KEYS,
Seq2SeqDataset,
TranslationDataset,
assert_all_frozen,
use_task_specific_params,
lmap,
calculate_bleu_score,
calculate_rouge,
flatten_list,
pickle_save,
save_git_info,
save_json,
freeze_params,
calculate_rouge,
get_git_info,
ROUGE_KEYS,
calculate_bleu_score,
Seq2SeqDataset,
TranslationDataset,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
from .callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
except ImportError:
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from utils import (
ROUGE_KEYS,
Seq2SeqDataset,
TranslationDataset,
assert_all_frozen,
use_task_specific_params,
lmap,
calculate_bleu_score,
calculate_rouge,
flatten_list,
pickle_save,
save_git_info,
save_json,
freeze_params,
calculate_rouge,
get_git_info,
ROUGE_KEYS,
calculate_bleu_score,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
logger = logging.getLogger(__name__)
......
......@@ -9,9 +9,9 @@ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
try:
from .utils import calculate_rouge, use_task_specific_params, calculate_bleu_score, trim_batch
from .utils import calculate_bleu_score, calculate_rouge, trim_batch, use_task_specific_params
except ImportError:
from utils import calculate_rouge, use_task_specific_params, calculate_bleu_score, trim_batch
from utils import calculate_bleu_score, calculate_rouge, trim_batch, use_task_specific_params
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
......
......@@ -35,8 +35,8 @@ sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_generation
import run_glue
import run_pl_glue
import run_language_modeling
import run_pl_glue
import run_squad
......
......@@ -23,7 +23,6 @@ from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
......
[isort]
default_section = FIRSTPARTY
ensure_newline_before_comments = True
force_grid_wrap = 0
include_trailing_comma = True
......
......@@ -91,12 +91,7 @@ extras["all"] = extras["serving"] + ["tensorflow", "torch"]
extras["testing"] = ["pytest", "pytest-xdist", "timeout-decorator", "psutil"]
# sphinx-rtd-theme==0.5.0 introduced big changes in the style.
extras["docs"] = ["recommonmark", "sphinx", "sphinx-markdown-tables", "sphinx-rtd-theme==0.4.3", "sphinx-copybutton"]
extras["quality"] = [
"black",
# "isort",
"isort @ git+git://github.com/timothycrosley/isort.git@e63ae06ec7d70b06df9e528357650281a3d3ec22#egg=isort",
"flake8",
]
extras["quality"] = ["black", "isort >= 5", "flake8"]
extras["dev"] = extras["testing"] + extras["quality"] + extras["ja"] + ["scikit-learn", "tensorflow", "torch"]
setup(
......
This diff is collapsed.
......@@ -22,14 +22,9 @@ import logging
import timeit
from typing import Callable, Optional
from transformers import (
MODEL_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
PretrainedConfig,
is_py3nvml_available,
is_torch_available,
)
from ..configuration_utils import PretrainedConfig
from ..file_utils import is_py3nvml_available, is_torch_available
from ..modeling_auto import MODEL_MAPPING, MODEL_WITH_LM_HEAD_MAPPING
from .benchmark_utils import (
Benchmark,
Memory,
......@@ -42,6 +37,7 @@ from .benchmark_utils import (
if is_torch_available():
import torch
from .benchmark_args import PyTorchBenchmarkArguments
......
......@@ -24,14 +24,9 @@ import timeit
from functools import wraps
from typing import Callable, Optional
from transformers import (
TF_MODEL_MAPPING,
TF_MODEL_WITH_LM_HEAD_MAPPING,
PretrainedConfig,
is_py3nvml_available,
is_tf_available,
)
from ..configuration_utils import PretrainedConfig
from ..file_utils import is_py3nvml_available, is_tf_available
from ..modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from .benchmark_utils import (
Benchmark,
Memory,
......@@ -44,9 +39,10 @@ from .benchmark_utils import (
if is_tf_available():
import tensorflow as tf
from .benchmark_args_tf import TensorFlowBenchmarkArguments
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
import py3nvml.py3nvml as nvml
......
......@@ -8,11 +8,11 @@ from transformers.pipelines import SUPPORTED_TASKS, pipeline
try:
from uvicorn import run
from fastapi import FastAPI, HTTPException, Body
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
_serve_dependencies_installed = True
except (ImportError, AttributeError):
......
......@@ -5,7 +5,6 @@ from getpass import getpass
from typing import List, Union
from requests.exceptions import HTTPError
from transformers.commands import BaseTransformersCLICommand
from transformers.hf_api import HfApi, HfFolder
......
......@@ -273,7 +273,9 @@ def convert_tensorflow(nlp: Pipeline, opset: int, output: Path):
try:
import tensorflow as tf
from keras2onnx import convert_keras, save_model, __version__ as k2ov
from keras2onnx import __version__ as k2ov
from keras2onnx import convert_keras, save_model
print(f"Using framework TensorFlow: {tf.version.VERSION}, keras2onnx: {k2ov}")
......@@ -340,7 +342,7 @@ def optimize(onnx_model_path: Path) -> Path:
Returns: Path where the optimized model binary description has been saved
"""
from onnxruntime import SessionOptions, InferenceSession
from onnxruntime import InferenceSession, SessionOptions
# Generate model name with suffix "optimized"
opt_model_path = generate_identified_filename(onnx_model_path, "-optimized")
......@@ -364,7 +366,7 @@ def quantize(onnx_model_path: Path) -> Path:
"""
try:
import onnx
from onnxruntime.quantization import quantize, QuantizationMode
from onnxruntime.quantization import QuantizationMode, quantize
onnx_model = onnx.load(onnx_model_path.as_posix())
......
......@@ -78,28 +78,29 @@ from transformers.file_utils import hf_bucket_url
if is_torch_available():
import torch
import numpy as np
import torch
from transformers import (
AlbertForPreTraining,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPT2LMHeadModel,
XLNetLMHeadModel,
XLMWithLMHeadModel,
XLMRobertaForMaskedLM,
TransfoXLLMHeadModel,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
CamembertForMaskedLM,
FlaubertWithLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
CTRLLMHeadModel,
AlbertForPreTraining,
T5ForConditionalGeneration,
ElectraForPreTraining,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment