Unverified Commit b81d189d authored by Nicolas Hug's avatar Nicolas Hug Committed by GitHub
Browse files

Fix all outstanding flake8 issues (#4535)

parent 5f0edb97
...@@ -32,7 +32,6 @@ A diff output is produced and a sensible exit code is returned. ...@@ -32,7 +32,6 @@ A diff output is produced and a sensible exit code is returned.
""" """
import argparse import argparse
import codecs
import difflib import difflib
import fnmatch import fnmatch
import io import io
......
...@@ -19,7 +19,6 @@ from base64 import urlsafe_b64encode ...@@ -19,7 +19,6 @@ from base64 import urlsafe_b64encode
# Third party imports # Third party imports
if sys.platform == "linux": if sys.platform == "linux":
from auditwheel.lddtree import lddtree from auditwheel.lddtree import lddtree
from wheel.bdist_wheel import get_abi_tag
ALLOWLIST = { ALLOWLIST = {
......
...@@ -167,7 +167,9 @@ class ExponentialMovingAverage(torch.optim.swa_utils.AveragedModel): ...@@ -167,7 +167,9 @@ class ExponentialMovingAverage(torch.optim.swa_utils.AveragedModel):
""" """
def __init__(self, model, decay, device="cpu"):
    """Track an exponential moving average (EMA) of *model*'s parameters.

    Args:
        model: the model whose parameters are averaged.
        decay: EMA decay factor; each update keeps ``decay`` of the running
            average and blends in ``1 - decay`` of the current parameter.
        device: device on which the averaged copy of the parameters is kept.
    """
    # A named function instead of an assigned lambda (flake8 E731):
    # new_avg = decay * old_avg + (1 - decay) * current_param.
    # num_averaged is required by the AveragedModel avg_fn protocol but
    # unused here because the decay is constant.
    def ema_avg(avg_model_param, model_param, num_averaged):
        return decay * avg_model_param + (1 - decay) * model_param

    super().__init__(model, device, ema_avg)
def update_parameters(self, model): def update_parameters(self, model):
......
...@@ -5,7 +5,6 @@ import torch ...@@ -5,7 +5,6 @@ import torch
import torch.utils.data import torch.utils.data
import torchvision import torchvision
import transforms as T import transforms as T
from PIL import Image
from pycocotools import mask as coco_mask from pycocotools import mask as coco_mask
from pycocotools.coco import COCO from pycocotools.coco import COCO
......
...@@ -8,8 +8,9 @@ license_file = LICENSE ...@@ -8,8 +8,9 @@ license_file = LICENSE
max-line-length = 120 max-line-length = 120
[flake8] [flake8]
# note: we ignore all 501s (line too long) anyway as they're taken care of by black
max-line-length = 120 max-line-length = 120
ignore = E203, E402, W503, W504, F821 ignore = E203, E402, W503, W504, F821, E501
per-file-ignores = per-file-ignores =
__init__.py: F401, F403, F405 __init__.py: F401, F403, F405
./hubconf.py: F401 ./hubconf.py: F401
......
...@@ -3,11 +3,9 @@ import distutils.spawn ...@@ -3,11 +3,9 @@ import distutils.spawn
import glob import glob
import io import io
import os import os
import re
import shutil import shutil
import subprocess import subprocess
import sys import sys
from distutils.version import StrictVersion
import torch import torch
from pkg_resources import parse_version, get_distribution, DistributionNotFound from pkg_resources import parse_version, get_distribution, DistributionNotFound
......
import argparse
import contextlib import contextlib
import functools import functools
import inspect
import os import os
import random import random
import shutil import shutil
import sys
import tempfile import tempfile
import unittest
from collections import OrderedDict
from numbers import Number
import numpy as np import numpy as np
import pytest
import torch import torch
from PIL import Image from PIL import Image
from torch._six import string_classes
from torchvision import io from torchvision import io
import __main__ import __main__ # noqa: 401
IN_CIRCLE_CI = os.getenv("CIRCLECI", False) == "true" IN_CIRCLE_CI = os.getenv("CIRCLECI", False) == "true"
......
import random import random
from functools import partial
from itertools import chain from itertools import chain
import pytest import pytest
import torch import torch
import torchvision
from common_utils import set_rng_seed from common_utils import set_rng_seed
from torchvision import models from torchvision import models
from torchvision.models._utils import IntermediateLayerGetter from torchvision.models._utils import IntermediateLayerGetter
......
...@@ -5,7 +5,7 @@ import unittest ...@@ -5,7 +5,7 @@ import unittest
import torch import torch
import torchvision.transforms.functional as F import torchvision.transforms.functional as F
from PIL import Image from PIL import Image
from torchvision import models, transforms from torchvision import models
try: try:
from torchvision import _C_tests from torchvision import _C_tests
......
import contextlib
import os
import sys
import pytest import pytest
import torch import torch
from common_utils import get_list_of_videos, assert_equal from common_utils import get_list_of_videos, assert_equal
from torchvision import get_video_backend
from torchvision import io from torchvision import io
from torchvision.datasets.samplers import ( from torchvision.datasets.samplers import (
DistributedSampler, DistributedSampler,
RandomClipSampler, RandomClipSampler,
UniformClipSampler, UniformClipSampler,
) )
from torchvision.datasets.video_utils import VideoClips, unfold from torchvision.datasets.video_utils import VideoClips
@pytest.mark.skipif(not io.video._av_available(), reason="this test requires av") @pytest.mark.skipif(not io.video._av_available(), reason="this test requires av")
......
import bz2
import contextlib import contextlib
import gzip import gzip
import itertools
import lzma
import os import os
import tarfile import tarfile
import warnings
import zipfile import zipfile
from urllib.error import URLError
import pytest import pytest
import torchvision.datasets.utils as utils import torchvision.datasets.utils as utils
......
import contextlib
import os
import pytest import pytest
import torch import torch
from common_utils import get_list_of_videos, assert_equal from common_utils import get_list_of_videos, assert_equal
......
import unittest import unittest
import test_datasets_video_utils import test_datasets_video_utils
from torchvision import set_video_backend from torchvision import set_video_backend # noqa: 401
# Disabling the video backend switching temporarily # Disabling the video backend switching temporarily
# set_video_backend('video_reader') # set_video_backend('video_reader')
......
...@@ -2,8 +2,7 @@ import colorsys ...@@ -2,8 +2,7 @@ import colorsys
import itertools import itertools
import math import math
import os import os
from functools import partial from typing import Sequence
from typing import Dict, List, Sequence, Tuple
import numpy as np import numpy as np
import pytest import pytest
......
...@@ -6,7 +6,6 @@ cleanly ignored in FB internal test infra. ...@@ -6,7 +6,6 @@ cleanly ignored in FB internal test infra.
""" """
import os import os
import warnings
from urllib.error import URLError from urllib.error import URLError
import pytest import pytest
......
...@@ -2,8 +2,6 @@ import contextlib ...@@ -2,8 +2,6 @@ import contextlib
import os import os
import sys import sys
import tempfile import tempfile
import warnings
from urllib.error import URLError
import pytest import pytest
import torch import torch
......
import unittest import unittest
import test_io import test_io
from torchvision import set_video_backend from torchvision import set_video_backend # noqa: 401
# Disabling the video backend switching temporarily # Disabling the video backend switching temporarily
......
...@@ -2,7 +2,6 @@ import functools ...@@ -2,7 +2,6 @@ import functools
import io import io
import operator import operator
import os import os
import sys
import traceback import traceback
import warnings import warnings
from collections import OrderedDict from collections import OrderedDict
......
import os import os
from typing import Sequence
import numpy as np import numpy as np
import pytest import pytest
......
...@@ -11,9 +11,9 @@ def _is_remote_location_available() -> bool: ...@@ -11,9 +11,9 @@ def _is_remote_location_available() -> bool:
try: try:
from torch.hub import load_state_dict_from_url from torch.hub import load_state_dict_from_url # noqa: 401
except ImportError: except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url from torch.utils.model_zoo import load_url as load_state_dict_from_url # noqa: 401
def _get_extension_path(lib_name): def _get_extension_path(lib_name):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment