Unverified commit 6ca9c76a authored by Philip Meier, committed by GitHub

Upgrade usort to `1.0.2` and black to 22.3.0 (#5106)



* upgrade usort to

* Also update black

* Actually use 1.0.2

* Apply pre-commit
Co-authored-by: Nicolas Hug <contact@nicolas-hug.com>
parent 9293be7e
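Two mechanical rewrites account for almost every hunk below. The black side is its power-operator rule: since black 22.1.0, `**` is hugged when both operands are simple (names, numeric literals, attribute access) and keeps its spaces otherwise. A small illustrative sketch (the variable names are made up, not taken from the diff):

```python
# How black >= 22.1.0 formats the power operator:
n_digits = 1
resolution = 7

area = 4 * resolution**2        # simple operands hug: parsed as 4 * (resolution**2)
scale = 10**n_digits            # literal ** name is also "simple"
padded = (resolution + 1) ** 2  # a parenthesized operand keeps the spaces
```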
@@ -4,7 +4,7 @@ import torchvision.models
 from common_utils import assert_equal
 from torchvision.models.detection.faster_rcnn import FastRCNNPredictor, TwoMLPHead
 from torchvision.models.detection.roi_heads import RoIHeads
-from torchvision.models.detection.rpn import AnchorGenerator, RPNHead, RegionProposalNetwork
+from torchvision.models.detection.rpn import AnchorGenerator, RegionProposalNetwork, RPNHead
 from torchvision.ops import MultiScaleRoIAlign
@@ -60,7 +60,7 @@ class TestModelsDetectionNegativeSamples:
         resolution = box_roi_pool.output_size[0]
         representation_size = 1024
-        box_head = TwoMLPHead(4 * resolution ** 2, representation_size)
+        box_head = TwoMLPHead(4 * resolution**2, representation_size)
         representation_size = 1024
         box_predictor = FastRCNNPredictor(representation_size, 2)
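The import shuffles are usort's side of the change: within a `from … import …`, usort 1.0 orders names case-insensitively, which is why `RegionProposalNetwork` now precedes `RPNHead`. A plain-Python illustration of the two orderings:

```python
names = ["AnchorGenerator", "RPNHead", "RegionProposalNetwork"]

# ASCII ordering ("P" < "e") reproduces the old, case-sensitive order...
print(sorted(names))
# ['AnchorGenerator', 'RPNHead', 'RegionProposalNetwork']

# ...while folding case gives the order usort 1.0 emits.
print(sorted(names, key=str.casefold))
# ['AnchorGenerator', 'RegionProposalNetwork', 'RPNHead']
```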
@@ -4,13 +4,12 @@ from typing import List, Tuple
 import pytest
 import torch
-from common_utils import set_rng_seed, assert_equal
-from torchvision import models
-from torchvision import ops
+from common_utils import assert_equal, set_rng_seed
+from torchvision import models, ops
 from torchvision.models.detection.faster_rcnn import FastRCNNPredictor, TwoMLPHead
 from torchvision.models.detection.image_list import ImageList
 from torchvision.models.detection.roi_heads import RoIHeads
-from torchvision.models.detection.rpn import AnchorGenerator, RPNHead, RegionProposalNetwork
+from torchvision.models.detection.rpn import AnchorGenerator, RegionProposalNetwork, RPNHead
 from torchvision.models.detection.transform import GeneralizedRCNNTransform
 from torchvision.ops._register_onnx_ops import _onnx_opset_version
@@ -265,7 +264,7 @@ class TestONNXExporter:
         resolution = box_roi_pool.output_size[0]
         representation_size = 1024
-        box_head = TwoMLPHead(out_channels * resolution ** 2, representation_size)
+        box_head = TwoMLPHead(out_channels * resolution**2, representation_size)
         representation_size = 1024
         box_predictor = FastRCNNPredictor(representation_size, num_classes)
@@ -79,7 +79,7 @@ class RoIOpTester(ABC):
         rois_dtype = self.dtype if rois_dtype is None else rois_dtype
         pool_size = 5
         # n_channels % (pool_size ** 2) == 0 required for PS operations.
-        n_channels = 2 * (pool_size ** 2)
+        n_channels = 2 * (pool_size**2)
         x = torch.rand(2, n_channels, 10, 10, dtype=x_dtype, device=device)
         if not contiguous:
             x = x.permute(0, 1, 3, 2)
@@ -115,7 +115,7 @@
     def test_backward(self, seed, device, contiguous):
         torch.random.manual_seed(seed)
         pool_size = 2
-        x = torch.rand(1, 2 * (pool_size ** 2), 5, 5, dtype=self.dtype, device=device, requires_grad=True)
+        x = torch.rand(1, 2 * (pool_size**2), 5, 5, dtype=self.dtype, device=device, requires_grad=True)
         if not contiguous:
             x = x.permute(0, 1, 3, 2)
         rois = torch.tensor(
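The comment in this hunk deserves a gloss: position-sensitive ops split the channel dimension into one group per output bin, so the input channel count must be divisible by `pool_size**2`. A minimal sketch using `torchvision.ops.PSRoIPool` (shapes chosen purely for illustration):

```python
import torch
from torchvision.ops import PSRoIPool

# Position-sensitive RoI pooling dedicates one channel group per output
# bin, so n_channels % (pool_size**2) == 0 must hold; the pooled output
# then has n_channels // pool_size**2 channels.
pool_size = 5
n_channels = 2 * pool_size**2  # 50 input channels -> 2 output channels
x = torch.rand(2, n_channels, 10, 10)
rois = torch.tensor([[0.0, 0.0, 0.0, 9.0, 9.0]])  # (batch_index, x1, y1, x2, y2)
out = PSRoIPool(output_size=pool_size, spatial_scale=1.0)(x, rois)
print(out.shape)  # torch.Size([1, 2, 5, 5])
```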
@@ -5,14 +5,14 @@ from pathlib import Path
 import pytest
 import torch
-from builtin_dataset_mocks import parametrize_dataset_mocks, DATASET_MOCKS
-from torch.testing._comparison import assert_equal, TensorLikePair, ObjectPair
+from builtin_dataset_mocks import DATASET_MOCKS, parametrize_dataset_mocks
+from torch.testing._comparison import assert_equal, ObjectPair, TensorLikePair
 from torch.utils.data import DataLoader
 from torch.utils.data.graph import traverse
 from torch.utils.data.graph_settings import get_all_graph_pipes
-from torchdata.datapipes.iter import Shuffler, ShardingFilter
+from torchdata.datapipes.iter import ShardingFilter, Shuffler
 from torchvision._utils import sequence_to_str
-from torchvision.prototype import transforms, datasets
+from torchvision.prototype import datasets, transforms
 from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE
 from torchvision.prototype.features import Image, Label
@@ -9,8 +9,8 @@ from datasets_utils import make_fake_flo_file, make_tar
 from torchdata.datapipes.iter import FileOpener, TarArchiveLoader
 from torchvision.datasets._optical_flow import _read_flo as read_flo_ref
 from torchvision.datasets.utils import _decompress
-from torchvision.prototype.datasets.utils import HttpResource, GDriveResource, Dataset, OnlineResource
-from torchvision.prototype.datasets.utils._internal import read_flo, fromfile
+from torchvision.prototype.datasets.utils import Dataset, GDriveResource, HttpResource, OnlineResource
+from torchvision.prototype.datasets.utils._internal import fromfile, read_flo

 @pytest.mark.filterwarnings("error:The given NumPy array is not writeable:UserWarning")
@@ -2,7 +2,7 @@ import pytest
 import test_models as TM
 import torch
 import torchvision.prototype.models.depth.stereo.raft_stereo as raft_stereo
-from common_utils import set_rng_seed, cpu_and_gpu
+from common_utils import cpu_and_gpu, set_rng_seed

 @pytest.mark.parametrize("model_builder", (raft_stereo.raft_stereo_base, raft_stereo.raft_stereo_realtime))
@@ -3,13 +3,9 @@ import itertools
 import pytest
 import torch
 from common_utils import assert_equal
-from test_prototype_transforms_functional import (
-    make_images,
-    make_bounding_boxes,
-    make_one_hot_labels,
-)
-from torchvision.prototype import transforms, features
-from torchvision.transforms.functional import to_pil_image, pil_to_tensor
+from test_prototype_transforms_functional import make_bounding_boxes, make_images, make_one_hot_labels
+from torchvision.prototype import features, transforms
+from torchvision.transforms.functional import pil_to_tensor, to_pil_image

 def make_vanilla_tensor_images(*args, **kwargs):
@@ -24,7 +24,7 @@ try:
 except ImportError:
     stats = None

-from common_utils import cycle_over, int_dtypes, float_dtypes, assert_equal
+from common_utils import assert_equal, cycle_over, float_dtypes, int_dtypes

 GRACE_HOPPER = get_file_path_2(
@@ -6,19 +6,18 @@ import pytest
 import torch
 import torchvision.transforms._pil_constants as _pil_constants
 from common_utils import (
-    get_tmp_dir,
-    int_dtypes,
-    float_dtypes,
+    _assert_approx_equal_tensor_to_pil,
+    _assert_equal_tensor_to_pil,
     _create_data,
     _create_data_batch,
-    _assert_equal_tensor_to_pil,
-    _assert_approx_equal_tensor_to_pil,
-    cpu_and_gpu,
     assert_equal,
+    cpu_and_gpu,
+    float_dtypes,
+    get_tmp_dir,
+    int_dtypes,
 )
 from torchvision import transforms as T
-from torchvision.transforms import InterpolationMode
-from torchvision.transforms import functional as F
+from torchvision.transforms import functional as F, InterpolationMode
 from torchvision.transforms.autoaugment import _apply_op

 NEAREST, BILINEAR, BICUBIC = InterpolationMode.NEAREST, InterpolationMode.BILINEAR, InterpolationMode.BICUBIC
@@ -10,7 +10,7 @@ import torch
 import torchvision.transforms.functional as F
 import torchvision.utils as utils
 from common_utils import assert_equal
-from PIL import Image, __version__ as PILLOW_VERSION, ImageColor
+from PIL import __version__ as PILLOW_VERSION, Image, ImageColor

 PILLOW_VERSION = tuple(int(x) for x in PILLOW_VERSION.split("."))
@@ -45,8 +45,8 @@ def test_normalize_in_make_grid():
     # Rounding the result to one decimal for comparison
     n_digits = 1
-    rounded_grid_max = torch.round(grid_max * 10 ** n_digits) / (10 ** n_digits)
-    rounded_grid_min = torch.round(grid_min * 10 ** n_digits) / (10 ** n_digits)
+    rounded_grid_max = torch.round(grid_max * 10**n_digits) / (10**n_digits)
+    rounded_grid_min = torch.round(grid_min * 10**n_digits) / (10**n_digits)
     assert_equal(norm_max, rounded_grid_max, msg="Normalized max is not equal to 1")
     assert_equal(norm_min, rounded_grid_min, msg="Normalized min is not equal to 0")
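The rounding idiom touched above scales by `10**n_digits`, rounds, and scales back, i.e. rounds to `n_digits` decimal places; recent PyTorch also offers `torch.round(t, decimals=n)` for the same thing. A quick check:

```python
import torch

# Scale, round, unscale: rounds to n_digits decimal places.
n_digits = 1
t = torch.tensor([0.9949, 1.0051])
print(torch.round(t * 10**n_digits) / (10**n_digits))  # tensor([1., 1.])
```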
@@ -2,12 +2,7 @@ import os
 import warnings

 import torch

-from torchvision import datasets
-from torchvision import io
-from torchvision import models
-from torchvision import ops
-from torchvision import transforms
-from torchvision import utils
+from torchvision import datasets, io, models, ops, transforms, utils
 from .extension import _HAS_OPS
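Besides sorting, usort merges repeated imports from the same module, which is what collapses the six `from torchvision import …` lines above into one. The rewrite is purely syntactic; a small `ast` sketch (hypothetical strings, not repo code) shows both forms bind the same names:

```python
import ast

# usort merges repeated "from torchvision import ..." statements; both
# forms bind exactly the same names.
before = (
    "from torchvision import datasets\n"
    "from torchvision import io\n"
    "from torchvision import models\n"
)
after = "from torchvision import datasets, io, models"

def bound_names(src):
    return {
        alias.asname or alias.name
        for node in ast.walk(ast.parse(src))
        if isinstance(node, ast.ImportFrom)
        for alias in node.names
    }

assert bound_names(before) == bound_names(after)
print(sorted(bound_names(after)))  # ['datasets', 'io', 'models']
```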
 import enum
-from typing import Sequence, TypeVar, Type
+from typing import Sequence, Type, TypeVar

 T = TypeVar("T", bound=enum.Enum)

-from ._optical_flow import KittiFlow, Sintel, FlyingChairs, FlyingThings3D, HD1K
+from ._optical_flow import FlyingChairs, FlyingThings3D, HD1K, KittiFlow, Sintel
 from .caltech import Caltech101, Caltech256
 from .celeba import CelebA
 from .cifar import CIFAR10, CIFAR100
@@ -11,19 +11,19 @@ from .eurosat import EuroSAT
 from .fakedata import FakeData
 from .fer2013 import FER2013
 from .fgvc_aircraft import FGVCAircraft
-from .flickr import Flickr8k, Flickr30k
+from .flickr import Flickr30k, Flickr8k
 from .flowers102 import Flowers102
-from .folder import ImageFolder, DatasetFolder
+from .folder import DatasetFolder, ImageFolder
 from .food101 import Food101
 from .gtsrb import GTSRB
 from .hmdb51 import HMDB51
 from .imagenet import ImageNet
 from .inaturalist import INaturalist
-from .kinetics import Kinetics400, Kinetics
+from .kinetics import Kinetics, Kinetics400
 from .kitti import Kitti
-from .lfw import LFWPeople, LFWPairs
+from .lfw import LFWPairs, LFWPeople
 from .lsun import LSUN, LSUNClass
-from .mnist import MNIST, EMNIST, FashionMNIST, KMNIST, QMNIST
+from .mnist import EMNIST, FashionMNIST, KMNIST, MNIST, QMNIST
 from .omniglot import Omniglot
 from .oxford_iiit_pet import OxfordIIITPet
 from .pcam import PCAM
@@ -40,7 +40,7 @@ from .svhn import SVHN
 from .ucf101 import UCF101
 from .usps import USPS
 from .vision import VisionDataset
-from .voc import VOCSegmentation, VOCDetection
+from .voc import VOCDetection, VOCSegmentation
 from .widerface import WIDERFace

 __all__ = (

@@ -9,7 +9,7 @@ import torch
 from PIL import Image

 from ..io.image import _read_png_16
-from .utils import verify_str_arg, _read_pfm
+from .utils import _read_pfm, verify_str_arg
 from .vision import VisionDataset
@@ -466,7 +466,7 @@ def _read_16bits_png_with_flow_and_valid_mask(file_name):
     flow_and_valid = _read_png_16(file_name).to(torch.float32)
     flow, valid_flow_mask = flow_and_valid[:2, :, :], flow_and_valid[2, :, :]
-    flow = (flow - 2 ** 15) / 64  # This conversion is explained somewhere on the kitti archive
+    flow = (flow - 2**15) / 64  # This conversion is explained somewhere on the kitti archive
     valid_flow_mask = valid_flow_mask.bool()

     # For consistency with other datasets, we convert to numpy
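For context on that comment: the KITTI flow devkit stores flow in 16-bit PNGs as unsigned values with `2**15` as the zero offset and a resolution of 1/64 px, which is exactly the conversion applied here. A short decoding sketch:

```python
import torch

# KITTI encodes flow as uint16: value v decodes to (v - 2**15) / 64 px,
# i.e. signed fixed point with 1/64 px resolution, roughly +/-512 px range.
raw = torch.tensor([2**15, 2**15 + 64, 2**15 - 64], dtype=torch.float32)
print((raw - 2**15) / 64)  # tensor([ 0.,  1., -1.])
```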
 import os
 import os.path
-from typing import Any, Callable, List, Optional, Union, Tuple
+from typing import Any, Callable, List, Optional, Tuple, Union

 from PIL import Image

 import csv
 import os
 from collections import namedtuple
-from typing import Any, Callable, List, Optional, Union, Tuple
+from typing import Any, Callable, List, Optional, Tuple, Union

 import PIL
 import torch

-from .utils import download_file_from_google_drive, check_integrity, verify_str_arg, extract_archive
+from .utils import check_integrity, download_file_from_google_drive, extract_archive, verify_str_arg
 from .vision import VisionDataset

 CSV = namedtuple("CSV", ["header", "index", "data"])

 import json
 import os
 from collections import namedtuple
-from typing import Any, Callable, Dict, List, Optional, Union, Tuple
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union

 from PIL import Image

-from .utils import extract_archive, verify_str_arg, iterable_to_str
+from .utils import extract_archive, iterable_to_str, verify_str_arg
 from .vision import VisionDataset

 import json
 import pathlib
-from typing import Any, Callable, Optional, Tuple, List
+from typing import Any, Callable, List, Optional, Tuple
 from urllib.parse import urlparse

 from PIL import Image

 import os.path
-from typing import Any, Callable, Optional, Tuple, List
+from typing import Any, Callable, List, Optional, Tuple

 from PIL import Image

@@ -2,7 +2,7 @@ from pathlib import Path
 from typing import Callable, Optional

 from .folder import ImageFolder
-from .utils import verify_str_arg, download_and_extract_archive
+from .utils import download_and_extract_archive, verify_str_arg


 class Country211(ImageFolder):