"docs/vscode:/vscode.git/clone" did not exist on "cbfde17b007c73bb08cc49e554cac98e8febf54c"
Unverified commit 6ca9c76a authored by Philip Meier, committed by GitHub

Upgrade usort to `1.0.2` and black to 22.3.0 (#5106)



* upgrade usort to

* Also update black

* Actually use 1.0.2

* Apply pre-commit
Co-authored-by: Nicolas Hug <contact@nicolas-hug.com>
parent 9293be7e
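
The version bumps below change formatting in two mechanical ways, visible throughout the diff: usort 1.0.2 orders imports case-insensitively within each block (a short sketch follows the model-imports hunk), and black 22.3.0 removes the spaces around the power operator "**" when both operands are simple (a sketch follows the optical-flow hunks).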
@@ -15,8 +15,8 @@ repos:
     hooks:
       - id: ufmt
         additional_dependencies:
-          - black == 21.9b0
-          - usort == 0.6.4
+          - black == 22.3.0
+          - usort == 1.0.2
   - repo: https://gitlab.com/pycqa/flake8
     rev: 3.9.2
......
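
With these pins in place, re-running the hooks over the repository (standard pre-commit usage, e.g. pre-commit run --all-files) regenerates the formatting; every hunk below is mechanical output of usort and black rather than a hand edit.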
@@ -3,8 +3,8 @@ dependencies = ["torch"]
 from torchvision.models import get_weight
 from torchvision.models.alexnet import alexnet
-from torchvision.models.convnext import convnext_tiny, convnext_small, convnext_base, convnext_large
-from torchvision.models.densenet import densenet121, densenet169, densenet201, densenet161
+from torchvision.models.convnext import convnext_base, convnext_large, convnext_small, convnext_tiny
+from torchvision.models.densenet import densenet121, densenet161, densenet169, densenet201
 from torchvision.models.efficientnet import (
     efficientnet_b0,
     efficientnet_b1,
@@ -14,9 +14,9 @@ from torchvision.models.efficientnet import (
     efficientnet_b5,
     efficientnet_b6,
     efficientnet_b7,
-    efficientnet_v2_s,
-    efficientnet_v2_m,
     efficientnet_v2_l,
+    efficientnet_v2_m,
+    efficientnet_v2_s,
 )
 from torchvision.models.googlenet import googlenet
 from torchvision.models.inception import inception_v3
@@ -25,40 +25,40 @@ from torchvision.models.mobilenetv2 import mobilenet_v2
 from torchvision.models.mobilenetv3 import mobilenet_v3_large, mobilenet_v3_small
 from torchvision.models.optical_flow import raft_large, raft_small
 from torchvision.models.regnet import (
-    regnet_y_400mf,
-    regnet_y_800mf,
-    regnet_y_1_6gf,
-    regnet_y_3_2gf,
-    regnet_y_8gf,
-    regnet_y_16gf,
-    regnet_y_32gf,
-    regnet_y_128gf,
-    regnet_x_400mf,
-    regnet_x_800mf,
+    regnet_x_16gf,
     regnet_x_1_6gf,
+    regnet_x_32gf,
     regnet_x_3_2gf,
+    regnet_x_400mf,
+    regnet_x_800mf,
     regnet_x_8gf,
-    regnet_x_16gf,
-    regnet_x_32gf,
+    regnet_y_128gf,
+    regnet_y_16gf,
+    regnet_y_1_6gf,
+    regnet_y_32gf,
+    regnet_y_3_2gf,
+    regnet_y_400mf,
+    regnet_y_800mf,
+    regnet_y_8gf,
 )
 from torchvision.models.resnet import (
+    resnet101,
+    resnet152,
     resnet18,
     resnet34,
     resnet50,
-    resnet101,
-    resnet152,
-    resnext50_32x4d,
     resnext101_32x8d,
     resnext101_64x4d,
-    wide_resnet50_2,
+    resnext50_32x4d,
     wide_resnet101_2,
+    wide_resnet50_2,
 )
 from torchvision.models.segmentation import (
-    fcn_resnet50,
-    fcn_resnet101,
-    deeplabv3_resnet50,
-    deeplabv3_resnet101,
     deeplabv3_mobilenet_v3_large,
+    deeplabv3_resnet101,
+    deeplabv3_resnet50,
+    fcn_resnet101,
+    fcn_resnet50,
     lraspp_mobilenet_v3_large,
 )
 from torchvision.models.shufflenetv2 import (
@@ -68,12 +68,6 @@ from torchvision.models.shufflenetv2 import (
     shufflenet_v2_x2_0,
 )
 from torchvision.models.squeezenet import squeezenet1_0, squeezenet1_1
-from torchvision.models.swin_transformer import swin_t, swin_s, swin_b
-from torchvision.models.vgg import vgg11, vgg13, vgg16, vgg19, vgg11_bn, vgg13_bn, vgg16_bn, vgg19_bn
-from torchvision.models.vision_transformer import (
-    vit_b_16,
-    vit_b_32,
-    vit_l_16,
-    vit_l_32,
-    vit_h_14,
-)
+from torchvision.models.swin_transformer import swin_b, swin_s, swin_t
+from torchvision.models.vgg import vgg11, vgg11_bn, vgg13, vgg13_bn, vgg16, vgg16_bn, vgg19, vgg19_bn
+from torchvision.models.vision_transformer import vit_b_16, vit_b_32, vit_h_14, vit_l_16, vit_l_32
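
For context, a minimal sketch of the reordering that produces hunks like the one above. This is illustrative only: usort's real sorter also separates standard-library, third-party, and first-party blocks; the snippet merely mimics the case-insensitive ordering visible in this diff.

    # Names taken from hunks in this commit.
    names = ["unfold", "VideoClips", "GroupedBatchSampler", "create_aspect_ratio_groups"]

    # A plain sort is case-sensitive: every uppercase letter sorts before lowercase.
    print(sorted(names))
    # ['GroupedBatchSampler', 'VideoClips', 'create_aspect_ratio_groups', 'unfold']

    # A case-insensitive sort matches the new order seen throughout this diff.
    print(sorted(names, key=str.casefold))
    # ['create_aspect_ratio_groups', 'GroupedBatchSampler', 'unfold', 'VideoClips']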
@@ -9,7 +9,7 @@ import torch.utils.data
 import torchvision
 import utils
 from torch import nn
-from train import train_one_epoch, evaluate, load_data
+from train import evaluate, load_data, train_one_epoch


 def main(args):
......
@@ -2,7 +2,7 @@ import bisect
 import copy
 import math
 from collections import defaultdict
-from itertools import repeat, chain
+from itertools import chain, repeat

 import numpy as np
 import torch
......
@@ -29,8 +29,8 @@ import torchvision.models.detection
 import torchvision.models.detection.mask_rcnn
 import utils
 from coco_utils import get_coco, get_coco_kp
-from engine import train_one_epoch, evaluate
-from group_by_aspect_ratio import GroupedBatchSampler, create_aspect_ratio_groups
+from engine import evaluate, train_one_epoch
+from group_by_aspect_ratio import create_aspect_ratio_groups, GroupedBatchSampler
 from torchvision.transforms import InterpolationMode
 from transforms import SimpleCopyPaste
......
-from typing import List, Tuple, Dict, Optional, Union
+from typing import Dict, List, Optional, Tuple, Union

 import torch
 import torchvision
 from torch import nn, Tensor
 from torchvision import ops
-from torchvision.transforms import functional as F
-from torchvision.transforms import transforms as T, InterpolationMode
+from torchvision.transforms import functional as F, InterpolationMode, transforms as T


 def _flip_coco_person_keypoints(kps, width):
......
@@ -6,8 +6,8 @@ from pathlib import Path
 import torch
 import torchvision.models.optical_flow
 import utils
-from presets import OpticalFlowPresetTrain, OpticalFlowPresetEval
-from torchvision.datasets import KittiFlow, FlyingChairs, FlyingThings3D, Sintel, HD1K
+from presets import OpticalFlowPresetEval, OpticalFlowPresetTrain
+from torchvision.datasets import FlyingChairs, FlyingThings3D, HD1K, KittiFlow, Sintel


 def get_train_dataset(stage, dataset_root):
......
 import datetime
 import os
 import time
-from collections import defaultdict
-from collections import deque
+from collections import defaultdict, deque

 import torch
 import torch.distributed as dist
@@ -158,7 +157,7 @@ class MetricLogger:
 def compute_metrics(flow_pred, flow_gt, valid_flow_mask=None):
     epe = ((flow_pred - flow_gt) ** 2).sum(dim=1).sqrt()
-    flow_norm = (flow_gt ** 2).sum(dim=1).sqrt()
+    flow_norm = (flow_gt**2).sum(dim=1).sqrt()

     if valid_flow_mask is not None:
         epe = epe[valid_flow_mask]
@@ -183,7 +182,7 @@ def sequence_loss(flow_preds, flow_gt, valid_flow_mask, gamma=0.8, max_flow=400)
         raise ValueError(f"Gamma should be < 1, got {gamma}.")

     # exlude invalid pixels and extremely large diplacements
-    flow_norm = torch.sum(flow_gt ** 2, dim=1).sqrt()
+    flow_norm = torch.sum(flow_gt**2, dim=1).sqrt()
     valid_flow_mask = valid_flow_mask & (flow_norm < max_flow)

     valid_flow_mask = valid_flow_mask[:, None, :, :]
......
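
The flow_gt ** 2 → flow_gt**2 edits in the two hunks above (and the similar ones below) come from black's power-operator rule, new in the 22.x series: spaces around ** are removed when both operands are simple (names, numeric literals, attribute access) and kept otherwise. A minimal sketch of the rule, not taken from this diff:

    x = 3
    y = x**2          # both operands are simple, so black joins the operator
    z = (x + 1) ** 2  # a parenthesized operand is not simple, so spaces stay
    print(y, z)       # prints: 9 16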
@@ -75,7 +75,7 @@ class ConfusionMatrix:
         with torch.inference_mode():
             k = (a >= 0) & (a < n)
             inds = n * a[k].to(torch.int64) + b[k]
-            self.mat += torch.bincount(inds, minlength=n ** 2).reshape(n, n)
+            self.mat += torch.bincount(inds, minlength=n**2).reshape(n, n)

     def reset(self):
         self.mat.zero_()
......
@@ -7,9 +7,9 @@ import subprocess
 import sys

 import torch
-from pkg_resources import parse_version, get_distribution, DistributionNotFound
-from setuptools import setup, find_packages
-from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
+from pkg_resources import DistributionNotFound, get_distribution, parse_version
+from setuptools import find_packages, setup
+from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDA_HOME, CUDAExtension


 def read(*names, **kwargs):
......
@@ -14,12 +14,12 @@ import shutil
 import unittest.mock
 import warnings
 import xml.etree.ElementTree as ET
-from collections import defaultdict, Counter
+from collections import Counter, defaultdict

 import numpy as np
 import pytest
 import torch
-from datasets_utils import make_zip, make_tar, create_image_folder, create_image_file, combinations_grid
+from datasets_utils import combinations_grid, create_image_file, create_image_folder, make_tar, make_zip
 from torch.nn.functional import one_hot
 from torch.testing import make_tensor as _make_tensor
 from torchvision.prototype import datasets
......
@@ -3,7 +3,7 @@ import random
 import numpy as np
 import pytest
 import torch
-from common_utils import IN_CIRCLE_CI, CIRCLECI_GPU_NO_CUDA_MSG, IN_FBCODE, IN_RE_WORKER, CUDA_NOT_AVAILABLE_MSG
+from common_utils import CIRCLECI_GPU_NO_CUDA_MSG, CUDA_NOT_AVAILABLE_MSG, IN_CIRCLE_CI, IN_FBCODE, IN_RE_WORKER


 def pytest_configure(config):
......
@@ -22,7 +22,7 @@ import pytest
 import torch
 import torchvision.datasets
 import torchvision.io
-from common_utils import get_tmp_dir, disable_console_output
+from common_utils import disable_console_output, get_tmp_dir

 __all__ = [
......
@@ -9,15 +9,15 @@ from distutils import dir_util
 from os import path
 from urllib.error import HTTPError, URLError
 from urllib.parse import urlparse
-from urllib.request import urlopen, Request
+from urllib.request import Request, urlopen

 import pytest
 from torchvision import datasets
 from torchvision.datasets.utils import (
-    download_url,
-    _get_redirect_url,
     check_integrity,
     download_file_from_google_drive,
+    _get_redirect_url,
+    download_url,
     USER_AGENT,
 )
......
 import pytest
 import torch
-from common_utils import get_list_of_videos, assert_equal
+from common_utils import assert_equal, get_list_of_videos
 from torchvision import io
-from torchvision.datasets.samplers import (
-    DistributedSampler,
-    RandomClipSampler,
-    UniformClipSampler,
-)
+from torchvision.datasets.samplers import DistributedSampler, RandomClipSampler, UniformClipSampler
 from torchvision.datasets.video_utils import VideoClips
......
 import pytest
 import torch
-from common_utils import get_list_of_videos, assert_equal
+from common_utils import assert_equal, get_list_of_videos
 from torchvision import io
-from torchvision.datasets.video_utils import VideoClips, unfold
+from torchvision.datasets.video_utils import unfold, VideoClips


 class TestVideo:
......
@@ -5,7 +5,7 @@ import pytest
 import test_models as TM
 import torch
 from torchvision import models
-from torchvision.models._api import WeightsEnum, Weights
+from torchvision.models._api import Weights, WeightsEnum
 from torchvision.models._utils import handle_legacy_interface
......
@@ -14,14 +14,14 @@ import torchvision.transforms.functional as F
 import torchvision.transforms.functional_pil as F_pil
 import torchvision.transforms.functional_tensor as F_t
 from common_utils import (
-    cpu_and_gpu,
-    needs_cuda,
+    _assert_approx_equal_tensor_to_pil,
+    _assert_equal_tensor_to_pil,
     _create_data,
     _create_data_batch,
-    _assert_equal_tensor_to_pil,
-    _assert_approx_equal_tensor_to_pil,
     _test_fn_on_batch,
     assert_equal,
+    cpu_and_gpu,
+    needs_cuda,
 )
 from torchvision.transforms import InterpolationMode
......
@@ -8,21 +8,21 @@ import numpy as np
 import pytest
 import torch
 import torchvision.transforms.functional as F
-from common_utils import needs_cuda, assert_equal
-from PIL import Image, __version__ as PILLOW_VERSION
+from common_utils import assert_equal, needs_cuda
+from PIL import __version__ as PILLOW_VERSION, Image
 from torchvision.io.image import (
-    decode_png,
+    _read_png_16,
+    decode_image,
     decode_jpeg,
+    decode_png,
     encode_jpeg,
-    write_jpeg,
-    decode_image,
-    read_file,
     encode_png,
-    write_png,
-    write_file,
     ImageReadMode,
+    read_file,
     read_image,
-    _read_png_16,
+    write_file,
+    write_jpeg,
+    write_png,
 )

 IMAGE_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets")
@@ -168,7 +168,7 @@ def test_decode_png(img_path, pil_mode, mode):
         img_lpng = _read_png_16(img_path, mode=mode)
         assert img_lpng.dtype == torch.int32
         # PIL converts 16 bits pngs in uint8
-        img_lpng = torch.round(img_lpng / (2 ** 16 - 1) * 255).to(torch.uint8)
+        img_lpng = torch.round(img_lpng / (2**16 - 1) * 255).to(torch.uint8)
     else:
         data = read_file(img_path)
         img_lpng = decode_image(data, mode=mode)
......
@@ -14,7 +14,7 @@ import torch
 import torch.fx
 import torch.nn as nn
 from _utils_internal import get_relative_path
-from common_utils import map_nested_tensor_object, freeze_rng_state, set_rng_seed, cpu_and_gpu, needs_cuda
+from common_utils import cpu_and_gpu, freeze_rng_state, map_nested_tensor_object, needs_cuda, set_rng_seed
 from torchvision import models

 ACCEPT = os.getenv("EXPECTTEST_ACCEPT", "0") == "1"
......