Unverified Commit 6ca9c76a authored by Philip Meier, committed by GitHub
Browse files

Upgrade usort to `1.0.2` and black to 22.3.0 (#5106)



* Upgrade usort to 1.0.2

* Also update black

* Actually use 1.0.2

* Apply pre-commit
Co-authored-by: Nicolas Hug <contact@nicolas-hug.com>
parent 9293be7e
from __future__ import annotations
from typing import Any, List, Tuple, Union, Optional, Sequence
from typing import Any, List, Optional, Sequence, Tuple, Union
import torch
from torchvision._utils import StrEnum
......
......@@ -2,7 +2,7 @@ from __future__ import annotations
import os
import sys
from typing import BinaryIO, Tuple, Type, TypeVar, Union, Optional, Any
from typing import Any, BinaryIO, Optional, Tuple, Type, TypeVar, Union
import PIL.Image
import torch
......
from typing import Any, cast, TypeVar, Union, Optional, Type, Callable, List, Tuple, Sequence, Mapping
from typing import Any, Callable, cast, List, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union
import torch
from torch._C import _TensorBase, DisableTorchFunction
......
from __future__ import annotations
import warnings
from typing import Any, List, Optional, Union, Sequence, Tuple, cast
from typing import Any, cast, List, Optional, Sequence, Tuple, Union
import torch
from torchvision._utils import StrEnum
from torchvision.transforms.functional import to_pil_image, InterpolationMode
from torchvision.utils import draw_bounding_boxes
from torchvision.utils import make_grid
from torchvision.transforms.functional import InterpolationMode, to_pil_image
from torchvision.utils import draw_bounding_boxes, make_grid
from ._bounding_box import BoundingBox
from ._feature import _Feature
......
from __future__ import annotations
from typing import Any, Optional, Sequence, cast, Union
from typing import Any, cast, Optional, Sequence, Union
import torch
from torchvision.prototype.utils._internal import apply_recursively
......
from __future__ import annotations
from typing import List, Optional, Union, Sequence
from typing import List, Optional, Sequence, Union
from torchvision.transforms import InterpolationMode
......
from typing import List, Optional, Callable, Tuple
from typing import Callable, List, Optional, Tuple
import torch
import torch.nn as nn
......@@ -6,8 +6,8 @@ import torch.nn.functional as F
import torchvision.models.optical_flow.raft as raft
from torch import Tensor
from torchvision.models._api import WeightsEnum
from torchvision.models.optical_flow._utils import make_coords_grid, grid_sample, upsample_flow
from torchvision.models.optical_flow.raft import ResidualBlock, MotionEncoder, FlowHead
from torchvision.models.optical_flow._utils import grid_sample, make_coords_grid, upsample_flow
from torchvision.models.optical_flow.raft import FlowHead, MotionEncoder, ResidualBlock
from torchvision.ops import Conv2dNormActivation
from torchvision.utils import _log_api_usage_once
......
......@@ -2,26 +2,26 @@ from . import functional # usort: skip
from ._transform import Transform # usort: skip
from ._augment import RandomErasing, RandomMixup, RandomCutmix
from ._auto_augment import RandAugment, TrivialAugmentWide, AutoAugment, AugMix
from ._color import ColorJitter, RandomPhotometricDistort, RandomEqualize
from ._augment import RandomCutmix, RandomErasing, RandomMixup
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import ColorJitter, RandomEqualize, RandomPhotometricDistort
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
Resize,
BatchMultiCrop,
CenterCrop,
RandomResizedCrop,
FiveCrop,
TenCrop,
BatchMultiCrop,
Pad,
RandomAffine,
RandomHorizontalFlip,
RandomResizedCrop,
RandomRotation,
RandomVerticalFlip,
Pad,
RandomZoomOut,
RandomRotation,
RandomAffine,
Resize,
TenCrop,
)
from ._meta import ConvertBoundingBoxFormat, ConvertImageDtype, ConvertImageColorSpace
from ._misc import Identity, Normalize, ToDtype, Lambda
from ._meta import ConvertBoundingBoxFormat, ConvertImageColorSpace, ConvertImageDtype
from ._misc import Identity, Lambda, Normalize, ToDtype
from ._type_conversion import DecodeImage, LabelToOneHot
from ._deprecated import Grayscale, RandomGrayscale, ToTensor, ToPILImage, PILToTensor # usort: skip
......@@ -6,10 +6,10 @@ from typing import Any, Dict, Tuple
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.prototype.transforms import Transform, functional as F
from torchvision.prototype.transforms import functional as F, Transform
from ._transform import _RandomApplyTransform
from ._utils import query_image, get_image_dimensions, has_any, has_all
from ._utils import get_image_dimensions, has_all, has_any, query_image
class RandomErasing(_RandomApplyTransform):
......
import math
from typing import Any, Dict, Tuple, Optional, Callable, List, cast, Sequence, TypeVar, Union, Type
from typing import Any, Callable, cast, Dict, List, Optional, Sequence, Tuple, Type, TypeVar, Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.prototype.transforms import Transform, functional as F
from torchvision.prototype.transforms import functional as F, Transform
from torchvision.prototype.utils._internal import query_recursively
from torchvision.transforms.autoaugment import AutoAugmentPolicy
from torchvision.transforms.functional import pil_to_tensor, to_pil_image, InterpolationMode
from torchvision.transforms.functional import InterpolationMode, pil_to_tensor, to_pil_image
from ._utils import get_image_dimensions
......
import collections.abc
from typing import Any, Dict, Union, Tuple, Optional, Sequence, TypeVar
from typing import Any, Dict, Optional, Sequence, Tuple, TypeVar, Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.prototype.transforms import Transform, functional as F
from torchvision.prototype.transforms import functional as F, Transform
from torchvision.transforms import functional as _F
from ._transform import _RandomApplyTransform
from ._utils import is_simple_tensor, get_image_dimensions, query_image
from ._utils import get_image_dimensions, is_simple_tensor, query_image
T = TypeVar("T", features.Image, torch.Tensor, PIL.Image.Image)
......
from typing import Any, Optional, List, Dict
from typing import Any, Dict, List, Optional
import torch
from torchvision.prototype.transforms import Transform
......
......@@ -2,18 +2,18 @@ import collections.abc
import math
import numbers
import warnings
from typing import Any, Dict, List, Optional, Union, Sequence, Tuple, cast
from typing import Any, cast, Dict, List, Optional, Sequence, Tuple, Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.prototype.transforms import Transform, functional as F
from torchvision.transforms.functional import pil_to_tensor, InterpolationMode
from torchvision.transforms.transforms import _setup_size, _setup_angle, _check_sequence_input
from torchvision.prototype.transforms import functional as F, Transform
from torchvision.transforms.functional import InterpolationMode, pil_to_tensor
from torchvision.transforms.transforms import _check_sequence_input, _setup_angle, _setup_size
from typing_extensions import Literal
from ._transform import _RandomApplyTransform
from ._utils import query_image, get_image_dimensions, has_any, is_simple_tensor
from ._utils import get_image_dimensions, has_any, is_simple_tensor, query_image
class RandomHorizontalFlip(_RandomApplyTransform):
......
from typing import Union, Any, Dict, Optional
from typing import Any, Dict, Optional, Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.prototype.transforms import Transform, functional as F
from torchvision.prototype.transforms import functional as F, Transform
from torchvision.transforms.functional import convert_image_dtype
from ._utils import is_simple_tensor
......
import functools
from typing import Any, List, Type, Callable, Dict
from typing import Any, Callable, Dict, List, Type
import torch
from torchvision.prototype.transforms import Transform, functional as F
from torchvision.prototype.transforms import functional as F, Transform
class Identity(Transform):
......
......@@ -3,7 +3,7 @@ from typing import Any, Dict
import numpy as np
import PIL.Image
from torchvision.prototype import features
from torchvision.prototype.transforms import Transform, functional as F
from torchvision.prototype.transforms import functional as F, Transform
from ._utils import is_simple_tensor
......
from typing import Any, Optional, Tuple, Union, Type, Iterator
from typing import Any, Iterator, Optional, Tuple, Type, Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.prototype.utils._internal import query_recursively
from .functional._meta import get_dimensions_image_tensor, get_dimensions_image_pil
from .functional._meta import get_dimensions_image_pil, get_dimensions_image_tensor
def query_image(sample: Any) -> Union[PIL.Image.Image, torch.Tensor, features.Image]:
......
......@@ -5,105 +5,103 @@ from ._meta import (
convert_image_color_space_pil,
) # usort: skip
from ._augment import (
erase_image_tensor,
)
from ._augment import erase_image_tensor
from ._color import (
adjust_brightness,
adjust_brightness_image_tensor,
adjust_brightness_image_pil,
adjust_brightness_image_tensor,
adjust_contrast,
adjust_contrast_image_tensor,
adjust_contrast_image_pil,
adjust_contrast_image_tensor,
adjust_gamma,
adjust_gamma_image_pil,
adjust_gamma_image_tensor,
adjust_hue,
adjust_hue_image_pil,
adjust_hue_image_tensor,
adjust_saturation,
adjust_saturation_image_tensor,
adjust_saturation_image_pil,
adjust_saturation_image_tensor,
adjust_sharpness,
adjust_sharpness_image_tensor,
adjust_sharpness_image_pil,
adjust_hue,
adjust_hue_image_tensor,
adjust_hue_image_pil,
adjust_gamma,
adjust_gamma_image_tensor,
adjust_gamma_image_pil,
posterize,
posterize_image_tensor,
posterize_image_pil,
solarize,
solarize_image_tensor,
solarize_image_pil,
adjust_sharpness_image_tensor,
autocontrast,
autocontrast_image_tensor,
autocontrast_image_pil,
autocontrast_image_tensor,
equalize,
equalize_image_tensor,
equalize_image_pil,
equalize_image_tensor,
invert,
invert_image_tensor,
invert_image_pil,
invert_image_tensor,
posterize,
posterize_image_pil,
posterize_image_tensor,
solarize,
solarize_image_pil,
solarize_image_tensor,
)
from ._geometry import (
affine,
affine_bounding_box,
affine_image_pil,
affine_image_tensor,
affine_segmentation_mask,
center_crop,
center_crop_bounding_box,
center_crop_image_pil,
center_crop_image_tensor,
center_crop_segmentation_mask,
crop,
crop_bounding_box,
crop_image_pil,
crop_image_tensor,
crop_segmentation_mask,
five_crop_image_pil,
five_crop_image_tensor,
horizontal_flip,
horizontal_flip_bounding_box,
horizontal_flip_image_tensor,
horizontal_flip_image_pil,
horizontal_flip_image_tensor,
horizontal_flip_segmentation_mask,
pad,
pad_bounding_box,
pad_image_pil,
pad_image_tensor,
pad_segmentation_mask,
perspective,
perspective_bounding_box,
perspective_image_pil,
perspective_image_tensor,
perspective_segmentation_mask,
resize,
resize_bounding_box,
resize_image_tensor,
resize_image_pil,
resize_image_tensor,
resize_segmentation_mask,
center_crop,
center_crop_bounding_box,
center_crop_segmentation_mask,
center_crop_image_tensor,
center_crop_image_pil,
resized_crop,
resized_crop_bounding_box,
resized_crop_image_tensor,
resized_crop_image_pil,
resized_crop_image_tensor,
resized_crop_segmentation_mask,
affine,
affine_bounding_box,
affine_image_tensor,
affine_image_pil,
affine_segmentation_mask,
rotate,
rotate_bounding_box,
rotate_image_tensor,
rotate_image_pil,
rotate_image_tensor,
rotate_segmentation_mask,
pad,
pad_bounding_box,
pad_image_tensor,
pad_image_pil,
pad_segmentation_mask,
crop,
crop_bounding_box,
crop_image_tensor,
crop_image_pil,
crop_segmentation_mask,
perspective,
perspective_bounding_box,
perspective_image_tensor,
perspective_image_pil,
perspective_segmentation_mask,
ten_crop_image_pil,
ten_crop_image_tensor,
vertical_flip,
vertical_flip_image_tensor,
vertical_flip_image_pil,
vertical_flip_bounding_box,
vertical_flip_image_pil,
vertical_flip_image_tensor,
vertical_flip_segmentation_mask,
five_crop_image_tensor,
five_crop_image_pil,
ten_crop_image_tensor,
ten_crop_image_pil,
)
from ._misc import normalize_image_tensor, gaussian_blur_image_tensor
from ._misc import gaussian_blur_image_tensor, normalize_image_tensor
from ._type_conversion import (
decode_image_with_pil,
decode_video_with_av,
label_to_one_hot,
to_image_tensor,
to_image_pil,
to_image_tensor,
)
......@@ -3,7 +3,7 @@ from typing import Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms import functional_tensor as _FT, functional_pil as _FP
from torchvision.transforms import functional_pil as _FP, functional_tensor as _FT
# shortcut type
......
import numbers
import warnings
from typing import Tuple, List, Optional, Sequence, Union
from typing import List, Optional, Sequence, Tuple, Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms import functional_tensor as _FT, functional_pil as _FP
from torchvision.transforms import functional_pil as _FP, functional_tensor as _FT
from torchvision.transforms.functional import (
pil_modes_mapping,
_compute_output_size,
_get_inverse_affine_matrix,
InterpolationMode,
_compute_output_size,
pil_modes_mapping,
)
from ._meta import convert_bounding_box_format, get_dimensions_image_tensor, get_dimensions_image_pil
from ._meta import convert_bounding_box_format, get_dimensions_image_pil, get_dimensions_image_tensor
# shortcut type
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment