"vscode:/vscode.git/clone" did not exist on "6b6b4bcffe1ab94ea3dbc511034cde3035e50129"
Unverified Commit 39fe34a2 authored by Vasilis Vryniotis, committed by GitHub

Eliminate runtime cyclic dependencies (#6476)

* Move imports into the constructor.

* Turn `_F` into a property.

* Fix linter.

* Fix mypy

* Make it a class-wide attribute.

* Add tests based on code review

* Make changes from code reviews.

* Remove the new tests.

* Clean up.

* Add comments.

* Update the comment link.
parent 9559188c
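The change in a nutshell: instead of re-importing `torchvision.prototype.transforms.functional` inside every method (the previous workaround for the cycle between the `features` and `transforms` packages), each feature class now resolves the module once through a lazy `_F` property whose result is cached class-wide on `_Feature`. A minimal, self-contained sketch of the pattern (here `Feature` and the `math` stand-in are illustrative, not the real torchvision names):

```python
from types import ModuleType
from typing import Optional


class Feature:
    # Class-wide cache for the lazily imported module; empty until first use.
    __F: Optional[ModuleType] = None

    @property
    def _F(self) -> ModuleType:
        # Defer the import until the first access, so this module stays importable
        # even when the imported module imports it back (the cyclic case).
        if Feature.__F is None:
            import math as functional  # stand-in for the real functional module

            Feature.__F = functional
        return Feature.__F


f = Feature()
print(f._F.sqrt(4.0))  # 2.0; the first access imports, later accesses hit the cache
```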
@@ -63,25 +63,19 @@ class BoundingBox(_Feature):
         )
 
     def to_format(self, format: Union[str, BoundingBoxFormat]) -> BoundingBox:
-        from torchvision.prototype.transforms import functional as _F
-
         if isinstance(format, str):
             format = BoundingBoxFormat.from_str(format.upper())
         return BoundingBox.new_like(
-            self, _F.convert_bounding_box_format(self, old_format=self.format, new_format=format), format=format
+            self, self._F.convert_bounding_box_format(self, old_format=self.format, new_format=format), format=format
         )
 
     def horizontal_flip(self) -> BoundingBox:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.horizontal_flip_bounding_box(self, format=self.format, image_size=self.image_size)
+        output = self._F.horizontal_flip_bounding_box(self, format=self.format, image_size=self.image_size)
         return BoundingBox.new_like(self, output)
 
     def vertical_flip(self) -> BoundingBox:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.vertical_flip_bounding_box(self, format=self.format, image_size=self.image_size)
+        output = self._F.vertical_flip_bounding_box(self, format=self.format, image_size=self.image_size)
         return BoundingBox.new_like(self, output)
 
     def resize(  # type: ignore[override]
@@ -91,22 +85,16 @@ class BoundingBox(_Feature):
         max_size: Optional[int] = None,
         antialias: bool = False,
     ) -> BoundingBox:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.resize_bounding_box(self, size, image_size=self.image_size, max_size=max_size)
+        output = self._F.resize_bounding_box(self, size, image_size=self.image_size, max_size=max_size)
         image_size = (size[0], size[0]) if len(size) == 1 else (size[0], size[1])
         return BoundingBox.new_like(self, output, image_size=image_size, dtype=output.dtype)
 
     def crop(self, top: int, left: int, height: int, width: int) -> BoundingBox:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.crop_bounding_box(self, self.format, top, left)
+        output = self._F.crop_bounding_box(self, self.format, top, left)
         return BoundingBox.new_like(self, output, image_size=(height, width))
 
     def center_crop(self, output_size: List[int]) -> BoundingBox:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.center_crop_bounding_box(
+        output = self._F.center_crop_bounding_box(
             self, format=self.format, output_size=output_size, image_size=self.image_size
         )
         image_size = (output_size[0], output_size[0]) if len(output_size) == 1 else (output_size[0], output_size[1])
@@ -122,9 +110,7 @@ class BoundingBox(_Feature):
         interpolation: InterpolationMode = InterpolationMode.BILINEAR,
         antialias: bool = False,
     ) -> BoundingBox:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.resized_crop_bounding_box(self, self.format, top, left, height, width, size=size)
+        output = self._F.resized_crop_bounding_box(self, self.format, top, left, height, width, size=size)
         image_size = (size[0], size[0]) if len(size) == 1 else (size[0], size[1])
         return BoundingBox.new_like(self, output, image_size=image_size, dtype=output.dtype)
@@ -134,8 +120,6 @@ class BoundingBox(_Feature):
         fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
         padding_mode: str = "constant",
     ) -> BoundingBox:
-        from torchvision.prototype.transforms import functional as _F
-
         if padding_mode not in ["constant"]:
             raise ValueError(f"Padding mode '{padding_mode}' is not supported with bounding boxes")
@@ -143,7 +127,7 @@ class BoundingBox(_Feature):
         if not isinstance(padding, int):
             padding = list(padding)
 
-        output = _F.pad_bounding_box(self, padding, format=self.format)
+        output = self._F.pad_bounding_box(self, padding, format=self.format)
 
         # Update output image size:
         # TODO: remove the import below and make _parse_pad_padding available
@@ -164,9 +148,7 @@ class BoundingBox(_Feature):
         fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
         center: Optional[List[float]] = None,
     ) -> BoundingBox:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.rotate_bounding_box(
+        output = self._F.rotate_bounding_box(
             self, format=self.format, image_size=self.image_size, angle=angle, expand=expand, center=center
         )
         image_size = self.image_size
@@ -174,7 +156,7 @@ class BoundingBox(_Feature):
             # The way we recompute image_size is not optimal due to redundant computations of
             # - rotation matrix (_get_inverse_affine_matrix)
             # - points dot matrix (_compute_output_size)
-            # Alternatively, we could return new image size by _F.rotate_bounding_box
+            # Alternatively, we could return new image size by self._F.rotate_bounding_box
             height, width = image_size
             rotation_matrix = _get_inverse_affine_matrix([0.0, 0.0], angle, [0.0, 0.0], 1.0, [0.0, 0.0])
             new_width, new_height = _compute_output_size(rotation_matrix, width, height)
@@ -192,9 +174,7 @@ class BoundingBox(_Feature):
         fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
         center: Optional[List[float]] = None,
     ) -> BoundingBox:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.affine_bounding_box(
+        output = self._F.affine_bounding_box(
             self,
             self.format,
             self.image_size,
@@ -212,9 +192,7 @@ class BoundingBox(_Feature):
         interpolation: InterpolationMode = InterpolationMode.BILINEAR,
         fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
     ) -> BoundingBox:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.perspective_bounding_box(self, self.format, perspective_coeffs)
+        output = self._F.perspective_bounding_box(self, self.format, perspective_coeffs)
         return BoundingBox.new_like(self, output, dtype=output.dtype)
 
     def elastic(
@@ -223,7 +201,5 @@ class BoundingBox(_Feature):
         interpolation: InterpolationMode = InterpolationMode.BILINEAR,
         fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
     ) -> BoundingBox:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.elastic_bounding_box(self, self.format, displacement)
+        output = self._F.elastic_bounding_box(self, self.format, displacement)
         return BoundingBox.new_like(self, output, dtype=output.dtype)
@@ -49,11 +49,7 @@ class EncodedImage(EncodedData):
     def decode(self) -> Image:
         # TODO: this is useful for developing and debugging but we should remove or at least revisit this before we
         # promote this out of the prototype state
-        # import at runtime to avoid cyclic imports
-        from torchvision.prototype.transforms.functional import decode_image_with_pil
-
-        return Image(decode_image_with_pil(self))
+        return Image(self._F.decode_image_with_pil(self))
 
 
 class EncodedVideo(EncodedData):
......
 from __future__ import annotations
 
+from types import ModuleType
 from typing import Any, Callable, cast, List, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union
 
 import torch
@@ -10,6 +11,8 @@ F = TypeVar("F", bound="_Feature")
 
 
 class _Feature(torch.Tensor):
+    __F: Optional[ModuleType] = None
+
     def __new__(
         cls: Type[F],
         data: Any,
@@ -92,6 +95,18 @@ class _Feature(torch.Tensor):
         extra_repr = ", ".join(f"{key}={value}" for key, value in kwargs.items())
         return f"{super().__repr__()[:-1]}, {extra_repr})"
 
+    @property
+    def _F(self) -> ModuleType:
+        # This implements a lazy import of the functional to get around the cyclic import. This import is deferred
+        # until the first time we need reference to the functional module and it's shared across all instances of
+        # the class. This approach avoids the DataLoader issue described at
+        # https://github.com/pytorch/vision/pull/6476#discussion_r953588621
+        if _Feature.__F is None:
+            from ..transforms import functional
+
+            _Feature.__F = functional
+        return _Feature.__F
+
     def horizontal_flip(self) -> _Feature:
         return self
......
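Two details of the `_F` property above are worth spelling out. First, the double-underscore attribute is name-mangled, so the cache is reachable externally as `_Feature._Feature__F` and cannot collide with a subclass attribute of the same name. Second, caching on the class rather than on `self` keeps the module object out of instance state, which is plausibly the DataLoader concern in the linked discussion: instances that cross worker-process boundaries get pickled, and module objects cannot be pickled. A quick check against the hypothetical `Feature` sketch shown earlier:

```python
import pickle

f = Feature()
_ = f._F                                 # first access fills the class-level cache
assert "_F" not in vars(f)               # nothing was stored on the instance
assert Feature._Feature__F is not None   # the name-mangled class attribute holds it
pickle.dumps(f)                          # still picklable: no module in instance state
```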
@@ -103,14 +103,12 @@ class Image(_Feature):
         return ColorSpace.OTHER
 
     def to_color_space(self, color_space: Union[str, ColorSpace], copy: bool = True) -> Image:
-        from torchvision.prototype.transforms import functional as _F
-
         if isinstance(color_space, str):
             color_space = ColorSpace.from_str(color_space.upper())
 
         return Image.new_like(
             self,
-            _F.convert_color_space_image_tensor(
+            self._F.convert_color_space_image_tensor(
                 self, old_color_space=self.color_space, new_color_space=color_space, copy=copy
             ),
             color_space=color_space,
@@ -127,15 +125,11 @@ class Image(_Feature):
         return Image.new_like(self, draw_bounding_boxes(self, bounding_box.to_format("xyxy").view(-1, 4), **kwargs))
 
     def horizontal_flip(self) -> Image:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.horizontal_flip_image_tensor(self)
+        output = self._F.horizontal_flip_image_tensor(self)
         return Image.new_like(self, output)
 
     def vertical_flip(self) -> Image:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.vertical_flip_image_tensor(self)
+        output = self._F.vertical_flip_image_tensor(self)
         return Image.new_like(self, output)
 
     def resize(  # type: ignore[override]
@@ -145,21 +139,17 @@ class Image(_Feature):
         max_size: Optional[int] = None,
         antialias: bool = False,
     ) -> Image:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.resize_image_tensor(self, size, interpolation=interpolation, max_size=max_size, antialias=antialias)
+        output = self._F.resize_image_tensor(
+            self, size, interpolation=interpolation, max_size=max_size, antialias=antialias
+        )
         return Image.new_like(self, output)
 
     def crop(self, top: int, left: int, height: int, width: int) -> Image:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.crop_image_tensor(self, top, left, height, width)
+        output = self._F.crop_image_tensor(self, top, left, height, width)
         return Image.new_like(self, output)
 
     def center_crop(self, output_size: List[int]) -> Image:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.center_crop_image_tensor(self, output_size=output_size)
+        output = self._F.center_crop_image_tensor(self, output_size=output_size)
         return Image.new_like(self, output)
 
     def resized_crop(
@@ -172,9 +162,7 @@ class Image(_Feature):
         interpolation: InterpolationMode = InterpolationMode.BILINEAR,
         antialias: bool = False,
     ) -> Image:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.resized_crop_image_tensor(
+        output = self._F.resized_crop_image_tensor(
             self, top, left, height, width, size=list(size), interpolation=interpolation, antialias=antialias
         )
         return Image.new_like(self, output)
@@ -185,19 +173,15 @@ class Image(_Feature):
         fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
         padding_mode: str = "constant",
     ) -> Image:
-        from torchvision.prototype.transforms import functional as _F
-
         # This cast does Sequence[int] -> List[int] and is required to make mypy happy
         if not isinstance(padding, int):
             padding = list(padding)
 
         # PyTorch's pad supports only scalars on fill. So we need to overwrite the colour
         if isinstance(fill, (int, float)) or fill is None:
-            output = _F.pad_image_tensor(self, padding, fill=fill, padding_mode=padding_mode)
+            output = self._F.pad_image_tensor(self, padding, fill=fill, padding_mode=padding_mode)
         else:
-            from torchvision.prototype.transforms.functional._geometry import _pad_with_vector_fill
-
-            output = _pad_with_vector_fill(self, padding, fill=fill, padding_mode=padding_mode)
+            output = self._F._geometry._pad_with_vector_fill(self, padding, fill=fill, padding_mode=padding_mode)
 
         return Image.new_like(self, output)
@@ -209,11 +193,9 @@ class Image(_Feature):
         fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
         center: Optional[List[float]] = None,
     ) -> Image:
-        from torchvision.prototype.transforms.functional import _geometry as _F
-
-        fill = _F._convert_fill_arg(fill)
+        fill = self._F._geometry._convert_fill_arg(fill)
 
-        output = _F.rotate_image_tensor(
+        output = self._F._geometry.rotate_image_tensor(
             self, angle, interpolation=interpolation, expand=expand, fill=fill, center=center
         )
         return Image.new_like(self, output)
@@ -228,11 +210,9 @@ class Image(_Feature):
         fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
         center: Optional[List[float]] = None,
     ) -> Image:
-        from torchvision.prototype.transforms.functional import _geometry as _F
-
-        fill = _F._convert_fill_arg(fill)
+        fill = self._F._geometry._convert_fill_arg(fill)
 
-        output = _F.affine_image_tensor(
+        output = self._F._geometry.affine_image_tensor(
             self,
             angle,
             translate=translate,
@@ -250,11 +230,11 @@ class Image(_Feature):
         interpolation: InterpolationMode = InterpolationMode.BILINEAR,
         fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
     ) -> Image:
-        from torchvision.prototype.transforms.functional import _geometry as _F
-
-        fill = _F._convert_fill_arg(fill)
+        fill = self._F._geometry._convert_fill_arg(fill)
 
-        output = _F.perspective_image_tensor(self, perspective_coeffs, interpolation=interpolation, fill=fill)
+        output = self._F._geometry.perspective_image_tensor(
+            self, perspective_coeffs, interpolation=interpolation, fill=fill
+        )
         return Image.new_like(self, output)
 
     def elastic(
@@ -263,81 +243,55 @@ class Image(_Feature):
         interpolation: InterpolationMode = InterpolationMode.BILINEAR,
         fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
     ) -> Image:
-        from torchvision.prototype.transforms.functional import _geometry as _F
-
-        fill = _F._convert_fill_arg(fill)
+        fill = self._F._geometry._convert_fill_arg(fill)
 
-        output = _F.elastic_image_tensor(self, displacement, interpolation=interpolation, fill=fill)
+        output = self._F._geometry.elastic_image_tensor(self, displacement, interpolation=interpolation, fill=fill)
         return Image.new_like(self, output)
 
     def adjust_brightness(self, brightness_factor: float) -> Image:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.adjust_brightness_image_tensor(self, brightness_factor=brightness_factor)
+        output = self._F.adjust_brightness_image_tensor(self, brightness_factor=brightness_factor)
         return Image.new_like(self, output)
 
     def adjust_saturation(self, saturation_factor: float) -> Image:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.adjust_saturation_image_tensor(self, saturation_factor=saturation_factor)
+        output = self._F.adjust_saturation_image_tensor(self, saturation_factor=saturation_factor)
         return Image.new_like(self, output)
 
     def adjust_contrast(self, contrast_factor: float) -> Image:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.adjust_contrast_image_tensor(self, contrast_factor=contrast_factor)
+        output = self._F.adjust_contrast_image_tensor(self, contrast_factor=contrast_factor)
         return Image.new_like(self, output)
 
     def adjust_sharpness(self, sharpness_factor: float) -> Image:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.adjust_sharpness_image_tensor(self, sharpness_factor=sharpness_factor)
+        output = self._F.adjust_sharpness_image_tensor(self, sharpness_factor=sharpness_factor)
         return Image.new_like(self, output)
 
     def adjust_hue(self, hue_factor: float) -> Image:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.adjust_hue_image_tensor(self, hue_factor=hue_factor)
+        output = self._F.adjust_hue_image_tensor(self, hue_factor=hue_factor)
         return Image.new_like(self, output)
 
     def adjust_gamma(self, gamma: float, gain: float = 1) -> Image:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.adjust_gamma_image_tensor(self, gamma=gamma, gain=gain)
+        output = self._F.adjust_gamma_image_tensor(self, gamma=gamma, gain=gain)
         return Image.new_like(self, output)
 
     def posterize(self, bits: int) -> Image:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.posterize_image_tensor(self, bits=bits)
+        output = self._F.posterize_image_tensor(self, bits=bits)
         return Image.new_like(self, output)
 
     def solarize(self, threshold: float) -> Image:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.solarize_image_tensor(self, threshold=threshold)
+        output = self._F.solarize_image_tensor(self, threshold=threshold)
         return Image.new_like(self, output)
 
     def autocontrast(self) -> Image:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.autocontrast_image_tensor(self)
+        output = self._F.autocontrast_image_tensor(self)
         return Image.new_like(self, output)
 
     def equalize(self) -> Image:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.equalize_image_tensor(self)
+        output = self._F.equalize_image_tensor(self)
         return Image.new_like(self, output)
 
     def invert(self) -> Image:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.invert_image_tensor(self)
+        output = self._F.invert_image_tensor(self)
         return Image.new_like(self, output)
 
     def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Image:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.gaussian_blur_image_tensor(self, kernel_size=kernel_size, sigma=sigma)
+        output = self._F.gaussian_blur_image_tensor(self, kernel_size=kernel_size, sigma=sigma)
         return Image.new_like(self, output)
@@ -10,15 +10,11 @@ from ._feature import _Feature
 
 class SegmentationMask(_Feature):
     def horizontal_flip(self) -> SegmentationMask:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.horizontal_flip_segmentation_mask(self)
+        output = self._F.horizontal_flip_segmentation_mask(self)
         return SegmentationMask.new_like(self, output)
 
     def vertical_flip(self) -> SegmentationMask:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.vertical_flip_segmentation_mask(self)
+        output = self._F.vertical_flip_segmentation_mask(self)
         return SegmentationMask.new_like(self, output)
 
     def resize(  # type: ignore[override]
@@ -28,21 +24,15 @@ class SegmentationMask(_Feature):
         max_size: Optional[int] = None,
         antialias: bool = False,
     ) -> SegmentationMask:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.resize_segmentation_mask(self, size, max_size=max_size)
+        output = self._F.resize_segmentation_mask(self, size, max_size=max_size)
         return SegmentationMask.new_like(self, output)
 
     def crop(self, top: int, left: int, height: int, width: int) -> SegmentationMask:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.crop_segmentation_mask(self, top, left, height, width)
+        output = self._F.crop_segmentation_mask(self, top, left, height, width)
         return SegmentationMask.new_like(self, output)
 
     def center_crop(self, output_size: List[int]) -> SegmentationMask:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.center_crop_segmentation_mask(self, output_size=output_size)
+        output = self._F.center_crop_segmentation_mask(self, output_size=output_size)
         return SegmentationMask.new_like(self, output)
 
     def resized_crop(
@@ -55,9 +45,7 @@ class SegmentationMask(_Feature):
         interpolation: InterpolationMode = InterpolationMode.NEAREST,
         antialias: bool = False,
     ) -> SegmentationMask:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.resized_crop_segmentation_mask(self, top, left, height, width, size=size)
+        output = self._F.resized_crop_segmentation_mask(self, top, left, height, width, size=size)
         return SegmentationMask.new_like(self, output)
 
     def pad(
@@ -66,13 +54,11 @@ class SegmentationMask(_Feature):
         fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
         padding_mode: str = "constant",
     ) -> SegmentationMask:
-        from torchvision.prototype.transforms import functional as _F
-
         # This cast does Sequence[int] -> List[int] and is required to make mypy happy
         if not isinstance(padding, int):
             padding = list(padding)
 
-        output = _F.pad_segmentation_mask(self, padding, padding_mode=padding_mode)
+        output = self._F.pad_segmentation_mask(self, padding, padding_mode=padding_mode)
         return SegmentationMask.new_like(self, output)
 
     def rotate(
@@ -83,9 +69,7 @@ class SegmentationMask(_Feature):
         fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
         center: Optional[List[float]] = None,
     ) -> SegmentationMask:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.rotate_segmentation_mask(self, angle, expand=expand, center=center)
+        output = self._F.rotate_segmentation_mask(self, angle, expand=expand, center=center)
         return SegmentationMask.new_like(self, output)
 
     def affine(
@@ -98,9 +82,7 @@ class SegmentationMask(_Feature):
         fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
         center: Optional[List[float]] = None,
     ) -> SegmentationMask:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.affine_segmentation_mask(
+        output = self._F.affine_segmentation_mask(
             self,
             angle,
             translate=translate,
@@ -116,9 +98,7 @@ class SegmentationMask(_Feature):
         interpolation: InterpolationMode = InterpolationMode.NEAREST,
         fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
     ) -> SegmentationMask:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.perspective_segmentation_mask(self, perspective_coeffs)
+        output = self._F.perspective_segmentation_mask(self, perspective_coeffs)
         return SegmentationMask.new_like(self, output)
 
     def elastic(
@@ -127,7 +107,5 @@ class SegmentationMask(_Feature):
         interpolation: InterpolationMode = InterpolationMode.NEAREST,
         fill: Optional[Union[int, float, Sequence[int], Sequence[float]]] = None,
     ) -> SegmentationMask:
-        from torchvision.prototype.transforms import functional as _F
-
-        output = _F.elastic_segmentation_mask(self, displacement)
+        output = self._F.elastic_segmentation_mask(self, displacement)
         return SegmentationMask.new_like(self, output, dtype=output.dtype)
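With every call site converted, the first transform invoked on any feature in a fresh process pays the one-time import of `transforms.functional`; every call after that reads the cached module. Illustrative usage against the prototype API as it appears in this diff (the prototype namespace was unstable at the time, so treat the exact entry points as assumptions):

```python
import torch
from torchvision.prototype import features

mask = features.SegmentationMask(torch.zeros(1, 10, 10))
flipped = mask.horizontal_flip()  # first call triggers the lazy import via self._F
```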