Unverified commit b45969a7, authored by Vasilis Vryniotis, committed by GitHub

[prototype] Clean up `features` area (#6834)

* Clean up the `features` area

* Remove unnecessary imports
parent 7de68b0d
 from ._bounding_box import BoundingBox, BoundingBoxFormat
 from ._encoded import EncodedData, EncodedImage
 from ._feature import _Feature, FillType, FillTypeJIT, InputType, InputTypeJIT, is_simple_tensor
-from ._image import (
-    ColorSpace,
-    Image,
-    ImageType,
-    ImageTypeJIT,
-    LegacyImageType,
-    LegacyImageTypeJIT,
-    TensorImageType,
-    TensorImageTypeJIT,
-)
+from ._image import ColorSpace, Image, ImageType, ImageTypeJIT, TensorImageType, TensorImageTypeJIT
 from ._label import Label, OneHotLabel
 from ._mask import Mask
-from ._video import (
-    LegacyVideoType,
-    LegacyVideoTypeJIT,
-    TensorVideoType,
-    TensorVideoTypeJIT,
-    Video,
-    VideoType,
-    VideoTypeJIT,
-)
+from ._video import TensorVideoType, TensorVideoTypeJIT, Video, VideoType, VideoTypeJIT
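The net effect of this hunk is that `LegacyImageType`, `LegacyImageTypeJIT`, `LegacyVideoType`, and `LegacyVideoTypeJIT` are no longer re-exported. A minimal migration sketch for downstream annotations, assuming code imported the aliases from `torchvision.prototype.features`; `my_transform` is a hypothetical helper shown only to illustrate spelling the unions out:

from typing import Union

import PIL.Image
import torch
from torchvision.transforms.functional import pil_to_tensor

# LegacyImageType was Union[torch.Tensor, PIL.Image.Image] and the *JIT
# aliases were plain torch.Tensor (see the alias definitions further down),
# so the dropped names can simply be written out:
def my_transform(inpt: Union[torch.Tensor, PIL.Image.Image]) -> torch.Tensor:
    return inpt if isinstance(inpt, torch.Tensor) else pil_to_tensor(inpt)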
@@ -61,18 +61,6 @@ class BoundingBox(_Feature):
     def __repr__(self, *, tensor_contents: Any = None) -> str:  # type: ignore[override]
         return self._make_repr(format=self.format, spatial_size=self.spatial_size)
 
-    def to_format(self, format: Union[str, BoundingBoxFormat]) -> BoundingBox:
-        if isinstance(format, str):
-            format = BoundingBoxFormat.from_str(format.upper())
-
-        return BoundingBox.wrap_like(
-            self,
-            self._F.convert_format_bounding_box(
-                self.as_subclass(torch.Tensor), old_format=self.format, new_format=format
-            ),
-            format=format,
-        )
-
     def horizontal_flip(self) -> BoundingBox:
         output = self._F.horizontal_flip_bounding_box(
             self.as_subclass(torch.Tensor), format=self.format, spatial_size=self.spatial_size
...
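With `BoundingBox.to_format` removed, callers go through the functional kernel the method wrapped. A sketch of the replacement pattern, mirroring the removed body; it assumes `self._F` resolved to `torchvision.prototype.transforms.functional`:

import torch
from torchvision.prototype import features
from torchvision.prototype.transforms import functional as F

bbox = features.BoundingBox(
    [[10.0, 20.0, 30.0, 40.0]],
    format=features.BoundingBoxFormat.XYXY,
    spatial_size=(100, 100),
)

# Equivalent of the removed bbox.to_format("xywh"):
new_format = features.BoundingBoxFormat.XYWH
converted = features.BoundingBox.wrap_like(
    bbox,
    F.convert_format_bounding_box(
        bbox.as_subclass(torch.Tensor), old_format=bbox.format, new_format=new_format
    ),
    format=new_format,
)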
 from __future__ import annotations
 
 import warnings
-from typing import Any, cast, List, Optional, Tuple, Union
+from typing import Any, List, Optional, Tuple, Union
 
 import PIL.Image
 import torch
@@ -104,7 +104,7 @@ class Image(_Feature):
     @property
     def spatial_size(self) -> Tuple[int, int]:
-        return cast(Tuple[int, int], tuple(self.shape[-2:]))
+        return tuple(self.shape[-2:])  # type: ignore[return-value]
 
     @property
     def num_channels(self) -> int:
@@ -285,7 +285,5 @@ class Image(_Feature):
 ImageType = Union[torch.Tensor, PIL.Image.Image, Image]
 ImageTypeJIT = torch.Tensor
-LegacyImageType = Union[torch.Tensor, PIL.Image.Image]
-LegacyImageTypeJIT = torch.Tensor
 TensorImageType = Union[torch.Tensor, Image]
 TensorImageTypeJIT = torch.Tensor
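The `spatial_size` change is behavior-preserving: `tuple(self.shape[-2:])` is inferred as `Tuple[int, ...]`, so the type checker needs help either way, and switching from `cast` to a targeted ignore lets `cast` drop out of the `typing` import. A self-contained illustration (in an assignment context mypy reports `assignment` rather than `return-value`):

from typing import Tuple, cast

import torch

shape = torch.rand(3, 32, 48).shape

# Old style: an explicit cast narrows Tuple[int, ...] to Tuple[int, int].
old_style = cast(Tuple[int, int], tuple(shape[-2:]))
# New style: a targeted ignore, no cast import needed.
new_style: Tuple[int, int] = tuple(shape[-2:])  # type: ignore[assignment]

assert old_style == new_style == (32, 48)

The same swap is applied to `Mask.spatial_size` and `Video.spatial_size` in the next two hunks.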
 from __future__ import annotations
 
-from typing import Any, cast, List, Optional, Tuple, Union
+from typing import Any, List, Optional, Tuple, Union
 
 import torch
 from torchvision.transforms import InterpolationMode
@@ -34,7 +34,7 @@ class Mask(_Feature):
     @property
     def spatial_size(self) -> Tuple[int, int]:
-        return cast(Tuple[int, int], tuple(self.shape[-2:]))
+        return tuple(self.shape[-2:])  # type: ignore[return-value]
 
     def horizontal_flip(self) -> Mask:
         output = self._F.horizontal_flip_mask(self.as_subclass(torch.Tensor))
...
 from __future__ import annotations
 
 import warnings
-from typing import Any, cast, List, Optional, Tuple, Union
+from typing import Any, List, Optional, Tuple, Union
 
 import torch
 from torchvision.transforms.functional import InterpolationMode
@@ -56,7 +56,7 @@ class Video(_Feature):
     @property
     def spatial_size(self) -> Tuple[int, int]:
-        return cast(Tuple[int, int], tuple(self.shape[-2:]))
+        return tuple(self.shape[-2:])  # type: ignore[return-value]
 
     @property
     def num_channels(self) -> int:
@@ -237,7 +237,5 @@ class Video(_Feature):
 VideoType = Union[torch.Tensor, Video]
 VideoTypeJIT = torch.Tensor
-LegacyVideoType = torch.Tensor
-LegacyVideoTypeJIT = torch.Tensor
 TensorVideoType = Union[torch.Tensor, Video]
 TensorVideoTypeJIT = torch.Tensor
@@ -23,8 +23,8 @@ def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:
 
 def rgb_to_grayscale(
-    inpt: Union[features.LegacyImageTypeJIT, features.LegacyVideoTypeJIT], num_output_channels: int = 1
-) -> Union[features.LegacyImageTypeJIT, features.LegacyVideoTypeJIT]:
+    inpt: Union[features.ImageTypeJIT, features.VideoTypeJIT], num_output_channels: int = 1
+) -> Union[features.ImageTypeJIT, features.VideoTypeJIT]:
     if not torch.jit.is_scripting() and isinstance(inpt, (features.Image, features.Video)):
         inpt = inpt.as_subclass(torch.Tensor)
         old_color_space = None
...
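Since `ImageTypeJIT` and `VideoTypeJIT` are themselves plain `torch.Tensor`, the scripted signature is unchanged; only eager calls with `Image`/`Video` features take the unwrapping branch above. A hedged sketch, assuming this deprecated shim is exposed via `torchvision.prototype.transforms.functional`:

import torch
from torchvision.prototype import features
from torchvision.prototype.transforms import functional as F

img = features.Image(torch.rand(3, 32, 48))
# Eager mode: the feature is unwrapped to a plain tensor before dispatch.
gray = F.rgb_to_grayscale(img, num_output_channels=1)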
@@ -16,9 +16,7 @@ def normalize_image_tensor(
         raise TypeError(f"Input tensor should be a float tensor. Got {image.dtype}.")
 
     if image.ndim < 3:
-        raise ValueError(
-            f"Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = {image.size()}"
-        )
+        raise ValueError(f"Expected tensor to be a tensor image of size (..., C, H, W). Got {image.shape}.")
 
     if isinstance(std, (tuple, list)):
         divzero = not all(std)
...
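The rewrite folds the multi-line `raise` into one line and reports `image.shape` instead of the noisier `tensor.size() = ...`. A quick sketch of both validation paths, assuming the kernel is exported as `normalize_image_tensor`:

import torch
from torchvision.prototype.transforms import functional as F

mean, std = [0.5, 0.5, 0.5], [0.25, 0.25, 0.25]

# A float (..., C, H, W) tensor passes validation:
out = F.normalize_image_tensor(torch.rand(3, 32, 48), mean=mean, std=std)

# A 2D tensor now trips the reworded check:
try:
    F.normalize_image_tensor(torch.rand(32, 48), mean=mean, std=std)
except ValueError as exc:
    print(exc)  # Expected tensor to be a tensor image of size (..., C, H, W). Got torch.Size([32, 48]).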