Unverified Commit 1a1d509c authored by vfdev's avatar vfdev Committed by GitHub
Browse files

Replaced the Python built-in name `input` with `inpt` (#6329)

parent 8068594a
...@@ -49,12 +49,12 @@ class _AutoAugmentBase(Transform): ...@@ -49,12 +49,12 @@ class _AutoAugmentBase(Transform):
unsupported_types: Tuple[Type, ...] = (features.BoundingBox, features.SegmentationMask), unsupported_types: Tuple[Type, ...] = (features.BoundingBox, features.SegmentationMask),
) -> Tuple[Tuple[Any, ...], Union[PIL.Image.Image, torch.Tensor, features.Image]]: ) -> Tuple[Tuple[Any, ...], Union[PIL.Image.Image, torch.Tensor, features.Image]]:
def fn(
    id: Tuple[Any, ...], inpt: Any
) -> Optional[Tuple[Tuple[Any, ...], Union[PIL.Image.Image, torch.Tensor, features.Image]]]:
    """Return ``(id, inpt)`` when *inpt* is an image-like value, else None.

    Raises:
        TypeError: if *inpt* matches ``unsupported_types`` from the
            enclosing scope (e.g. bounding boxes / segmentation masks that
            the auto-augment transform cannot handle).
    """
    # Exact type check on purpose: Tensor subclasses other than
    # features.Image must not be treated as plain images here.
    if type(inpt) in {torch.Tensor, features.Image} or isinstance(inpt, PIL.Image.Image):
        return id, inpt
    elif isinstance(inpt, unsupported_types):
        raise TypeError(f"Inputs of type {type(inpt).__name__} are not supported by {type(self).__name__}()")
    else:
        # Anything else is silently ignored by the flattening walk.
        return None
...@@ -494,7 +494,7 @@ class AugMix(_AutoAugmentBase): ...@@ -494,7 +494,7 @@ class AugMix(_AutoAugmentBase):
if isinstance(orig_image, torch.Tensor): if isinstance(orig_image, torch.Tensor):
image = orig_image image = orig_image
else: # isinstance(input, PIL.Image.Image): else: # isinstance(inpt, PIL.Image.Image):
image = pil_to_tensor(orig_image) image = pil_to_tensor(orig_image)
augmentation_space = self._AUGMENTATION_SPACE if self.all_ops else self._PARTIAL_AUGMENTATION_SPACE augmentation_space = self._AUGMENTATION_SPACE if self.all_ops else self._PARTIAL_AUGMENTATION_SPACE
......
...@@ -25,8 +25,8 @@ class RandomApply(_RandomApplyTransform): ...@@ -25,8 +25,8 @@ class RandomApply(_RandomApplyTransform):
super().__init__(p=p) super().__init__(p=p)
self.transform = transform self.transform = transform
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
    """Delegate to the wrapped transform (the p-gating happens in the base class)."""
    wrapped = self.transform
    return wrapped(inpt)
def extra_repr(self) -> str:
    """Expose the apply-probability in ``repr()`` output."""
    return f"p={self.p}"
......
...@@ -22,11 +22,11 @@ class ToTensor(Transform): ...@@ -22,11 +22,11 @@ class ToTensor(Transform):
) )
super().__init__() super().__init__()
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
    """Convert PIL images and numpy arrays to tensors; pass anything else through."""
    if not isinstance(inpt, (PIL.Image.Image, np.ndarray)):
        return inpt
    return _F.to_tensor(inpt)
class PILToTensor(Transform): class PILToTensor(Transform):
...@@ -37,11 +37,11 @@ class PILToTensor(Transform): ...@@ -37,11 +37,11 @@ class PILToTensor(Transform):
) )
super().__init__() super().__init__()
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
    """Convert a PIL image to a tensor; leave any other input untouched."""
    if not isinstance(inpt, PIL.Image.Image):
        return inpt
    return _F.pil_to_tensor(inpt)
class ToPILImage(Transform): class ToPILImage(Transform):
...@@ -53,11 +53,11 @@ class ToPILImage(Transform): ...@@ -53,11 +53,11 @@ class ToPILImage(Transform):
super().__init__() super().__init__()
self.mode = mode self.mode = mode
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
    """Convert tensor / ndarray image inputs to PIL; other inputs pass through."""
    convertible = is_simple_tensor(inpt) or isinstance(inpt, (features.Image, np.ndarray))
    if convertible:
        return _F.to_pil_image(inpt, mode=self.mode)
    return inpt
class Grayscale(Transform): class Grayscale(Transform):
...@@ -84,8 +84,8 @@ class Grayscale(Transform): ...@@ -84,8 +84,8 @@ class Grayscale(Transform):
self._rgb_to_gray = ConvertImageColorSpace(old_color_space=ColorSpace.RGB, color_space=ColorSpace.GRAY) self._rgb_to_gray = ConvertImageColorSpace(old_color_space=ColorSpace.RGB, color_space=ColorSpace.GRAY)
self._gray_to_rgb = ConvertImageColorSpace(old_color_space=ColorSpace.GRAY, color_space=ColorSpace.RGB) self._gray_to_rgb = ConvertImageColorSpace(old_color_space=ColorSpace.GRAY, color_space=ColorSpace.RGB)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
    """Convert *inpt* to grayscale, optionally replicated back to 3 channels."""
    gray = self._rgb_to_gray(inpt)
    # num_output_channels == 3 keeps the grayscale content in RGB layout.
    return self._gray_to_rgb(gray) if self.num_output_channels == 3 else gray
...@@ -109,5 +109,5 @@ class RandomGrayscale(_RandomApplyTransform): ...@@ -109,5 +109,5 @@ class RandomGrayscale(_RandomApplyTransform):
self._rgb_to_gray = ConvertImageColorSpace(old_color_space=ColorSpace.RGB, color_space=ColorSpace.GRAY) self._rgb_to_gray = ConvertImageColorSpace(old_color_space=ColorSpace.RGB, color_space=ColorSpace.GRAY)
self._gray_to_rgb = ConvertImageColorSpace(old_color_space=ColorSpace.GRAY, color_space=ColorSpace.RGB) self._gray_to_rgb = ConvertImageColorSpace(old_color_space=ColorSpace.GRAY, color_space=ColorSpace.RGB)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
    """Drop color information, then restore the RGB channel layout."""
    gray = self._rgb_to_gray(inpt)
    return self._gray_to_rgb(gray)
...@@ -150,16 +150,16 @@ class FiveCrop(Transform): ...@@ -150,16 +150,16 @@ class FiveCrop(Transform):
super().__init__() super().__init__()
self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
    """Produce the four corner crops plus the center crop of an image input."""
    if isinstance(inpt, features.Image):
        crops = F.five_crop_image_tensor(inpt, self.size)
        # Re-wrap each crop so the feature metadata is preserved.
        return MultiCropResult(features.Image.new_like(inpt, crop) for crop in crops)
    if is_simple_tensor(inpt):
        return MultiCropResult(F.five_crop_image_tensor(inpt, self.size))
    if isinstance(inpt, PIL.Image.Image):
        return MultiCropResult(F.five_crop_image_pil(inpt, self.size))
    # Non-image inputs pass through unchanged.
    return inpt
def forward(self, *inputs: Any) -> Any: def forward(self, *inputs: Any) -> Any:
sample = inputs if len(inputs) > 1 else inputs[0] sample = inputs if len(inputs) > 1 else inputs[0]
...@@ -174,16 +174,16 @@ class TenCrop(Transform): ...@@ -174,16 +174,16 @@ class TenCrop(Transform):
self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
self.vertical_flip = vertical_flip self.vertical_flip = vertical_flip
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
    """Produce five crops of the image plus five crops of its flipped copy.

    The flip direction is controlled by ``self.vertical_flip``.
    Non-image inputs pass through unchanged.
    """
    if isinstance(inpt, features.Image):
        output = F.ten_crop_image_tensor(inpt, self.size, vertical_flip=self.vertical_flip)
        return MultiCropResult(features.Image.new_like(inpt, o) for o in output)
    elif is_simple_tensor(inpt):
        # Bug fix: this branch previously dropped ``vertical_flip`` and
        # silently used the kernel's default, inconsistent with the
        # features.Image branch above.
        return MultiCropResult(F.ten_crop_image_tensor(inpt, self.size, vertical_flip=self.vertical_flip))
    elif isinstance(inpt, PIL.Image.Image):
        # Same fix for the PIL path.
        return MultiCropResult(F.ten_crop_image_pil(inpt, self.size, vertical_flip=self.vertical_flip))
    else:
        return inpt
def forward(self, *inputs: Any) -> Any: def forward(self, *inputs: Any) -> Any:
sample = inputs if len(inputs) > 1 else inputs[0] sample = inputs if len(inputs) > 1 else inputs[0]
......
...@@ -16,12 +16,12 @@ class ConvertBoundingBoxFormat(Transform): ...@@ -16,12 +16,12 @@ class ConvertBoundingBoxFormat(Transform):
format = features.BoundingBoxFormat[format] format = features.BoundingBoxFormat[format]
self.format = format self.format = format
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
    """Rewrite bounding boxes into the target format; other inputs pass through."""
    if not isinstance(inpt, features.BoundingBox):
        return inpt
    target_format = params["format"]
    converted = F.convert_bounding_box_format(inpt, old_format=inpt.format, new_format=target_format)
    # new_like keeps image_size and other metadata, with the new format tag.
    return features.BoundingBox.new_like(inpt, converted, format=target_format)
class ConvertImageDtype(Transform): class ConvertImageDtype(Transform):
...@@ -29,14 +29,14 @@ class ConvertImageDtype(Transform): ...@@ -29,14 +29,14 @@ class ConvertImageDtype(Transform):
super().__init__() super().__init__()
self.dtype = dtype self.dtype = dtype
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
    """Cast image tensors to ``self.dtype``; non-image inputs pass through."""
    if isinstance(inpt, features.Image):
        converted = convert_image_dtype(inpt, dtype=self.dtype)
        # Re-wrap so the result stays a features.Image with the new dtype.
        return features.Image.new_like(inpt, converted, dtype=self.dtype)
    if is_simple_tensor(inpt):
        return convert_image_dtype(inpt, dtype=self.dtype)
    return inpt
class ConvertImageColorSpace(Transform): class ConvertImageColorSpace(Transform):
...@@ -55,13 +55,13 @@ class ConvertImageColorSpace(Transform): ...@@ -55,13 +55,13 @@ class ConvertImageColorSpace(Transform):
old_color_space = features.ColorSpace.from_str(old_color_space) old_color_space = features.ColorSpace.from_str(old_color_space)
self.old_color_space = old_color_space self.old_color_space = old_color_space
def _transform(self, input: Any, params: Dict[str, Any]) -> Any: def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if isinstance(input, features.Image): if isinstance(inpt, features.Image):
output = F.convert_image_color_space_tensor( output = F.convert_image_color_space_tensor(
input, old_color_space=input.color_space, new_color_space=self.color_space inpt, old_color_space=inpt.color_space, new_color_space=self.color_space
) )
return features.Image.new_like(input, output, color_space=self.color_space) return features.Image.new_like(inpt, output, color_space=self.color_space)
elif is_simple_tensor(input): elif is_simple_tensor(inpt):
if self.old_color_space is None: if self.old_color_space is None:
raise RuntimeError( raise RuntimeError(
f"In order to convert simple tensor images, `{type(self).__name__}(...)` " f"In order to convert simple tensor images, `{type(self).__name__}(...)` "
...@@ -69,9 +69,9 @@ class ConvertImageColorSpace(Transform): ...@@ -69,9 +69,9 @@ class ConvertImageColorSpace(Transform):
) )
return F.convert_image_color_space_tensor( return F.convert_image_color_space_tensor(
input, old_color_space=self.old_color_space, new_color_space=self.color_space inpt, old_color_space=self.old_color_space, new_color_space=self.color_space
) )
elif isinstance(input, PIL.Image.Image): elif isinstance(inpt, PIL.Image.Image):
return F.convert_image_color_space_pil(input, color_space=self.color_space) return F.convert_image_color_space_pil(inpt, color_space=self.color_space)
else: else:
return input return inpt
...@@ -7,8 +7,8 @@ from torchvision.transforms.transforms import _setup_size ...@@ -7,8 +7,8 @@ from torchvision.transforms.transforms import _setup_size
class Identity(Transform): class Identity(Transform):
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
    """No-op transform: hand back *inpt* unchanged."""
    return inpt
class Lambda(Transform): class Lambda(Transform):
...@@ -17,11 +17,11 @@ class Lambda(Transform): ...@@ -17,11 +17,11 @@ class Lambda(Transform):
self.fn = fn self.fn = fn
self.types = types self.types = types
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
    """Apply ``self.fn`` to inputs whose exact type is listed in ``self.types``."""
    # Exact type match (no subclasses) mirrors the registration contract.
    return self.fn(inpt) if type(inpt) in self.types else inpt
def extra_repr(self) -> str: def extra_repr(self) -> str:
extras = [] extras = []
...@@ -38,13 +38,13 @@ class Normalize(Transform): ...@@ -38,13 +38,13 @@ class Normalize(Transform):
self.mean = mean self.mean = mean
self.std = std self.std = std
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
    """Normalize tensor images with ``self.mean`` / ``self.std``; others pass through."""
    if not isinstance(inpt, torch.Tensor):
        return inpt
    # No need to distinguish plain tensors from features.Image here: the
    # normalized result is no longer a features.Image either way.
    return F.normalize_image_tensor(inpt, mean=self.mean, std=self.std)
class GaussianBlur(Transform): class GaussianBlur(Transform):
......
...@@ -9,12 +9,12 @@ from ._utils import is_simple_tensor ...@@ -9,12 +9,12 @@ from ._utils import is_simple_tensor
class DecodeImage(Transform): class DecodeImage(Transform):
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
    """Decode encoded image bytes into an Image feature; pass anything else through."""
    if not isinstance(inpt, features.EncodedImage):
        return inpt
    decoded = F.decode_image_with_pil(inpt)
    return features.Image(decoded)
class LabelToOneHot(Transform): class LabelToOneHot(Transform):
...@@ -22,15 +22,15 @@ class LabelToOneHot(Transform): ...@@ -22,15 +22,15 @@ class LabelToOneHot(Transform):
super().__init__() super().__init__()
self.num_categories = num_categories self.num_categories = num_categories
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
    """One-hot encode Label features; other inputs pass through."""
    if not isinstance(inpt, features.Label):
        return inpt
    num_categories = self.num_categories
    # -1 means "infer the category count from the label", when it carries one.
    if num_categories == -1 and inpt.categories is not None:
        num_categories = len(inpt.categories)
    one_hot = F.label_to_one_hot(inpt, num_categories=num_categories)
    return features.OneHotLabel(one_hot, categories=inpt.categories)
def extra_repr(self) -> str: def extra_repr(self) -> str:
if self.num_categories == -1: if self.num_categories == -1:
...@@ -44,12 +44,12 @@ class ToImageTensor(Transform): ...@@ -44,12 +44,12 @@ class ToImageTensor(Transform):
super().__init__() super().__init__()
self.copy = copy self.copy = copy
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
    """Convert image-like inputs into an Image feature tensor."""
    image_like = isinstance(inpt, (features.Image, PIL.Image.Image, np.ndarray)) or is_simple_tensor(inpt)
    if not image_like:
        return inpt
    return features.Image(F.to_image_tensor(inpt, copy=self.copy))
class ToImagePIL(Transform): class ToImagePIL(Transform):
...@@ -57,8 +57,8 @@ class ToImagePIL(Transform): ...@@ -57,8 +57,8 @@ class ToImagePIL(Transform):
super().__init__() super().__init__()
self.copy = copy self.copy = copy
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
    """Convert image-like inputs to a PIL image; other inputs pass through."""
    image_like = isinstance(inpt, (features.Image, PIL.Image.Image, np.ndarray)) or is_simple_tensor(inpt)
    return F.to_image_pil(inpt, copy=self.copy) if image_like else inpt
...@@ -40,5 +40,5 @@ def has_all(sample: Any, *types: Type) -> bool: ...@@ -40,5 +40,5 @@ def has_all(sample: Any, *types: Type) -> bool:
return not bool(set(types) - set([type(obj) for obj in flat_sample])) return not bool(set(types) - set([type(obj) for obj in flat_sample]))
def is_simple_tensor(inpt: Any) -> bool:
    """Return True for plain tensors that are not torchvision feature subclasses."""
    if not isinstance(inpt, torch.Tensor):
        return False
    return not isinstance(inpt, features._Feature)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment