Unverified Commit 30b879fc authored by Philip Meier, committed by GitHub

Cleanup prototype kernel signatures (#6648)

* pass metadata directly after input in prototype kernels

* rename img to image
parent dc07ac2a
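Concretely, the first change moves the metadata arguments (format, image_size) so that they directly follow the input, ahead of the transform parameters. Below is a minimal sketch of the old and new argument orders; pad_bounding_box_old_order and pad_bounding_box_new_order are illustrative stand-ins rather than the actual torchvision kernels, and their bodies are omitted.

from typing import Tuple

import torch


def pad_bounding_box_old_order(
    bounding_box: torch.Tensor, padding: int, format: str, image_size: Tuple[int, int]
) -> torch.Tensor:
    # Before: the transform parameter (padding) sat between the input and its metadata.
    # Coordinate math is omitted; only the argument order matters for this sketch.
    return bounding_box


def pad_bounding_box_new_order(
    bounding_box: torch.Tensor, format: str, image_size: Tuple[int, int], padding: int
) -> torch.Tensor:
    # After: the metadata (format, image_size) directly follows the input,
    # and the transform parameters (padding) come last.
    return bounding_box


bbox = torch.tensor([[2.0, 2.0, 6.0, 6.0]])
pad_bounding_box_old_order(bbox, 3, "XYXY", (10, 10))
pad_bounding_box_new_order(bbox, "XYXY", (10, 10), 3)

The diff below applies this ordering to the bounding box kernels and their call sites, and additionally renames the img parameter of the image kernels to image.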
@@ -632,7 +632,7 @@ def test_correctness_pad_bounding_box(device, padding):
         bboxes_format = bboxes.format
         bboxes_image_size = bboxes.image_size
-        output_boxes = F.pad_bounding_box(bboxes, padding, format=bboxes_format)
+        output_boxes = F.pad_bounding_box(bboxes, format=bboxes_format, padding=padding)
         if bboxes.ndim < 2 or bboxes.shape[0] == 0:
             bboxes = [bboxes]
@@ -781,7 +781,7 @@ def test_correctness_center_crop_bounding_box(device, output_size):
         bboxes_format = bboxes.format
         bboxes_image_size = bboxes.image_size
-        output_boxes = F.center_crop_bounding_box(bboxes, bboxes_format, output_size, bboxes_image_size)
+        output_boxes = F.center_crop_bounding_box(bboxes, bboxes_format, bboxes_image_size, output_size)
         if bboxes.ndim < 2:
             bboxes = [bboxes]
@@ -83,7 +83,7 @@ class BoundingBox(_Feature):
         max_size: Optional[int] = None,
         antialias: bool = False,
     ) -> BoundingBox:
-        output = self._F.resize_bounding_box(self, size, image_size=self.image_size, max_size=max_size)
+        output = self._F.resize_bounding_box(self, image_size=self.image_size, size=size, max_size=max_size)
         if isinstance(size, int):
             size = [size]
         image_size = (size[0], size[0]) if len(size) == 1 else (size[0], size[1])
@@ -95,7 +95,7 @@ class BoundingBox(_Feature):
     def center_crop(self, output_size: List[int]) -> BoundingBox:
         output = self._F.center_crop_bounding_box(
-            self, format=self.format, output_size=output_size, image_size=self.image_size
+            self, format=self.format, image_size=self.image_size, output_size=output_size
         )
         if isinstance(output_size, int):
             output_size = [output_size]
@@ -126,7 +126,7 @@ class BoundingBox(_Feature):
         if not isinstance(padding, int):
             padding = list(padding)
-        output = self._F.pad_bounding_box(self, padding, format=self.format, padding_mode=padding_mode)
+        output = self._F.pad_bounding_box(self, format=self.format, padding=padding, padding_mode=padding_mode)
         # Update output image size:
         left, right, top, bottom = self._F._geometry._parse_pad_padding(padding)
@@ -10,11 +10,11 @@ erase_image_tensor = _FT.erase
 @torch.jit.unused
 def erase_image_pil(
-    img: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
+    image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
 ) -> PIL.Image.Image:
-    t_img = pil_to_tensor(img)
+    t_img = pil_to_tensor(image)
     output = erase_image_tensor(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
-    return to_pil_image(output, mode=img.mode)
+    return to_pil_image(output, mode=image.mode)


 def erase(
@@ -21,7 +21,7 @@ def normalize(
 def gaussian_blur_image_tensor(
-    img: torch.Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None
+    image: torch.Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None
 ) -> torch.Tensor:
     # TODO: consider deprecating integers from sigma on the future
     if isinstance(kernel_size, int):
@@ -47,16 +47,16 @@ def gaussian_blur_image_tensor(
         if s <= 0.0:
             raise ValueError(f"sigma should have positive values. Got {sigma}")
-    return _FT.gaussian_blur(img, kernel_size, sigma)
+    return _FT.gaussian_blur(image, kernel_size, sigma)


 @torch.jit.unused
 def gaussian_blur_image_pil(
-    img: PIL.Image.Image, kernel_size: List[int], sigma: Optional[List[float]] = None
+    image: PIL.Image.Image, kernel_size: List[int], sigma: Optional[List[float]] = None
 ) -> PIL.Image.Image:
-    t_img = pil_to_tensor(img)
+    t_img = pil_to_tensor(image)
     output = gaussian_blur_image_tensor(t_img, kernel_size=kernel_size, sigma=sigma)
-    return to_pil_image(output, mode=img.mode)
+    return to_pil_image(output, mode=image.mode)


 def gaussian_blur(
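A side effect of the img -> image rename is that callers which passed that argument by keyword must update the keyword name. A small self-contained sketch of the effect, using a toy stand-in rather than the real gaussian_blur_image_tensor kernel:

import torch


def blur_sketch(image: torch.Tensor, kernel_size: int) -> torch.Tensor:
    # Toy stand-in for a blur kernel: the real kernel blurs the image,
    # here only the renamed first parameter (img -> image) matters.
    return image


t = torch.rand(3, 8, 8)
blur_sketch(t, kernel_size=3)          # positional callers are unaffected by the rename
blur_sketch(image=t, kernel_size=3)    # keyword callers must now write image= instead of img=

Positional callers, such as the internal call sites updated in this diff, are unaffected.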