Unverified Commit 08743385 authored by Vasilis Vryniotis, committed by GitHub

Rename `convert_bounding_box_format` => `convert_format_bounding_box` (#6582)

* Rename `convert_bounding_box_format` => `convert_format_bounding_box`

* Add missed replacement.
parent 321f6552
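
Only the name changes in this commit; the signature shown in the `_meta.py` hunk below (`bounding_box, old_format, new_format, copy=True`) stays the same. A minimal usage sketch of the renamed helper (the box values are illustrative, not taken from the diff):

```python
import torch
from torchvision.prototype import features
from torchvision.prototype.transforms.functional import convert_format_bounding_box

# An illustrative box in XYXY format: (x1, y1, x2, y2)
boxes = torch.tensor([[10.0, 20.0, 30.0, 40.0]])

# Before: convert_bounding_box_format(boxes, old_format=..., new_format=...)
# After this commit, the same call under the new name:
boxes_xywh = convert_format_bounding_box(
    boxes,
    old_format=features.BoundingBoxFormat.XYXY,
    new_format=features.BoundingBoxFormat.XYWH,
)
# -> tensor([[10., 20., 20., 20.]]), i.e. (x, y, width, height)
```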
@@ -19,7 +19,7 @@ from prototype_common_utils import (
 from torch import jit
 from torchvision.prototype import features
 from torchvision.prototype.transforms.functional._geometry import _center_crop_compute_padding
-from torchvision.prototype.transforms.functional._meta import convert_bounding_box_format
+from torchvision.prototype.transforms.functional._meta import convert_format_bounding_box
 from torchvision.transforms.functional import _get_perspective_coeffs
@@ -633,7 +633,7 @@ def test_correctness_affine_bounding_box(angle, translate, scale, shear, center)
         affine_matrix = _compute_affine_matrix(angle_, translate_, scale_, shear_, center_)
         affine_matrix = affine_matrix[:2, :]
-        bbox_xyxy = convert_bounding_box_format(
+        bbox_xyxy = convert_format_bounding_box(
             bbox, old_format=bbox.format, new_format=features.BoundingBoxFormat.XYXY
         )
         points = np.array(
@@ -658,7 +658,7 @@ def test_correctness_affine_bounding_box(angle, translate, scale, shear, center)
             dtype=torch.float32,
             device=bbox.device,
         )
-        return convert_bounding_box_format(
+        return convert_format_bounding_box(
             out_bbox, old_format=features.BoundingBoxFormat.XYXY, new_format=bbox.format, copy=False
         )
@@ -835,7 +835,7 @@ def test_correctness_rotate_bounding_box(angle, expand, center):
         affine_matrix = affine_matrix[:2, :]
         image_size = bbox.image_size
-        bbox_xyxy = convert_bounding_box_format(
+        bbox_xyxy = convert_format_bounding_box(
             bbox, old_format=bbox.format, new_format=features.BoundingBoxFormat.XYXY
         )
         points = np.array(
@@ -876,7 +876,7 @@ def test_correctness_rotate_bounding_box(angle, expand, center):
             dtype=torch.float32,
             device=bbox.device,
         )
-        return convert_bounding_box_format(
+        return convert_format_bounding_box(
             out_bbox, old_format=features.BoundingBoxFormat.XYXY, new_format=bbox.format, copy=False
         )
@@ -1097,7 +1097,7 @@ def test_correctness_crop_bounding_box(device, format, top, left, height, width,
     ]
     in_boxes = features.BoundingBox(in_boxes, format=features.BoundingBoxFormat.XYXY, image_size=size, device=device)
     if format != features.BoundingBoxFormat.XYXY:
-        in_boxes = convert_bounding_box_format(in_boxes, features.BoundingBoxFormat.XYXY, format)
+        in_boxes = convert_format_bounding_box(in_boxes, features.BoundingBoxFormat.XYXY, format)
     output_boxes = F.crop_bounding_box(
         in_boxes,
@@ -1107,7 +1107,7 @@ def test_correctness_crop_bounding_box(device, format, top, left, height, width,
     )
     if format != features.BoundingBoxFormat.XYXY:
-        output_boxes = convert_bounding_box_format(output_boxes, format, features.BoundingBoxFormat.XYXY)
+        output_boxes = convert_format_bounding_box(output_boxes, format, features.BoundingBoxFormat.XYXY)
     torch.testing.assert_close(output_boxes.tolist(), expected_bboxes)
@@ -1213,12 +1213,12 @@ def test_correctness_resized_crop_bounding_box(device, format, top, left, height
         in_boxes, format=features.BoundingBoxFormat.XYXY, image_size=image_size, device=device
     )
     if format != features.BoundingBoxFormat.XYXY:
-        in_boxes = convert_bounding_box_format(in_boxes, features.BoundingBoxFormat.XYXY, format)
+        in_boxes = convert_format_bounding_box(in_boxes, features.BoundingBoxFormat.XYXY, format)
     output_boxes = F.resized_crop_bounding_box(in_boxes, format, top, left, height, width, size)
     if format != features.BoundingBoxFormat.XYXY:
-        output_boxes = convert_bounding_box_format(output_boxes, format, features.BoundingBoxFormat.XYXY)
+        output_boxes = convert_format_bounding_box(output_boxes, format, features.BoundingBoxFormat.XYXY)
     torch.testing.assert_close(output_boxes, expected_bboxes)
@@ -1268,12 +1268,12 @@ def test_correctness_pad_bounding_box(device, padding):
         bbox_format = bbox.format
         bbox_dtype = bbox.dtype
-        bbox = convert_bounding_box_format(bbox, old_format=bbox_format, new_format=features.BoundingBoxFormat.XYXY)
+        bbox = convert_format_bounding_box(bbox, old_format=bbox_format, new_format=features.BoundingBoxFormat.XYXY)
         bbox[0::2] += pad_left
         bbox[1::2] += pad_up
-        bbox = convert_bounding_box_format(
+        bbox = convert_format_bounding_box(
             bbox, old_format=features.BoundingBoxFormat.XYXY, new_format=bbox_format, copy=False
         )
         if bbox.dtype != bbox_dtype:
@@ -1396,7 +1396,7 @@ def test_correctness_perspective_bounding_box(device, startpoints, endpoints):
             ]
         )
-        bbox_xyxy = convert_bounding_box_format(
+        bbox_xyxy = convert_format_bounding_box(
             bbox, old_format=bbox.format, new_format=features.BoundingBoxFormat.XYXY
         )
         points = np.array(
@@ -1423,7 +1423,7 @@ def test_correctness_perspective_bounding_box(device, startpoints, endpoints):
             dtype=torch.float32,
             device=bbox.device,
         )
-        return convert_bounding_box_format(
+        return convert_format_bounding_box(
             out_bbox, old_format=features.BoundingBoxFormat.XYXY, new_format=bbox.format, copy=False
         )
@@ -1528,7 +1528,7 @@ def test_correctness_center_crop_bounding_box(device, output_size):
     def _compute_expected_bbox(bbox, output_size_):
         format_ = bbox.format
         image_size_ = bbox.image_size
-        bbox = convert_bounding_box_format(bbox, format_, features.BoundingBoxFormat.XYWH)
+        bbox = convert_format_bounding_box(bbox, format_, features.BoundingBoxFormat.XYWH)
         if len(output_size_) == 1:
             output_size_.append(output_size_[-1])
@@ -1548,7 +1548,7 @@ def test_correctness_center_crop_bounding_box(device, output_size):
             dtype=bbox.dtype,
             device=bbox.device,
         )
-        return convert_bounding_box_format(out_bbox, features.BoundingBoxFormat.XYWH, format_, copy=False)
+        return convert_format_bounding_box(out_bbox, features.BoundingBoxFormat.XYWH, format_, copy=False)
     for bboxes in make_bounding_boxes(
         image_sizes=[(32, 32), (24, 33), (32, 25)],
@@ -65,7 +65,7 @@ class BoundingBox(_Feature):
         format = BoundingBoxFormat.from_str(format.upper())
         return BoundingBox.new_like(
-            self, self._F.convert_bounding_box_format(self, old_format=self.format, new_format=format), format=format
+            self, self._F.convert_format_bounding_box(self, old_format=self.format, new_format=format), format=format
         )
     def horizontal_flip(self) -> BoundingBox:
@@ -254,7 +254,7 @@ class SimpleCopyPaste(_RandomApplyTransform):
         # There is a similar +1 in other reference implementations:
         # https://github.com/pytorch/vision/blob/b6feccbc4387766b76a3e22b13815dbbbfa87c0f/torchvision/models/detection/roi_heads.py#L418-L422
         xyxy_boxes[:, 2:] += 1
-        boxes = F.convert_bounding_box_format(
+        boxes = F.convert_format_bounding_box(
             xyxy_boxes, old_format=features.BoundingBoxFormat.XYXY, new_format=bbox_format, copy=False
         )
         out_target["boxes"] = torch.cat([boxes, paste_boxes])
@@ -263,7 +263,7 @@ class SimpleCopyPaste(_RandomApplyTransform):
         out_target["labels"] = torch.cat([labels, paste_labels])
         # Check for degenerated boxes and remove them
-        boxes = F.convert_bounding_box_format(
+        boxes = F.convert_format_bounding_box(
             out_target["boxes"], old_format=bbox_format, new_format=features.BoundingBoxFormat.XYXY
         )
         degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
@@ -655,7 +655,7 @@ class RandomIoUCrop(Transform):
                 continue
             # check for any valid boxes with centers within the crop area
-            xyxy_bboxes = F.convert_bounding_box_format(
+            xyxy_bboxes = F.convert_format_bounding_box(
                 bboxes, old_format=bboxes.format, new_format=features.BoundingBoxFormat.XYXY, copy=True
             )
             cx = 0.5 * (xyxy_bboxes[..., 0] + xyxy_bboxes[..., 2])
@@ -17,7 +17,7 @@ class ConvertBoundingBoxFormat(Transform):
         self.format = format
     def _transform(self, inpt: features.BoundingBox, params: Dict[str, Any]) -> features.BoundingBox:
-        output = F.convert_bounding_box_format(inpt, old_format=inpt.format, new_format=params["format"])
+        output = F.convert_format_bounding_box(inpt, old_format=inpt.format, new_format=params["format"])
         return features.BoundingBox.new_like(inpt, output, format=params["format"])
@@ -163,7 +163,7 @@ class RemoveSmallBoundingBoxes(Transform):
         # be in XYXY format only to calculate the width and height internally. Thus, if the box is in XYWH or CXCYWH
         # format, we need to convert first just to afterwards compute the width and height again, although they were
         # there in the first place for these formats.
-        bounding_box = F.convert_bounding_box_format(
+        bounding_box = F.convert_format_bounding_box(
             bounding_box, old_format=bounding_box.format, new_format=features.BoundingBoxFormat.XYXY
         )
         valid_indices = remove_small_boxes(bounding_box, min_size=self.min_size)
@@ -3,7 +3,7 @@
 from torchvision.transforms import InterpolationMode  # usort: skip
 from ._meta import (
     clamp_bounding_box,
-    convert_bounding_box_format,
+    convert_format_bounding_box,
     convert_color_space_image_tensor,
     convert_color_space_image_pil,
     convert_color_space,
@@ -17,7 +17,7 @@ from torchvision.transforms.functional import (
 )
 from torchvision.transforms.functional_tensor import _parse_pad_padding
-from ._meta import convert_bounding_box_format, get_dimensions_image_pil, get_dimensions_image_tensor
+from ._meta import convert_format_bounding_box, get_dimensions_image_pil, get_dimensions_image_tensor
 # shortcut type
@@ -37,13 +37,13 @@ def horizontal_flip_bounding_box(
 ) -> torch.Tensor:
     shape = bounding_box.shape
-    bounding_box = convert_bounding_box_format(
+    bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
     ).view(-1, 4)
     bounding_box[:, [0, 2]] = image_size[1] - bounding_box[:, [2, 0]]
-    return convert_bounding_box_format(
+    return convert_format_bounding_box(
         bounding_box, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
     ).view(shape)
@@ -70,13 +70,13 @@ def vertical_flip_bounding_box(
 ) -> torch.Tensor:
     shape = bounding_box.shape
-    bounding_box = convert_bounding_box_format(
+    bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
     ).view(-1, 4)
     bounding_box[:, [1, 3]] = image_size[0] - bounding_box[:, [3, 1]]
-    return convert_bounding_box_format(
+    return convert_format_bounding_box(
         bounding_box, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
     ).view(shape)
@@ -362,7 +362,7 @@ def affine_bounding_box(
     center: Optional[List[float]] = None,
 ) -> torch.Tensor:
     original_shape = bounding_box.shape
-    bounding_box = convert_bounding_box_format(
+    bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
     ).view(-1, 4)
@@ -370,7 +370,7 @@
     # out_bboxes should be of shape [N boxes, 4]
-    return convert_bounding_box_format(
+    return convert_format_bounding_box(
         out_bboxes, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
     ).view(original_shape)
@@ -530,13 +530,13 @@ def rotate_bounding_box(
         center = None
     original_shape = bounding_box.shape
-    bounding_box = convert_bounding_box_format(
+    bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
     ).view(-1, 4)
     out_bboxes = _affine_bounding_box_xyxy(bounding_box, image_size, angle=-angle, center=center, expand=expand)
-    return convert_bounding_box_format(
+    return convert_format_bounding_box(
         out_bboxes, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
     ).view(original_shape)
@@ -704,7 +704,7 @@ def crop_bounding_box(
     top: int,
     left: int,
 ) -> torch.Tensor:
-    bounding_box = convert_bounding_box_format(
+    bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
     )
@@ -712,7 +712,7 @@
     bounding_box[..., 0::2] -= left
     bounding_box[..., 1::2] -= top
-    return convert_bounding_box_format(
+    return convert_format_bounding_box(
         bounding_box, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
     )
@@ -758,7 +758,7 @@ def perspective_bounding_box(
         raise ValueError("Argument perspective_coeffs should have 8 float values")
     original_shape = bounding_box.shape
-    bounding_box = convert_bounding_box_format(
+    bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
     ).view(-1, 4)
@@ -828,7 +828,7 @@
     # out_bboxes should be of shape [N boxes, 4]
-    return convert_bounding_box_format(
+    return convert_format_bounding_box(
         out_bboxes, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
     ).view(original_shape)
@@ -900,7 +900,7 @@ def elastic_bounding_box(
     displacement = displacement.to(bounding_box.device)
     original_shape = bounding_box.shape
-    bounding_box = convert_bounding_box_format(
+    bounding_box = convert_format_bounding_box(
         bounding_box, old_format=format, new_format=features.BoundingBoxFormat.XYXY
     ).view(-1, 4)
@@ -926,7 +926,7 @@
     out_bbox_maxs, _ = torch.max(transformed_points, dim=1)
     out_bboxes = torch.cat([out_bbox_mins, out_bbox_maxs], dim=1)
-    return convert_bounding_box_format(
+    return convert_format_bounding_box(
         out_bboxes, old_format=features.BoundingBoxFormat.XYXY, new_format=format, copy=False
     ).view(original_shape)
@@ -79,7 +79,7 @@ def _xyxy_to_cxcywh(xyxy: torch.Tensor) -> torch.Tensor:
     return torch.stack((cx, cy, w, h), dim=-1)
-def convert_bounding_box_format(
+def convert_format_bounding_box(
     bounding_box: torch.Tensor, old_format: BoundingBoxFormat, new_format: BoundingBoxFormat, copy: bool = True
 ) -> torch.Tensor:
     if new_format == old_format:
@@ -106,10 +106,10 @@ clamp_bounding_box(
 ) -> torch.Tensor:
     # TODO: (PERF) Possible speed up clamping if we have different implementations for each bbox format.
     # Not sure if they yield equivalent results.
-    xyxy_boxes = convert_bounding_box_format(bounding_box, format, BoundingBoxFormat.XYXY)
+    xyxy_boxes = convert_format_bounding_box(bounding_box, format, BoundingBoxFormat.XYXY)
     xyxy_boxes[..., 0::2].clamp_(min=0, max=image_size[1])
     xyxy_boxes[..., 1::2].clamp_(min=0, max=image_size[0])
-    return convert_bounding_box_format(xyxy_boxes, BoundingBoxFormat.XYXY, format, copy=False)
+    return convert_format_bounding_box(xyxy_boxes, BoundingBoxFormat.XYXY, format, copy=False)
 def _split_alpha(image: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: