Unverified Commit 2d4484fb authored by Philip Meier's avatar Philip Meier Committed by GitHub
Browse files

port rotate (#7713)

parent c3e92565
......@@ -541,80 +541,6 @@ class TestRandomZoomOut:
fn.assert_has_calls(calls)
class TestRandomRotation:
    def test_assertions(self):
        # A single negative number is rejected outright.
        with pytest.raises(ValueError, match="is a single number, it must be positive"):
            transforms.RandomRotation(-0.7)

        # Sequences must have exactly two entries.
        for bad_degrees in ([-0.7], [-0.7, 0, 0.7]):
            with pytest.raises(ValueError, match="degrees should be a sequence of length 2"):
                transforms.RandomRotation(bad_degrees)

        with pytest.raises(TypeError, match="Got inappropriate fill arg"):
            transforms.RandomRotation(12, fill="abc")

        # `center` must be a two-element sequence: wrong type and wrong length.
        with pytest.raises(TypeError, match="center should be a sequence of length"):
            transforms.RandomRotation(12, center=12)

        with pytest.raises(ValueError, match="center should be a sequence of length"):
            transforms.RandomRotation(12, center=[1, 2, 3])

    def test__get_params(self):
        # A scalar bound is interpreted as the symmetric range [-bound, bound].
        bound = 34
        sampled = transforms.RandomRotation(bound)._get_params(None)
        assert -bound <= sampled["angle"] <= bound

        # An explicit [low, high] range is respected as-is.
        low_high = [12, 34]
        sampled = transforms.RandomRotation(low_high)._get_params(None)
        assert low_high[0] <= sampled["angle"] <= low_high[1]

    @pytest.mark.parametrize("degrees", [23, [0, 45], (0, 45)])
    @pytest.mark.parametrize("expand", [False, True])
    @pytest.mark.parametrize("fill", [0, [1, 2, 3], (2, 3, 4)])
    @pytest.mark.parametrize("center", [None, [2.0, 3.0]])
    def test__transform(self, degrees, expand, fill, center, mocker):
        interpolation = InterpolationMode.BILINEAR
        transform = transforms.RandomRotation(
            degrees, interpolation=interpolation, expand=expand, fill=fill, center=center
        )

        # The constructor normalizes `degrees` to a [low, high] float pair.
        if isinstance(degrees, (tuple, list)):
            expected_degrees = [float(degrees[0]), float(degrees[1])]
        else:
            expected_degrees = [float(-degrees), float(degrees)]
        assert transform.degrees == expected_degrees

        fn = mocker.patch("torchvision.transforms.v2.functional.rotate")
        inpt = mocker.MagicMock(spec=datapoints.Image)

        # vfdev-5, Feature Request: let's store params as Transform attribute
        # This could be also helpful for users
        # Otherwise, we can mock transform._get_params
        torch.manual_seed(12)
        _ = transform(inpt)
        # Re-seed so _get_params draws the same random angle the transform consumed.
        torch.manual_seed(12)
        params = transform._get_params(inpt)

        fill = transforms._utils._convert_fill_arg(fill)
        fn.assert_called_once_with(inpt, **params, interpolation=interpolation, expand=expand, fill=fill, center=center)

    @pytest.mark.parametrize("angle", [34, -87])
    @pytest.mark.parametrize("expand", [False, True])
    def test_boundingbox_spatial_size(self, angle, expand):
        # Specific test for BoundingBox.rotate: the rotated box must report the
        # same spatial size as an identically rotated image.
        bbox = datapoints.BoundingBox(
            torch.tensor([1, 2, 3, 4]), format=datapoints.BoundingBoxFormat.XYXY, spatial_size=(32, 32)
        )
        img = datapoints.Image(torch.rand(1, 3, 32, 32))

        out_img = img.rotate(angle, expand=expand)
        out_bbox = bbox.rotate(angle, expand=expand)

        assert out_img.spatial_size == out_bbox.spatial_size
class TestRandomCrop:
def test_assertions(self):
with pytest.raises(ValueError, match="Please provide only two dimensions"):
......
This diff is collapsed.
......@@ -138,20 +138,6 @@ xfails_pil_if_fill_sequence_needs_broadcast = xfails_pil(
DISPATCHER_INFOS = [
DispatcherInfo(
F.rotate,
kernels={
datapoints.Image: F.rotate_image_tensor,
datapoints.Video: F.rotate_video,
datapoints.BoundingBox: F.rotate_bounding_box,
datapoints.Mask: F.rotate_mask,
},
pil_kernel_info=PILKernelInfo(F.rotate_image_pil),
test_marks=[
xfail_jit_python_scalar_arg("fill"),
*xfails_pil_if_fill_sequence_needs_broadcast,
],
),
DispatcherInfo(
F.crop,
kernels={
......
......@@ -264,129 +264,6 @@ KERNEL_INFOS.append(
)
# Rotation angles (in degrees) used when building reference inputs for the rotate kernels.
_ROTATE_ANGLES = [-87, 15, 90]
def sample_inputs_rotate_image_tensor():
    """Yield ``ArgsKwargs`` samples for ``rotate_image_tensor``.

    Covers the ``expand``, ``center``, ``fill``, and ``interpolation``
    parameters, each varied against small random RGB float32 images.
    """
    make_rotate_image_loaders = functools.partial(
        make_image_loaders, sizes=["random"], color_spaces=["RGB"], dtypes=[torch.float32]
    )

    for image_loader in make_rotate_image_loaders():
        yield ArgsKwargs(image_loader, angle=15.0, expand=True)

    # Exercise both list/tuple and int/float center coordinates, plus the default.
    for image_loader, center in itertools.product(
        make_rotate_image_loaders(), [None, [1.0, 0.5], [1, 2], (1.0, 0.5), (1, 2)]
    ):
        yield ArgsKwargs(image_loader, angle=15.0, center=center)

    for image_loader in make_rotate_image_loaders():
        for fill in get_fills(num_channels=image_loader.num_channels, dtype=image_loader.dtype):
            yield ArgsKwargs(image_loader, angle=15.0, fill=fill)

    for image_loader, interpolation in itertools.product(
        make_rotate_image_loaders(),
        [F.InterpolationMode.NEAREST, F.InterpolationMode.BILINEAR],
    ):
        # BUG FIX: this loop previously yielded `fill=0` and never used
        # `interpolation`, so the NEAREST/BILINEAR variants were never exercised.
        yield ArgsKwargs(image_loader, angle=15.0, interpolation=interpolation)
def reference_inputs_rotate_image_tensor():
    """Pair every interpolation-suited image loader with each reference angle."""
    for image_loader in make_image_loaders_for_interpolation():
        for angle in _ROTATE_ANGLES:
            yield ArgsKwargs(image_loader, angle=angle)
def sample_inputs_rotate_bounding_box():
    """Yield one sample per bounding box loader, rotated by the first reference angle."""
    angle = _ROTATE_ANGLES[0]
    for loader in make_bounding_box_loaders():
        yield ArgsKwargs(
            loader,
            format=loader.format,
            spatial_size=loader.spatial_size,
            angle=angle,
        )
def reference_inputs_rotate_bounding_box():
    """Cross every bounding box loader (with and without extra batch dims) with each reference angle."""
    for loader in make_bounding_box_loaders(extra_dims=((), (4,))):
        for angle in _ROTATE_ANGLES:
            yield ArgsKwargs(
                loader,
                format=loader.format,
                spatial_size=loader.spatial_size,
                angle=angle,
            )
# TODO: add samples with expand=True and center
def reference_rotate_bounding_box(bounding_box, *, format, spatial_size, angle, expand=False, center=None):
    """Reference implementation: rotate a bounding box via the equivalent affine matrix.

    Returns the transformed boxes together with the (unchanged) spatial size.
    """
    # Default rotation center is the middle of the image, in (x, y) order.
    if center is None:
        center = [spatial_size[1] * 0.5, spatial_size[0] * 0.5]

    # NOTE: keep the exact `angle * np.pi / 180.0` expression so the float
    # results stay bit-identical to the kernel under test.
    cos_a = np.cos(angle * np.pi / 180.0)
    sin_a = np.sin(angle * np.pi / 180.0)
    cx, cy = center[0], center[1]

    # 2x3 affine matrix of a rotation about (cx, cy); dtype mirrors the input boxes.
    matrix_dtype = "float64" if bounding_box.dtype == torch.float64 else "float32"
    affine_matrix = np.array(
        [
            [cos_a, sin_a, cx - cx * cos_a - sin_a * cy],
            [-sin_a, cos_a, cy + cx * sin_a - cos_a * cy],
        ],
        dtype=matrix_dtype,
    )

    expected_bboxes = reference_affine_bounding_box_helper(
        bounding_box, format=format, spatial_size=spatial_size, affine_matrix=affine_matrix
    )
    return expected_bboxes, spatial_size
def sample_inputs_rotate_mask():
    """Yield one 15-degree rotation sample per randomly sized mask loader."""
    loaders = make_mask_loaders(sizes=["random"], num_categories=["random"], num_objects=["random"])
    for mask_loader in loaders:
        yield ArgsKwargs(mask_loader, angle=15.0)
def sample_inputs_rotate_video():
    """Yield one 15-degree rotation sample per randomly sized video loader."""
    loaders = make_video_loaders(sizes=["random"], num_frames=["random"])
    for video_loader in loaders:
        yield ArgsKwargs(video_loader, angle=15.0)
# Register kernel-level test configurations for every rotate kernel
# (image tensor, bounding box, mask, video).
KERNEL_INFOS.extend(
    [
        KernelInfo(
            F.rotate_image_tensor,
            sample_inputs_fn=sample_inputs_rotate_image_tensor,
            # PIL's rotate serves as the reference implementation for images.
            reference_fn=pil_reference_wrapper(F.rotate_image_pil),
            reference_inputs_fn=reference_inputs_rotate_image_tensor,
            float32_vs_uint8=True,
            # Allow pixel differences vs the PIL reference — presumably up to
            # 1 unit, aggregated as mean absolute error; verify against the
            # pil_reference_pixel_difference helper.
            closeness_kwargs=pil_reference_pixel_difference(1, mae=True),
            test_marks=[
                # A bare Python scalar for `fill` is not scriptable.
                xfail_jit_python_scalar_arg("fill"),
            ],
        ),
        KernelInfo(
            F.rotate_bounding_box,
            sample_inputs_fn=sample_inputs_rotate_bounding_box,
            reference_fn=reference_rotate_bounding_box,
            reference_inputs_fn=reference_inputs_rotate_bounding_box,
            # Relaxed tolerances for scripted vs eager float64 on both devices.
            closeness_kwargs={
                **scripted_vs_eager_float64_tolerances("cpu", atol=1e-4, rtol=1e-4),
                **scripted_vs_eager_float64_tolerances("cuda", atol=1e-4, rtol=1e-4),
            },
        ),
        KernelInfo(
            F.rotate_mask,
            sample_inputs_fn=sample_inputs_rotate_mask,
        ),
        KernelInfo(
            F.rotate_video,
            sample_inputs_fn=sample_inputs_rotate_video,
        ),
    ]
)
# Cartesian grid of crop parameters, including negative (out-of-bounds) offsets.
_CROP_PARAMS = combinations_grid(top=[-8, 0, 9], left=[-8, 0, 9], height=[12, 20], width=[12, 20])
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment