"docs/git@developer.sourcefind.cn:one/TransferBench.git" did not exist on "45d15bfd9d2fe13e66b2f1b1c90070ee424fea93"
Unverified Commit 5178a2e2 authored by Philip Meier's avatar Philip Meier Committed by GitHub
Browse files

[PoC] refactor transforms v2 tests (#7562)


Co-authored-by: default avatarNicolas Hug <nh.nicolas.hug@gmail.com>
Co-authored-by: default avatarNicolas Hug <contact@nicolas-hug.com>
parent 17d50fc6
...@@ -7,9 +7,11 @@ import itertools ...@@ -7,9 +7,11 @@ import itertools
import os import os
import pathlib import pathlib
import random import random
import re
import shutil import shutil
import sys import sys
import tempfile import tempfile
import warnings
from collections import defaultdict from collections import defaultdict
from subprocess import CalledProcessError, check_output, STDOUT from subprocess import CalledProcessError, check_output, STDOUT
from typing import Callable, Sequence, Tuple, Union from typing import Callable, Sequence, Tuple, Union
...@@ -880,3 +882,23 @@ def assert_run_python_script(source_code): ...@@ -880,3 +882,23 @@ def assert_run_python_script(source_code):
raise RuntimeError(f"script errored with output:\n{e.output.decode()}") raise RuntimeError(f"script errored with output:\n{e.output.decode()}")
if out != b"": if out != b"":
raise AssertionError(out.decode()) raise AssertionError(out.decode())
@contextlib.contextmanager
def assert_no_warnings():
    """Fail (by raising the warning as an exception) if any warning is emitted in the block.

    Note that `warnings.catch_warnings` does not catch anything by itself; it
    only scopes the warning-filter state, so the "error" filter installed here
    is undone automatically on exit.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        yield
@contextlib.contextmanager
def ignore_jit_no_profile_information_warning():
    """Silence the profiling warnings the JIT emits when calling a scripted object.

    Calling a scripted object often triggers a warning like
    `UserWarning: operator() profile_node %$INT1 : int[] = prim::profile_ivalue($INT2) does not have profile information`
    with varying `INT1` and `INT2`. Since these are uninteresting for us and
    only clutter the test summary, they are ignored while inside this context.
    """
    message_prefix = re.escape("operator() profile_node %")
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", message=message_prefix, category=UserWarning)
        yield
...@@ -1711,8 +1711,6 @@ def test_antialias_warning(): ...@@ -1711,8 +1711,6 @@ def test_antialias_warning():
tensor_video = torch.randint(0, 256, size=(2, 3, 10, 10), dtype=torch.uint8) tensor_video = torch.randint(0, 256, size=(2, 3, 10, 10), dtype=torch.uint8)
match = "The default value of the antialias parameter" match = "The default value of the antialias parameter"
with pytest.warns(UserWarning, match=match):
transforms.Resize((20, 20))(tensor_img)
with pytest.warns(UserWarning, match=match): with pytest.warns(UserWarning, match=match):
transforms.RandomResizedCrop((20, 20))(tensor_img) transforms.RandomResizedCrop((20, 20))(tensor_img)
with pytest.warns(UserWarning, match=match): with pytest.warns(UserWarning, match=match):
...@@ -1722,18 +1720,6 @@ def test_antialias_warning(): ...@@ -1722,18 +1720,6 @@ def test_antialias_warning():
with pytest.warns(UserWarning, match=match): with pytest.warns(UserWarning, match=match):
transforms.RandomResize(10, 20)(tensor_img) transforms.RandomResize(10, 20)(tensor_img)
with pytest.warns(UserWarning, match=match):
transforms.functional.resize(tensor_img, (20, 20))
with pytest.warns(UserWarning, match=match):
transforms.functional.resize_image_tensor(tensor_img, (20, 20))
with pytest.warns(UserWarning, match=match):
transforms.functional.resize(tensor_video, (20, 20))
with pytest.warns(UserWarning, match=match):
transforms.functional.resize_video(tensor_video, (20, 20))
with pytest.warns(UserWarning, match=match):
datapoints.Image(tensor_img).resize((20, 20))
with pytest.warns(UserWarning, match=match): with pytest.warns(UserWarning, match=match):
datapoints.Image(tensor_img).resized_crop(0, 0, 10, 10, (20, 20)) datapoints.Image(tensor_img).resized_crop(0, 0, 10, 10, (20, 20))
...@@ -1744,27 +1730,17 @@ def test_antialias_warning(): ...@@ -1744,27 +1730,17 @@ def test_antialias_warning():
with warnings.catch_warnings(): with warnings.catch_warnings():
warnings.simplefilter("error") warnings.simplefilter("error")
transforms.Resize((20, 20))(pil_img)
transforms.RandomResizedCrop((20, 20))(pil_img) transforms.RandomResizedCrop((20, 20))(pil_img)
transforms.ScaleJitter((20, 20))(pil_img) transforms.ScaleJitter((20, 20))(pil_img)
transforms.RandomShortestSize((20, 20))(pil_img) transforms.RandomShortestSize((20, 20))(pil_img)
transforms.RandomResize(10, 20)(pil_img) transforms.RandomResize(10, 20)(pil_img)
transforms.functional.resize(pil_img, (20, 20))
transforms.Resize((20, 20), antialias=True)(tensor_img)
transforms.RandomResizedCrop((20, 20), antialias=True)(tensor_img) transforms.RandomResizedCrop((20, 20), antialias=True)(tensor_img)
transforms.ScaleJitter((20, 20), antialias=True)(tensor_img) transforms.ScaleJitter((20, 20), antialias=True)(tensor_img)
transforms.RandomShortestSize((20, 20), antialias=True)(tensor_img) transforms.RandomShortestSize((20, 20), antialias=True)(tensor_img)
transforms.RandomResize(10, 20, antialias=True)(tensor_img) transforms.RandomResize(10, 20, antialias=True)(tensor_img)
transforms.functional.resize(tensor_img, (20, 20), antialias=True)
transforms.functional.resize_image_tensor(tensor_img, (20, 20), antialias=True)
transforms.functional.resize(tensor_video, (20, 20), antialias=True)
transforms.functional.resize_video(tensor_video, (20, 20), antialias=True)
datapoints.Image(tensor_img).resize((20, 20), antialias=True)
datapoints.Image(tensor_img).resized_crop(0, 0, 10, 10, (20, 20), antialias=True) datapoints.Image(tensor_img).resized_crop(0, 0, 10, 10, (20, 20), antialias=True)
datapoints.Video(tensor_video).resize((20, 20), antialias=True)
datapoints.Video(tensor_video).resized_crop(0, 0, 10, 10, (20, 20), antialias=True) datapoints.Video(tensor_video).resized_crop(0, 0, 10, 10, (20, 20), antialias=True)
......
This diff is collapsed.
...@@ -148,19 +148,6 @@ DISPATCHER_INFOS = [ ...@@ -148,19 +148,6 @@ DISPATCHER_INFOS = [
}, },
pil_kernel_info=PILKernelInfo(F.horizontal_flip_image_pil, kernel_name="horizontal_flip_image_pil"), pil_kernel_info=PILKernelInfo(F.horizontal_flip_image_pil, kernel_name="horizontal_flip_image_pil"),
), ),
DispatcherInfo(
F.resize,
kernels={
datapoints.Image: F.resize_image_tensor,
datapoints.Video: F.resize_video,
datapoints.BoundingBox: F.resize_bounding_box,
datapoints.Mask: F.resize_mask,
},
pil_kernel_info=PILKernelInfo(F.resize_image_pil),
test_marks=[
xfail_jit_python_scalar_arg("size"),
],
),
DispatcherInfo( DispatcherInfo(
F.affine, F.affine,
kernels={ kernels={
......
...@@ -238,179 +238,6 @@ KERNEL_INFOS.extend( ...@@ -238,179 +238,6 @@ KERNEL_INFOS.extend(
) )
def _get_resize_sizes(spatial_size):
height, width = spatial_size
length = max(spatial_size)
yield length
yield [length]
yield (length,)
new_height = int(height * 0.75)
new_width = int(width * 1.25)
yield [new_height, new_width]
yield height, width
def sample_inputs_resize_image_tensor():
    """Yield sample ArgsKwargs for the image resize kernel."""
    # every supported `size` spelling on a float32 RGB image
    for loader in make_image_loaders(sizes=["random"], color_spaces=["RGB"], dtypes=[torch.float32]):
        for size in _get_resize_sizes(loader.spatial_size):
            yield ArgsKwargs(loader, size=size)

    # NEAREST and BILINEAR interpolation with the default dtypes
    for loader in make_image_loaders(sizes=["random"], color_spaces=["RGB"]):
        for interpolation in [F.InterpolationMode.NEAREST, F.InterpolationMode.BILINEAR]:
            yield ArgsKwargs(loader, size=[min(loader.spatial_size) + 1], interpolation=interpolation)

    # `max_size` handling
    yield ArgsKwargs(make_image_loader(size=(11, 17)), size=20, max_size=25)
def sample_inputs_resize_image_tensor_bicubic():
    """Yield sample ArgsKwargs exercising only BICUBIC interpolation."""
    for loader in make_image_loaders(sizes=["random"], color_spaces=["RGB"]):
        yield ArgsKwargs(loader, size=[min(loader.spatial_size) + 1], interpolation=F.InterpolationMode.BICUBIC)
@pil_reference_wrapper
def reference_resize_image_tensor(*args, **kwargs):
    """PIL-backed reference for the image resize kernel.

    PIL always anti-aliases for the continuous interpolation modes, so sample
    points that disable anti-aliasing while using BILINEAR or BICUBIC cannot be
    checked against it and are rejected as a usage error.
    """
    antialias = kwargs.pop("antialias", False)
    interpolation = kwargs.get("interpolation", F.InterpolationMode.BILINEAR)
    if not antialias and interpolation in {F.InterpolationMode.BILINEAR, F.InterpolationMode.BICUBIC}:
        raise pytest.UsageError("Anti-aliasing is always active in PIL")
    return F.resize_image_pil(*args, **kwargs)
def reference_inputs_resize_image_tensor():
    """Yield reference ArgsKwargs covering all interpolation modes and size spellings."""
    interpolations = [
        F.InterpolationMode.NEAREST,
        F.InterpolationMode.NEAREST_EXACT,
        F.InterpolationMode.BILINEAR,
        F.InterpolationMode.BICUBIC,
    ]
    for image_loader, interpolation in itertools.product(make_image_loaders_for_interpolation(), interpolations):
        # anti-aliasing is only enabled for the continuous interpolation modes,
        # matching what the PIL reference can reproduce
        antialias = interpolation in {F.InterpolationMode.BILINEAR, F.InterpolationMode.BICUBIC}
        for size in _get_resize_sizes(image_loader.spatial_size):
            yield ArgsKwargs(image_loader, size=size, interpolation=interpolation, antialias=antialias)
def sample_inputs_resize_bounding_box():
    """Yield sample ArgsKwargs for the bounding box resize kernel."""
    for loader in make_bounding_box_loaders():
        spatial_size = loader.spatial_size
        for size in _get_resize_sizes(spatial_size):
            yield ArgsKwargs(loader, spatial_size=spatial_size, size=size)
def sample_inputs_resize_mask():
    """Yield sample ArgsKwargs for the mask resize kernel: grow the shorter edge by one."""
    for loader in make_mask_loaders(sizes=["random"], num_categories=["random"], num_objects=["random"]):
        shorter_edge = min(loader.shape[-2:])
        yield ArgsKwargs(loader, size=[shorter_edge + 1])
def sample_inputs_resize_video():
    """Yield sample ArgsKwargs for the video resize kernel: grow the shorter edge by one."""
    for loader in make_video_loaders(sizes=["random"], num_frames=["random"]):
        shorter_edge = min(loader.shape[-2:])
        yield ArgsKwargs(loader, size=[shorter_edge + 1])
def reference_resize_bounding_box(bounding_box, *, spatial_size, size, max_size=None):
    """Reference implementation of bounding box resizing.

    Returns the resized boxes together with the new (height, width). Boxes are
    scaled with an affine matrix that stretches x by new_width / old_width and
    y by new_height / old_height.
    """
    old_height, old_width = spatial_size
    new_height, new_width = F._geometry._compute_resized_output_size(spatial_size, size=size, max_size=max_size)

    # resizing to the identical spatial size is a no-op for the boxes
    if (new_height, new_width) == (old_height, old_width):
        return bounding_box, (old_height, old_width)

    matrix_dtype = "float64" if bounding_box.dtype == torch.float64 else "float32"
    scale_x = new_width / old_width
    scale_y = new_height / old_height
    affine_matrix = np.array(
        [
            [scale_x, 0, 0],
            [0, scale_y, 0],
        ],
        dtype=matrix_dtype,
    )

    expected_bboxes = reference_affine_bounding_box_helper(
        bounding_box,
        format=bounding_box.format,
        spatial_size=(new_height, new_width),
        affine_matrix=affine_matrix,
    )
    return expected_bboxes, (new_height, new_width)
def reference_inputs_resize_bounding_box():
    """Yield reference ArgsKwargs for bounding boxes, with and without an extra batch dim."""
    for loader in make_bounding_box_loaders(extra_dims=((), (4,))):
        spatial_size = loader.spatial_size
        for size in _get_resize_sizes(spatial_size):
            yield ArgsKwargs(loader, size=size, spatial_size=spatial_size)
# Register the resize kernel infos. `F.resize_image_tensor` is registered
# twice: once for the general sample inputs and once for the bicubic-only
# samples, which use a looser CUDA-vs-CPU tolerance (atol=30).
KERNEL_INFOS.extend(
    [
        KernelInfo(
            F.resize_image_tensor,
            sample_inputs_fn=sample_inputs_resize_image_tensor,
            reference_fn=reference_resize_image_tensor,
            reference_inputs_fn=reference_inputs_resize_image_tensor,
            float32_vs_uint8=True,
            closeness_kwargs={
                # MAE-based tolerances against the PIL reference and for the
                # float32-vs-uint8 comparison
                **pil_reference_pixel_difference(10, mae=True),
                **cuda_vs_cpu_pixel_difference(),
                **float32_vs_uint8_pixel_difference(1, mae=True),
            },
            test_marks=[
                # a plain Python scalar for `size` is expected to fail under JIT
                xfail_jit_python_scalar_arg("size"),
            ],
        ),
        KernelInfo(
            F.resize_image_tensor,
            sample_inputs_fn=sample_inputs_resize_image_tensor_bicubic,
            reference_fn=reference_resize_image_tensor,
            reference_inputs_fn=reference_inputs_resize_image_tensor,
            float32_vs_uint8=True,
            closeness_kwargs={
                **pil_reference_pixel_difference(10, mae=True),
                # bicubic needs a larger CUDA-vs-CPU tolerance than the default
                **cuda_vs_cpu_pixel_difference(atol=30),
                **float32_vs_uint8_pixel_difference(1, mae=True),
            },
            test_marks=[
                xfail_jit_python_scalar_arg("size"),
            ],
        ),
        KernelInfo(
            F.resize_bounding_box,
            sample_inputs_fn=sample_inputs_resize_bounding_box,
            reference_fn=reference_resize_bounding_box,
            reference_inputs_fn=reference_inputs_resize_bounding_box,
            closeness_kwargs={
                # int64 boxes on CPU may be off by one against the reference
                (("TestKernels", "test_against_reference"), torch.int64, "cpu"): dict(atol=1, rtol=0),
            },
            test_marks=[
                xfail_jit_python_scalar_arg("size"),
            ],
        ),
        KernelInfo(
            F.resize_mask,
            sample_inputs_fn=sample_inputs_resize_mask,
            closeness_kwargs=pil_reference_pixel_difference(10),
            test_marks=[
                xfail_jit_python_scalar_arg("size"),
            ],
        ),
        KernelInfo(
            F.resize_video,
            sample_inputs_fn=sample_inputs_resize_video,
            closeness_kwargs=cuda_vs_cpu_pixel_difference(),
        ),
    ]
)
_AFFINE_KWARGS = combinations_grid( _AFFINE_KWARGS = combinations_grid(
angle=[-87, 15, 90], angle=[-87, 15, 90],
translate=[(5, 5), (-5, -5)], translate=[(5, 5), (-5, -5)],
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment