"docs/git@developer.sourcefind.cn:one/TransferBench.git" did not exist on "c015654ae52070cbc16d1f7c6014632d167a6861"
Unverified Commit 21824ce6 authored by vfdev, committed by GitHub

Port resize tests to pytest and fix their flakiness (#3907)


Co-authored-by: Philip Meier <github.pmeier@posteo.de>
parent 9c31d1d5
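
The port replaces the nested device/dtype/size/interpolation loops of the old unittest-style method with stacked @pytest.mark.parametrize decorators, and seeds the RNG so the tensor-vs-PIL comparison no longer depends on random inputs. Below is a minimal illustrative sketch of that pattern, not part of the diff; the toy helper resize_nearest is hypothetical and only stands in for F.resize.

    # Illustrative sketch only: nested loops become stacked parametrizations.
    # `resize_nearest` is a hypothetical stand-in for F.resize.
    import pytest
    import torch


    def resize_nearest(img, size):
        # toy resize: nearest-neighbour interpolation of a CHW image to the requested size
        return torch.nn.functional.interpolate(img[None].float(), size=size, mode="nearest")[0]


    @pytest.mark.parametrize("dt", [None, torch.float32, torch.float64])
    @pytest.mark.parametrize("size", [(32, 32), (26, 35)])
    def test_resize_nearest(dt, size):
        # fixed seed keeps the random input, and hence the test, deterministic
        torch.manual_seed(12)
        img = torch.randint(0, 256, (3, 26, 36), dtype=torch.uint8)
        if dt is not None:
            img = img.to(dt)
        out = resize_nearest(img, size)
        assert out.shape[1:] == torch.Size(size)

Each parametrize decorator multiplies out into its own test case, so a single failing device/dtype/size combination is reported individually instead of aborting one large loop.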
...@@ -324,76 +324,6 @@ class Tester(TransformsTester):
        self._test_fn_on_batch(batch_tensors, F.pad, padding=script_pad, **kwargs)
    def test_resize(self):
        script_fn = torch.jit.script(F.resize)
        tensor, pil_img = self._create_data(26, 36, device=self.device)
        batch_tensors = self._create_data_batch(16, 18, num_samples=4, device=self.device)

        for dt in [None, torch.float32, torch.float64, torch.float16]:

            if dt == torch.float16 and torch.device(self.device).type == "cpu":
                # skip float16 on CPU case
                continue

            if dt is not None:
                # This is a trivial cast to float of uint8 data to test all cases
                tensor = tensor.to(dt)
                batch_tensors = batch_tensors.to(dt)

            for size in [32, 26, [32, ], [32, 32], (32, 32), [26, 35]]:
                for max_size in (None, 33, 40, 1000):
                    if max_size is not None and isinstance(size, Sequence) and len(size) != 1:
                        continue  # unsupported, see assertRaises below
                    for interpolation in [BILINEAR, BICUBIC, NEAREST]:
                        resized_tensor = F.resize(tensor, size=size, interpolation=interpolation, max_size=max_size)
                        resized_pil_img = F.resize(pil_img, size=size, interpolation=interpolation, max_size=max_size)

                        assert_equal(
                            resized_tensor.size()[1:],
                            resized_pil_img.size[::-1],
                            msg="{}, {}".format(size, interpolation),
                        )

                        if interpolation not in [NEAREST, ]:
                            # We can not check values if mode = NEAREST, as results are different
                            # E.g. resized_tensor = [[a, a, b, c, d, d, e, ...]]
                            # E.g. resized_pil_img = [[a, b, c, c, d, e, f, ...]]
                            resized_tensor_f = resized_tensor
                            # we need to cast to uint8 to compare with PIL image
                            if resized_tensor_f.dtype == torch.uint8:
                                resized_tensor_f = resized_tensor_f.to(torch.float)

                            # Pay attention to high tolerance for MAE
                            self.approxEqualTensorToPIL(
                                resized_tensor_f, resized_pil_img, tol=8.0, msg="{}, {}".format(size, interpolation)
                            )

                        if isinstance(size, int):
                            script_size = [size, ]
                        else:
                            script_size = size

                        resize_result = script_fn(tensor, size=script_size, interpolation=interpolation,
                                                  max_size=max_size)
                        assert_equal(resized_tensor, resize_result, msg="{}, {}".format(size, interpolation))

                        self._test_fn_on_batch(
                            batch_tensors, F.resize, size=script_size, interpolation=interpolation, max_size=max_size
                        )

        # assert changed type warning
        with self.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"):
            res1 = F.resize(tensor, size=32, interpolation=2)
            res2 = F.resize(tensor, size=32, interpolation=BILINEAR)
            assert_equal(res1, res2)

        for img in (tensor, pil_img):
            exp_msg = "max_size should only be passed if size specifies the length of the smaller edge"
            with self.assertRaisesRegex(ValueError, exp_msg):
                F.resize(img, size=(32, 34), max_size=35)
            with self.assertRaisesRegex(ValueError, "max_size = 32 must be strictly greater"):
                F.resize(img, size=32, max_size=32)
    def test_resized_crop(self):
        # test values of F.resized_crop in several cases:
        # 1) resize to the same size, crop to the same size => should be identity
...@@ -868,18 +798,93 @@ def test_perspective_interpolation_warning(tester):
    tester.assertTrue(res1.equal(res2))
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('dt', [None, torch.float32, torch.float64, torch.float16])
@pytest.mark.parametrize('size', [32, 26, [32, ], [32, 32], (32, 32), [26, 35]])
@pytest.mark.parametrize('max_size', [None, 34, 40, 1000])
@pytest.mark.parametrize('interpolation', [BILINEAR, BICUBIC, NEAREST])
def test_resize(device, dt, size, max_size, interpolation, tester):

    if dt == torch.float16 and device == "cpu":
        # skip float16 on CPU case
        return

    if max_size is not None and isinstance(size, Sequence) and len(size) != 1:
        return  # unsupported

    torch.manual_seed(12)
    script_fn = torch.jit.script(F.resize)
    tensor, pil_img = tester._create_data(26, 36, device=device)
    batch_tensors = tester._create_data_batch(16, 18, num_samples=4, device=device)

    if dt is not None:
        # This is a trivial cast to float of uint8 data to test all cases
        tensor = tensor.to(dt)
        batch_tensors = batch_tensors.to(dt)

    resized_tensor = F.resize(tensor, size=size, interpolation=interpolation, max_size=max_size)
    resized_pil_img = F.resize(pil_img, size=size, interpolation=interpolation, max_size=max_size)

    assert resized_tensor.size()[1:] == resized_pil_img.size[::-1]

    if interpolation not in [NEAREST, ]:
        # We can not check values if mode = NEAREST, as results are different
        # E.g. resized_tensor = [[a, a, b, c, d, d, e, ...]]
        # E.g. resized_pil_img = [[a, b, c, c, d, e, f, ...]]
        resized_tensor_f = resized_tensor
        # we need to cast to uint8 to compare with PIL image
        if resized_tensor_f.dtype == torch.uint8:
            resized_tensor_f = resized_tensor_f.to(torch.float)

        # Pay attention to high tolerance for MAE
        tester.approxEqualTensorToPIL(resized_tensor_f, resized_pil_img, tol=8.0)

    if isinstance(size, int):
        script_size = [size, ]
    else:
        script_size = size

    resize_result = script_fn(
        tensor, size=script_size, interpolation=interpolation, max_size=max_size
    )
    assert_equal(resized_tensor, resize_result)

    tester._test_fn_on_batch(
        batch_tensors, F.resize, size=script_size, interpolation=interpolation, max_size=max_size
    )
@pytest.mark.parametrize('device', cpu_and_gpu())
def test_resize_asserts(device, tester):

    tensor, pil_img = tester._create_data(26, 36, device=device)

    # assert changed type warning
    with pytest.warns(UserWarning, match=r"Argument interpolation should be of type InterpolationMode"):
        res1 = F.resize(tensor, size=32, interpolation=2)
        res2 = F.resize(tensor, size=32, interpolation=BILINEAR)
        assert_equal(res1, res2)

    for img in (tensor, pil_img):
        exp_msg = "max_size should only be passed if size specifies the length of the smaller edge"
        with pytest.raises(ValueError, match=exp_msg):
            F.resize(img, size=(32, 34), max_size=35)
        with pytest.raises(ValueError, match="max_size = 32 must be strictly greater"):
            F.resize(img, size=32, max_size=32)
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('dt', [None, torch.float32, torch.float64, torch.float16])
@pytest.mark.parametrize('size', [[96, 72], [96, 420], [420, 72]])
@pytest.mark.parametrize('interpolation', [BILINEAR, BICUBIC])
def test_resize_antialias(device, dt, size, interpolation, tester):

    torch.manual_seed(12)

    if dt == torch.float16 and device == "cpu":
        # skip float16 on CPU case
        return

    torch.manual_seed(12)
    script_fn = torch.jit.script(F.resize)
    tensor, pil_img = tester._create_data(320, 290, device=device)
...