Unverified Commit a2329ff6 authored by Saswat Das's avatar Saswat Das Committed by GitHub
Browse files

port pad, resized_crop and test_affine in test_functional_tensor to pytest (#3974)

parent 6cb73eb3
...@@ -191,10 +191,57 @@ class Tester(unittest.TestCase): ...@@ -191,10 +191,57 @@ class Tester(unittest.TestCase):
for transformed_batch, s_transformed_batch in zip(tuple_transformed_batches, s_tuple_transformed_batches): for transformed_batch, s_transformed_batch in zip(tuple_transformed_batches, s_tuple_transformed_batches):
assert_equal(transformed_batch, s_transformed_batch) assert_equal(transformed_batch, s_transformed_batch)
def test_pad(self): def _test_rotate_all_options(self, tensor, pil_img, scripted_rotate, centers):
script_fn = torch.jit.script(F.pad) img_size = pil_img.size
tensor, pil_img = _create_data(7, 8, device=self.device) dt = tensor.dtype
batch_tensors = _create_data_batch(16, 18, num_samples=4, device=self.device) for r in [NEAREST, ]:
for a in range(-180, 180, 17):
for e in [True, False]:
for c in centers:
for f in [None, [0, 0, 0], (1, 2, 3), [255, 255, 255], [1, ], (2.0, )]:
f_pil = int(f[0]) if f is not None and len(f) == 1 else f
out_pil_img = F.rotate(pil_img, angle=a, interpolation=r, expand=e, center=c, fill=f_pil)
out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1)))
for fn in [F.rotate, scripted_rotate]:
out_tensor = fn(tensor, angle=a, interpolation=r, expand=e, center=c, fill=f).cpu()
if out_tensor.dtype != torch.uint8:
out_tensor = out_tensor.to(torch.uint8)
self.assertEqual(
out_tensor.shape,
out_pil_tensor.shape,
msg="{}: {} vs {}".format(
(img_size, r, dt, a, e, c), out_tensor.shape, out_pil_tensor.shape
))
num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0
ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2]
# Tolerance : less than 3% of different pixels
self.assertLess(
ratio_diff_pixels,
0.03,
msg="{}: {}\n{} vs \n{}".format(
(img_size, r, dt, a, e, c, f),
ratio_diff_pixels,
out_tensor[0, :7, :7],
out_pil_tensor[0, :7, :7]
)
)
def test_rotate(self):
# Tests on square image
scripted_rotate = torch.jit.script(F.rotate)
data = [_create_data(26, 26, device=self.device), _create_data(32, 26, device=self.device)]
for tensor, pil_img in data:
img_size = pil_img.size
centers = [
None,
(int(img_size[0] * 0.3), int(img_size[0] * 0.4)),
[int(img_size[0] * 0.5), int(img_size[0] * 0.6)]
]
for dt in [None, torch.float32, torch.float64, torch.float16]: for dt in [None, torch.float32, torch.float64, torch.float16]:
...@@ -203,100 +250,115 @@ class Tester(unittest.TestCase): ...@@ -203,100 +250,115 @@ class Tester(unittest.TestCase):
continue continue
if dt is not None: if dt is not None:
# This is a trivial cast to float of uint8 data to test all cases tensor = tensor.to(dtype=dt)
tensor = tensor.to(dt)
batch_tensors = batch_tensors.to(dt)
for pad in [2, [3, ], [0, 3], (3, 3), [4, 2, 4, 3]]: self._test_rotate_all_options(tensor, pil_img, scripted_rotate, centers)
configs = [
{"padding_mode": "constant", "fill": 0},
{"padding_mode": "constant", "fill": 10},
{"padding_mode": "constant", "fill": 20},
{"padding_mode": "edge"},
{"padding_mode": "reflect"},
{"padding_mode": "symmetric"},
]
for kwargs in configs:
pad_tensor = F_t.pad(tensor, pad, **kwargs)
pad_pil_img = F_pil.pad(pil_img, pad, **kwargs)
pad_tensor_8b = pad_tensor batch_tensors = _create_data_batch(26, 36, num_samples=4, device=self.device)
# we need to cast to uint8 to compare with PIL image if dt is not None:
if pad_tensor_8b.dtype != torch.uint8: batch_tensors = batch_tensors.to(dtype=dt)
pad_tensor_8b = pad_tensor_8b.to(torch.uint8)
_assert_equal_tensor_to_pil(pad_tensor_8b, pad_pil_img, msg="{}, {}".format(pad, kwargs)) center = (20, 22)
_test_fn_on_batch(
batch_tensors, F.rotate, angle=32, interpolation=NEAREST, expand=True, center=center
)
tensor, pil_img = data[0]
# assert deprecation warning and non-BC
with self.assertWarnsRegex(UserWarning, r"Argument resample is deprecated and will be removed"):
res1 = F.rotate(tensor, 45, resample=2)
res2 = F.rotate(tensor, 45, interpolation=BILINEAR)
assert_equal(res1, res2)
if isinstance(pad, int): # assert changed type warning
script_pad = [pad, ] with self.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"):
else: res1 = F.rotate(tensor, 45, interpolation=2)
script_pad = pad res2 = F.rotate(tensor, 45, interpolation=BILINEAR)
pad_tensor_script = script_fn(tensor, script_pad, **kwargs) assert_equal(res1, res2)
assert_equal(pad_tensor, pad_tensor_script, msg="{}, {}".format(pad, kwargs))
_test_fn_on_batch(batch_tensors, F.pad, padding=script_pad, **kwargs)
def test_resized_crop(self): @unittest.skipIf(not torch.cuda.is_available(), reason="Skip if no CUDA device")
# test values of F.resized_crop in several cases: class CUDATester(Tester):
# 1) resize to the same size, crop to the same size => should be identity
tensor, _ = _create_data(26, 36, device=self.device)
for mode in [NEAREST, BILINEAR, BICUBIC]: def setUp(self):
out_tensor = F.resized_crop(tensor, top=0, left=0, height=26, width=36, size=[26, 36], interpolation=mode) self.device = "cuda"
assert_equal(tensor, out_tensor, msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5]))
# 2) resize by half and crop a TL corner def test_scale_channel(self):
tensor, _ = _create_data(26, 36, device=self.device) """Make sure that _scale_channel gives the same results on CPU and GPU as
out_tensor = F.resized_crop(tensor, top=0, left=0, height=20, width=30, size=[10, 15], interpolation=NEAREST) histc or bincount are used depending on the device.
expected_out_tensor = tensor[:, :20:2, :30:2] """
assert_equal( # TODO: when # https://github.com/pytorch/pytorch/issues/53194 is fixed,
expected_out_tensor, # only use bincount and remove that test.
out_tensor, size = (1_000,)
check_stride=False, img_chan = torch.randint(0, 256, size=size).to('cpu')
msg="{} vs {}".format(expected_out_tensor[0, :10, :10], out_tensor[0, :10, :10]), scaled_cpu = F_t._scale_channel(img_chan)
) scaled_cuda = F_t._scale_channel(img_chan.to('cuda'))
assert_equal(scaled_cpu, scaled_cuda.to('cpu'))
batch_tensors = _create_data_batch(26, 36, num_samples=4, device=self.device)
_test_fn_on_batch(
batch_tensors, F.resized_crop, top=1, left=2, height=20, width=30, size=[10, 15], interpolation=NEAREST
)
def _test_affine_identity_map(self, tensor, scripted_affine): class TestAffine:
ALL_DTYPES = [None, torch.float32, torch.float64, torch.float16]
scripted_affine = torch.jit.script(F.affine)
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('height, width', [(26, 26), (32, 26)])
@pytest.mark.parametrize('dt', ALL_DTYPES)
def test_identity_map(self, device, height, width, dt):
# Tests on square and rectangular images
tensor, pil_img = _create_data(height, width, device=device)
if dt == torch.float16 and device == "cpu":
# skip float16 on CPU case
return
if dt is not None:
tensor = tensor.to(dtype=dt)
# 1) identity map # 1) identity map
out_tensor = F.affine(tensor, angle=0, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST) out_tensor = F.affine(tensor, angle=0, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST)
assert_equal(tensor, out_tensor, msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5])) assert_equal(tensor, out_tensor, msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5]))
out_tensor = scripted_affine( out_tensor = self.scripted_affine(
tensor, angle=0, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST tensor, angle=0, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST
) )
assert_equal(tensor, out_tensor, msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5])) assert_equal(tensor, out_tensor, msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5]))
def _test_affine_square_rotations(self, tensor, pil_img, scripted_affine): @pytest.mark.parametrize('device', cpu_and_gpu())
# 2) Test rotation @pytest.mark.parametrize('height, width', [(26, 26)])
test_configs = [ @pytest.mark.parametrize('dt', ALL_DTYPES)
(90, torch.rot90(tensor, k=1, dims=(-1, -2))), @pytest.mark.parametrize('angle, config', [
(90, {'k': 1, 'dims': (-1, -2)}),
(45, None), (45, None),
(30, None), (30, None),
(-30, None), (-30, None),
(-45, None), (-45, None),
(-90, torch.rot90(tensor, k=-1, dims=(-1, -2))), (-90, {'k': -1, 'dims': (-1, -2)}),
(180, torch.rot90(tensor, k=2, dims=(-1, -2))), (180, {'k': 2, 'dims': (-1, -2)}),
] ])
for a, true_tensor in test_configs: @pytest.mark.parametrize('fn', [F.affine, scripted_affine])
def test_square_rotations(self, device, height, width, dt, angle, config, fn):
# 2) Test rotation
tensor, pil_img = _create_data(height, width, device=device)
if dt == torch.float16 and device == "cpu":
# skip float16 on CPU case
return
if dt is not None:
tensor = tensor.to(dtype=dt)
out_pil_img = F.affine( out_pil_img = F.affine(
pil_img, angle=a, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST pil_img, angle=angle, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST
) )
out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1))).to(self.device) out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1))).to(device)
for fn in [F.affine, scripted_affine]:
out_tensor = fn( out_tensor = fn(
tensor, angle=a, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST tensor, angle=angle, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST
) )
if true_tensor is not None: if config is not None:
assert_equal( assert_equal(
true_tensor, torch.rot90(tensor, **config),
out_tensor, out_tensor,
msg="{}\n{} vs \n{}".format(a, out_tensor[0, :5, :5], true_tensor[0, :5, :5]),
check_stride=False, check_stride=False,
) )
...@@ -306,28 +368,33 @@ class Tester(unittest.TestCase): ...@@ -306,28 +368,33 @@ class Tester(unittest.TestCase):
num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0 num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0
ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2] ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2]
# Tolerance : less than 6% of different pixels # Tolerance : less than 6% of different pixels
self.assertLess( assert ratio_diff_pixels < 0.06, "{}\n{} vs \n{}".format(
ratio_diff_pixels,
0.06,
msg="{}\n{} vs \n{}".format(
ratio_diff_pixels, out_tensor[0, :7, :7], out_pil_tensor[0, :7, :7] ratio_diff_pixels, out_tensor[0, :7, :7], out_pil_tensor[0, :7, :7]
) )
)
def _test_affine_rect_rotations(self, tensor, pil_img, scripted_affine): @pytest.mark.parametrize('device', cpu_and_gpu())
test_configs = [ @pytest.mark.parametrize('height, width', [(32, 26)])
90, 45, 15, -30, -60, -120 @pytest.mark.parametrize('dt', ALL_DTYPES)
] @pytest.mark.parametrize('angle', [90, 45, 15, -30, -60, -120])
for a in test_configs: @pytest.mark.parametrize('fn', [F.affine, scripted_affine])
def test_rect_rotations(self, device, height, width, dt, angle, fn):
# Tests on rectangular images
tensor, pil_img = _create_data(height, width, device=device)
if dt == torch.float16 and device == "cpu":
# skip float16 on CPU case
return
if dt is not None:
tensor = tensor.to(dtype=dt)
out_pil_img = F.affine( out_pil_img = F.affine(
pil_img, angle=a, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST pil_img, angle=angle, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST
) )
out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1))) out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1)))
for fn in [F.affine, scripted_affine]:
out_tensor = fn( out_tensor = fn(
tensor, angle=a, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST tensor, angle=angle, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST
).cpu() ).cpu()
if out_tensor.dtype != torch.uint8: if out_tensor.dtype != torch.uint8:
...@@ -336,24 +403,28 @@ class Tester(unittest.TestCase): ...@@ -336,24 +403,28 @@ class Tester(unittest.TestCase):
num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0 num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0
ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2] ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2]
# Tolerance : less than 3% of different pixels # Tolerance : less than 3% of different pixels
self.assertLess( assert ratio_diff_pixels < 0.03, "{}: {}\n{} vs \n{}".format(
ratio_diff_pixels, angle, ratio_diff_pixels, out_tensor[0, :7, :7], out_pil_tensor[0, :7, :7]
0.03,
msg="{}: {}\n{} vs \n{}".format(
a, ratio_diff_pixels, out_tensor[0, :7, :7], out_pil_tensor[0, :7, :7]
)
) )
def _test_affine_translations(self, tensor, pil_img, scripted_affine): @pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('height, width', [(26, 26), (32, 26)])
@pytest.mark.parametrize('dt', ALL_DTYPES)
@pytest.mark.parametrize('t', [[10, 12], (-12, -13)])
@pytest.mark.parametrize('fn', [F.affine, scripted_affine])
def test_translations(self, device, height, width, dt, t, fn):
# 3) Test translation # 3) Test translation
test_configs = [ tensor, pil_img = _create_data(height, width, device=device)
[10, 12], (-12, -13)
] if dt == torch.float16 and device == "cpu":
for t in test_configs: # skip float16 on CPU case
return
if dt is not None:
tensor = tensor.to(dtype=dt)
out_pil_img = F.affine(pil_img, angle=0, translate=t, scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST) out_pil_img = F.affine(pil_img, angle=0, translate=t, scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST)
for fn in [F.affine, scripted_affine]:
out_tensor = fn(tensor, angle=0, translate=t, scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST) out_tensor = fn(tensor, angle=0, translate=t, scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST)
if out_tensor.dtype != torch.uint8: if out_tensor.dtype != torch.uint8:
...@@ -361,9 +432,10 @@ class Tester(unittest.TestCase): ...@@ -361,9 +432,10 @@ class Tester(unittest.TestCase):
_assert_equal_tensor_to_pil(out_tensor, out_pil_img) _assert_equal_tensor_to_pil(out_tensor, out_pil_img)
def _test_affine_all_ops(self, tensor, pil_img, scripted_affine): @pytest.mark.parametrize('device', cpu_and_gpu())
# 4) Test rotation + translation + scale + share @pytest.mark.parametrize('height, width', [(26, 26), (32, 26)])
test_configs = [ @pytest.mark.parametrize('dt', ALL_DTYPES)
@pytest.mark.parametrize('a, t, s, sh, f', [
(45.5, [5, 6], 1.0, [0.0, 0.0], None), (45.5, [5, 6], 1.0, [0.0, 0.0], None),
(33, (5, -4), 1.0, [0.0, 0.0], [0, 0, 0]), (33, (5, -4), 1.0, [0.0, 0.0], [0, 0, 0]),
(45, [-5, 4], 1.2, [0.0, 0.0], (1, 2, 3)), (45, [-5, 4], 1.2, [0.0, 0.0], (1, 2, 3)),
...@@ -374,15 +446,24 @@ class Tester(unittest.TestCase): ...@@ -374,15 +446,24 @@ class Tester(unittest.TestCase):
(-45, [-10, 0], 0.7, [2.0, 5.0], None), (-45, [-10, 0], 0.7, [2.0, 5.0], None),
(-45, [-10, -10], 1.2, [4.0, 5.0], None), (-45, [-10, -10], 1.2, [4.0, 5.0], None),
(-90, [0, 0], 1.0, [0.0, 0.0], None), (-90, [0, 0], 1.0, [0.0, 0.0], None),
] ])
for r in [NEAREST, ]: @pytest.mark.parametrize('fn', [F.affine, scripted_affine])
for a, t, s, sh, f in test_configs: def test_all_ops(self, device, height, width, dt, a, t, s, sh, f, fn):
# 4) Test rotation + translation + scale + shear
tensor, pil_img = _create_data(height, width, device=device)
if dt == torch.float16 and device == "cpu":
# skip float16 on CPU case
return
if dt is not None:
tensor = tensor.to(dtype=dt)
f_pil = int(f[0]) if f is not None and len(f) == 1 else f f_pil = int(f[0]) if f is not None and len(f) == 1 else f
out_pil_img = F.affine(pil_img, angle=a, translate=t, scale=s, shear=sh, interpolation=r, fill=f_pil) out_pil_img = F.affine(pil_img, angle=a, translate=t, scale=s, shear=sh, interpolation=NEAREST, fill=f_pil)
out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1))) out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1)))
for fn in [F.affine, scripted_affine]: out_tensor = fn(tensor, angle=a, translate=t, scale=s, shear=sh, interpolation=NEAREST, fill=f).cpu()
out_tensor = fn(tensor, angle=a, translate=t, scale=s, shear=sh, interpolation=r, fill=f).cpu()
if out_tensor.dtype != torch.uint8: if out_tensor.dtype != torch.uint8:
out_tensor = out_tensor.to(torch.uint8) out_tensor = out_tensor.to(torch.uint8)
...@@ -390,40 +471,19 @@ class Tester(unittest.TestCase): ...@@ -390,40 +471,19 @@ class Tester(unittest.TestCase):
num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0 num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0
ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2] ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2]
# Tolerance : less than 5% (cpu), 6% (cuda) of different pixels # Tolerance : less than 5% (cpu), 6% (cuda) of different pixels
tol = 0.06 if self.device == "cuda" else 0.05 tol = 0.06 if device == "cuda" else 0.05
self.assertLess( assert ratio_diff_pixels < tol, "{}: {}\n{} vs \n{}".format(
ratio_diff_pixels, (i, a, t, s, sh, f), ratio_diff_pixels, out_tensor[0, :7, :7], out_pil_tensor[0, :7, :7]
tol,
msg="{}: {}\n{} vs \n{}".format(
(r, a, t, s, sh, f), ratio_diff_pixels, out_tensor[0, :7, :7], out_pil_tensor[0, :7, :7]
)
) )
def test_affine(self): @pytest.mark.parametrize('device', cpu_and_gpu())
# Tests on square and rectangular images @pytest.mark.parametrize('dt', ALL_DTYPES)
scripted_affine = torch.jit.script(F.affine) def test_batches(self, device, dt):
if dt == torch.float16 and device == "cpu":
data = [_create_data(26, 26, device=self.device), _create_data(32, 26, device=self.device)]
for tensor, pil_img in data:
for dt in [None, torch.float32, torch.float64, torch.float16]:
if dt == torch.float16 and torch.device(self.device).type == "cpu":
# skip float16 on CPU case # skip float16 on CPU case
continue return
if dt is not None:
tensor = tensor.to(dtype=dt)
self._test_affine_identity_map(tensor, scripted_affine)
if pil_img.size[0] == pil_img.size[1]:
self._test_affine_square_rotations(tensor, pil_img, scripted_affine)
else:
self._test_affine_rect_rotations(tensor, pil_img, scripted_affine)
self._test_affine_translations(tensor, pil_img, scripted_affine)
self._test_affine_all_ops(tensor, pil_img, scripted_affine)
batch_tensors = _create_data_batch(26, 36, num_samples=4, device=self.device) batch_tensors = _create_data_batch(26, 36, num_samples=4, device=device)
if dt is not None: if dt is not None:
batch_tensors = batch_tensors.to(dtype=dt) batch_tensors = batch_tensors.to(dtype=dt)
...@@ -431,128 +491,28 @@ class Tester(unittest.TestCase): ...@@ -431,128 +491,28 @@ class Tester(unittest.TestCase):
batch_tensors, F.affine, angle=-43, translate=[-3, 4], scale=1.2, shear=[4.0, 5.0] batch_tensors, F.affine, angle=-43, translate=[-3, 4], scale=1.2, shear=[4.0, 5.0]
) )
tensor, pil_img = data[0] @pytest.mark.parametrize('device', cpu_and_gpu())
def test_warnings(self, device):
tensor, pil_img = _create_data(26, 26, device=device)
# assert deprecation warning and non-BC # assert deprecation warning and non-BC
with self.assertWarnsRegex(UserWarning, r"Argument resample is deprecated and will be removed"): with pytest.warns(UserWarning, match=r"Argument resample is deprecated and will be removed"):
res1 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], resample=2) res1 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], resample=2)
res2 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=BILINEAR) res2 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=BILINEAR)
assert_equal(res1, res2) assert_equal(res1, res2)
# assert changed type warning # assert changed type warning
with self.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"): with pytest.warns(UserWarning, match=r"Argument interpolation should be of type InterpolationMode"):
res1 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=2) res1 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=2)
res2 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=BILINEAR) res2 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=BILINEAR)
assert_equal(res1, res2) assert_equal(res1, res2)
with self.assertWarnsRegex(UserWarning, r"Argument fillcolor is deprecated and will be removed"): with pytest.warns(UserWarning, match=r"Argument fillcolor is deprecated and will be removed"):
res1 = F.affine(pil_img, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], fillcolor=10) res1 = F.affine(pil_img, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], fillcolor=10)
res2 = F.affine(pil_img, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], fill=10) res2 = F.affine(pil_img, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], fill=10)
# we convert the PIL images to numpy as assert_equal doesn't work on PIL images. # we convert the PIL images to numpy as assert_equal doesn't work on PIL images.
assert_equal(np.asarray(res1), np.asarray(res2)) assert_equal(np.asarray(res1), np.asarray(res2))
def _test_rotate_all_options(self, tensor, pil_img, scripted_rotate, centers):
    """Sweep angle/expand/center/fill combinations and compare tensor rotate
    (eager and scripted) against the PIL reference implementation.
    """
    img_size = pil_img.size
    dt = tensor.dtype
    fills = [None, [0, 0, 0], (1, 2, 3), [255, 255, 255], [1, ], (2.0, )]
    for interp in [NEAREST, ]:
        for angle in range(-180, 180, 17):
            for expand in [True, False]:
                for center in centers:
                    for fill in fills:
                        # PIL expects a scalar fill when a 1-element sequence is given
                        fill_pil = int(fill[0]) if fill is not None and len(fill) == 1 else fill
                        out_pil_img = F.rotate(
                            pil_img, angle=angle, interpolation=interp, expand=expand, center=center, fill=fill_pil
                        )
                        out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1)))
                        for fn in [F.rotate, scripted_rotate]:
                            out_tensor = fn(
                                tensor, angle=angle, interpolation=interp, expand=expand, center=center, fill=fill
                            ).cpu()
                            # cast so the comparison against the uint8 PIL image is valid
                            if out_tensor.dtype != torch.uint8:
                                out_tensor = out_tensor.to(torch.uint8)
                            self.assertEqual(
                                out_tensor.shape,
                                out_pil_tensor.shape,
                                msg="{}: {} vs {}".format(
                                    (img_size, interp, dt, angle, expand, center),
                                    out_tensor.shape,
                                    out_pil_tensor.shape,
                                ))
                            num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0
                            ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2]
                            # Tolerance : less than 3% of different pixels
                            self.assertLess(
                                ratio_diff_pixels,
                                0.03,
                                msg="{}: {}\n{} vs \n{}".format(
                                    (img_size, interp, dt, angle, expand, center, fill),
                                    ratio_diff_pixels,
                                    out_tensor[0, :7, :7],
                                    out_pil_tensor[0, :7, :7]
                                )
                            )
def test_rotate(self):
    """Rotate square and rectangular images over all supported dtypes, then
    verify batched inputs and the deprecation / type-change warnings.
    """
    scripted_rotate = torch.jit.script(F.rotate)

    samples = [_create_data(26, 26, device=self.device), _create_data(32, 26, device=self.device)]
    for tensor, pil_img in samples:
        img_size = pil_img.size
        centers = [
            None,
            (int(img_size[0] * 0.3), int(img_size[0] * 0.4)),
            [int(img_size[0] * 0.5), int(img_size[0] * 0.6)]
        ]
        for dtype in [None, torch.float32, torch.float64, torch.float16]:
            if dtype == torch.float16 and torch.device(self.device).type == "cpu":
                # skip float16 on CPU case
                continue
            if dtype is not None:
                tensor = tensor.to(dtype=dtype)

            self._test_rotate_all_options(tensor, pil_img, scripted_rotate, centers)

            batch_tensors = _create_data_batch(26, 36, num_samples=4, device=self.device)
            if dtype is not None:
                batch_tensors = batch_tensors.to(dtype=dtype)

            batch_center = (20, 22)
            _test_fn_on_batch(
                batch_tensors, F.rotate, angle=32, interpolation=NEAREST, expand=True, center=batch_center
            )

    tensor, pil_img = samples[0]

    # assert deprecation warning and non-BC
    with self.assertWarnsRegex(UserWarning, r"Argument resample is deprecated and will be removed"):
        res1 = F.rotate(tensor, 45, resample=2)
        res2 = F.rotate(tensor, 45, interpolation=BILINEAR)
        assert_equal(res1, res2)

    # assert changed type warning
    with self.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"):
        res1 = F.rotate(tensor, 45, interpolation=2)
        res2 = F.rotate(tensor, 45, interpolation=BILINEAR)
        assert_equal(res1, res2)
@unittest.skipIf(not torch.cuda.is_available(), reason="Skip if no CUDA device")
class CUDATester(Tester):
    """Re-runs the full Tester suite on a CUDA device, plus CUDA-only checks."""

    def setUp(self):
        self.device = "cuda"

    def test_scale_channel(self):
        """Make sure that _scale_channel gives the same results on CPU and GPU as
        histc or bincount are used depending on the device.
        """
        # TODO: when # https://github.com/pytorch/pytorch/issues/53194 is fixed,
        # only use bincount and remove that test.
        shape = (1_000,)
        img_chan = torch.randint(0, 256, size=shape).to('cpu')
        scaled_cpu = F_t._scale_channel(img_chan)
        scaled_cuda = F_t._scale_channel(img_chan.to('cuda'))
        assert_equal(scaled_cpu, scaled_cuda.to('cpu'))
def _get_data_dims_and_points_for_perspective(): def _get_data_dims_and_points_for_perspective():
# Ideally we would parametrize independently over data dims and points, but # Ideally we would parametrize independently over data dims and points, but
...@@ -1003,6 +963,78 @@ def test_adjust_gamma(device, dtype, config): ...@@ -1003,6 +963,78 @@ def test_adjust_gamma(device, dtype, config):
) )
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('dt', [None, torch.float32, torch.float64, torch.float16])
@pytest.mark.parametrize('pad', [2, [3, ], [0, 3], (3, 3), [4, 2, 4, 3]])
@pytest.mark.parametrize('config', [
    {"padding_mode": "constant", "fill": 0},
    {"padding_mode": "constant", "fill": 10},
    {"padding_mode": "constant", "fill": 20},
    {"padding_mode": "edge"},
    {"padding_mode": "reflect"},
    {"padding_mode": "symmetric"},
])
def test_pad(device, dt, pad, config):
    """Check F.pad: tensor output matches PIL, scripted matches eager, and
    batched inputs behave consistently, for one (pad, config) combination.
    """
    if dt == torch.float16 and device == "cpu":
        # skip float16 on CPU case -- bail out before allocating any test data
        return

    script_fn = torch.jit.script(F.pad)
    tensor, pil_img = _create_data(7, 8, device=device)
    batch_tensors = _create_data_batch(16, 18, num_samples=4, device=device)

    if dt is not None:
        # This is a trivial cast to float of uint8 data to test all cases
        tensor = tensor.to(dt)
        batch_tensors = batch_tensors.to(dt)

    pad_tensor = F_t.pad(tensor, pad, **config)
    pad_pil_img = F_pil.pad(pil_img, pad, **config)

    pad_tensor_8b = pad_tensor
    # we need to cast to uint8 to compare with PIL image
    if pad_tensor_8b.dtype != torch.uint8:
        pad_tensor_8b = pad_tensor_8b.to(torch.uint8)

    _assert_equal_tensor_to_pil(pad_tensor_8b, pad_pil_img, msg="{}, {}".format(pad, config))

    # torch.jit.script requires a List[int] padding, so wrap a bare int
    script_pad = [pad, ] if isinstance(pad, int) else pad
    pad_tensor_script = script_fn(tensor, script_pad, **config)
    assert_equal(pad_tensor, pad_tensor_script, msg="{}, {}".format(pad, config))

    _test_fn_on_batch(batch_tensors, F.pad, padding=script_pad, **config)
@pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('mode', [NEAREST, BILINEAR, BICUBIC])
def test_resized_crop(device, mode):
    # test values of F.resized_crop in several cases:
    # 1) resize to the same size, crop to the same size => should be identity
    tensor, _ = _create_data(26, 36, device=device)
    result = F.resized_crop(tensor, top=0, left=0, height=26, width=36, size=[26, 36], interpolation=mode)
    assert_equal(tensor, result, msg="{} vs {}".format(result[0, :5, :5], tensor[0, :5, :5]))

    # 2) resize by half and crop a TL corner
    tensor, _ = _create_data(26, 36, device=device)
    result = F.resized_crop(tensor, top=0, left=0, height=20, width=30, size=[10, 15], interpolation=NEAREST)
    # nearest-neighbor downscale by 2 of the 20x30 crop == strided slicing
    expected = tensor[:, :20:2, :30:2]
    assert_equal(
        expected,
        result,
        check_stride=False,
        msg="{} vs {}".format(expected[0, :10, :10], result[0, :10, :10]),
    )

    batch_tensors = _create_data_batch(26, 36, num_samples=4, device=device)
    _test_fn_on_batch(
        batch_tensors, F.resized_crop, top=1, left=2, height=20, width=30, size=[10, 15], interpolation=NEAREST
    )
@pytest.mark.parametrize('device', cpu_and_gpu()) @pytest.mark.parametrize('device', cpu_and_gpu())
@pytest.mark.parametrize('func, args', [ @pytest.mark.parametrize('func, args', [
(F_t._get_image_size, ()), (F_t.vflip, ()), (F_t._get_image_size, ()), (F_t.vflip, ()),
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment