Unverified Commit 15bebfbc authored by DevPranjal's avatar DevPranjal Committed by GitHub
Browse files

Port to_tensor tests in test_transforms to pytest (#3966)

parent 366bf0aa
...@@ -307,54 +307,6 @@ class Tester(unittest.TestCase): ...@@ -307,54 +307,6 @@ class Tester(unittest.TestCase):
with self.assertRaisesRegex(ValueError, r"Required crop size .+ is larger then input image size .+"): with self.assertRaisesRegex(ValueError, r"Required crop size .+ is larger then input image size .+"):
t(img) t(img)
def test_to_tensor(self):
    # Legacy unittest-style test, superseded in this commit by the
    # pytest-parametrized test_to_tensor.
    test_channels = [1, 3, 4]
    height, width = 4, 4
    trans = transforms.ToTensor()
    # Nested lists are not an accepted input type.
    with self.assertRaises(TypeError):
        trans(np.random.rand(1, height, width).tolist())
    with self.assertRaises(ValueError):
        trans(np.random.rand(height))
        # NOTE(review): unreachable — the previous line already raises inside
        # this `with`; the pytest port splits these into separate blocks.
        trans(np.random.rand(1, 1, height, width))
    for channels in test_channels:
        # Float tensor -> PIL -> tensor round trip preserves values.
        input_data = torch.ByteTensor(channels, height, width).random_(0, 255).float().div_(255)
        img = transforms.ToPILImage()(input_data)
        output = trans(img)
        torch.testing.assert_close(output, input_data, check_stride=False)
        # uint8 HWC ndarray is transposed to CHW and rescaled into [0, 1].
        ndarray = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
        output = trans(ndarray)
        expected_output = ndarray.transpose((2, 0, 1)) / 255.0
        torch.testing.assert_close(output.numpy(), expected_output, check_stride=False, check_dtype=False)
        # float32 HWC ndarray is transposed without rescaling.
        ndarray = np.random.rand(height, width, channels).astype(np.float32)
        output = trans(ndarray)
        expected_output = ndarray.transpose((2, 0, 1))
        torch.testing.assert_close(output.numpy(), expected_output, check_stride=False, check_dtype=False)
    # separate test for mode '1' PIL images
    input_data = torch.ByteTensor(1, height, width).bernoulli_()
    img = transforms.ToPILImage()(input_data.mul(255)).convert('1')
    output = trans(img)
    torch.testing.assert_close(input_data, output, check_dtype=False, check_stride=False)
def test_to_tensor_with_other_default_dtypes(self):
    # Legacy unittest-style test; the pytest port parametrizes over dtypes.
    current_def_dtype = torch.get_default_dtype()
    t = transforms.ToTensor()
    np_arr = np.random.randint(0, 255, (32, 32, 3), dtype=np.uint8)
    img = Image.fromarray(np_arr)
    for dtype in [torch.float16, torch.float, torch.double]:
        # ToTensor should honour the active global default floating dtype.
        torch.set_default_dtype(dtype)
        res = t(img)
        self.assertTrue(res.dtype == dtype, msg=f"{res.dtype} vs {dtype}")
    # NOTE(review): not in a `finally` — a failure above leaves the
    # process-wide default dtype changed for subsequent tests.
    torch.set_default_dtype(current_def_dtype)
def test_max_value(self): def test_max_value(self):
for dtype in int_dtypes(): for dtype in int_dtypes():
self.assertEqual(F_t._max_value(dtype), torch.iinfo(dtype).max) self.assertEqual(F_t._max_value(dtype), torch.iinfo(dtype).max)
...@@ -492,39 +444,6 @@ class Tester(unittest.TestCase): ...@@ -492,39 +444,6 @@ class Tester(unittest.TestCase):
torch.testing.assert_close(output, expected_output) torch.testing.assert_close(output, expected_output)
def test_pil_to_tensor(self):
    # Legacy unittest-style test, superseded in this commit by the
    # pytest-parametrized test_pil_to_tensor.
    test_channels = [1, 3, 4]
    height, width = 4, 4
    trans = transforms.PILToTensor()
    with self.assertRaises(TypeError):
        trans(np.random.rand(1, height, width).tolist())
        # NOTE(review): unreachable — the previous line already raises inside
        # this `with`; the pytest port checks it separately.
        trans(np.random.rand(1, height, width))
    for channels in test_channels:
        # Byte tensor -> PIL -> tensor round trip is lossless.
        input_data = torch.ByteTensor(channels, height, width).random_(0, 255)
        img = transforms.ToPILImage()(input_data)
        output = trans(img)
        torch.testing.assert_close(input_data, output, check_stride=False)
        # uint8 HWC ndarray is transposed to CHW without value scaling.
        input_data = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
        img = transforms.ToPILImage()(input_data)
        output = trans(img)
        expected_output = input_data.transpose((2, 0, 1))
        torch.testing.assert_close(output.numpy(), expected_output)
        # ToPILImage scales floats to bytes; PILToTensor keeps the bytes.
        input_data = torch.as_tensor(np.random.rand(channels, height, width).astype(np.float32))
        img = transforms.ToPILImage()(input_data)  # CHW -> HWC and (* 255).byte()
        output = trans(img)  # HWC -> CHW
        expected_output = (input_data * 255).byte()
        torch.testing.assert_close(output, expected_output, check_stride=False)
    # separate test for mode '1' PIL images
    input_data = torch.ByteTensor(1, height, width).bernoulli_()
    img = transforms.ToPILImage()(input_data.mul(255)).convert('1')
    output = trans(img).view(torch.uint8).bool().to(torch.uint8)
    torch.testing.assert_close(input_data, output, check_stride=False)
@unittest.skipIf(accimage is None, 'accimage not available') @unittest.skipIf(accimage is None, 'accimage not available')
def test_accimage_pil_to_tensor(self): def test_accimage_pil_to_tensor(self):
trans = transforms.PILToTensor() trans = transforms.PILToTensor()
...@@ -1219,6 +1138,102 @@ class Tester(unittest.TestCase): ...@@ -1219,6 +1138,102 @@ class Tester(unittest.TestCase):
t.__repr__() t.__repr__()
@pytest.mark.parametrize('channels', [1, 3, 4])
def test_to_tensor(channels):
    """ToTensor converts PIL images and HWC ndarrays to CHW float tensors."""
    height, width = 4, 4
    to_tensor = transforms.ToTensor()

    # Float tensor -> PIL -> tensor round trip preserves values.
    expected = torch.ByteTensor(channels, height, width).random_(0, 255).float().div_(255)
    result = to_tensor(transforms.ToPILImage()(expected))
    torch.testing.assert_close(result, expected, check_stride=False)

    # uint8 HWC ndarray is transposed to CHW and rescaled into [0, 1].
    arr = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
    result = to_tensor(arr)
    torch.testing.assert_close(
        result.numpy(), arr.transpose((2, 0, 1)) / 255.0, check_stride=False, check_dtype=False
    )

    # float32 HWC ndarray is transposed without any rescaling.
    arr = np.random.rand(height, width, channels).astype(np.float32)
    result = to_tensor(arr)
    torch.testing.assert_close(
        result.numpy(), arr.transpose((2, 0, 1)), check_stride=False, check_dtype=False
    )

    # Mode '1' (binary) PIL images take a separate conversion path.
    expected = torch.ByteTensor(1, height, width).bernoulli_()
    binary_img = transforms.ToPILImage()(expected.mul(255)).convert('1')
    result = to_tensor(binary_img)
    torch.testing.assert_close(expected, result, check_dtype=False, check_stride=False)
def test_to_tensor_errors():
    """ToTensor rejects plain lists and arrays of unsupported rank."""
    height = width = 4
    to_tensor = transforms.ToTensor()

    # A nested Python list is not an accepted input type.
    with pytest.raises(TypeError):
        to_tensor(np.random.rand(1, height, width).tolist())

    # 1-D and 4-D arrays are both rejected.
    with pytest.raises(ValueError):
        to_tensor(np.random.rand(height))
    with pytest.raises(ValueError):
        to_tensor(np.random.rand(1, 1, height, width))
@pytest.mark.parametrize('dtype', [torch.float16, torch.float, torch.double])
def test_to_tensor_with_other_default_dtypes(dtype):
    """ToTensor should produce tensors in the active default floating dtype.

    The global default dtype is mutated for the duration of the check and
    restored in a ``finally`` block, so a failing assertion cannot leave the
    process-wide default dtype changed for subsequent tests.
    """
    current_def_dtype = torch.get_default_dtype()
    t = transforms.ToTensor()
    np_arr = np.random.randint(0, 255, (32, 32, 3), dtype=np.uint8)
    img = Image.fromarray(np_arr)
    try:
        torch.set_default_dtype(dtype)
        res = t(img)
        assert res.dtype == dtype, f"{res.dtype} vs {dtype}"
    finally:
        # Restore the global default dtype regardless of the outcome.
        torch.set_default_dtype(current_def_dtype)
@pytest.mark.parametrize('channels', [1, 3, 4])
def test_pil_to_tensor(channels):
    """PILToTensor converts PIL images to CHW uint8 tensors without rescaling."""
    height, width = 4, 4
    pil_to_tensor = transforms.PILToTensor()

    # Byte tensor -> PIL -> tensor round trip is lossless.
    expected = torch.ByteTensor(channels, height, width).random_(0, 255)
    result = pil_to_tensor(transforms.ToPILImage()(expected))
    torch.testing.assert_close(expected, result, check_stride=False)

    # uint8 HWC ndarray is transposed to CHW with values untouched.
    arr = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
    result = pil_to_tensor(transforms.ToPILImage()(arr))
    torch.testing.assert_close(result.numpy(), arr.transpose((2, 0, 1)))

    # ToPILImage scales floats to bytes; PILToTensor keeps those bytes.
    float_input = torch.as_tensor(np.random.rand(channels, height, width).astype(np.float32))
    img = transforms.ToPILImage()(float_input)  # CHW -> HWC and (* 255).byte()
    result = pil_to_tensor(img)  # HWC -> CHW
    torch.testing.assert_close(result, (float_input * 255).byte(), check_stride=False)

    # Mode '1' (binary) PIL images need a bit-level reinterpretation.
    expected = torch.ByteTensor(1, height, width).bernoulli_()
    binary_img = transforms.ToPILImage()(expected.mul(255)).convert('1')
    result = pil_to_tensor(binary_img).view(torch.uint8).bool().to(torch.uint8)
    torch.testing.assert_close(expected, result, check_stride=False)
def test_pil_to_tensor_errors():
    """PILToTensor only accepts PIL images; lists and ndarrays raise TypeError."""
    height = width = 4
    pil_to_tensor = transforms.PILToTensor()

    with pytest.raises(TypeError):
        pil_to_tensor(np.random.rand(1, height, width).tolist())
    with pytest.raises(TypeError):
        pil_to_tensor(np.random.rand(1, height, width))
def test_randomresized_params(): def test_randomresized_params():
height = random.randint(24, 32) * 2 height = random.randint(24, 32) * 2
width = random.randint(24, 32) * 2 width = random.randint(24, 32) * 2
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment