Unverified Commit 90a2402b authored by Philip Meier, committed by GitHub

Cleanup test suite related to `torch.testing.assert_close` (#4177)


Co-authored-by: Nicolas Hug <nicolashug@fb.com>
parent bf2fe567
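The diff below applies two recurring changes: the `assert_equal` helper moves out of the temporary `_assert_utils` module into `common_utils`, and the explicit `check_stride=False` argument is dropped from comparison calls, which is redundant once `torch.testing.assert_close` no longer compares strides by default. A condensed sketch of the pattern (illustrative; see the hunks below for the real lines):

    # before
    from common_utils import get_tmp_dir
    from _assert_utils import assert_equal
    assert_equal(actual, expected, check_stride=False)

    # after
    from common_utils import get_tmp_dir, assert_equal
    assert_equal(actual, expected)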
"""This is a temporary module and should be removed as soon as torch.testing.assert_equal is supported."""
# TODO: remove this as soon as torch.testing.assert_equal is supported
import functools
import torch.testing
__all__ = ["assert_equal"]
assert_equal = functools.partial(torch.testing.assert_close, rtol=0, atol=0)
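With both tolerances pinned to zero, this helper demands exactly equal values while keeping `assert_close`'s diagnostics. A minimal sketch of the behavior (the example tensors are made up for illustration):

    import functools
    import torch
    import torch.testing

    assert_equal = functools.partial(torch.testing.assert_close, rtol=0, atol=0)

    a = torch.tensor([1.0, 2.0, 3.0])
    assert_equal(a, a.clone())               # passes: values match exactly
    torch.testing.assert_close(a, a + 1e-6)  # passes: default float32 tolerances absorb the drift
    # assert_equal(a, a + 1e-6)              # would raise: zero tolerance demands exact equality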
@@ -9,6 +9,7 @@ import torch
import __main__
import random
import inspect
+import functools
from numbers import Number
from torch._six import string_classes
@@ -17,8 +18,6 @@ from collections import OrderedDict
import numpy as np
from PIL import Image
-from _assert_utils import assert_equal
IS_PY39 = sys.version_info.major == 3 and sys.version_info.minor == 9
PY39_SEGFAULT_SKIP_MSG = "Segmentation fault with Python 3.9, see https://github.com/pytorch/vision/issues/3367"
PY39_SKIP = unittest.skipIf(IS_PY39, PY39_SEGFAULT_SKIP_MSG)
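For context, `PY39_SKIP` is applied as a decorator to tests that trigger the linked segfault, e.g. in test_video_reader.py further down; a minimal sketch (the test name is hypothetical):

    @PY39_SKIP
    def test_that_hits_the_py39_segfault(self):
        ...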
@@ -268,6 +267,9 @@ def _create_data_batch(height=3, width=3, channels=3, num_samples=4, device="cpu
return batch_tensor
+assert_equal = functools.partial(torch.testing.assert_close, rtol=0, atol=0)
def _assert_equal_tensor_to_pil(tensor, pil_image, msg=None):
np_pil_image = np.array(pil_image)
if np_pil_image.ndim == 2:
@@ -275,7 +277,7 @@ def _assert_equal_tensor_to_pil(tensor, pil_image, msg=None):
pil_tensor = torch.as_tensor(np_pil_image.transpose((2, 0, 1)))
if msg is None:
msg = "tensor:\n{} \ndid not equal PIL tensor:\n{}".format(tensor, pil_tensor)
-assert_equal(tensor.cpu(), pil_tensor, check_stride=False, msg=msg)
+assert_equal(tensor.cpu(), pil_tensor, msg=msg)
def _assert_approx_equal_tensor_to_pil(tensor, pil_image, tol=1e-5, msg=None, agg_method="mean",
......
@@ -13,8 +13,7 @@ from torchvision.datasets.samplers import (
from torchvision.datasets.video_utils import VideoClips, unfold
from torchvision import get_video_backend
-from common_utils import get_tmp_dir
-from _assert_utils import assert_equal
+from common_utils import get_tmp_dir, assert_equal
@contextlib.contextmanager
......
@@ -6,8 +6,7 @@ import pytest
from torchvision import io
from torchvision.datasets.video_utils import VideoClips, unfold
-from common_utils import get_tmp_dir
-from _assert_utils import assert_equal
+from common_utils import get_tmp_dir, assert_equal
@contextlib.contextmanager
@@ -41,7 +40,7 @@ class TestVideo:
[0, 1, 2],
[3, 4, 5],
])
-assert_equal(r, expected, check_stride=False)
+assert_equal(r, expected)
r = unfold(a, 3, 2, 1)
expected = torch.tensor([
@@ -49,14 +48,14 @@ class TestVideo:
[2, 3, 4],
[4, 5, 6]
])
-assert_equal(r, expected, check_stride=False)
+assert_equal(r, expected)
r = unfold(a, 3, 2, 2)
expected = torch.tensor([
[0, 2, 4],
[2, 4, 6],
])
-assert_equal(r, expected, check_stride=False)
+assert_equal(r, expected)
@pytest.mark.skipif(not io.video._av_available(), reason="this test requires av")
def test_video_clips(self):
......
@@ -21,8 +21,8 @@ from common_utils import (
_assert_equal_tensor_to_pil,
_assert_approx_equal_tensor_to_pil,
_test_fn_on_batch,
+assert_equal,
)
-from _assert_utils import assert_equal
from typing import Dict, List, Sequence, Tuple
@@ -187,11 +187,7 @@ class TestAffine:
tensor, angle=angle, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST
)
if config is not None:
-assert_equal(
-torch.rot90(tensor, **config),
-out_tensor,
-check_stride=False,
-)
+assert_equal(torch.rot90(tensor, **config), out_tensor)
if out_tensor.dtype != torch.uint8:
out_tensor = out_tensor.to(torch.uint8)
@@ -856,7 +852,6 @@ def test_resized_crop(device, mode):
assert_equal(
expected_out_tensor,
out_tensor,
-check_stride=False,
msg="{} vs {}".format(expected_out_tensor[0, :10, :10], out_tensor[0, :10, :10]),
)
@@ -1001,10 +996,7 @@ def test_gaussian_blur(device, image_size, dt, ksize, sigma, fn):
).reshape(shape[-2], shape[-1], shape[-3]).permute(2, 0, 1).to(tensor)
out = fn(tensor, kernel_size=ksize, sigma=sigma)
-torch.testing.assert_close(
-out, true_out, rtol=0.0, atol=1.0, check_stride=False,
-msg="{}, {}".format(ksize, sigma)
-)
+torch.testing.assert_close(out, true_out, rtol=0.0, atol=1.0, msg="{}, {}".format(ksize, sigma))
@pytest.mark.parametrize('device', cpu_and_gpu())
......
@@ -9,8 +9,7 @@ import numpy as np
import torch
from PIL import Image, __version__ as PILLOW_VERSION
import torchvision.transforms.functional as F
-from common_utils import get_tmp_dir, needs_cuda
-from _assert_utils import assert_equal
+from common_utils import get_tmp_dir, needs_cuda, assert_equal
from torchvision.io.image import (
decode_png, decode_jpeg, encode_jpeg, write_jpeg, decode_image, read_file,
@@ -280,7 +279,7 @@ def test_read_1_bit_png(shape):
img.save(image_path)
img1 = read_image(image_path)
img2 = normalize_dimensions(torch.as_tensor(pixels * 255, dtype=torch.uint8))
-assert_equal(img1, img2, check_stride=False)
+assert_equal(img1, img2)
@pytest.mark.parametrize('shape', [
......
@@ -9,8 +9,7 @@ from torchvision import get_video_backend
import warnings
from urllib.error import URLError
-from common_utils import get_tmp_dir
-from _assert_utils import assert_equal
+from common_utils import get_tmp_dir, assert_equal
try:
......
import torch
-from common_utils import TestCase
-from _assert_utils import assert_equal
+from common_utils import TestCase, assert_equal
from torchvision.models.detection.anchor_utils import AnchorGenerator, DefaultBoxGenerator
from torchvision.models.detection.image_list import ImageList
import pytest
......
@@ -7,7 +7,7 @@ from torchvision.models.detection.roi_heads import RoIHeads
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor, TwoMLPHead
import pytest
-from _assert_utils import assert_equal
+from common_utils import assert_equal
class TestModelsDetectionNegativeSamples:
......
@@ -4,7 +4,7 @@ from torchvision.models.detection import _utils
from torchvision.models.detection.transform import GeneralizedRCNNTransform
import pytest
from torchvision.models.detection import backbone_utils
-from _assert_utils import assert_equal
+from common_utils import assert_equal
class TestModelsDetectionUtils:
......
@@ -6,8 +6,7 @@ try:
except ImportError:
onnxruntime = None
-from common_utils import set_rng_seed
-from _assert_utils import assert_equal
+from common_utils import set_rng_seed, assert_equal
import io
import torch
from torchvision import ops
......
-from common_utils import needs_cuda, cpu_and_gpu
-from _assert_utils import assert_equal
+from common_utils import needs_cuda, cpu_and_gpu, assert_equal
import math
from abc import ABC, abstractmethod
import pytest
......
@@ -19,8 +19,7 @@ try:
except ImportError:
stats = None
-from common_utils import cycle_over, int_dtypes, float_dtypes
-from _assert_utils import assert_equal
+from common_utils import cycle_over, int_dtypes, float_dtypes, assert_equal
GRACE_HOPPER = get_file_path_2(
@@ -159,7 +158,7 @@ class TestAccImage:
output = trans(accimage.Image(GRACE_HOPPER))
assert expected_output.size() == output.size()
-torch.testing.assert_close(output, expected_output, check_stride=False)
+torch.testing.assert_close(output, expected_output)
def test_accimage_resize(self):
trans = transforms.Compose([
@@ -205,23 +204,23 @@ class TestToTensor:
input_data = torch.ByteTensor(channels, height, width).random_(0, 255).float().div_(255)
img = transforms.ToPILImage()(input_data)
output = trans(img)
-torch.testing.assert_close(output, input_data, check_stride=False)
+torch.testing.assert_close(output, input_data)
ndarray = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
output = trans(ndarray)
expected_output = ndarray.transpose((2, 0, 1)) / 255.0
-torch.testing.assert_close(output.numpy(), expected_output, check_stride=False, check_dtype=False)
+torch.testing.assert_close(output.numpy(), expected_output, check_dtype=False)
ndarray = np.random.rand(height, width, channels).astype(np.float32)
output = trans(ndarray)
expected_output = ndarray.transpose((2, 0, 1))
-torch.testing.assert_close(output.numpy(), expected_output, check_stride=False, check_dtype=False)
+torch.testing.assert_close(output.numpy(), expected_output, check_dtype=False)
# separate test for mode '1' PIL images
input_data = torch.ByteTensor(1, height, width).bernoulli_()
img = transforms.ToPILImage()(input_data.mul(255)).convert('1')
output = trans(img)
-torch.testing.assert_close(input_data, output, check_dtype=False, check_stride=False)
+torch.testing.assert_close(input_data, output, check_dtype=False)
def test_to_tensor_errors(self):
height, width = 4, 4
@@ -258,7 +257,7 @@ class TestToTensor:
input_data = torch.ByteTensor(channels, height, width).random_(0, 255)
img = transforms.ToPILImage()(input_data)
output = trans(img)
-torch.testing.assert_close(input_data, output, check_stride=False)
+torch.testing.assert_close(input_data, output)
input_data = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
img = transforms.ToPILImage()(input_data)
@@ -270,13 +269,13 @@ class TestToTensor:
img = transforms.ToPILImage()(input_data) # CHW -> HWC and (* 255).byte()
output = trans(img) # HWC -> CHW
expected_output = (input_data * 255).byte()
-torch.testing.assert_close(output, expected_output, check_stride=False)
+torch.testing.assert_close(output, expected_output)
# separate test for mode '1' PIL images
input_data = torch.ByteTensor(1, height, width).bernoulli_()
img = transforms.ToPILImage()(input_data.mul(255)).convert('1')
output = trans(img).view(torch.uint8).bool().to(torch.uint8)
-torch.testing.assert_close(input_data, output, check_stride=False)
+torch.testing.assert_close(input_data, output)
def test_pil_to_tensor_errors(self):
height, width = 4, 4
@@ -420,10 +419,10 @@ class TestPad:
h_padded = result[:, :padding, :]
w_padded = result[:, :, :padding]
torch.testing.assert_close(
-h_padded, torch.full_like(h_padded, fill_value=fill_v), check_stride=False, rtol=0.0, atol=eps
+h_padded, torch.full_like(h_padded, fill_value=fill_v), rtol=0.0, atol=eps
)
torch.testing.assert_close(
-w_padded, torch.full_like(w_padded, fill_value=fill_v), check_stride=False, rtol=0.0, atol=eps
+w_padded, torch.full_like(w_padded, fill_value=fill_v), rtol=0.0, atol=eps
)
pytest.raises(ValueError, transforms.Pad(padding, fill=(1, 2)),
transforms.ToPILImage()(img))
@@ -457,7 +456,7 @@ class TestPad:
# First 6 elements of leftmost edge in the middle of the image, values are in order:
# edge_pad, edge_pad, edge_pad, constant_pad, constant value added to leftmost edge, 0
edge_middle_slice = np.asarray(edge_padded_img).transpose(2, 0, 1)[0][17][:6]
-assert_equal(edge_middle_slice, np.asarray([200, 200, 200, 200, 1, 0], dtype=np.uint8), check_stride=False)
+assert_equal(edge_middle_slice, np.asarray([200, 200, 200, 200, 1, 0], dtype=np.uint8))
assert transforms.ToTensor()(edge_padded_img).size() == (3, 35, 35)
# Pad 3 to left/right, 2 to top/bottom
@@ -465,7 +464,7 @@ class TestPad:
# First 6 elements of leftmost edge in the middle of the image, values are in order:
# reflect_pad, reflect_pad, reflect_pad, constant_pad, constant value added to leftmost edge, 0
reflect_middle_slice = np.asarray(reflect_padded_img).transpose(2, 0, 1)[0][17][:6]
-assert_equal(reflect_middle_slice, np.asarray([0, 0, 1, 200, 1, 0], dtype=np.uint8), check_stride=False)
+assert_equal(reflect_middle_slice, np.asarray([0, 0, 1, 200, 1, 0], dtype=np.uint8))
assert transforms.ToTensor()(reflect_padded_img).size() == (3, 33, 35)
# Pad 3 to left, 2 to top, 2 to right, 1 to bottom
@@ -473,7 +472,7 @@ class TestPad:
# First 6 elements of leftmost edge in the middle of the image, values are in order:
# sym_pad, sym_pad, sym_pad, constant_pad, constant value added to leftmost edge, 0
symmetric_middle_slice = np.asarray(symmetric_padded_img).transpose(2, 0, 1)[0][17][:6]
-assert_equal(symmetric_middle_slice, np.asarray([0, 1, 200, 200, 1, 0], dtype=np.uint8), check_stride=False)
+assert_equal(symmetric_middle_slice, np.asarray([0, 1, 200, 200, 1, 0], dtype=np.uint8))
assert transforms.ToTensor()(symmetric_padded_img).size() == (3, 32, 34)
# Check negative padding explicitly for symmetric case, since it is not
@@ -482,8 +481,8 @@ class TestPad:
symmetric_padded_img_neg = F.pad(img, (-1, 2, 3, -3), padding_mode='symmetric')
symmetric_neg_middle_left = np.asarray(symmetric_padded_img_neg).transpose(2, 0, 1)[0][17][:3]
symmetric_neg_middle_right = np.asarray(symmetric_padded_img_neg).transpose(2, 0, 1)[0][17][-4:]
-assert_equal(symmetric_neg_middle_left, np.asarray([1, 0, 0], dtype=np.uint8), check_stride=False)
-assert_equal(symmetric_neg_middle_right, np.asarray([200, 200, 0, 0], dtype=np.uint8), check_stride=False)
+assert_equal(symmetric_neg_middle_left, np.asarray([1, 0, 0], dtype=np.uint8))
+assert_equal(symmetric_neg_middle_right, np.asarray([200, 200, 0, 0], dtype=np.uint8))
assert transforms.ToTensor()(symmetric_padded_img_neg).size() == (3, 28, 31)
def test_pad_raises_with_invalid_pad_sequence_len(self):
@@ -502,7 +501,7 @@ class TestPad:
img = Image.new("F", (10, 10))
padded_img = transform(img)
-assert_equal(padded_img.size, [edge_size + 2 * pad for edge_size in img.size], check_stride=False)
+assert_equal(padded_img.size, [edge_size + 2 * pad for edge_size in img.size])
@pytest.mark.skipif(stats is None, reason="scipy.stats not available")
@@ -579,7 +578,7 @@ class TestToPil:
img = transform(img_data)
assert img.mode == expected_mode
-torch.testing.assert_close(expected_output, to_tensor(img).numpy(), check_stride=False)
+torch.testing.assert_close(expected_output, to_tensor(img).numpy())
def test_1_channel_float_tensor_to_pil_image(self):
img_data = torch.Tensor(1, 4, 4).uniform_()
@@ -617,7 +616,7 @@ class TestToPil:
assert img.mode == expected_mode
split = img.split()
for i in range(2):
-torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False)
+torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]))
def test_2_channel_ndarray_to_pil_image_error(self):
img_data = torch.ByteTensor(4, 4, 2).random_(0, 255).numpy()
@@ -721,7 +720,7 @@ class TestToPil:
assert img.mode == expected_mode
split = img.split()
for i in range(3):
-torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False)
+torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]))
def test_3_channel_ndarray_to_pil_image_error(self):
img_data = torch.ByteTensor(4, 4, 3).random_(0, 255).numpy()
@@ -778,7 +777,7 @@ class TestToPil:
assert img.mode == expected_mode
split = img.split()
for i in range(4):
-torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False)
+torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]))
def test_4_channel_ndarray_to_pil_image_error(self):
img_data = torch.ByteTensor(4, 4, 4).random_(0, 255).numpy()
@@ -1152,7 +1151,7 @@ def test_to_grayscale():
assert gray_np_2.shape == tuple(x_shape), 'should be 3 channel'
assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1])
assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2])
-assert_equal(gray_np, gray_np_2[:, :, 0], check_stride=False)
+assert_equal(gray_np, gray_np_2[:, :, 0])
# Case 3: 1 channel grayscale -> 1 channel grayscale
trans3 = transforms.Grayscale(num_output_channels=1)
@@ -1170,7 +1169,7 @@ def test_to_grayscale():
assert gray_np_4.shape == tuple(x_shape), 'should be 3 channel'
assert_equal(gray_np_4[:, :, 0], gray_np_4[:, :, 1])
assert_equal(gray_np_4[:, :, 1], gray_np_4[:, :, 2])
-assert_equal(gray_np, gray_np_4[:, :, 0], check_stride=False)
+assert_equal(gray_np, gray_np_4[:, :, 0])
# Checking if Grayscale can be printed as string
trans4.__repr__()
@@ -1240,7 +1239,7 @@ def test_random_grayscale():
assert gray_np_2.shape == tuple(x_shape), 'should be 3 channel'
assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1])
assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2])
-assert_equal(gray_np, gray_np_2[:, :, 0], check_stride=False)
+assert_equal(gray_np, gray_np_2[:, :, 0])
# Case 3b: RGB -> 3 channel grayscale (unchanged)
trans2 = transforms.RandomGrayscale(p=0.0)
@@ -1600,8 +1599,9 @@ def test_center_crop_2(odd_image_size, delta, delta_width, delta_height):
# Ensure output for PIL and Tensor are equal
assert_equal(
-output_tensor, output_pil, check_stride=False,
-msg="image_size: {} crop_size: {}".format(input_image_size, crop_size)
+output_tensor,
+output_pil,
+msg="image_size: {} crop_size: {}".format(input_image_size, crop_size),
)
# Check if content in center of both image and cropped output is same.
@@ -1625,7 +1625,7 @@ def test_center_crop_2(odd_image_size, delta, delta_width, delta_height):
input_center_tl[1]:input_center_tl[1] + center_size[1]
]
-assert_equal(output_center, img_center, check_stride=False)
+assert_equal(output_center, img_center)
def test_color_jitter():
......
@@ -18,8 +18,8 @@ from common_utils import (
_assert_equal_tensor_to_pil,
_assert_approx_equal_tensor_to_pil,
cpu_and_gpu,
+assert_equal,
)
-from _assert_utils import assert_equal
NEAREST, BILINEAR, BICUBIC = InterpolationMode.NEAREST, InterpolationMode.BILINEAR, InterpolationMode.BICUBIC
......
@@ -4,7 +4,7 @@ import pytest
import random
import numpy as np
import warnings
-from _assert_utils import assert_equal
+from common_utils import assert_equal
try:
from scipy import stats
......
@@ -9,7 +9,7 @@ import torchvision.utils as utils
from io import BytesIO
import torchvision.transforms.functional as F
from PIL import Image, __version__ as PILLOW_VERSION, ImageColor
-from _assert_utils import assert_equal
+from common_utils import assert_equal
PILLOW_VERSION = tuple(int(x) for x in PILLOW_VERSION.split('.'))
......
@@ -10,8 +10,7 @@ import torchvision.io as io
from numpy.random import randint
from torchvision import set_video_backend
from torchvision.io import _HAS_VIDEO_OPT
-from common_utils import PY39_SKIP
-from _assert_utils import assert_equal
+from common_utils import PY39_SKIP, assert_equal
try:
......
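A note on the dropped `check_stride=False` arguments above: `torch.testing.assert_close` checks values, dtype, and device, but it does not compare strides by default, so the explicit argument adds nothing. A small sketch of that default (illustrative, not part of the commit):

    import torch
    import torch.testing

    a = torch.arange(6).reshape(2, 3)  # contiguous, stride (3, 1)
    b = a.t().contiguous().t()         # same values, different stride (1, 2)
    assert a.stride() != b.stride()
    torch.testing.assert_close(a, b)   # passes: strides are not compared by default
    # torch.testing.assert_close(a, b, check_stride=True)  # would raise on the stride mismatch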