import os
import sys
import tempfile
from io import BytesIO

import numpy as np
import pytest
import torch
import torchvision.transforms.functional as F
import torchvision.utils as utils
from common_utils import assert_equal
from PIL import Image, __version__ as PILLOW_VERSION, ImageColor


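# Pillow reports its version as a string; convert it to a tuple of ints so that
# tests below can gate reference-image comparisons on the installed version.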
PILLOW_VERSION = tuple(int(x) for x in PILLOW_VERSION.split("."))

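# Shared fixtures: these boxes and keypoints are reused by the drawing tests below.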
boxes = torch.tensor([[0, 0, 20, 20], [0, 0, 0, 0], [10, 15, 30, 35], [23, 35, 93, 95]], dtype=torch.float)

keypoints = torch.tensor([[[10, 10], [5, 5], [2, 2]], [[20, 20], [30, 30], [3, 3]]], dtype=torch.float)


def test_make_grid_not_inplace():
    t = torch.rand(5, 3, 10, 10)
    t_clone = t.clone()

    utils.make_grid(t, normalize=False)
    assert_equal(t, t_clone, msg="make_grid modified tensor in-place")

    utils.make_grid(t, normalize=True, scale_each=False)
    assert_equal(t, t_clone, msg="make_grid modified tensor in-place")

    utils.make_grid(t, normalize=True, scale_each=True)
    assert_equal(t, t_clone, msg="make_grid modified tensor in-place")


def test_normalize_in_make_grid():
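    # make_grid(normalize=True) should shift and scale the grid values into the [0, 1] range.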
    t = torch.rand(5, 3, 10, 10) * 255
    norm_max = torch.tensor(1.0)
    norm_min = torch.tensor(0.0)

    grid = utils.make_grid(t, normalize=True)
    grid_max = torch.max(grid)
    grid_min = torch.min(grid)

    # Rounding the result to one decimal for comparison
    n_digits = 1
    rounded_grid_max = torch.round(grid_max * 10 ** n_digits) / (10 ** n_digits)
    rounded_grid_min = torch.round(grid_min * 10 ** n_digits) / (10 ** n_digits)

    assert_equal(norm_max, rounded_grid_max, msg="Normalized max is not equal to 1")
    assert_equal(norm_min, rounded_grid_min, msg="Normalized min is not equal to 0")


@pytest.mark.skipif(sys.platform in ("win32", "cygwin"), reason="temporarily disabled on Windows")
def test_save_image():
    with tempfile.NamedTemporaryFile(suffix=".png") as f:
        t = torch.rand(2, 3, 64, 64)
        utils.save_image(t, f.name)
        assert os.path.exists(f.name), "The image is not present after save"


@pytest.mark.skipif(sys.platform in ("win32", "cygwin"), reason="temporarily disabled on Windows")
def test_save_image_single_pixel():
    with tempfile.NamedTemporaryFile(suffix=".png") as f:
        t = torch.rand(1, 3, 1, 1)
        utils.save_image(t, f.name)
        assert os.path.exists(f.name), "The pixel image is not present after save"


@pytest.mark.skipif(sys.platform in ("win32", "cygwin"), reason="temporarily disabled on Windows")
def test_save_image_file_object():
    with tempfile.NamedTemporaryFile(suffix=".png") as f:
        t = torch.rand(2, 3, 64, 64)
        utils.save_image(t, f.name)
        img_orig = Image.open(f.name)
        fp = BytesIO()
        utils.save_image(t, fp, format="png")
        img_bytes = Image.open(fp)
        assert_equal(F.pil_to_tensor(img_orig), F.pil_to_tensor(img_bytes), msg="Image not stored in file object")


@pytest.mark.skipif(sys.platform in ("win32", "cygwin"), reason="temporarily disabled on Windows")
def test_save_image_single_pixel_file_object():
    with tempfile.NamedTemporaryFile(suffix=".png") as f:
        t = torch.rand(1, 3, 1, 1)
        utils.save_image(t, f.name)
        img_orig = Image.open(f.name)
        fp = BytesIO()
        utils.save_image(t, fp, format="png")
        img_bytes = Image.open(fp)
        assert_equal(F.pil_to_tensor(img_orig), F.pil_to_tensor(img_bytes), msg="Image not stored in file object")


def test_draw_boxes():
    img = torch.full((3, 100, 100), 255, dtype=torch.uint8)
    img_cp = img.clone()
    boxes_cp = boxes.clone()
    labels = ["a", "b", "c", "d"]
    colors = ["green", "#FF00FF", (0, 255, 0), "red"]
    result = utils.draw_bounding_boxes(img, boxes, labels=labels, colors=colors, fill=True)

    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets", "fakedata", "draw_boxes_util.png")
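    # If the reference image is missing (e.g. on a fresh checkout), generate it from the current result.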
    if not os.path.exists(path):
        res = Image.fromarray(result.permute(1, 2, 0).contiguous().numpy())
        res.save(path)

    if PILLOW_VERSION >= (8, 2):
        # The reference image is only valid for Pillow >= 8.2
        expected = torch.as_tensor(np.array(Image.open(path))).permute(2, 0, 1)
        assert_equal(result, expected)

    # Check that the input image and boxes were not modified in place
    assert_equal(boxes, boxes_cp)
    assert_equal(img, img_cp)


@pytest.mark.parametrize("colors", [None, ["red", "blue", "#FF00FF", (1, 34, 122)], "red", "#FF00FF", (1, 34, 122)])
def test_draw_boxes_colors(colors):
    img = torch.full((3, 100, 100), 0, dtype=torch.uint8)
    utils.draw_bounding_boxes(img, boxes, fill=False, width=7, colors=colors)


def test_draw_boxes_vanilla():
    img = torch.full((3, 100, 100), 0, dtype=torch.uint8)
    img_cp = img.clone()
    boxes_cp = boxes.clone()
    result = utils.draw_bounding_boxes(img, boxes, fill=False, width=7, colors="white")

    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets", "fakedata", "draw_boxes_vanilla.png")
    if not os.path.exists(path):
        res = Image.fromarray(result.permute(1, 2, 0).contiguous().numpy())
        res.save(path)

    expected = torch.as_tensor(np.array(Image.open(path))).permute(2, 0, 1)
    assert_equal(result, expected)
    # Check that the input image and boxes were not modified in place
    assert_equal(boxes, boxes_cp)
    assert_equal(img, img_cp)


def test_draw_boxes_grayscale():
    img = torch.full((1, 4, 4), fill_value=255, dtype=torch.uint8)
    boxes = torch.tensor([[0, 0, 3, 3]], dtype=torch.int64)
    bboxed_img = utils.draw_bounding_boxes(image=img, boxes=boxes, colors=["#1BBC9B"])
    assert bboxed_img.size(0) == 3


def test_draw_invalid_boxes():
    img_tp = ((1, 1, 1), (1, 2, 3))
    img_wrong1 = torch.full((3, 5, 5), 255, dtype=torch.float)
    img_wrong2 = torch.full((1, 3, 5, 5), 255, dtype=torch.uint8)
    img_correct = torch.zeros((3, 10, 10), dtype=torch.uint8)
    boxes = torch.tensor([[0, 0, 20, 20], [0, 0, 0, 0], [10, 15, 30, 35], [23, 35, 93, 95]], dtype=torch.float)
    labels_wrong = ["one", "two"]
    colors_wrong = ["pink", "blue"]

    with pytest.raises(TypeError, match="Tensor expected"):
        utils.draw_bounding_boxes(img_tp, boxes)
    with pytest.raises(ValueError, match="Tensor uint8 expected"):
        utils.draw_bounding_boxes(img_wrong1, boxes)
    with pytest.raises(ValueError, match="Pass individual images, not batches"):
        utils.draw_bounding_boxes(img_wrong2, boxes)
    with pytest.raises(ValueError, match="Only grayscale and RGB images are supported"):
        utils.draw_bounding_boxes(img_wrong2[0][:2], boxes)
    with pytest.raises(ValueError, match="Number of boxes"):
        utils.draw_bounding_boxes(img_correct, boxes, labels_wrong)
    with pytest.raises(ValueError, match="Number of colors"):
        utils.draw_bounding_boxes(img_correct, boxes, colors=colors_wrong)


@pytest.mark.parametrize(
    "colors",
    [
        None,
        "blue",
        "#FF00FF",
        (1, 34, 122),
        ["red", "blue"],
        ["#FF00FF", (1, 34, 122)],
    ],
)
@pytest.mark.parametrize("alpha", (0, 0.5, 0.7, 1))
def test_draw_segmentation_masks(colors, alpha):
    """Make sure each mask is drawn with its corresponding color where it applies."""
    num_masks, h, w = 2, 100, 100
    dtype = torch.uint8
    img = torch.randint(0, 256, size=(3, h, w), dtype=dtype)
    masks = torch.randint(0, 2, (num_masks, h, w), dtype=torch.bool)

    # For testing we enforce that there's no overlap between the masks: when
    # masks overlap, the last mask's color takes priority, which would make the
    # per-mask color check below harder to write, so we simply avoid overlaps.
    overlap = masks[0] & masks[1]
    masks[:, overlap] = False

    out = utils.draw_segmentation_masks(img, masks, colors=colors, alpha=alpha)
    assert out.dtype == dtype
    assert out is not img

    # Make sure the image didn't change where there's no mask
    masked_pixels = masks[0] | masks[1]
    assert_equal(img[:, ~masked_pixels], out[:, ~masked_pixels])

    if colors is None:
        colors = utils._generate_color_palette(num_masks)
    elif isinstance(colors, (str, tuple)):
        colors = [colors]

    # Make sure each mask draws with its own color
    for mask, color in zip(masks, colors):
        if isinstance(color, str):
            color = ImageColor.getrgb(color)
        color = torch.tensor(color, dtype=dtype)

        if alpha == 1:
            assert (out[:, mask] == color[:, None]).all()
        elif alpha == 0:
            assert (out[:, mask] == img[:, mask]).all()

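        # For any alpha, the masked pixels should be the alpha blend of the original pixels and the mask color.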
        interpolated_color = (img[:, mask] * (1 - alpha) + color[:, None] * alpha).to(dtype)
        torch.testing.assert_close(out[:, mask], interpolated_color, rtol=0.0, atol=1.0)


def test_draw_segmentation_masks_errors():
    h, w = 10, 10

    masks = torch.randint(0, 2, size=(h, w), dtype=torch.bool)
    img = torch.randint(0, 256, size=(3, h, w), dtype=torch.uint8)

    with pytest.raises(TypeError, match="The image must be a tensor"):
        utils.draw_segmentation_masks(image="Not A Tensor Image", masks=masks)
    with pytest.raises(ValueError, match="The image dtype must be"):
        img_bad_dtype = torch.randint(0, 256, size=(3, h, w), dtype=torch.int64)
        utils.draw_segmentation_masks(image=img_bad_dtype, masks=masks)
    with pytest.raises(ValueError, match="Pass individual images, not batches"):
        batch = torch.randint(0, 256, size=(10, 3, h, w), dtype=torch.uint8)
        utils.draw_segmentation_masks(image=batch, masks=masks)
    with pytest.raises(ValueError, match="Pass an RGB image"):
        one_channel = torch.randint(0, 256, size=(1, h, w), dtype=torch.uint8)
        utils.draw_segmentation_masks(image=one_channel, masks=masks)
    with pytest.raises(ValueError, match="The masks must be of dtype bool"):
        masks_bad_dtype = torch.randint(0, 2, size=(h, w), dtype=torch.float)
        utils.draw_segmentation_masks(image=img, masks=masks_bad_dtype)
    with pytest.raises(ValueError, match="masks must be of shape"):
        masks_bad_shape = torch.randint(0, 2, size=(3, 2, h, w), dtype=torch.bool)
        utils.draw_segmentation_masks(image=img, masks=masks_bad_shape)
    with pytest.raises(ValueError, match="must have the same height and width"):
        masks_bad_shape = torch.randint(0, 2, size=(h + 4, w), dtype=torch.bool)
        utils.draw_segmentation_masks(image=img, masks=masks_bad_shape)
    with pytest.raises(ValueError, match="There are more masks"):
        utils.draw_segmentation_masks(image=img, masks=masks, colors=[])
    with pytest.raises(ValueError, match="colors must be a tuple or a string, or a list thereof"):
        bad_colors = np.array(["red", "blue"])  # should be a list
        utils.draw_segmentation_masks(image=img, masks=masks, colors=bad_colors)
    with pytest.raises(ValueError, match="It seems that you passed a tuple of colors instead of"):
        bad_colors = ("red", "blue")  # should be a list
        utils.draw_segmentation_masks(image=img, masks=masks, colors=bad_colors)


def test_draw_keypoints_vanilla():
    # keypoints is declared at the top of the file as a global variable
    keypoints_cp = keypoints.clone()

    img = torch.full((3, 100, 100), 0, dtype=torch.uint8)
    img_cp = img.clone()
    result = utils.draw_keypoints(
        img,
        keypoints,
        colors="red",
        connectivity=[
            (0, 1),
        ],
    )
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets", "fakedata", "draw_keypoint_vanilla.png")
    if not os.path.exists(path):
        res = Image.fromarray(result.permute(1, 2, 0).contiguous().numpy())
        res.save(path)

    expected = torch.as_tensor(np.array(Image.open(path))).permute(2, 0, 1)
    assert_equal(result, expected)
    # Check that keypoints are not modified in place
    assert_equal(keypoints, keypoints_cp)
    # Check that image is not modified in place
    assert_equal(img, img_cp)


@pytest.mark.parametrize("colors", ["red", "#FF00FF", (1, 34, 122)])
def test_draw_keypoints_colored(colors):
    # keypoints is declared at the top of the file as a global variable
    keypoints_cp = keypoints.clone()

    img = torch.full((3, 100, 100), 0, dtype=torch.uint8)
    img_cp = img.clone()
    result = utils.draw_keypoints(
        img,
        keypoints,
        colors=colors,
        connectivity=[
            (0, 1),
        ],
    )
    assert result.size(0) == 3
    assert_equal(keypoints, keypoints_cp)
    assert_equal(img, img_cp)


def test_draw_keypoints_errors():
    h, w = 10, 10
    img = torch.full((3, 100, 100), 0, dtype=torch.uint8)

    with pytest.raises(TypeError, match="The image must be a tensor"):
        utils.draw_keypoints(image="Not A Tensor Image", keypoints=keypoints)
    with pytest.raises(ValueError, match="The image dtype must be"):
        img_bad_dtype = torch.full((3, h, w), 0, dtype=torch.int64)
        utils.draw_keypoints(image=img_bad_dtype, keypoints=keypoints)
    with pytest.raises(ValueError, match="Pass individual images, not batches"):
        batch = torch.randint(0, 256, size=(10, 3, h, w), dtype=torch.uint8)
        utils.draw_keypoints(image=batch, keypoints=keypoints)
    with pytest.raises(ValueError, match="Pass an RGB image"):
        one_channel = torch.randint(0, 256, size=(1, h, w), dtype=torch.uint8)
        utils.draw_keypoints(image=one_channel, keypoints=keypoints)
    with pytest.raises(ValueError, match="keypoints must be of shape"):
        invalid_keypoints = torch.tensor([[10, 10, 10, 10], [5, 6, 7, 8]], dtype=torch.float)
        utils.draw_keypoints(image=img, keypoints=invalid_keypoints)


@pytest.mark.parametrize("batch", (True, False))
def test_flow_to_image(batch):
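    # Build a deterministic flow field: each pixel's (x, y) displacement is its offset from the image center.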
    h, w = 100, 100
    flow = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij")
    flow = torch.stack(flow[::-1], dim=0).float()
    flow[0] -= h / 2
    flow[1] -= w / 2

    if batch:
        flow = torch.stack([flow, flow])

    img = utils.flow_to_image(flow)
    # Parentheses are needed so the comparison covers both the batched and unbatched shapes.
    assert img.shape == ((2, 3, h, w) if batch else (3, h, w))

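    # Compare against a reference image precomputed for this exact flow field.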
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets", "expected_flow.pt")
    expected_img = torch.load(path, map_location="cpu")

    if batch:
        expected_img = torch.stack([expected_img, expected_img])

    assert_equal(expected_img, img)


@pytest.mark.parametrize(
    "input_flow, match",
    (
        (torch.full((3, 10, 10), 0, dtype=torch.float), "Input flow should have shape"),
        (torch.full((5, 3, 10, 10), 0, dtype=torch.float), "Input flow should have shape"),
        (torch.full((2, 10), 0, dtype=torch.float), "Input flow should have shape"),
        (torch.full((5, 2, 10), 0, dtype=torch.float), "Input flow should have shape"),
        (torch.full((2, 10, 30), 0, dtype=torch.int), "Flow should be of dtype torch.float"),
    ),
)
def test_flow_to_image_errors(input_flow, match):
    with pytest.raises(ValueError, match=match):
        utils.flow_to_image(flow=input_flow)


if __name__ == "__main__":
    pytest.main([__file__])