import itertools
import pathlib
import pickle
import random

import numpy as np

import PIL.Image
import pytest
import torch
import torchvision.transforms.v2 as transforms

from common_utils import assert_equal, cpu_and_cuda
from torch.utils._pytree import tree_flatten, tree_unflatten
from torchvision import tv_tensors
from torchvision.ops.boxes import box_iou
from torchvision.transforms.functional import to_pil_image
from torchvision.transforms.v2 import functional as F
from torchvision.transforms.v2._utils import check_type, is_pure_tensor, query_chw
from transforms_v2_legacy_utils import (
    make_bounding_boxes,
    make_detection_mask,
    make_image,
    make_images,
    make_multiple_bounding_boxes,
    make_segmentation_mask,
    make_video,
    make_videos,
)


def make_vanilla_tensor_images(*args, **kwargs):
    for image in make_images(*args, **kwargs):
        if image.ndim > 3:
            continue
        yield image.data


def make_pil_images(*args, **kwargs):
    for image in make_vanilla_tensor_images(*args, **kwargs):
        yield to_pil_image(image)
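

# The two helpers above adapt `make_images` for consumers of plain tensors and PIL images:
# batched samples are skipped (PIL cannot represent a batch dimension) and the remaining
# CHW tensors are converted with `to_pil_image`.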


def make_vanilla_tensor_bounding_boxes(*args, **kwargs):
    for bounding_boxes in make_multiple_bounding_boxes(*args, **kwargs):
        yield bounding_boxes.data


def parametrize(transforms_with_inputs):
    return pytest.mark.parametrize(
        ("transform", "input"),
        [
            pytest.param(
                transform,
                input,
                id=f"{type(transform).__name__}-{type(input).__module__}.{type(input).__name__}-{idx}",
            )
            for transform, inputs in transforms_with_inputs
            for idx, input in enumerate(inputs)
        ],
    )
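

# A minimal usage sketch for `parametrize` above (hypothetical, not part of the suite): each
# (transform, input) pair becomes one test case, with an id like
# "RandomHorizontalFlip-torch.Tensor-0".
#
#     @parametrize([(transforms.RandomHorizontalFlip(p=1.0), [torch.rand(3, 8, 8)])])
#     def test_flip_smoke(transform, input):
#         transform(input)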


def auto_augment_adapter(transform, input, device):
    adapted_input = {}
    image_or_video_found = False
    for key, value in input.items():
        if isinstance(value, (tv_tensors.BoundingBoxes, tv_tensors.Mask)):
            # AA transforms don't support bounding boxes or masks
            continue
        elif check_type(value, (tv_tensors.Image, tv_tensors.Video, is_pure_tensor, PIL.Image.Image)):
            if image_or_video_found:
                # AA transforms only support a single image or video
                continue
            image_or_video_found = True
        adapted_input[key] = value
    return adapted_input


def linear_transformation_adapter(transform, input, device):
    flat_inputs = list(input.values())
    c, h, w = query_chw(
        [
            item
            for item, needs_transform in zip(flat_inputs, transforms.Transform()._needs_transform_list(flat_inputs))
            if needs_transform
        ]
    )
    num_elements = c * h * w
    transform.transformation_matrix = torch.randn((num_elements, num_elements), device=device)
    transform.mean_vector = torch.randn((num_elements,), device=device)
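    # LinearTransformation does not support PIL images, so they are dropped from the sample.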
    return {key: value for key, value in input.items() if not isinstance(value, PIL.Image.Image)}


def normalize_adapter(transform, input, device):
    adapted_input = {}
    for key, value in input.items():
        if isinstance(value, PIL.Image.Image):
            # normalize doesn't support PIL images
            continue
        elif check_type(value, (tv_tensors.Image, tv_tensors.Video, is_pure_tensor)):
            # normalize doesn't support integer images
            value = F.to_dtype(value, torch.float32, scale=True)
        adapted_input[key] = value
    return adapted_input
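

# A minimal sketch of the adapter contract shared by the helpers above (hypothetical, not used
# by the tests): an adapter receives the transform, the full input dict, and the target device,
# and returns an input dict that the transform can actually handle.
def _identity_adapter(transform, input, device):
    # keep every sample component unchanged
    return dict(input)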


class TestSmoke:
    @pytest.mark.parametrize(
        ("transform", "adapter"),
        [
            (transforms.RandomErasing(p=1.0), None),
            (transforms.AugMix(), auto_augment_adapter),
            (transforms.AutoAugment(), auto_augment_adapter),
            (transforms.RandAugment(), auto_augment_adapter),
            (transforms.TrivialAugmentWide(), auto_augment_adapter),
            (transforms.ColorJitter(brightness=0.1, contrast=0.2, saturation=0.3, hue=0.15), None),
            (transforms.RandomAdjustSharpness(sharpness_factor=0.5, p=1.0), None),
            (transforms.RandomAutocontrast(p=1.0), None),
            (transforms.RandomEqualize(p=1.0), None),
            (transforms.RandomInvert(p=1.0), None),
            (transforms.RandomChannelPermutation(), None),
            (transforms.RandomPosterize(bits=4, p=1.0), None),
            (transforms.RandomSolarize(threshold=0.5, p=1.0), None),
            (transforms.CenterCrop([16, 16]), None),
            (transforms.ElasticTransform(sigma=1.0), None),
            (transforms.Pad(4), None),
            (transforms.RandomAffine(degrees=30.0), None),
            (transforms.RandomCrop([16, 16], pad_if_needed=True), None),
            (transforms.RandomHorizontalFlip(p=1.0), None),
            (transforms.RandomPerspective(p=1.0), None),
            (transforms.RandomResize(min_size=10, max_size=20, antialias=True), None),
            (transforms.RandomResizedCrop([16, 16], antialias=True), None),
            (transforms.RandomRotation(degrees=30), None),
            (transforms.RandomShortestSize(min_size=10, antialias=True), None),
            (transforms.RandomVerticalFlip(p=1.0), None),
            (transforms.Resize([16, 16], antialias=True), None),
            (transforms.ScaleJitter((16, 16), scale_range=(0.8, 1.2), antialias=True), None),
            (transforms.ClampBoundingBoxes(), None),
            (transforms.ConvertBoundingBoxFormat(tv_tensors.BoundingBoxFormat.CXCYWH), None),
            (transforms.ConvertImageDtype(), None),
            (transforms.GaussianBlur(kernel_size=3), None),
            (
                transforms.LinearTransformation(
                    # These are just dummy values that will be filled in by the adapter. We can't define them
                    # upfront, because we know neither the spatial size nor the device at this point.
                    transformation_matrix=torch.empty((1, 1)),
                    mean_vector=torch.empty((1,)),
                ),
                linear_transformation_adapter,
            ),
            (transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), normalize_adapter),
            (transforms.ToDtype(torch.float64), None),
            (transforms.UniformTemporalSubsample(num_samples=2), None),
        ],
        ids=lambda transform: type(transform).__name__,
    )
    @pytest.mark.parametrize("container_type", [dict, list, tuple])
    @pytest.mark.parametrize(
        "image_or_video",
        [
            make_image(),
            make_video(),
            next(make_pil_images(color_spaces=["RGB"])),
            next(make_vanilla_tensor_images()),
        ],
    )
    @pytest.mark.parametrize("de_serialize", [lambda t: t, lambda t: pickle.loads(pickle.dumps(t))])
    @pytest.mark.parametrize("device", cpu_and_cuda())
    def test_common(self, transform, adapter, container_type, image_or_video, de_serialize, device):
        transform = de_serialize(transform)

        canvas_size = F.get_size(image_or_video)
        input = dict(
            image_or_video=image_or_video,
            image_tv_tensor=make_image(size=canvas_size),
            video_tv_tensor=make_video(size=canvas_size),
            image_pil=next(make_pil_images(sizes=[canvas_size], color_spaces=["RGB"])),
            bounding_boxes_xyxy=make_bounding_boxes(
                format=tv_tensors.BoundingBoxFormat.XYXY, canvas_size=canvas_size, batch_dims=(3,)
            ),
            bounding_boxes_xywh=make_bounding_boxes(
                format=tv_tensors.BoundingBoxFormat.XYWH, canvas_size=canvas_size, batch_dims=(4,)
            ),
            bounding_boxes_cxcywh=make_bounding_boxes(
                format=tv_tensors.BoundingBoxFormat.CXCYWH, canvas_size=canvas_size, batch_dims=(5,)
            ),
            bounding_boxes_degenerate_xyxy=tv_tensors.BoundingBoxes(
                [
                    [0, 0, 0, 0],  # no height or width
                    [0, 0, 0, 1],  # no height
                    [0, 0, 1, 0],  # no width
                    [2, 0, 1, 1],  # x1 > x2, y1 < y2
                    [0, 2, 1, 1],  # x1 < x2, y1 > y2
                    [2, 2, 1, 1],  # x1 > x2, y1 > y2
                ],
                format=tv_tensors.BoundingBoxFormat.XYXY,
                canvas_size=canvas_size,
            ),
            bounding_boxes_degenerate_xywh=tv_tensors.BoundingBoxes(
                [
                    [0, 0, 0, 0],  # no height or width
                    [0, 0, 0, 1],  # no height
                    [0, 0, 1, 0],  # no width
                    [0, 0, 1, -1],  # negative height
                    [0, 0, -1, 1],  # negative width
                    [0, 0, -1, -1],  # negative height and width
                ],
                format=tv_tensors.BoundingBoxFormat.XYWH,
                canvas_size=canvas_size,
            ),
            bounding_boxes_degenerate_cxcywh=tv_tensors.BoundingBoxes(
                [
                    [0, 0, 0, 0],  # no height or width
                    [0, 0, 0, 1],  # no height
                    [0, 0, 1, 0],  # no width
                    [0, 0, 1, -1],  # negative height
                    [0, 0, -1, 1],  # negative width
                    [0, 0, -1, -1],  # negative height and width
                ],
                format=tv_tensors.BoundingBoxFormat.CXCYWH,
                canvas_size=canvas_size,
            ),
            detection_mask=make_detection_mask(size=canvas_size),
            segmentation_mask=make_segmentation_mask(size=canvas_size),
            int=0,
            float=0.0,
            bool=True,
            none=None,
            str="str",
            path=pathlib.Path.cwd(),
            object=object(),
            tensor=torch.empty(5),
            array=np.empty(5),
        )
        if adapter is not None:
            input = adapter(transform, input, device)

        if container_type in {tuple, list}:
            input = container_type(input.values())

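        # Round-trip through the pytree utilities: flatten, move every tensor to the target
        # device, then restore the original container structure.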
        input_flat, input_spec = tree_flatten(input)
        input_flat = [item.to(device) if isinstance(item, torch.Tensor) else item for item in input_flat]
        input = tree_unflatten(input_flat, input_spec)

        torch.manual_seed(0)
        output = transform(input)
        output_flat, output_spec = tree_flatten(output)

        assert output_spec == input_spec

        for output_item, input_item, should_be_transformed in zip(
            output_flat, input_flat, transforms.Transform()._needs_transform_list(input_flat)
        ):
            if should_be_transformed:
                assert type(output_item) is type(input_item)
            else:
                assert output_item is input_item

            if isinstance(input_item, tv_tensors.BoundingBoxes) and not isinstance(
                transform, transforms.ConvertBoundingBoxFormat
            ):
                assert output_item.format == input_item.format

        # Enforce that the transform does not turn a degenerate box marked by RandomIoUCrop (or any other future
        # transform that does this) back into a valid one.
        # TODO: we should test that against all degenerate boxes above
        for format in list(tv_tensors.BoundingBoxFormat):
            sample = dict(
                boxes=tv_tensors.BoundingBoxes([[0, 0, 0, 0]], format=format, canvas_size=(224, 244)),
                labels=torch.tensor([3]),
            )
            assert transforms.SanitizeBoundingBoxes()(sample)["boxes"].shape == (0, 4)

    @parametrize(
        [
            (
                transform,
                itertools.chain.from_iterable(
                    fn(
                        color_spaces=[
                            "GRAY",
                            "RGB",
                        ],
                        dtypes=[torch.uint8],
                        extra_dims=[(), (4,)],
                        **(dict(num_frames=[3]) if fn is make_videos else dict()),
                    )
                    for fn in [
                        make_images,
                        make_vanilla_tensor_images,
                        make_pil_images,
                        make_videos,
                    ]
                ),
            )
            for transform in (
                transforms.RandAugment(),
                transforms.TrivialAugmentWide(),
                transforms.AutoAugment(),
                transforms.AugMix(),
            )
        ]
    )
    def test_auto_augment(self, transform, input):
        transform(input)

    @parametrize(
        [
            (
                transforms.Normalize(mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0]),
                itertools.chain.from_iterable(
                    fn(color_spaces=["RGB"], dtypes=[torch.float32])
                    for fn in [
                        make_images,
                        make_vanilla_tensor_images,
                        make_videos,
                    ]
                ),
            ),
        ]
    )
    def test_normalize(self, transform, input):
        transform(input)


@pytest.mark.parametrize(
    "flat_inputs",
    itertools.permutations(
        [
            next(make_vanilla_tensor_images()),
            next(make_vanilla_tensor_images()),
            next(make_pil_images()),
            make_image(),
            next(make_videos()),
        ],
        3,
    ),
)
def test_pure_tensor_heuristic(flat_inputs):
    def split_on_pure_tensor(to_split):
        # This takes a sequence that is structurally aligned with `flat_inputs` and splits its items into three parts:
        # 1. The first pure tensor. If none is present, this will be `None`
        # 2. A list of the remaining pure tensors
        # 3. A list of all other items
        pure_tensors = []
        others = []
        # Splitting always happens on the original `flat_inputs`, so that erroneous type changes made by the
        # transform cannot affect the splitting.
        for item, inpt in zip(to_split, flat_inputs):
            (pure_tensors if is_pure_tensor(inpt) else others).append(item)
        return pure_tensors[0] if pure_tensors else None, pure_tensors[1:], others

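    # A transform that copies every input. `was_applied` then distinguishes "transformed"
    # (equal in value but not identical) from "passed through" (the identical object).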
    class CopyCloneTransform(transforms.Transform):
        def _transform(self, inpt, params):
            return inpt.clone() if isinstance(inpt, torch.Tensor) else inpt.copy()

        @staticmethod
        def was_applied(output, inpt):
            identity = output is inpt
            if identity:
                return False

            # Make sure nothing fishy is going on
            assert_equal(output, inpt)
            return True

    first_pure_tensor_input, other_pure_tensor_inputs, other_inputs = split_on_pure_tensor(flat_inputs)

    transform = CopyCloneTransform()
    transformed_sample = transform(flat_inputs)

    first_pure_tensor_output, other_pure_tensor_outputs, other_outputs = split_on_pure_tensor(transformed_sample)

    if first_pure_tensor_input is not None:
        if other_inputs:
            assert not transform.was_applied(first_pure_tensor_output, first_pure_tensor_input)
        else:
            assert transform.was_applied(first_pure_tensor_output, first_pure_tensor_input)

    for output, inpt in zip(other_pure_tensor_outputs, other_pure_tensor_inputs):
        assert not transform.was_applied(output, inpt)

    for input, output in zip(other_inputs, other_outputs):
        assert transform.was_applied(output, input)
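

# In short, the heuristic exercised above: a pure tensor is treated as an image only if the
# sample contains no tv_tensor or PIL image, and even then only the first one; every other pure
# tensor is passed through untouched. Illustrative (hypothetical) samples:
#
#     [torch.rand(3, 8, 8), torch.rand(3, 8, 8)]  # only the first tensor is transformed
#     [make_image(), torch.rand(3, 8, 8)]         # only the Image is transformed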


class TestElasticTransform:
    def test_assertions(self):
        with pytest.raises(TypeError, match="alpha should be a number or a sequence of numbers"):
            transforms.ElasticTransform({})

        with pytest.raises(ValueError, match="alpha is a sequence its length should be 1 or 2"):
            transforms.ElasticTransform([1.0, 2.0, 3.0])

        with pytest.raises(TypeError, match="sigma should be a number or a sequence of numbers"):
            transforms.ElasticTransform(1.0, {})

        with pytest.raises(ValueError, match="sigma is a sequence its length should be 1 or 2"):
            transforms.ElasticTransform(1.0, [1.0, 2.0, 3.0])

        with pytest.raises(TypeError, match="Got inappropriate fill arg"):
            transforms.ElasticTransform(1.0, 2.0, fill="abc")

    def test__get_params(self):
        alpha = 2.0
        sigma = 3.0
        transform = transforms.ElasticTransform(alpha, sigma)

        h, w = size = (24, 32)
        image = make_image(size)

        params = transform._get_params([image])

        displacement = params["displacement"]
        assert displacement.shape == (1, h, w, 2)
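        # ElasticTransform expresses the displacement in the normalized coordinates used by
        # grid_sample, so an `alpha` given in pixels is scaled by the image width (x channel)
        # and height (y channel); hence the bounds below.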
        assert (-alpha / w <= displacement[0, ..., 0]).all() and (displacement[0, ..., 0] <= alpha / w).all()
        assert (-alpha / h <= displacement[0, ..., 1]).all() and (displacement[0, ..., 1] <= alpha / h).all()


class TestTransform:
    @pytest.mark.parametrize(
        "inpt_type",
        [torch.Tensor, PIL.Image.Image, tv_tensors.Image, np.ndarray, tv_tensors.BoundingBoxes, str, int],
    )
    def test_check_transformed_types(self, inpt_type, mocker):
        # This test ensures that we correctly handle which types to transform and which to bypass
        t = transforms.Transform()
        inpt = mocker.MagicMock(spec=inpt_type)

        if inpt_type in (np.ndarray, str, int):
            output = t(inpt)
            assert output is inpt
        else:
            with pytest.raises(NotImplementedError):
                t(inpt)


class TestToImage:
    @pytest.mark.parametrize(
        "inpt_type",
        [torch.Tensor, PIL.Image.Image, tv_tensors.Image, np.ndarray, tv_tensors.BoundingBoxes, str, int],
    )
    def test__transform(self, inpt_type, mocker):
        fn = mocker.patch(
            "torchvision.transforms.v2.functional.to_image",
            return_value=torch.rand(1, 3, 8, 8),
        )

        inpt = mocker.MagicMock(spec=inpt_type)
        transform = transforms.ToImage()
        transform(inpt)
        if inpt_type in (tv_tensors.BoundingBoxes, tv_tensors.Image, str, int):
            assert fn.call_count == 0
        else:
            fn.assert_called_once_with(inpt)


class TestToPILImage:
    @pytest.mark.parametrize(
        "inpt_type",
        [torch.Tensor, PIL.Image.Image, tv_tensors.Image, np.ndarray, tv_tensors.BoundingBoxes, str, int],
    )
    def test__transform(self, inpt_type, mocker):
        fn = mocker.patch("torchvision.transforms.v2.functional.to_pil_image")

        inpt = mocker.MagicMock(spec=inpt_type)
        transform = transforms.ToPILImage()
        transform(inpt)
        if inpt_type in (PIL.Image.Image, tv_tensors.BoundingBoxes, str, int):
            assert fn.call_count == 0
        else:
            fn.assert_called_once_with(inpt, mode=transform.mode)


class TestToTensor:
    @pytest.mark.parametrize(
        "inpt_type",
        [torch.Tensor, PIL.Image.Image, tv_tensors.Image, np.ndarray, tv_tensors.BoundingBoxes, str, int],
    )
    def test__transform(self, inpt_type, mocker):
        fn = mocker.patch("torchvision.transforms.functional.to_tensor")

        inpt = mocker.MagicMock(spec=inpt_type)
        with pytest.warns(UserWarning, match="deprecated and will be removed"):
            transform = transforms.ToTensor()
        transform(inpt)
        if inpt_type in (tv_tensors.Image, torch.Tensor, tv_tensors.BoundingBoxes, str, int):
            assert fn.call_count == 0
        else:
            fn.assert_called_once_with(inpt)


class TestContainers:
    @pytest.mark.parametrize("transform_cls", [transforms.Compose, transforms.RandomChoice, transforms.RandomOrder])
    def test_assertions(self, transform_cls):
        with pytest.raises(TypeError, match="Argument transforms should be a sequence of callables"):
            transform_cls(transforms.RandomCrop(28))

    @pytest.mark.parametrize("transform_cls", [transforms.Compose, transforms.RandomChoice, transforms.RandomOrder])
    @pytest.mark.parametrize(
        "trfms",
        [
            [transforms.Pad(2), transforms.RandomCrop(28)],
            [lambda x: 2.0 * x, transforms.Pad(2), transforms.RandomCrop(28)],
            [transforms.Pad(2), lambda x: 2.0 * x, transforms.RandomCrop(28)],
        ],
    )
    def test_ctor(self, transform_cls, trfms):
        c = transform_cls(trfms)
        inpt = torch.rand(1, 3, 32, 32)
        output = c(inpt)
        assert isinstance(output, torch.Tensor)
        assert output.ndim == 4


class TestRandomChoice:
    def test_assertions(self):
        with pytest.raises(ValueError, match="Length of p doesn't match the number of transforms"):
            transforms.RandomChoice([transforms.Pad(2), transforms.RandomCrop(28)], p=[1])


class TestRandomIoUCrop:
    @pytest.mark.parametrize("device", cpu_and_cuda())
    @pytest.mark.parametrize("options", [[0.5, 0.9], [2.0]])
    def test__get_params(self, device, options):
        orig_h, orig_w = size = (24, 32)
        image = make_image(size)
        bboxes = tv_tensors.BoundingBoxes(
            torch.tensor([[1, 1, 10, 10], [20, 20, 23, 23], [1, 20, 10, 23], [20, 1, 23, 10]]),
            format="XYXY",
            canvas_size=size,
            device=device,
        )
        sample = [image, bboxes]

        transform = transforms.RandomIoUCrop(sampler_options=options)

        n_samples = 5
        for _ in range(n_samples):

            params = transform._get_params(sample)

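            # A sampler option >= 1 encodes the "leave the image as-is" choice, in which case
            # _get_params returns an empty dict and the transform becomes a no-op.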
            if options == [2.0]:
                assert len(params) == 0
                return

            assert len(params["is_within_crop_area"]) > 0
            assert params["is_within_crop_area"].dtype == torch.bool

            assert int(transform.min_scale * orig_h) <= params["height"] <= int(transform.max_scale * orig_h)
            assert int(transform.min_scale * orig_w) <= params["width"] <= int(transform.max_scale * orig_w)

            left, top = params["left"], params["top"]
            new_h, new_w = params["height"], params["width"]
            ious = box_iou(
                bboxes,
                torch.tensor([[left, top, left + new_w, top + new_h]], dtype=bboxes.dtype, device=bboxes.device),
            )
            assert ious.max() >= options[0] or ious.max() >= options[1], f"{ious} vs {options}"

    def test__transform_empty_params(self, mocker):
        transform = transforms.RandomIoUCrop(sampler_options=[2.0])
        image = tv_tensors.Image(torch.rand(1, 3, 4, 4))
        bboxes = tv_tensors.BoundingBoxes(torch.tensor([[1, 1, 2, 2]]), format="XYXY", canvas_size=(4, 4))
        label = torch.tensor([1])
        sample = [image, bboxes, label]
        # Let's mock transform._get_params to control the output:
        transform._get_params = mocker.MagicMock(return_value={})
        output = transform(sample)
        torch.testing.assert_close(output, sample)

    def test_forward_assertion(self):
        transform = transforms.RandomIoUCrop()
        with pytest.raises(
            TypeError,
            match="requires input sample to contain tensor or PIL images and bounding boxes",
        ):
            transform(torch.tensor(0))

    def test__transform(self, mocker):
        transform = transforms.RandomIoUCrop()

        size = (32, 24)
        image = make_image(size)
        bboxes = make_bounding_boxes(format="XYXY", canvas_size=size, batch_dims=(6,))
        masks = make_detection_mask(size, num_objects=6)

        sample = [image, bboxes, masks]

        is_within_crop_area = torch.tensor([0, 1, 0, 1, 0, 1], dtype=torch.bool)

        params = dict(top=1, left=2, height=12, width=12, is_within_crop_area=is_within_crop_area)
        transform._get_params = mocker.MagicMock(return_value=params)
        output = transform(sample)

        # check number of bboxes vs number of labels:
        output_bboxes = output[1]
        assert isinstance(output_bboxes, tv_tensors.BoundingBoxes)
        assert (output_bboxes[~is_within_crop_area] == 0).all()

        output_masks = output[2]
        assert isinstance(output_masks, tv_tensors.Mask)


class TestScaleJitter:
    def test__get_params(self):
        canvas_size = (24, 32)
        target_size = (16, 12)
        scale_range = (0.5, 1.5)

        transform = transforms.ScaleJitter(target_size=target_size, scale_range=scale_range)

        sample = make_image(canvas_size)

        n_samples = 5
        for _ in range(n_samples):

            params = transform._get_params([sample])

            assert "size" in params
            size = params["size"]

            assert isinstance(size, tuple) and len(size) == 2
            height, width = size

            r_min = min(target_size[1] / canvas_size[0], target_size[0] / canvas_size[1]) * scale_range[0]
            r_max = min(target_size[1] / canvas_size[0], target_size[0] / canvas_size[1]) * scale_range[1]

            assert int(canvas_size[0] * r_min) <= height <= int(canvas_size[0] * r_max)
            assert int(canvas_size[1] * r_min) <= width <= int(canvas_size[1] * r_max)


class TestRandomShortestSize:
    @pytest.mark.parametrize("min_size,max_size", [([5, 9], 20), ([5, 9], None)])
    def test__get_params(self, min_size, max_size):
        canvas_size = (3, 10)

        transform = transforms.RandomShortestSize(min_size=min_size, max_size=max_size, antialias=True)

        sample = make_image(canvas_size)
        params = transform._get_params([sample])

        assert "size" in params
        size = params["size"]

        assert isinstance(size, tuple) and len(size) == 2

        longer = max(size)
        shorter = min(size)
        if max_size is not None:
            assert longer <= max_size
            assert shorter <= max_size
        else:
            assert shorter in min_size


class TestLinearTransformation:
    def test_assertions(self):
        with pytest.raises(ValueError, match="transformation_matrix should be square"):
            transforms.LinearTransformation(torch.rand(2, 3), torch.rand(5))

        with pytest.raises(ValueError, match="mean_vector should have the same length"):
            transforms.LinearTransformation(torch.rand(3, 3), torch.rand(5))

    @pytest.mark.parametrize(
        "inpt",
        [
            122 * torch.ones(1, 3, 8, 8),
            122.0 * torch.ones(1, 3, 8, 8),
            tv_tensors.Image(122 * torch.ones(1, 3, 8, 8)),
            PIL.Image.new("RGB", (8, 8), (122, 122, 122)),
        ],
    )
    def test__transform(self, inpt):
        v = 121 * torch.ones(3 * 8 * 8)
        m = torch.ones(3 * 8 * 8, 3 * 8 * 8)
        transform = transforms.LinearTransformation(m, v)

        if isinstance(inpt, PIL.Image.Image):
            with pytest.raises(TypeError, match="does not support PIL images"):
                transform(inpt)
        else:
            output = transform(inpt)
            assert isinstance(output, torch.Tensor)
            assert output.unique() == 3 * 8 * 8
            assert output.dtype == inpt.dtype


class TestRandomResize:
    def test__get_params(self):
        min_size = 3
        max_size = 6

        transform = transforms.RandomResize(min_size=min_size, max_size=max_size, antialias=True)

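        # RandomResize draws the target size uniformly from the half-open interval
        # [min_size, max_size), hence the strict upper bound asserted below.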
        for _ in range(10):
            params = transform._get_params([])

            assert isinstance(params["size"], list) and len(params["size"]) == 1
            size = params["size"][0]

            assert min_size <= size < max_size


class TestUniformTemporalSubsample:
    @pytest.mark.parametrize(
        "inpt",
        [
            torch.zeros(10, 3, 8, 8),
            torch.zeros(1, 10, 3, 8, 8),
            tv_tensors.Video(torch.zeros(1, 10, 3, 8, 8)),
        ],
    )
    def test__transform(self, inpt):
        num_samples = 5
        transform = transforms.UniformTemporalSubsample(num_samples)

        output = transform(inpt)
        assert type(output) is type(inpt)
        assert output.shape[-4] == num_samples
        assert output.dtype == inpt.dtype


@pytest.mark.parametrize("image_type", (PIL.Image, torch.Tensor, tv_tensors.Image))
@pytest.mark.parametrize("label_type", (torch.Tensor, int))
@pytest.mark.parametrize("dataset_return_type", (dict, tuple))
@pytest.mark.parametrize("to_tensor", (transforms.ToTensor, transforms.ToImage))
def test_classif_preset(image_type, label_type, dataset_return_type, to_tensor):
    image = tv_tensors.Image(torch.randint(0, 256, size=(1, 3, 250, 250), dtype=torch.uint8))
    if image_type is PIL.Image:
        image = to_pil_image(image[0])
    elif image_type is torch.Tensor:
        image = image.as_subclass(torch.Tensor)
        assert is_pure_tensor(image)

    label = 1 if label_type is int else torch.tensor([1])

    if dataset_return_type is dict:
        sample = {
            "image": image,
            "label": label,
        }
    else:
        sample = image, label

    if to_tensor is transforms.ToTensor:
        with pytest.warns(UserWarning, match="deprecated and will be removed"):
            to_tensor = to_tensor()
    else:
        to_tensor = to_tensor()

    t = transforms.Compose(
        [
            transforms.RandomResizedCrop((224, 224), antialias=True),
            transforms.RandomHorizontalFlip(p=1),
            transforms.RandAugment(),
            transforms.TrivialAugmentWide(),
            transforms.AugMix(),
            transforms.AutoAugment(),
            to_tensor,
            # TODO: ConvertImageDtype is a pass-through on PIL images, is that
            # intended? This results in a failure if we convert to tensor after
            # it, because the image would still be uint8 which makes Normalize
            # fail.
            transforms.ConvertImageDtype(torch.float),
            transforms.Normalize(mean=[0, 0, 0], std=[1, 1, 1]),
            transforms.RandomErasing(p=1),
        ]
    )

    out = t(sample)

    assert type(out) == type(sample)

    if dataset_return_type is tuple:
        out_image, out_label = out
    else:
        assert out.keys() == sample.keys()
        out_image, out_label = out.values()

    assert out_image.shape[-2:] == (224, 224)
    assert out_label == label


@pytest.mark.parametrize("image_type", (PIL.Image, torch.Tensor, tv_tensors.Image))
@pytest.mark.parametrize("data_augmentation", ("hflip", "lsj", "multiscale", "ssd", "ssdlite"))
@pytest.mark.parametrize("to_tensor", (transforms.ToTensor, transforms.ToImage))
@pytest.mark.parametrize("sanitize", (True, False))
def test_detection_preset(image_type, data_augmentation, to_tensor, sanitize):
    torch.manual_seed(0)

    if to_tensor is transforms.ToTensor:
        with pytest.warns(UserWarning, match="deprecated and will be removed"):
            to_tensor = to_tensor()
    else:
        to_tensor = to_tensor()

    if data_augmentation == "hflip":
        t = [
            transforms.RandomHorizontalFlip(p=1),
            to_tensor,
            transforms.ConvertImageDtype(torch.float),
        ]
    elif data_augmentation == "lsj":
        t = [
            transforms.ScaleJitter(target_size=(1024, 1024), antialias=True),
            # Note: replaced FixedSizeCrop with RandomCrop, because we're
            # leaving FixedSizeCrop in prototype for now, and it expects Label
            # classes which we won't release yet.
            # transforms.FixedSizeCrop(
            #     size=(1024, 1024), fill=defaultdict(lambda: (123.0, 117.0, 104.0), {tv_tensors.Mask: 0})
            # ),
            transforms.RandomCrop((1024, 1024), pad_if_needed=True),
            transforms.RandomHorizontalFlip(p=1),
            to_tensor,
            transforms.ConvertImageDtype(torch.float),
        ]
    elif data_augmentation == "multiscale":
        t = [
            transforms.RandomShortestSize(
                min_size=(480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800), max_size=1333, antialias=True
            ),
            transforms.RandomHorizontalFlip(p=1),
            to_tensor,
            transforms.ConvertImageDtype(torch.float),
        ]
    elif data_augmentation == "ssd":
        t = [
            transforms.RandomPhotometricDistort(p=1),
            transforms.RandomZoomOut(fill={"others": (123.0, 117.0, 104.0), tv_tensors.Mask: 0}, p=1),
            transforms.RandomIoUCrop(),
            transforms.RandomHorizontalFlip(p=1),
            to_tensor,
            transforms.ConvertImageDtype(torch.float),
        ]
    elif data_augmentation == "ssdlite":
        t = [
            transforms.RandomIoUCrop(),
            transforms.RandomHorizontalFlip(p=1),
            to_tensor,
            transforms.ConvertImageDtype(torch.float),
        ]
    if sanitize:
        t += [transforms.SanitizeBoundingBoxes()]
    t = transforms.Compose(t)

    num_boxes = 5
    H = W = 250

    image = tv_tensors.Image(torch.randint(0, 256, size=(1, 3, H, W), dtype=torch.uint8))
    if image_type is PIL.Image:
        image = to_pil_image(image[0])
    elif image_type is torch.Tensor:
        image = image.as_subclass(torch.Tensor)
        assert is_pure_tensor(image)

    label = torch.randint(0, 10, size=(num_boxes,))

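    # Build random but well-formed boxes: draw (x1, y1, w, h), convert to XYXY by adding the
    # offsets, then clamp the coordinates to the canvas.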
    boxes = torch.randint(0, min(H, W) // 2, size=(num_boxes, 4))
    boxes[:, 2:] += boxes[:, :2]
    boxes = boxes.clamp(min=0, max=min(H, W))
    boxes = tv_tensors.BoundingBoxes(boxes, format="XYXY", canvas_size=(H, W))

    masks = tv_tensors.Mask(torch.randint(0, 2, size=(num_boxes, H, W), dtype=torch.uint8))

    sample = {
        "image": image,
        "label": label,
        "boxes": boxes,
        "masks": masks,
    }

    out = t(sample)

    if isinstance(to_tensor, transforms.ToTensor) and image_type is not tv_tensors.Image:
        assert is_pure_tensor(out["image"])
    else:
        assert isinstance(out["image"], tv_tensors.Image)
    assert isinstance(out["label"], type(sample["label"]))

    num_boxes_expected = {
        # ssd and ssdlite contain RandomIoUCrop which may "remove" some bbox. It
        # doesn't remove them strictly speaking, it just marks some boxes as
        # degenerate and those boxes will be later removed by
        # SanitizeBoundingBoxes(), which we add to the pipelines if the sanitize
        # param is True.
        # Note that the values below are probably specific to the random seed
        # set above (which is fine).
        (True, "ssd"): 5,
        (True, "ssdlite"): 4,
    }.get((sanitize, data_augmentation), num_boxes)

    assert out["boxes"].shape[0] == out["masks"].shape[0] == out["label"].shape[0] == num_boxes_expected


@pytest.mark.parametrize("min_size", (1, 10))
@pytest.mark.parametrize("labels_getter", ("default", lambda inputs: inputs["labels"], None, lambda inputs: None))
@pytest.mark.parametrize("sample_type", (tuple, dict))
def test_sanitize_bounding_boxes(min_size, labels_getter, sample_type):
    if sample_type is tuple and not isinstance(labels_getter, str):
        # The "lambda inputs: inputs["labels"]" labels_getter used in this test
        # doesn't work if the input is a tuple.
        return

    H, W = 256, 128

    boxes_and_validity = [
        ([0, 1, 10, 1], False),  # Y1 == Y2
        ([0, 1, 0, 20], False),  # X1 == X2
        ([0, 0, min_size - 1, 10], False),  # W < min_size
        ([0, 0, 10, min_size - 1], False),  # H < min_size
        ([0, 0, 10, H + 1], False),  # Y2 > H
        ([0, 0, W + 1, 10], False),  # X2 > W
        ([-1, 1, 10, 20], False),  # any < 0
        ([0, 0, -1, 20], False),  # any < 0
        ([0, 0, -10, -1], False),  # any < 0
        ([0, 0, min_size, 10], True),  # W == min_size
        ([0, 0, 10, min_size], True),  # H == min_size
        ([0, 0, W, H], True),  # TODO: Is that actually OK?? Should it be -1?
        ([1, 1, 30, 20], True),
        ([0, 0, 10, 10], True),
        ([1, 1, 30, 20], True),
    ]
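
    # SanitizeBoundingBoxes keeps a box iff its width and height are both at least min_size and
    # the box lies fully within the canvas; the validity flags above encode that expectation.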

    random.shuffle(boxes_and_validity)  # For test robustness: mix order of wrong and correct cases
    boxes, is_valid_mask = zip(*boxes_and_validity)
    valid_indices = [i for (i, is_valid) in enumerate(is_valid_mask) if is_valid]

    boxes = torch.tensor(boxes)
    labels = torch.arange(boxes.shape[0])

    boxes = tv_tensors.BoundingBoxes(
        boxes,
        format=tv_tensors.BoundingBoxFormat.XYXY,
        canvas_size=(H, W),
    )

    masks = tv_tensors.Mask(torch.randint(0, 2, size=(boxes.shape[0], H, W)))
    whatever = torch.rand(10)
    input_img = torch.randint(0, 256, size=(1, 3, H, W), dtype=torch.uint8)
    sample = {
        "image": input_img,
        "labels": labels,
        "boxes": boxes,
        "whatever": whatever,
        "None": None,
        "masks": masks,
    }

    if sample_type is tuple:
        img = sample.pop("image")
        sample = (img, sample)

    out = transforms.SanitizeBoundingBoxes(min_size=min_size, labels_getter=labels_getter)(sample)

    if sample_type is tuple:
        out_image = out[0]
        out_labels = out[1]["labels"]
        out_boxes = out[1]["boxes"]
        out_masks = out[1]["masks"]
        out_whatever = out[1]["whatever"]
    else:
        out_image = out["image"]
        out_labels = out["labels"]
        out_boxes = out["boxes"]
        out_masks = out["masks"]
        out_whatever = out["whatever"]

    assert out_image is input_img
    assert out_whatever is whatever

    assert isinstance(out_boxes, tv_tensors.BoundingBoxes)
    assert isinstance(out_masks, tv_tensors.Mask)

    if labels_getter is None or (callable(labels_getter) and labels_getter({"labels": "blah"}) is None):
        assert out_labels is labels
    else:
        assert isinstance(out_labels, torch.Tensor)
        assert out_boxes.shape[0] == out_labels.shape[0] == out_masks.shape[0]
        # This works because we conveniently set labels to arange(num_boxes)
        assert out_labels.tolist() == valid_indices


def test_sanitize_bounding_boxes_no_label():
    # Non-regression test for https://github.com/pytorch/vision/issues/7878

    img = make_image()
    boxes = make_bounding_boxes()

    with pytest.raises(ValueError, match="or a two-tuple whose second item is a dict"):
        transforms.SanitizeBoundingBoxes()(img, boxes)

    out_img, out_boxes = transforms.SanitizeBoundingBoxes(labels_getter=None)(img, boxes)
    assert isinstance(out_img, tv_tensors.Image)
    assert isinstance(out_boxes, tv_tensors.BoundingBoxes)


def test_sanitize_bounding_boxes_errors():
    good_bbox = tv_tensors.BoundingBoxes(
        [[0, 0, 10, 10]],
        format=tv_tensors.BoundingBoxFormat.XYXY,
        canvas_size=(20, 20),
    )

    with pytest.raises(ValueError, match="min_size must be >= 1"):
        transforms.SanitizeBoundingBoxes(min_size=0)
    with pytest.raises(ValueError, match="labels_getter should either be 'default'"):
        transforms.SanitizeBoundingBoxes(labels_getter=12)

    with pytest.raises(ValueError, match="Could not infer where the labels are"):
        bad_labels_key = {"bbox": good_bbox, "BAD_KEY": torch.arange(good_bbox.shape[0])}
        transforms.SanitizeBoundingBoxes()(bad_labels_key)

    with pytest.raises(ValueError, match="must be a tensor"):
        not_a_tensor = {"bbox": good_bbox, "labels": torch.arange(good_bbox.shape[0]).tolist()}
        transforms.SanitizeBoundingBoxes()(not_a_tensor)

    with pytest.raises(ValueError, match="Number of boxes"):
        different_sizes = {"bbox": good_bbox, "labels": torch.arange(good_bbox.shape[0] + 3)}
        transforms.SanitizeBoundingBoxes()(different_sizes)


class TestLambda:
    inputs = pytest.mark.parametrize("input", [object(), torch.empty(()), np.empty(()), "string", 1, 0.0])

    @inputs
    def test_default(self, input):
        was_applied = False

        def was_applied_fn(input):
            nonlocal was_applied
            was_applied = True
            return input

        transform = transforms.Lambda(was_applied_fn)

        transform(input)

        assert was_applied

    @inputs
    def test_with_types(self, input):
        was_applied = False

        def was_applied_fn(input):
            nonlocal was_applied
            was_applied = True
            return input

        types = (torch.Tensor, np.ndarray)
        transform = transforms.Lambda(was_applied_fn, *types)

        transform(input)

        assert was_applied is isinstance(input, types)