import itertools
import pathlib
import pickle
import random

import numpy as np

import PIL.Image
import pytest
import torch
import torchvision.transforms.v2 as transforms

from common_utils import assert_equal, cpu_and_cuda
from torch.utils._pytree import tree_flatten, tree_unflatten
from torchvision import tv_tensors
from torchvision.ops.boxes import box_iou
from torchvision.transforms.functional import to_pil_image
from torchvision.transforms.v2 import functional as F
from torchvision.transforms.v2._utils import check_type, is_pure_tensor, query_chw
from transforms_v2_legacy_utils import (
    make_bounding_boxes,
    make_detection_mask,
    make_image,
    make_images,
    make_multiple_bounding_boxes,
    make_segmentation_mask,
    make_video,
    make_videos,
)


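# The "vanilla" helpers below yield plain tensors / PIL images, i.e. inputs without
# the tv_tensors wrapper, so the tests also exercise the pure-tensor code path.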
def make_vanilla_tensor_images(*args, **kwargs):
    for image in make_images(*args, **kwargs):
        if image.ndim > 3:
            continue
        yield image.data


def make_pil_images(*args, **kwargs):
    for image in make_vanilla_tensor_images(*args, **kwargs):
        yield to_pil_image(image)


def make_vanilla_tensor_bounding_boxes(*args, **kwargs):
    for bounding_boxes in make_multiple_bounding_boxes(*args, **kwargs):
        yield bounding_boxes.data


def parametrize(transforms_with_inputs):
    return pytest.mark.parametrize(
        ("transform", "input"),
        [
            pytest.param(
                transform,
                input,
                id=f"{type(transform).__name__}-{type(input).__module__}.{type(input).__name__}-{idx}",
            )
            for transform, inputs in transforms_with_inputs
            for idx, input in enumerate(inputs)
        ],
    )


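# The adapters below tweak the smoke-test input for transforms with extra requirements.
# Each adapter takes ``(transform, input, device)`` and returns the adapted input dict.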
def auto_augment_adapter(transform, input, device):
    adapted_input = {}
    image_or_video_found = False
    for key, value in input.items():
        if isinstance(value, (tv_tensors.BoundingBoxes, tv_tensors.Mask)):
            # AA transforms don't support bounding boxes or masks
            continue
        elif check_type(value, (tv_tensors.Image, tv_tensors.Video, is_pure_tensor, PIL.Image.Image)):
            if image_or_video_found:
                # AA transforms only support a single image or video
                continue
            image_or_video_found = True
        adapted_input[key] = value
    return adapted_input


def linear_transformation_adapter(transform, input, device):
    flat_inputs = list(input.values())
    c, h, w = query_chw(
        [
            item
            for item, needs_transform in zip(flat_inputs, transforms.Transform()._needs_transform_list(flat_inputs))
            if needs_transform
        ]
    )
    num_elements = c * h * w
    transform.transformation_matrix = torch.randn((num_elements, num_elements), device=device)
    transform.mean_vector = torch.randn((num_elements,), device=device)
    return {key: value for key, value in input.items() if not isinstance(value, PIL.Image.Image)}


def normalize_adapter(transform, input, device):
    adapted_input = {}
    for key, value in input.items():
        if isinstance(value, PIL.Image.Image):
            # normalize doesn't support PIL images
            continue
        elif check_type(value, (tv_tensors.Image, tv_tensors.Video, is_pure_tensor)):
            # normalize doesn't support integer images
            value = F.to_dtype(value, torch.float32, scale=True)
        adapted_input[key] = value
    return adapted_input


class TestSmoke:
    @pytest.mark.parametrize(
        ("transform", "adapter"),
        [
            (transforms.RandomErasing(p=1.0), None),
            (transforms.AugMix(), auto_augment_adapter),
            (transforms.AutoAugment(), auto_augment_adapter),
            (transforms.RandAugment(), auto_augment_adapter),
            (transforms.TrivialAugmentWide(), auto_augment_adapter),
            (transforms.ColorJitter(brightness=0.1, contrast=0.2, saturation=0.3, hue=0.15), None),
            (transforms.RandomAdjustSharpness(sharpness_factor=0.5, p=1.0), None),
            (transforms.RandomAutocontrast(p=1.0), None),
            (transforms.RandomEqualize(p=1.0), None),
            (transforms.RandomInvert(p=1.0), None),
            (transforms.RandomChannelPermutation(), None),
            (transforms.RandomPhotometricDistort(p=1.0), None),
            (transforms.RandomPosterize(bits=4, p=1.0), None),
            (transforms.RandomSolarize(threshold=0.5, p=1.0), None),
            (transforms.CenterCrop([16, 16]), None),
            (transforms.ElasticTransform(sigma=1.0), None),
            (transforms.Pad(4), None),
            (transforms.RandomAffine(degrees=30.0), None),
            (transforms.RandomCrop([16, 16], pad_if_needed=True), None),
            (transforms.RandomHorizontalFlip(p=1.0), None),
            (transforms.RandomPerspective(p=1.0), None),
            (transforms.RandomResize(min_size=10, max_size=20, antialias=True), None),
            (transforms.RandomResizedCrop([16, 16], antialias=True), None),
            (transforms.RandomRotation(degrees=30), None),
            (transforms.RandomShortestSize(min_size=10, antialias=True), None),
            (transforms.RandomVerticalFlip(p=1.0), None),
            (transforms.Resize([16, 16], antialias=True), None),
            (transforms.ScaleJitter((16, 16), scale_range=(0.8, 1.2), antialias=True), None),
            (transforms.ClampBoundingBoxes(), None),
            (transforms.ConvertBoundingBoxFormat(tv_tensors.BoundingBoxFormat.CXCYWH), None),
            (transforms.ConvertImageDtype(), None),
            (transforms.GaussianBlur(kernel_size=3), None),
            (
                transforms.LinearTransformation(
                    # These are just dummy values that will be filled in by the adapter. We can't define them
                    # upfront, because at this point we know neither the spatial size nor the device.
                    transformation_matrix=torch.empty((1, 1)),
                    mean_vector=torch.empty((1,)),
                ),
                linear_transformation_adapter,
            ),
            (transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), normalize_adapter),
            (transforms.ToDtype(torch.float64), None),
            (transforms.UniformTemporalSubsample(num_samples=2), None),
        ],
        ids=lambda transform: type(transform).__name__,
    )
    @pytest.mark.parametrize("container_type", [dict, list, tuple])
    @pytest.mark.parametrize(
        "image_or_video",
        [
            make_image(),
            make_video(),
            next(make_pil_images(color_spaces=["RGB"])),
            next(make_vanilla_tensor_images()),
        ],
    )
    @pytest.mark.parametrize("de_serialize", [lambda t: t, lambda t: pickle.loads(pickle.dumps(t))])
    @pytest.mark.parametrize("device", cpu_and_cuda())
    def test_common(self, transform, adapter, container_type, image_or_video, de_serialize, device):
        transform = de_serialize(transform)

        canvas_size = F.get_size(image_or_video)
        input = dict(
            image_or_video=image_or_video,
            image_tv_tensor=make_image(size=canvas_size),
            video_tv_tensor=make_video(size=canvas_size),
            image_pil=next(make_pil_images(sizes=[canvas_size], color_spaces=["RGB"])),
            bounding_boxes_xyxy=make_bounding_boxes(
                format=tv_tensors.BoundingBoxFormat.XYXY, canvas_size=canvas_size, batch_dims=(3,)
            ),
            bounding_boxes_xywh=make_bounding_boxes(
                format=tv_tensors.BoundingBoxFormat.XYWH, canvas_size=canvas_size, batch_dims=(4,)
            ),
            bounding_boxes_cxcywh=make_bounding_boxes(
                format=tv_tensors.BoundingBoxFormat.CXCYWH, canvas_size=canvas_size, batch_dims=(5,)
            ),
            bounding_boxes_degenerate_xyxy=tv_tensors.BoundingBoxes(
                [
                    [0, 0, 0, 0],  # no height or width
                    [0, 0, 0, 1],  # no height
                    [0, 0, 1, 0],  # no width
                    [2, 0, 1, 1],  # x1 > x2, y1 < y2
                    [0, 2, 1, 1],  # x1 < x2, y1 > y2
                    [2, 2, 1, 1],  # x1 > x2, y1 > y2
                ],
                format=tv_tensors.BoundingBoxFormat.XYXY,
                canvas_size=canvas_size,
            ),
            bounding_boxes_degenerate_xywh=tv_tensors.BoundingBoxes(
                [
                    [0, 0, 0, 0],  # no height or width
                    [0, 0, 0, 1],  # no height
                    [0, 0, 1, 0],  # no width
                    [0, 0, 1, -1],  # negative height
                    [0, 0, -1, 1],  # negative width
                    [0, 0, -1, -1],  # negative height and width
                ],
                format=tv_tensors.BoundingBoxFormat.XYWH,
                canvas_size=canvas_size,
            ),
            bounding_boxes_degenerate_cxcywh=tv_tensors.BoundingBoxes(
                [
                    [0, 0, 0, 0],  # no height or width
                    [0, 0, 0, 1],  # no height
                    [0, 0, 1, 0],  # no width
                    [0, 0, 1, -1],  # negative height
                    [0, 0, -1, 1],  # negative width
                    [0, 0, -1, -1],  # negative height and width
                ],
                format=tv_tensors.BoundingBoxFormat.CXCYWH,
                canvas_size=canvas_size,
            ),
            detection_mask=make_detection_mask(size=canvas_size),
            segmentation_mask=make_segmentation_mask(size=canvas_size),
            int=0,
            float=0.0,
            bool=True,
            none=None,
            str="str",
            path=pathlib.Path.cwd(),
            object=object(),
            tensor=torch.empty(5),
            array=np.empty(5),
        )
        if adapter is not None:
            input = adapter(transform, input, device)

        if container_type in {tuple, list}:
            input = container_type(input.values())

        input_flat, input_spec = tree_flatten(input)
        input_flat = [item.to(device) if isinstance(item, torch.Tensor) else item for item in input_flat]
        input = tree_unflatten(input_flat, input_spec)

        torch.manual_seed(0)
        output = transform(input)
        output_flat, output_spec = tree_flatten(output)

        assert output_spec == input_spec

        for output_item, input_item, should_be_transformed in zip(
            output_flat, input_flat, transforms.Transform()._needs_transform_list(input_flat)
        ):
            if should_be_transformed:
                assert type(output_item) is type(input_item)
            else:
                assert output_item is input_item

            if isinstance(input_item, tv_tensors.BoundingBoxes) and not isinstance(
                transform, transforms.ConvertBoundingBoxFormat
            ):
                assert output_item.format == input_item.format

        # Enforce that the transform does not turn a degenerate box, as marked by RandomIoUCrop (or any future
        # transform that does the same), back into a valid one.
        # TODO: we should test this against all of the degenerate boxes above
        for format in list(tv_tensors.BoundingBoxFormat):
            sample = dict(
                boxes=tv_tensors.BoundingBoxes([[0, 0, 0, 0]], format=format, canvas_size=(224, 244)),
                labels=torch.tensor([3]),
            )
            assert transforms.SanitizeBoundingBoxes()(sample)["boxes"].shape == (0, 4)

    @parametrize(
        [
            (
                transform,
                itertools.chain.from_iterable(
                    fn(
                        color_spaces=[
                            "GRAY",
                            "RGB",
                        ],
                        dtypes=[torch.uint8],
                        extra_dims=[(), (4,)],
                        **(dict(num_frames=[3]) if fn is make_videos else dict()),
                    )
                    for fn in [
                        make_images,
                        make_vanilla_tensor_images,
                        make_pil_images,
                        make_videos,
                    ]
                ),
            )
            for transform in (
                transforms.RandAugment(),
                transforms.TrivialAugmentWide(),
                transforms.AutoAugment(),
                transforms.AugMix(),
            )
        ]
    )
    def test_auto_augment(self, transform, input):
        transform(input)

    @parametrize(
        [
            (
                transforms.Normalize(mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0]),
                itertools.chain.from_iterable(
                    fn(color_spaces=["RGB"], dtypes=[torch.float32])
                    for fn in [
                        make_images,
                        make_vanilla_tensor_images,
                        make_videos,
                    ]
                ),
            ),
        ]
    )
    def test_normalize(self, transform, input):
        transform(input)


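# The heuristic under test: v2 transforms only treat the first pure tensor in a sample
# as an image if no other image or video is present; all remaining pure tensors are
# passed through untouched.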
@pytest.mark.parametrize(
    "flat_inputs",
    itertools.permutations(
        [
            next(make_vanilla_tensor_images()),
            next(make_vanilla_tensor_images()),
            next(make_pil_images()),
            make_image(),
            next(make_videos()),
        ],
        3,
    ),
)
def test_pure_tensor_heuristic(flat_inputs):
    def split_on_pure_tensor(to_split):
        # This takes a sequence that is structurally aligned with `flat_inputs` and splits its items into three parts:
        # 1. The first pure tensor. If none is present, this will be `None`
        # 2. A list of the remaining pure tensors
        # 3. A list of all other items
        pure_tensors = []
        others = []
        # Splitting always happens on the original `flat_inputs` so that erroneous type changes made by the
        # transform cannot affect the splitting.
        for item, inpt in zip(to_split, flat_inputs):
            (pure_tensors if is_pure_tensor(inpt) else others).append(item)
        return pure_tensors[0] if pure_tensors else None, pure_tensors[1:], others

    class CopyCloneTransform(transforms.Transform):
        def _transform(self, inpt, params):
            return inpt.clone() if isinstance(inpt, torch.Tensor) else inpt.copy()

        @staticmethod
        def was_applied(output, inpt):
            identity = output is inpt
            if identity:
                return False

            # Make sure nothing fishy is going on
            assert_equal(output, inpt)
            return True

    first_pure_tensor_input, other_pure_tensor_inputs, other_inputs = split_on_pure_tensor(flat_inputs)

    transform = CopyCloneTransform()
    transformed_sample = transform(flat_inputs)

    first_pure_tensor_output, other_pure_tensor_outputs, other_outputs = split_on_pure_tensor(transformed_sample)

    if first_pure_tensor_input is not None:
        if other_inputs:
            assert not transform.was_applied(first_pure_tensor_output, first_pure_tensor_input)
        else:
            assert transform.was_applied(first_pure_tensor_output, first_pure_tensor_input)

    for output, inpt in zip(other_pure_tensor_outputs, other_pure_tensor_inputs):
        assert not transform.was_applied(output, inpt)

    for input, output in zip(other_inputs, other_outputs):
        assert transform.was_applied(output, input)


class TestElasticTransform:
    def test_assertions(self):
        with pytest.raises(TypeError, match="alpha should be a number or a sequence of numbers"):
            transforms.ElasticTransform({})

        with pytest.raises(ValueError, match="alpha is a sequence its length should be 1 or 2"):
            transforms.ElasticTransform([1.0, 2.0, 3.0])

        with pytest.raises(TypeError, match="sigma should be a number or a sequence of numbers"):
            transforms.ElasticTransform(1.0, {})

        with pytest.raises(ValueError, match="sigma is a sequence its length should be 1 or 2"):
            transforms.ElasticTransform(1.0, [1.0, 2.0, 3.0])

        with pytest.raises(TypeError, match="Got inappropriate fill arg"):
            transforms.ElasticTransform(1.0, 2.0, fill="abc")

    def test__get_params(self):
        alpha = 2.0
        sigma = 3.0
        transform = transforms.ElasticTransform(alpha, sigma)

        h, w = size = (24, 32)
        image = make_image(size)

        params = transform._get_params([image])

        displacement = params["displacement"]
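        # The displacement field produced by ElasticTransform is bounded per axis by
        # alpha / (w, h), which is what the asserts below check.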
        assert displacement.shape == (1, h, w, 2)
        assert (-alpha / w <= displacement[0, ..., 0]).all() and (displacement[0, ..., 0] <= alpha / w).all()
        assert (-alpha / h <= displacement[0, ..., 1]).all() and (displacement[0, ..., 1] <= alpha / h).all()


class TestTransform:
    @pytest.mark.parametrize(
        "inpt_type",
        [torch.Tensor, PIL.Image.Image, tv_tensors.Image, np.ndarray, tv_tensors.BoundingBoxes, str, int],
    )
    def test_check_transformed_types(self, inpt_type, mocker):
        # This test ensures that we correctly handle which types to transform and which to bypass
        t = transforms.Transform()
        inpt = mocker.MagicMock(spec=inpt_type)

        if inpt_type in (np.ndarray, str, int):
            output = t(inpt)
            assert output is inpt
        else:
            with pytest.raises(NotImplementedError):
                t(inpt)


class TestToImage:
    @pytest.mark.parametrize(
        "inpt_type",
        [torch.Tensor, PIL.Image.Image, tv_tensors.Image, np.ndarray, tv_tensors.BoundingBoxes, str, int],
    )
    def test__transform(self, inpt_type, mocker):
        fn = mocker.patch(
            "torchvision.transforms.v2.functional.to_image",
            return_value=torch.rand(1, 3, 8, 8),
        )

        inpt = mocker.MagicMock(spec=inpt_type)
        transform = transforms.ToImage()
        transform(inpt)
        if inpt_type in (tv_tensors.BoundingBoxes, tv_tensors.Image, str, int):
            assert fn.call_count == 0
        else:
            fn.assert_called_once_with(inpt)


class TestToPILImage:
    @pytest.mark.parametrize(
        "inpt_type",
        [torch.Tensor, PIL.Image.Image, tv_tensors.Image, np.ndarray, tv_tensors.BoundingBoxes, str, int],
    )
    def test__transform(self, inpt_type, mocker):
        fn = mocker.patch("torchvision.transforms.v2.functional.to_pil_image")

        inpt = mocker.MagicMock(spec=inpt_type)
        transform = transforms.ToPILImage()
        transform(inpt)
        if inpt_type in (PIL.Image.Image, tv_tensors.BoundingBoxes, str, int):
            assert fn.call_count == 0
        else:
            fn.assert_called_once_with(inpt, mode=transform.mode)


class TestToTensor:
    @pytest.mark.parametrize(
        "inpt_type",
        [torch.Tensor, PIL.Image.Image, tv_tensors.Image, np.ndarray, tv_tensors.BoundingBoxes, str, int],
    )
    def test__transform(self, inpt_type, mocker):
        fn = mocker.patch("torchvision.transforms.functional.to_tensor")

        inpt = mocker.MagicMock(spec=inpt_type)
        with pytest.warns(UserWarning, match="deprecated and will be removed"):
            transform = transforms.ToTensor()
        transform(inpt)
        if inpt_type in (tv_tensors.Image, torch.Tensor, tv_tensors.BoundingBoxes, str, int):
            assert fn.call_count == 0
        else:
            fn.assert_called_once_with(inpt)


class TestContainers:
    @pytest.mark.parametrize("transform_cls", [transforms.Compose, transforms.RandomChoice, transforms.RandomOrder])
    def test_assertions(self, transform_cls):
        with pytest.raises(TypeError, match="Argument transforms should be a sequence of callables"):
            transform_cls(transforms.RandomCrop(28))

    @pytest.mark.parametrize("transform_cls", [transforms.Compose, transforms.RandomChoice, transforms.RandomOrder])
    @pytest.mark.parametrize(
        "trfms",
        [
            [transforms.Pad(2), transforms.RandomCrop(28)],
            [lambda x: 2.0 * x, transforms.Pad(2), transforms.RandomCrop(28)],
            [transforms.Pad(2), lambda x: 2.0 * x, transforms.RandomCrop(28)],
        ],
    )
    def test_ctor(self, transform_cls, trfms):
        c = transform_cls(trfms)
        inpt = torch.rand(1, 3, 32, 32)
        output = c(inpt)
        assert isinstance(output, torch.Tensor)
        assert output.ndim == 4


class TestRandomChoice:
    def test_assertions(self):
        with pytest.raises(ValueError, match="Length of p doesn't match the number of transforms"):
            transforms.RandomChoice([transforms.Pad(2), transforms.RandomCrop(28)], p=[1])


class TestRandomIoUCrop:
    @pytest.mark.parametrize("device", cpu_and_cuda())
    @pytest.mark.parametrize("options", [[0.5, 0.9], [2.0]])
    def test__get_params(self, device, options):
        orig_h, orig_w = size = (24, 32)
        image = make_image(size)
        bboxes = tv_tensors.BoundingBoxes(
            torch.tensor([[1, 1, 10, 10], [20, 20, 23, 23], [1, 20, 10, 23], [20, 1, 23, 10]]),
            format="XYXY",
            canvas_size=size,
            device=device,
        )
        sample = [image, bboxes]

        transform = transforms.RandomIoUCrop(sampler_options=options)

        n_samples = 5
        for _ in range(n_samples):

            params = transform._get_params(sample)

            if options == [2.0]:
                assert len(params) == 0
                return

            assert len(params["is_within_crop_area"]) > 0
            assert params["is_within_crop_area"].dtype == torch.bool

            assert int(transform.min_scale * orig_h) <= params["height"] <= int(transform.max_scale * orig_h)
            assert int(transform.min_scale * orig_w) <= params["width"] <= int(transform.max_scale * orig_w)

            left, top = params["left"], params["top"]
            new_h, new_w = params["height"], params["width"]
            ious = box_iou(
                bboxes,
                torch.tensor([[left, top, left + new_w, top + new_h]], dtype=bboxes.dtype, device=bboxes.device),
            )
            assert ious.max() >= options[0] or ious.max() >= options[1], f"{ious} vs {options}"

    def test__transform_empty_params(self, mocker):
        transform = transforms.RandomIoUCrop(sampler_options=[2.0])
        image = tv_tensors.Image(torch.rand(1, 3, 4, 4))
        bboxes = tv_tensors.BoundingBoxes(torch.tensor([[1, 1, 2, 2]]), format="XYXY", canvas_size=(4, 4))
        label = torch.tensor([1])
        sample = [image, bboxes, label]
        # Let's mock transform._get_params to control the output:
        transform._get_params = mocker.MagicMock(return_value={})
        output = transform(sample)
        torch.testing.assert_close(output, sample)

    def test_forward_assertion(self):
        transform = transforms.RandomIoUCrop()
        with pytest.raises(
            TypeError,
            match="requires input sample to contain tensor or PIL images and bounding boxes",
        ):
            transform(torch.tensor(0))

    def test__transform(self, mocker):
        transform = transforms.RandomIoUCrop()

        size = (32, 24)
        image = make_image(size)
        bboxes = make_bounding_boxes(format="XYXY", canvas_size=size, batch_dims=(6,))
        masks = make_detection_mask(size, num_objects=6)

        sample = [image, bboxes, masks]

        is_within_crop_area = torch.tensor([0, 1, 0, 1, 0, 1], dtype=torch.bool)

        params = dict(top=1, left=2, height=12, width=12, is_within_crop_area=is_within_crop_area)
        transform._get_params = mocker.MagicMock(return_value=params)
        output = transform(sample)

        # Boxes that fall outside the crop area should have been zeroed out (marked as degenerate):
        output_bboxes = output[1]
        assert isinstance(output_bboxes, tv_tensors.BoundingBoxes)
        assert (output_bboxes[~is_within_crop_area] == 0).all()

        output_masks = output[2]
        assert isinstance(output_masks, tv_tensors.Mask)


class TestScaleJitter:
    def test__get_params(self):
        canvas_size = (24, 32)
        target_size = (16, 12)
        scale_range = (0.5, 1.5)

        transform = transforms.ScaleJitter(target_size=target_size, scale_range=scale_range)

        sample = make_image(canvas_size)

        n_samples = 5
        for _ in range(n_samples):

            params = transform._get_params([sample])

            assert "size" in params
            size = params["size"]

            assert isinstance(size, tuple) and len(size) == 2
            height, width = size

            r_min = min(target_size[1] / canvas_size[0], target_size[0] / canvas_size[1]) * scale_range[0]
            r_max = min(target_size[1] / canvas_size[0], target_size[0] / canvas_size[1]) * scale_range[1]

            assert int(canvas_size[0] * r_min) <= height <= int(canvas_size[0] * r_max)
            assert int(canvas_size[1] * r_min) <= width <= int(canvas_size[1] * r_max)


class TestRandomShortestSize:
    @pytest.mark.parametrize("min_size,max_size", [([5, 9], 20), ([5, 9], None)])
    def test__get_params(self, min_size, max_size):
        canvas_size = (3, 10)

        transform = transforms.RandomShortestSize(min_size=min_size, max_size=max_size, antialias=True)

        sample = make_image(canvas_size)
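        # With max_size set, both edges must end up capped at max_size; without it, the
        # shorter edge must be one of the candidate min_size values (asserted below).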
        params = transform._get_params([sample])

        assert "size" in params
        size = params["size"]

        assert isinstance(size, tuple) and len(size) == 2

        longer = max(size)
        shorter = min(size)
        if max_size is not None:
            assert longer <= max_size
            assert shorter <= max_size
        else:
            assert shorter in min_size


class TestLinearTransformation:
    def test_assertions(self):
        with pytest.raises(ValueError, match="transformation_matrix should be square"):
            transforms.LinearTransformation(torch.rand(2, 3), torch.rand(5))

        with pytest.raises(ValueError, match="mean_vector should have the same length"):
            transforms.LinearTransformation(torch.rand(3, 3), torch.rand(5))

    @pytest.mark.parametrize(
        "inpt",
        [
            122 * torch.ones(1, 3, 8, 8),
            122.0 * torch.ones(1, 3, 8, 8),
            tv_tensors.Image(122 * torch.ones(1, 3, 8, 8)),
            PIL.Image.new("RGB", (8, 8), (122, 122, 122)),
        ],
    )
    def test__transform(self, inpt):
        v = 121 * torch.ones(3 * 8 * 8)
        m = torch.ones(3 * 8 * 8, 3 * 8 * 8)
        transform = transforms.LinearTransformation(m, v)

        if isinstance(inpt, PIL.Image.Image):
            with pytest.raises(TypeError, match="does not support PIL images"):
                transform(inpt)
        else:
            output = transform(inpt)
            assert isinstance(output, torch.Tensor)
            assert output.unique() == 3 * 8 * 8
            assert output.dtype == inpt.dtype


class TestRandomResize:
    def test__get_params(self):
        min_size = 3
        max_size = 6

        transform = transforms.RandomResize(min_size=min_size, max_size=max_size, antialias=True)

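        # RandomResize samples an integer size uniformly from [min_size, max_size), so
        # every draw must satisfy the half-open bounds asserted below.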
        for _ in range(10):
            params = transform._get_params([])

            assert isinstance(params["size"], list) and len(params["size"]) == 1
            size = params["size"][0]

            assert min_size <= size < max_size


class TestUniformTemporalSubsample:
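    # UniformTemporalSubsample keeps ``num_samples`` evenly spaced frames along the
    # temporal (-4) dimension while preserving the input type and dtype.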
    @pytest.mark.parametrize(
        "inpt",
        [
            torch.zeros(10, 3, 8, 8),
            torch.zeros(1, 10, 3, 8, 8),
            tv_tensors.Video(torch.zeros(1, 10, 3, 8, 8)),
        ],
    )
    def test__transform(self, inpt):
        num_samples = 5
        transform = transforms.UniformTemporalSubsample(num_samples)

        output = transform(inpt)
        assert type(output) is type(inpt)
        assert output.shape[-4] == num_samples
        assert output.dtype == inpt.dtype


@pytest.mark.parametrize("image_type", (PIL.Image, torch.Tensor, tv_tensors.Image))
@pytest.mark.parametrize("label_type", (torch.Tensor, int))
@pytest.mark.parametrize("dataset_return_type", (dict, tuple))
@pytest.mark.parametrize("to_tensor", (transforms.ToTensor, transforms.ToImage))
def test_classif_preset(image_type, label_type, dataset_return_type, to_tensor):
    image = tv_tensors.Image(torch.randint(0, 256, size=(1, 3, 250, 250), dtype=torch.uint8))
    if image_type is PIL.Image:
        image = to_pil_image(image[0])
    elif image_type is torch.Tensor:
        image = image.as_subclass(torch.Tensor)
        assert is_pure_tensor(image)

    label = 1 if label_type is int else torch.tensor([1])

    if dataset_return_type is dict:
        sample = {
            "image": image,
            "label": label,
        }
    else:
        sample = image, label

    if to_tensor is transforms.ToTensor:
        with pytest.warns(UserWarning, match="deprecated and will be removed"):
            to_tensor = to_tensor()
    else:
        to_tensor = to_tensor()

    t = transforms.Compose(
        [
            transforms.RandomResizedCrop((224, 224), antialias=True),
            transforms.RandomHorizontalFlip(p=1),
            transforms.RandAugment(),
            transforms.TrivialAugmentWide(),
            transforms.AugMix(),
            transforms.AutoAugment(),
            to_tensor,
            # TODO: ConvertImageDtype is a pass-through on PIL images. Is that
            # intended? It causes a failure if we convert to tensor after it,
            # because the image would still be uint8, which makes Normalize fail.
            transforms.ConvertImageDtype(torch.float),
            transforms.Normalize(mean=[0, 0, 0], std=[1, 1, 1]),
            transforms.RandomErasing(p=1),
        ]
    )

    out = t(sample)

    assert type(out) == type(sample)

    if dataset_return_type is tuple:
        out_image, out_label = out
    else:
        assert out.keys() == sample.keys()
        out_image, out_label = out.values()

    assert out_image.shape[-2:] == (224, 224)
    assert out_label == label


@pytest.mark.parametrize("image_type", (PIL.Image, torch.Tensor, tv_tensors.Image))
@pytest.mark.parametrize("data_augmentation", ("hflip", "lsj", "multiscale", "ssd", "ssdlite"))
@pytest.mark.parametrize("to_tensor", (transforms.ToTensor, transforms.ToImage))
@pytest.mark.parametrize("sanitize", (True, False))
def test_detection_preset(image_type, data_augmentation, to_tensor, sanitize):
    torch.manual_seed(0)

    if to_tensor is transforms.ToTensor:
        with pytest.warns(UserWarning, match="deprecated and will be removed"):
            to_tensor = to_tensor()
    else:
        to_tensor = to_tensor()

    if data_augmentation == "hflip":
        t = [
            transforms.RandomHorizontalFlip(p=1),
            to_tensor,
            transforms.ConvertImageDtype(torch.float),
        ]
    elif data_augmentation == "lsj":
        t = [
            transforms.ScaleJitter(target_size=(1024, 1024), antialias=True),
            # Note: replaced FixedSizeCrop with RandomCrop, because we're
            # leaving FixedSizeCrop in prototype for now, and it expects Label
            # classes which we won't release yet.
            # transforms.FixedSizeCrop(
            #     size=(1024, 1024), fill=defaultdict(lambda: (123.0, 117.0, 104.0), {tv_tensors.Mask: 0})
            # ),
            transforms.RandomCrop((1024, 1024), pad_if_needed=True),
            transforms.RandomHorizontalFlip(p=1),
            to_tensor,
            transforms.ConvertImageDtype(torch.float),
        ]
    elif data_augmentation == "multiscale":
        t = [
            transforms.RandomShortestSize(
                min_size=(480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800), max_size=1333, antialias=True
            ),
            transforms.RandomHorizontalFlip(p=1),
            to_tensor,
            transforms.ConvertImageDtype(torch.float),
        ]
    elif data_augmentation == "ssd":
        t = [
            transforms.RandomPhotometricDistort(p=1),
            transforms.RandomZoomOut(fill={"others": (123.0, 117.0, 104.0), tv_tensors.Mask: 0}, p=1),
            transforms.RandomIoUCrop(),
            transforms.RandomHorizontalFlip(p=1),
            to_tensor,
            transforms.ConvertImageDtype(torch.float),
        ]
    elif data_augmentation == "ssdlite":
        t = [
            transforms.RandomIoUCrop(),
            transforms.RandomHorizontalFlip(p=1),
            to_tensor,
            transforms.ConvertImageDtype(torch.float),
        ]
    if sanitize:
        t += [transforms.SanitizeBoundingBoxes()]
    t = transforms.Compose(t)

    num_boxes = 5
    H = W = 250

    image = tv_tensors.Image(torch.randint(0, 256, size=(1, 3, H, W), dtype=torch.uint8))
    if image_type is PIL.Image:
        image = to_pil_image(image[0])
    elif image_type is torch.Tensor:
        image = image.as_subclass(torch.Tensor)
        assert is_pure_tensor(image)

    label = torch.randint(0, 10, size=(num_boxes,))

    boxes = torch.randint(0, min(H, W) // 2, size=(num_boxes, 4))
    boxes[:, 2:] += boxes[:, :2]
    boxes = boxes.clamp(min=0, max=min(H, W))
    boxes = tv_tensors.BoundingBoxes(boxes, format="XYXY", canvas_size=(H, W))

    masks = tv_tensors.Mask(torch.randint(0, 2, size=(num_boxes, H, W), dtype=torch.uint8))

    sample = {
        "image": image,
        "label": label,
        "boxes": boxes,
        "masks": masks,
    }

    out = t(sample)

    if isinstance(to_tensor, transforms.ToTensor) and image_type is not tv_tensors.Image:
        assert is_pure_tensor(out["image"])
    else:
        assert isinstance(out["image"], tv_tensors.Image)
    assert isinstance(out["label"], type(sample["label"]))

    num_boxes_expected = {
        # ssd and ssdlite contain RandomIoUCrop which may "remove" some boxes.
        # Strictly speaking it doesn't remove them, it just marks them as
        # degenerate, and those boxes are later removed by
        # SanitizeBoundingBoxes(), which we add to the pipelines if the sanitize
        # param is True.
        # Note that the values below are probably specific to the random seed
        # set above (which is fine).
        (True, "ssd"): 5,
        (True, "ssdlite"): 4,
    }.get((sanitize, data_augmentation), num_boxes)

    assert out["boxes"].shape[0] == out["masks"].shape[0] == out["label"].shape[0] == num_boxes_expected


@pytest.mark.parametrize("min_size", (1, 10))
@pytest.mark.parametrize("labels_getter", ("default", lambda inputs: inputs["labels"], None, lambda inputs: None))
@pytest.mark.parametrize("sample_type", (tuple, dict))
def test_sanitize_bounding_boxes(min_size, labels_getter, sample_type):
    if sample_type is tuple and not isinstance(labels_getter, str):
        # The "lambda inputs: inputs["labels"]" labels_getter used in this test
        # doesn't work if the input is a tuple.
        return

    H, W = 256, 128

    boxes_and_validity = [
        ([0, 1, 10, 1], False),  # Y1 == Y2
        ([0, 1, 0, 20], False),  # X1 == X2
        ([0, 0, min_size - 1, 10], False),  # W < min_size
        ([0, 0, 10, min_size - 1], False),  # H < min_size
        ([0, 0, 10, H + 1], False),  # Y2 > H
        ([0, 0, W + 1, 10], False),  # X2 > W
        ([-1, 1, 10, 20], False),  # any < 0
        ([0, 0, -1, 20], False),  # any < 0
        ([0, 0, -10, -1], False),  # any < 0
        ([0, 0, min_size, 10], True),  # W == min_size
        ([0, 0, 10, min_size], True),  # H == min_size
        ([0, 0, W, H], True),  # TODO: Is that actually OK?? Should it be -1?
        ([1, 1, 30, 20], True),
        ([0, 0, 10, 10], True),
        ([1, 1, 30, 20], True),
    ]
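    # SanitizeBoundingBoxes is expected to drop boxes that are degenerate, smaller than
    # min_size, or (partially) outside the canvas; the validity flags above encode the
    # expected outcome for each case.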

    random.shuffle(boxes_and_validity)  # For test robustness: mix order of wrong and correct cases
    boxes, is_valid_mask = zip(*boxes_and_validity)
    valid_indices = [i for (i, is_valid) in enumerate(is_valid_mask) if is_valid]

    boxes = torch.tensor(boxes)
    labels = torch.arange(boxes.shape[0])

    boxes = tv_tensors.BoundingBoxes(
        boxes,
        format=tv_tensors.BoundingBoxFormat.XYXY,
        canvas_size=(H, W),
    )

    masks = tv_tensors.Mask(torch.randint(0, 2, size=(boxes.shape[0], H, W)))
    whatever = torch.rand(10)
    input_img = torch.randint(0, 256, size=(1, 3, H, W), dtype=torch.uint8)
    sample = {
        "image": input_img,
        "labels": labels,
        "boxes": boxes,
        "whatever": whatever,
        "None": None,
        "masks": masks,
    }

    if sample_type is tuple:
        img = sample.pop("image")
        sample = (img, sample)

    out = transforms.SanitizeBoundingBoxes(min_size=min_size, labels_getter=labels_getter)(sample)

    if sample_type is tuple:
        out_image = out[0]
        out_labels = out[1]["labels"]
        out_boxes = out[1]["boxes"]
        out_masks = out[1]["masks"]
        out_whatever = out[1]["whatever"]
    else:
        out_image = out["image"]
        out_labels = out["labels"]
        out_boxes = out["boxes"]
        out_masks = out["masks"]
        out_whatever = out["whatever"]

    assert out_image is input_img
    assert out_whatever is whatever

    assert isinstance(out_boxes, tv_tensors.BoundingBoxes)
    assert isinstance(out_masks, tv_tensors.Mask)

    if labels_getter is None or (callable(labels_getter) and labels_getter({"labels": "blah"}) is None):
        assert out_labels is labels
    else:
        assert isinstance(out_labels, torch.Tensor)
        assert out_boxes.shape[0] == out_labels.shape[0] == out_masks.shape[0]
        # This works because we conveniently set labels to arange(num_boxes)
        assert out_labels.tolist() == valid_indices


def test_sanitize_bounding_boxes_no_label():
    # Non-regression test for https://github.com/pytorch/vision/issues/7878

    img = make_image()
    boxes = make_bounding_boxes()

    with pytest.raises(ValueError, match="or a two-tuple whose second item is a dict"):
        transforms.SanitizeBoundingBoxes()(img, boxes)

    out_img, out_boxes = transforms.SanitizeBoundingBoxes(labels_getter=None)(img, boxes)
    assert isinstance(out_img, tv_tensors.Image)
    assert isinstance(out_boxes, tv_tensors.BoundingBoxes)


def test_sanitize_bounding_boxes_errors():
    good_bbox = tv_tensors.BoundingBoxes(
        [[0, 0, 10, 10]],
        format=tv_tensors.BoundingBoxFormat.XYXY,
        canvas_size=(20, 20),
    )

    with pytest.raises(ValueError, match="min_size must be >= 1"):
        transforms.SanitizeBoundingBoxes(min_size=0)
    with pytest.raises(ValueError, match="labels_getter should either be 'default'"):
        transforms.SanitizeBoundingBoxes(labels_getter=12)

    with pytest.raises(ValueError, match="Could not infer where the labels are"):
        bad_labels_key = {"bbox": good_bbox, "BAD_KEY": torch.arange(good_bbox.shape[0])}
        transforms.SanitizeBoundingBoxes()(bad_labels_key)

    with pytest.raises(ValueError, match="must be a tensor"):
        not_a_tensor = {"bbox": good_bbox, "labels": torch.arange(good_bbox.shape[0]).tolist()}
        transforms.SanitizeBoundingBoxes()(not_a_tensor)

    with pytest.raises(ValueError, match="Number of boxes"):
        different_sizes = {"bbox": good_bbox, "labels": torch.arange(good_bbox.shape[0] + 3)}
        transforms.SanitizeBoundingBoxes()(different_sizes)


class TestLambda:
    inputs = pytest.mark.parametrize("input", [object(), torch.empty(()), np.empty(()), "string", 1, 0.0])

    @inputs
    def test_default(self, input):
        was_applied = False

        def was_applied_fn(input):
            nonlocal was_applied
            was_applied = True
            return input

        transform = transforms.Lambda(was_applied_fn)

        transform(input)

        assert was_applied

    @inputs
    def test_with_types(self, input):
        was_applied = False

        def was_applied_fn(input):
            nonlocal was_applied
            was_applied = True
            return input

        types = (torch.Tensor, np.ndarray)
        transform = transforms.Lambda(was_applied_fn, *types)

        transform(input)

        assert was_applied is isinstance(input, types)