import itertools
import pathlib
import pickle
import random
import warnings

import numpy as np

import PIL.Image
import pytest
import torch
import torchvision.transforms.v2 as transforms

from common_utils import assert_equal, cpu_and_cuda
from torch.utils._pytree import tree_flatten, tree_unflatten
from torchvision import tv_tensors
from torchvision.ops.boxes import box_iou
from torchvision.transforms.functional import to_pil_image
from torchvision.transforms.v2 import functional as F
from torchvision.transforms.v2._utils import check_type, is_pure_tensor, query_chw
from transforms_v2_legacy_utils import (
    make_bounding_boxes,
    make_detection_mask,
    make_image,
    make_images,
    make_multiple_bounding_boxes,
    make_segmentation_mask,
    make_video,
    make_videos,
)


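# Helpers deriving plain-tensor and PIL inputs from the legacy tv_tensor factories,
# so transforms can also be exercised on "vanilla" (non-tv_tensor) inputs.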
def make_vanilla_tensor_images(*args, **kwargs):
    for image in make_images(*args, **kwargs):
        if image.ndim > 3:
            continue
        yield image.data


def make_pil_images(*args, **kwargs):
    for image in make_vanilla_tensor_images(*args, **kwargs):
        yield to_pil_image(image)


def make_vanilla_tensor_bounding_boxes(*args, **kwargs):
    for bounding_boxes in make_multiple_bounding_boxes(*args, **kwargs):
        yield bounding_boxes.data


def parametrize(transforms_with_inputs):
    return pytest.mark.parametrize(
        ("transform", "input"),
        [
            pytest.param(
                transform,
                input,
                id=f"{type(transform).__name__}-{type(input).__module__}.{type(input).__name__}-{idx}",
            )
            for transform, inputs in transforms_with_inputs
            for idx, input in enumerate(inputs)
        ],
    )


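# Adapters receive (transform, input, device) and return an adapted copy of the input
# dict, dropping or converting entries the given transform cannot handle in test_common.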
def auto_augment_adapter(transform, input, device):
    adapted_input = {}
    image_or_video_found = False
    for key, value in input.items():
        if isinstance(value, (tv_tensors.BoundingBoxes, tv_tensors.Mask)):
            # AA transforms don't support bounding boxes or masks
            continue
        elif check_type(value, (tv_tensors.Image, tv_tensors.Video, is_pure_tensor, PIL.Image.Image)):
            if image_or_video_found:
                # AA transforms only support a single image or video
                continue
            image_or_video_found = True
        adapted_input[key] = value
    return adapted_input


def linear_transformation_adapter(transform, input, device):
    flat_inputs = list(input.values())
    c, h, w = query_chw(
        [
            item
            for item, needs_transform in zip(flat_inputs, transforms.Transform()._needs_transform_list(flat_inputs))
            if needs_transform
        ]
    )
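    # LinearTransformation flattens each input to a C*H*W vector, so the matrix must be
    # square with that many rows and columns, and the mean vector must match its length.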
    num_elements = c * h * w
    transform.transformation_matrix = torch.randn((num_elements, num_elements), device=device)
    transform.mean_vector = torch.randn((num_elements,), device=device)
    return {key: value for key, value in input.items() if not isinstance(value, PIL.Image.Image)}


def normalize_adapter(transform, input, device):
    adapted_input = {}
    for key, value in input.items():
        if isinstance(value, PIL.Image.Image):
            # normalize doesn't support PIL images
            continue
        elif check_type(value, (tv_tensors.Image, tv_tensors.Video, is_pure_tensor)):
            # normalize doesn't support integer images
            value = F.to_dtype(value, torch.float32, scale=True)
        adapted_input[key] = value
    return adapted_input


class TestSmoke:
    @pytest.mark.parametrize(
        ("transform", "adapter"),
        [
            (transforms.RandomErasing(p=1.0), None),
            (transforms.AugMix(), auto_augment_adapter),
            (transforms.AutoAugment(), auto_augment_adapter),
            (transforms.RandAugment(), auto_augment_adapter),
            (transforms.TrivialAugmentWide(), auto_augment_adapter),
            (transforms.ColorJitter(brightness=0.1, contrast=0.2, saturation=0.3, hue=0.15), None),
            (transforms.RandomAdjustSharpness(sharpness_factor=0.5, p=1.0), None),
            (transforms.RandomAutocontrast(p=1.0), None),
            (transforms.RandomEqualize(p=1.0), None),
            (transforms.RandomInvert(p=1.0), None),
            (transforms.RandomChannelPermutation(), None),
            (transforms.RandomPhotometricDistort(p=1.0), None),
            (transforms.RandomPosterize(bits=4, p=1.0), None),
            (transforms.RandomSolarize(threshold=0.5, p=1.0), None),
            (transforms.CenterCrop([16, 16]), None),
            (transforms.ElasticTransform(sigma=1.0), None),
            (transforms.Pad(4), None),
            (transforms.RandomAffine(degrees=30.0), None),
            (transforms.RandomCrop([16, 16], pad_if_needed=True), None),
            (transforms.RandomHorizontalFlip(p=1.0), None),
            (transforms.RandomPerspective(p=1.0), None),
            (transforms.RandomResize(min_size=10, max_size=20, antialias=True), None),
            (transforms.RandomResizedCrop([16, 16], antialias=True), None),
            (transforms.RandomRotation(degrees=30), None),
            (transforms.RandomShortestSize(min_size=10, antialias=True), None),
            (transforms.RandomVerticalFlip(p=1.0), None),
            (transforms.RandomZoomOut(p=1.0), None),
            (transforms.Resize([16, 16], antialias=True), None),
            (transforms.ScaleJitter((16, 16), scale_range=(0.8, 1.2), antialias=True), None),
            (transforms.ClampBoundingBoxes(), None),
            (transforms.ConvertBoundingBoxFormat(tv_tensors.BoundingBoxFormat.CXCYWH), None),
            (transforms.ConvertImageDtype(), None),
            (transforms.GaussianBlur(kernel_size=3), None),
            (
                transforms.LinearTransformation(
                    # These are just dummy values that will be filled in by the adapter. We can't define them
                    # upfront, because we know neither the spatial size nor the device at this point.
                    transformation_matrix=torch.empty((1, 1)),
                    mean_vector=torch.empty((1,)),
                ),
                linear_transformation_adapter,
            ),
            (transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), normalize_adapter),
            (transforms.ToDtype(torch.float64), None),
            (transforms.UniformTemporalSubsample(num_samples=2), None),
        ],
        ids=lambda transform: type(transform).__name__,
    )
    @pytest.mark.parametrize("container_type", [dict, list, tuple])
    @pytest.mark.parametrize(
        "image_or_video",
        [
            make_image(),
            make_video(),
            next(make_pil_images(color_spaces=["RGB"])),
            next(make_vanilla_tensor_images()),
        ],
    )
    @pytest.mark.parametrize("de_serialize", [lambda t: t, lambda t: pickle.loads(pickle.dumps(t))])
    @pytest.mark.parametrize("device", cpu_and_cuda())
    def test_common(self, transform, adapter, container_type, image_or_video, de_serialize, device):
        transform = de_serialize(transform)

        canvas_size = F.get_size(image_or_video)
        input = dict(
            image_or_video=image_or_video,
            image_tv_tensor=make_image(size=canvas_size),
            video_tv_tensor=make_video(size=canvas_size),
            image_pil=next(make_pil_images(sizes=[canvas_size], color_spaces=["RGB"])),
            bounding_boxes_xyxy=make_bounding_boxes(
                format=tv_tensors.BoundingBoxFormat.XYXY, canvas_size=canvas_size, batch_dims=(3,)
            ),
            bounding_boxes_xywh=make_bounding_boxes(
                format=tv_tensors.BoundingBoxFormat.XYWH, canvas_size=canvas_size, batch_dims=(4,)
            ),
            bounding_boxes_cxcywh=make_bounding_boxes(
                format=tv_tensors.BoundingBoxFormat.CXCYWH, canvas_size=canvas_size, batch_dims=(5,)
            ),
            bounding_boxes_degenerate_xyxy=tv_tensors.BoundingBoxes(
                [
                    [0, 0, 0, 0],  # no height or width
                    [0, 0, 0, 1],  # no height
                    [0, 0, 1, 0],  # no width
                    [2, 0, 1, 1],  # x1 > x2, y1 < y2
                    [0, 2, 1, 1],  # x1 < x2, y1 > y2
                    [2, 2, 1, 1],  # x1 > x2, y1 > y2
                ],
                format=tv_tensors.BoundingBoxFormat.XYXY,
                canvas_size=canvas_size,
            ),
            bounding_boxes_degenerate_xywh=tv_tensors.BoundingBoxes(
                [
                    [0, 0, 0, 0],  # no height or width
                    [0, 0, 0, 1],  # no height
                    [0, 0, 1, 0],  # no width
                    [0, 0, 1, -1],  # negative height
                    [0, 0, -1, 1],  # negative width
                    [0, 0, -1, -1],  # negative height and width
                ],
                format=tv_tensors.BoundingBoxFormat.XYWH,
                canvas_size=canvas_size,
            ),
            bounding_boxes_degenerate_cxcywh=tv_tensors.BoundingBoxes(
                [
                    [0, 0, 0, 0],  # no height or width
                    [0, 0, 0, 1],  # no height
                    [0, 0, 1, 0],  # no width
                    [0, 0, 1, -1],  # negative height
                    [0, 0, -1, 1],  # negative width
                    [0, 0, -1, -1],  # negative height and width
                ],
                format=tv_tensors.BoundingBoxFormat.CXCYWH,
                canvas_size=canvas_size,
            ),
            detection_mask=make_detection_mask(size=canvas_size),
            segmentation_mask=make_segmentation_mask(size=canvas_size),
            int=0,
            float=0.0,
            bool=True,
            none=None,
            str="str",
            path=pathlib.Path.cwd(),
            object=object(),
            tensor=torch.empty(5),
            array=np.empty(5),
        )
        if adapter is not None:
            input = adapter(transform, input, device)

        if container_type in {tuple, list}:
            input = container_type(input.values())

        input_flat, input_spec = tree_flatten(input)
        input_flat = [item.to(device) if isinstance(item, torch.Tensor) else item for item in input_flat]
        input = tree_unflatten(input_flat, input_spec)

        torch.manual_seed(0)
        output = transform(input)
        output_flat, output_spec = tree_flatten(output)

        assert output_spec == input_spec

        for output_item, input_item, should_be_transformed in zip(
            output_flat, input_flat, transforms.Transform()._needs_transform_list(input_flat)
        ):
            if should_be_transformed:
                assert type(output_item) is type(input_item)
            else:
                assert output_item is input_item

            if isinstance(input_item, tv_tensors.BoundingBoxes) and not isinstance(
                transform, transforms.ConvertBoundingBoxFormat
            ):
                assert output_item.format == input_item.format

        # Enforce that the transform does not turn a degenerate box marked by RandomIoUCrop (or any other future
        # transform that does this) back into a valid one.
        # TODO: we should test that against all degenerate boxes above
        for format in list(tv_tensors.BoundingBoxFormat):
            sample = dict(
                boxes=tv_tensors.BoundingBoxes([[0, 0, 0, 0]], format=format, canvas_size=(224, 244)),
                labels=torch.tensor([3]),
            )
            assert transforms.SanitizeBoundingBoxes()(sample)["boxes"].shape == (0, 4)

    @parametrize(
        [
            (
                transform,
                itertools.chain.from_iterable(
                    fn(
                        color_spaces=[
                            "GRAY",
                            "RGB",
                        ],
                        dtypes=[torch.uint8],
                        extra_dims=[(), (4,)],
                        **(dict(num_frames=[3]) if fn is make_videos else dict()),
                    )
                    for fn in [
                        make_images,
                        make_vanilla_tensor_images,
                        make_pil_images,
                        make_videos,
                    ]
                ),
            )
            for transform in (
                transforms.RandAugment(),
                transforms.TrivialAugmentWide(),
                transforms.AutoAugment(),
                transforms.AugMix(),
            )
        ]
    )
    def test_auto_augment(self, transform, input):
        transform(input)

    @parametrize(
        [
            (
                transforms.Normalize(mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0]),
                itertools.chain.from_iterable(
                    fn(color_spaces=["RGB"], dtypes=[torch.float32])
                    for fn in [
                        make_images,
                        make_vanilla_tensor_images,
                        make_videos,
                    ]
                ),
            ),
        ]
    )
    def test_normalize(self, transform, input):
        transform(input)


@pytest.mark.parametrize(
    "flat_inputs",
    itertools.permutations(
        [
            next(make_vanilla_tensor_images()),
            next(make_vanilla_tensor_images()),
            next(make_pil_images()),
            make_image(),
            next(make_videos()),
        ],
        3,
    ),
)
def test_pure_tensor_heuristic(flat_inputs):
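    # The v2 heuristic under test: if the sample contains any non-pure-tensor image-like
    # input, pure tensors pass through untransformed; otherwise the first pure tensor is
    # treated as the image and transformed, and the remaining pure tensors pass through.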
    def split_on_pure_tensor(to_split):
        # This takes a sequence that is structurally aligned with `flat_inputs` and splits its items into three parts:
        # 1. The first pure tensor. If none is present, this will be `None`
        # 2. A list of the remaining pure tensors
        # 3. A list of all other items
        pure_tensors = []
        others = []
        # Splitting always happens on the original `flat_inputs` so that erroneous type
        # changes made by the transform cannot affect the splitting.
        for item, inpt in zip(to_split, flat_inputs):
            (pure_tensors if is_pure_tensor(inpt) else others).append(item)
        return pure_tensors[0] if pure_tensors else None, pure_tensors[1:], others

    class CopyCloneTransform(transforms.Transform):
        def _transform(self, inpt, params):
            return inpt.clone() if isinstance(inpt, torch.Tensor) else inpt.copy()

        @staticmethod
        def was_applied(output, inpt):
            identity = output is inpt
            if identity:
                return False

            # Make sure nothing fishy is going on
            assert_equal(output, inpt)
            return True

    first_pure_tensor_input, other_pure_tensor_inputs, other_inputs = split_on_pure_tensor(flat_inputs)

    transform = CopyCloneTransform()
    transformed_sample = transform(flat_inputs)

    first_pure_tensor_output, other_pure_tensor_outputs, other_outputs = split_on_pure_tensor(transformed_sample)

    if first_pure_tensor_input is not None:
        if other_inputs:
            assert not transform.was_applied(first_pure_tensor_output, first_pure_tensor_input)
        else:
            assert transform.was_applied(first_pure_tensor_output, first_pure_tensor_input)

    for output, inpt in zip(other_pure_tensor_outputs, other_pure_tensor_inputs):
        assert not transform.was_applied(output, inpt)

    for input, output in zip(other_inputs, other_outputs):
        assert transform.was_applied(output, input)


class TestRandomZoomOut:
    def test_assertions(self):
        with pytest.raises(TypeError, match="Got inappropriate fill arg"):
            transforms.RandomZoomOut(fill="abc")

        with pytest.raises(TypeError, match="should be a sequence of length"):
            transforms.RandomZoomOut(0, side_range=0)

        with pytest.raises(ValueError, match="Invalid canvas side range"):
            transforms.RandomZoomOut(0, side_range=[4.0, 1.0])

    @pytest.mark.parametrize("fill", [0, [1, 2, 3], (2, 3, 4)])
    @pytest.mark.parametrize("side_range", [(1.0, 4.0), [2.0, 5.0]])
    def test__get_params(self, fill, side_range):
        transform = transforms.RandomZoomOut(fill=fill, side_range=side_range)

        h, w = size = (24, 32)
        image = make_image(size)

        params = transform._get_params([image])

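        # padding is (left, top, right, bottom); with a zoom factor r sampled from
        # side_range, each side can grow by at most (side_range[1] - 1) times the
        # corresponding image dimension, per the bounds asserted below.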
        assert len(params["padding"]) == 4
        assert 0 <= params["padding"][0] <= (side_range[1] - 1) * w
        assert 0 <= params["padding"][1] <= (side_range[1] - 1) * h
        assert 0 <= params["padding"][2] <= (side_range[1] - 1) * w
        assert 0 <= params["padding"][3] <= (side_range[1] - 1) * h


class TestElasticTransform:
    def test_assertions(self):

        with pytest.raises(TypeError, match="alpha should be a number or a sequence of numbers"):
            transforms.ElasticTransform({})

        with pytest.raises(ValueError, match="alpha is a sequence its length should be 1 or 2"):
            transforms.ElasticTransform([1.0, 2.0, 3.0])

        with pytest.raises(TypeError, match="sigma should be a number or a sequence of numbers"):
            transforms.ElasticTransform(1.0, {})

        with pytest.raises(ValueError, match="sigma is a sequence its length should be 1 or 2"):
            transforms.ElasticTransform(1.0, [1.0, 2.0, 3.0])

        with pytest.raises(TypeError, match="Got inappropriate fill arg"):
            transforms.ElasticTransform(1.0, 2.0, fill="abc")

    def test__get_params(self):
        alpha = 2.0
        sigma = 3.0
        transform = transforms.ElasticTransform(alpha, sigma)

        h, w = size = (24, 32)
        image = make_image(size)

        params = transform._get_params([image])

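        # Per the bounds below, the displacement field is normalized by the image size:
        # the x channel is bounded by alpha / w and the y channel by alpha / h.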
        displacement = params["displacement"]
        assert displacement.shape == (1, h, w, 2)
        assert (-alpha / w <= displacement[0, ..., 0]).all() and (displacement[0, ..., 0] <= alpha / w).all()
        assert (-alpha / h <= displacement[0, ..., 1]).all() and (displacement[0, ..., 1] <= alpha / h).all()


class TestTransform:
    @pytest.mark.parametrize(
        "inpt_type",
        [torch.Tensor, PIL.Image.Image, tv_tensors.Image, np.ndarray, tv_tensors.BoundingBoxes, str, int],
    )
    def test_check_transformed_types(self, inpt_type, mocker):
        # This test ensures that we correctly handle which types to transform and which to bypass
        t = transforms.Transform()
        inpt = mocker.MagicMock(spec=inpt_type)

        if inpt_type in (np.ndarray, str, int):
            output = t(inpt)
            assert output is inpt
        else:
            with pytest.raises(NotImplementedError):
                t(inpt)


class TestToImage:
    @pytest.mark.parametrize(
        "inpt_type",
        [torch.Tensor, PIL.Image.Image, tv_tensors.Image, np.ndarray, tv_tensors.BoundingBoxes, str, int],
    )
    def test__transform(self, inpt_type, mocker):
        fn = mocker.patch(
            "torchvision.transforms.v2.functional.to_image",
            return_value=torch.rand(1, 3, 8, 8),
        )

        inpt = mocker.MagicMock(spec=inpt_type)
        transform = transforms.ToImage()
        transform(inpt)
        if inpt_type in (tv_tensors.BoundingBoxes, tv_tensors.Image, str, int):
            assert fn.call_count == 0
        else:
            fn.assert_called_once_with(inpt)


class TestToPILImage:
    @pytest.mark.parametrize(
        "inpt_type",
        [torch.Tensor, PIL.Image.Image, tv_tensors.Image, np.ndarray, tv_tensors.BoundingBoxes, str, int],
    )
    def test__transform(self, inpt_type, mocker):
        fn = mocker.patch("torchvision.transforms.v2.functional.to_pil_image")

        inpt = mocker.MagicMock(spec=inpt_type)
        transform = transforms.ToPILImage()
        transform(inpt)
        if inpt_type in (PIL.Image.Image, tv_tensors.BoundingBoxes, str, int):
            assert fn.call_count == 0
        else:
            fn.assert_called_once_with(inpt, mode=transform.mode)


class TestToTensor:
    @pytest.mark.parametrize(
        "inpt_type",
        [torch.Tensor, PIL.Image.Image, tv_tensors.Image, np.ndarray, tv_tensors.BoundingBoxes, str, int],
    )
    def test__transform(self, inpt_type, mocker):
        fn = mocker.patch("torchvision.transforms.functional.to_tensor")

        inpt = mocker.MagicMock(spec=inpt_type)
        with pytest.warns(UserWarning, match="deprecated and will be removed"):
            transform = transforms.ToTensor()
        transform(inpt)
        if inpt_type in (tv_tensors.Image, torch.Tensor, tv_tensors.BoundingBoxes, str, int):
            assert fn.call_count == 0
        else:
            fn.assert_called_once_with(inpt)


class TestContainers:
    @pytest.mark.parametrize("transform_cls", [transforms.Compose, transforms.RandomChoice, transforms.RandomOrder])
    def test_assertions(self, transform_cls):
        with pytest.raises(TypeError, match="Argument transforms should be a sequence of callables"):
            transform_cls(transforms.RandomCrop(28))

    @pytest.mark.parametrize("transform_cls", [transforms.Compose, transforms.RandomChoice, transforms.RandomOrder])
    @pytest.mark.parametrize(
        "trfms",
        [
            [transforms.Pad(2), transforms.RandomCrop(28)],
            [lambda x: 2.0 * x, transforms.Pad(2), transforms.RandomCrop(28)],
            [transforms.Pad(2), lambda x: 2.0 * x, transforms.RandomCrop(28)],
        ],
    )
    def test_ctor(self, transform_cls, trfms):
        c = transform_cls(trfms)
        inpt = torch.rand(1, 3, 32, 32)
        output = c(inpt)
        assert isinstance(output, torch.Tensor)
        assert output.ndim == 4


class TestRandomChoice:
    def test_assertions(self):
        with pytest.raises(ValueError, match="Length of p doesn't match the number of transforms"):
            transforms.RandomChoice([transforms.Pad(2), transforms.RandomCrop(28)], p=[1])


class TestRandomIoUCrop:
    @pytest.mark.parametrize("device", cpu_and_cuda())
    @pytest.mark.parametrize("options", [[0.5, 0.9], [2.0]])
    def test__get_params(self, device, options):
        orig_h, orig_w = size = (24, 32)
        image = make_image(size)
        bboxes = tv_tensors.BoundingBoxes(
            torch.tensor([[1, 1, 10, 10], [20, 20, 23, 23], [1, 20, 10, 23], [20, 1, 23, 10]]),
            format="XYXY",
            canvas_size=size,
            device=device,
        )
        sample = [image, bboxes]

        transform = transforms.RandomIoUCrop(sampler_options=options)

        n_samples = 5
        for _ in range(n_samples):

            params = transform._get_params(sample)

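            # sampler_options=[2.0] can never be satisfied, since IoU is at most 1.0, so
            # _get_params returns empty params and the transform becomes a no-op.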
            if options == [2.0]:
                assert len(params) == 0
                return

            assert len(params["is_within_crop_area"]) > 0
            assert params["is_within_crop_area"].dtype == torch.bool

            assert int(transform.min_scale * orig_h) <= params["height"] <= int(transform.max_scale * orig_h)
            assert int(transform.min_scale * orig_w) <= params["width"] <= int(transform.max_scale * orig_w)

            left, top = params["left"], params["top"]
            new_h, new_w = params["height"], params["width"]
            ious = box_iou(
                bboxes,
                torch.tensor([[left, top, left + new_w, top + new_h]], dtype=bboxes.dtype, device=bboxes.device),
            )
            assert ious.max() >= options[0] or ious.max() >= options[1], f"{ious} vs {options}"

    def test__transform_empty_params(self, mocker):
        transform = transforms.RandomIoUCrop(sampler_options=[2.0])
        image = tv_tensors.Image(torch.rand(1, 3, 4, 4))
        bboxes = tv_tensors.BoundingBoxes(torch.tensor([[1, 1, 2, 2]]), format="XYXY", canvas_size=(4, 4))
        label = torch.tensor([1])
        sample = [image, bboxes, label]
        # Let's mock transform._get_params to control the output:
        transform._get_params = mocker.MagicMock(return_value={})
        output = transform(sample)
        torch.testing.assert_close(output, sample)

    def test_forward_assertion(self):
        transform = transforms.RandomIoUCrop()
        with pytest.raises(
            TypeError,
            match="requires input sample to contain tensor or PIL images and bounding boxes",
        ):
            transform(torch.tensor(0))

    def test__transform(self, mocker):
        transform = transforms.RandomIoUCrop()

        size = (32, 24)
        image = make_image(size)
        bboxes = make_bounding_boxes(format="XYXY", canvas_size=size, batch_dims=(6,))
        masks = make_detection_mask(size, num_objects=6)

        sample = [image, bboxes, masks]

        is_within_crop_area = torch.tensor([0, 1, 0, 1, 0, 1], dtype=torch.bool)

        params = dict(top=1, left=2, height=12, width=12, is_within_crop_area=is_within_crop_area)
        transform._get_params = mocker.MagicMock(return_value=params)
        output = transform(sample)

        # check number of bboxes vs number of labels:
        output_bboxes = output[1]
        assert isinstance(output_bboxes, tv_tensors.BoundingBoxes)
        assert (output_bboxes[~is_within_crop_area] == 0).all()

        output_masks = output[2]
        assert isinstance(output_masks, tv_tensors.Mask)


class TestScaleJitter:
    def test__get_params(self):
        canvas_size = (24, 32)
        target_size = (16, 12)
        scale_range = (0.5, 1.5)

        transform = transforms.ScaleJitter(target_size=target_size, scale_range=scale_range)

        sample = make_image(canvas_size)

        n_samples = 5
        for _ in range(n_samples):

            params = transform._get_params([sample])

            assert "size" in params
            size = params["size"]

            assert isinstance(size, tuple) and len(size) == 2
            height, width = size

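            # Mirrors the ratio computation exercised here: min(target_size[1] / height,
            # target_size[0] / width), scaled by a factor drawn uniformly from scale_range.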
            r_min = min(target_size[1] / canvas_size[0], target_size[0] / canvas_size[1]) * scale_range[0]
            r_max = min(target_size[1] / canvas_size[0], target_size[0] / canvas_size[1]) * scale_range[1]

            assert int(canvas_size[0] * r_min) <= height <= int(canvas_size[0] * r_max)
            assert int(canvas_size[1] * r_min) <= width <= int(canvas_size[1] * r_max)


class TestRandomShortestSize:
    @pytest.mark.parametrize("min_size,max_size", [([5, 9], 20), ([5, 9], None)])
    def test__get_params(self, min_size, max_size):
        canvas_size = (3, 10)

        transform = transforms.RandomShortestSize(min_size=min_size, max_size=max_size, antialias=True)

        sample = make_image(canvas_size)

        params = transform._get_params([sample])

        assert "size" in params
        size = params["size"]

        assert isinstance(size, tuple) and len(size) == 2

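        # The shorter edge is resized to a randomly chosen min_size entry; if max_size is
        # given, the longer edge is capped at max_size, which can pull the shorter edge
        # below its target, hence the asserts below.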
        longer = max(size)
        shorter = min(size)
        if max_size is not None:
            assert longer <= max_size
            assert shorter <= max_size
        else:
            assert shorter in min_size


class TestLinearTransformation:
    def test_assertions(self):
        with pytest.raises(ValueError, match="transformation_matrix should be square"):
            transforms.LinearTransformation(torch.rand(2, 3), torch.rand(5))

        with pytest.raises(ValueError, match="mean_vector should have the same length"):
            transforms.LinearTransformation(torch.rand(3, 3), torch.rand(5))

    @pytest.mark.parametrize(
        "inpt",
        [
            122 * torch.ones(1, 3, 8, 8),
            122.0 * torch.ones(1, 3, 8, 8),
            tv_tensors.Image(122 * torch.ones(1, 3, 8, 8)),
            PIL.Image.new("RGB", (8, 8), (122, 122, 122)),
        ],
    )
    def test__transform(self, inpt):

        v = 121 * torch.ones(3 * 8 * 8)
        m = torch.ones(3 * 8 * 8, 3 * 8 * 8)
        transform = transforms.LinearTransformation(m, v)

        if isinstance(inpt, PIL.Image.Image):
            with pytest.raises(TypeError, match="does not support PIL images"):
                transform(inpt)
        else:
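            # (122 - 121) == 1 for every element, and the all-ones matrix sums 3 * 8 * 8
            # ones, so each output value equals 3 * 8 * 8.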
            output = transform(inpt)
            assert isinstance(output, torch.Tensor)
            assert output.unique() == 3 * 8 * 8
            assert output.dtype == inpt.dtype


class TestRandomResize:
    def test__get_params(self):
        min_size = 3
        max_size = 6

        transform = transforms.RandomResize(min_size=min_size, max_size=max_size, antialias=True)

        for _ in range(10):
            params = transform._get_params([])

            assert isinstance(params["size"], list) and len(params["size"]) == 1
            size = params["size"][0]

            assert min_size <= size < max_size


class TestUniformTemporalSubsample:
    @pytest.mark.parametrize(
        "inpt",
        [
            torch.zeros(10, 3, 8, 8),
            torch.zeros(1, 10, 3, 8, 8),
            tv_tensors.Video(torch.zeros(1, 10, 3, 8, 8)),
        ],
    )
    def test__transform(self, inpt):
        num_samples = 5
        transform = transforms.UniformTemporalSubsample(num_samples)

        output = transform(inpt)
        assert type(output) is type(inpt)
        assert output.shape[-4] == num_samples
        assert output.dtype == inpt.dtype


# TODO: remove this test in 0.17 when the default of antialias changes to True
def test_antialias_warning():
    pil_img = PIL.Image.new("RGB", size=(10, 10), color=127)
    tensor_img = torch.randint(0, 256, size=(3, 10, 10), dtype=torch.uint8)
    tensor_video = torch.randint(0, 256, size=(2, 3, 10, 10), dtype=torch.uint8)

    match = "The default value of the antialias parameter"
    with pytest.warns(UserWarning, match=match):
        transforms.RandomResizedCrop((20, 20))(tensor_img)
    with pytest.warns(UserWarning, match=match):
        transforms.ScaleJitter((20, 20))(tensor_img)
    with pytest.warns(UserWarning, match=match):
        transforms.RandomShortestSize((20, 20))(tensor_img)
    with pytest.warns(UserWarning, match=match):
        transforms.RandomResize(10, 20)(tensor_img)

    with pytest.warns(UserWarning, match=match):
        F.resized_crop(tv_tensors.Image(tensor_img), 0, 0, 10, 10, (20, 20))

    with pytest.warns(UserWarning, match=match):
        F.resize(tv_tensors.Video(tensor_video), (20, 20))
    with pytest.warns(UserWarning, match=match):
        F.resized_crop(tv_tensors.Video(tensor_video), 0, 0, 10, 10, (20, 20))

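    # Conversely, PIL inputs and calls that pass antialias explicitly must not warn.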
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        transforms.RandomResizedCrop((20, 20))(pil_img)
        transforms.ScaleJitter((20, 20))(pil_img)
        transforms.RandomShortestSize((20, 20))(pil_img)
        transforms.RandomResize(10, 20)(pil_img)

        transforms.RandomResizedCrop((20, 20), antialias=True)(tensor_img)
        transforms.ScaleJitter((20, 20), antialias=True)(tensor_img)
        transforms.RandomShortestSize((20, 20), antialias=True)(tensor_img)
        transforms.RandomResize(10, 20, antialias=True)(tensor_img)

        F.resized_crop(tv_tensors.Image(tensor_img), 0, 0, 10, 10, (20, 20), antialias=True)
        F.resized_crop(tv_tensors.Video(tensor_video), 0, 0, 10, 10, (20, 20), antialias=True)


@pytest.mark.parametrize("image_type", (PIL.Image, torch.Tensor, tv_tensors.Image))
@pytest.mark.parametrize("label_type", (torch.Tensor, int))
@pytest.mark.parametrize("dataset_return_type", (dict, tuple))
@pytest.mark.parametrize("to_tensor", (transforms.ToTensor, transforms.ToImage))
def test_classif_preset(image_type, label_type, dataset_return_type, to_tensor):

    image = tv_tensors.Image(torch.randint(0, 256, size=(1, 3, 250, 250), dtype=torch.uint8))
    if image_type is PIL.Image:
        image = to_pil_image(image[0])
    elif image_type is torch.Tensor:
        image = image.as_subclass(torch.Tensor)
        assert is_pure_tensor(image)

    label = 1 if label_type is int else torch.tensor([1])

    if dataset_return_type is dict:
        sample = {
            "image": image,
            "label": label,
        }
    else:
        sample = image, label

    if to_tensor is transforms.ToTensor:
        with pytest.warns(UserWarning, match="deprecated and will be removed"):
            to_tensor = to_tensor()
    else:
        to_tensor = to_tensor()

    t = transforms.Compose(
        [
            transforms.RandomResizedCrop((224, 224), antialias=True),
            transforms.RandomHorizontalFlip(p=1),
            transforms.RandAugment(),
            transforms.TrivialAugmentWide(),
            transforms.AugMix(),
            transforms.AutoAugment(),
            to_tensor,
            # TODO: ConvertImageDtype is a pass-through on PIL images; is that
            # intended? This results in a failure if we convert to tensor after
            # it, because the image would still be uint8, which makes Normalize
            # fail.
            transforms.ConvertImageDtype(torch.float),
            transforms.Normalize(mean=[0, 0, 0], std=[1, 1, 1]),
            transforms.RandomErasing(p=1),
        ]
    )

    out = t(sample)

    assert type(out) == type(sample)

    if dataset_return_type is tuple:
        out_image, out_label = out
    else:
        assert out.keys() == sample.keys()
        out_image, out_label = out.values()

    assert out_image.shape[-2:] == (224, 224)
    assert out_label == label


@pytest.mark.parametrize("image_type", (PIL.Image, torch.Tensor, tv_tensors.Image))
@pytest.mark.parametrize("data_augmentation", ("hflip", "lsj", "multiscale", "ssd", "ssdlite"))
@pytest.mark.parametrize("to_tensor", (transforms.ToTensor, transforms.ToImage))
@pytest.mark.parametrize("sanitize", (True, False))
def test_detection_preset(image_type, data_augmentation, to_tensor, sanitize):
    torch.manual_seed(0)

    if to_tensor is transforms.ToTensor:
        with pytest.warns(UserWarning, match="deprecated and will be removed"):
            to_tensor = to_tensor()
    else:
        to_tensor = to_tensor()

    if data_augmentation == "hflip":
        t = [
            transforms.RandomHorizontalFlip(p=1),
            to_tensor,
            transforms.ConvertImageDtype(torch.float),
        ]
    elif data_augmentation == "lsj":
        t = [
            transforms.ScaleJitter(target_size=(1024, 1024), antialias=True),
            # Note: replaced FixedSizeCrop with RandomCrop, because we're
            # leaving FixedSizeCrop in prototype for now, and it expects Label
            # classes which we won't release yet.
            # transforms.FixedSizeCrop(
            #     size=(1024, 1024), fill=defaultdict(lambda: (123.0, 117.0, 104.0), {tv_tensors.Mask: 0})
            # ),
            transforms.RandomCrop((1024, 1024), pad_if_needed=True),
            transforms.RandomHorizontalFlip(p=1),
            to_tensor,
            transforms.ConvertImageDtype(torch.float),
        ]
    elif data_augmentation == "multiscale":
        t = [
            transforms.RandomShortestSize(
                min_size=(480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800), max_size=1333, antialias=True
            ),
            transforms.RandomHorizontalFlip(p=1),
            to_tensor,
            transforms.ConvertImageDtype(torch.float),
        ]
    elif data_augmentation == "ssd":
        t = [
            transforms.RandomPhotometricDistort(p=1),
            transforms.RandomZoomOut(fill={"others": (123.0, 117.0, 104.0), tv_tensors.Mask: 0}, p=1),
            transforms.RandomIoUCrop(),
            transforms.RandomHorizontalFlip(p=1),
            to_tensor,
            transforms.ConvertImageDtype(torch.float),
        ]
    elif data_augmentation == "ssdlite":
        t = [
            transforms.RandomIoUCrop(),
            transforms.RandomHorizontalFlip(p=1),
            to_tensor,
            transforms.ConvertImageDtype(torch.float),
        ]
    if sanitize:
        t += [transforms.SanitizeBoundingBoxes()]
    t = transforms.Compose(t)

    num_boxes = 5
    H = W = 250

    image = tv_tensors.Image(torch.randint(0, 256, size=(1, 3, H, W), dtype=torch.uint8))
    if image_type is PIL.Image:
        image = to_pil_image(image[0])
    elif image_type is torch.Tensor:
        image = image.as_subclass(torch.Tensor)
        assert is_pure_tensor(image)

    label = torch.randint(0, 10, size=(num_boxes,))

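    # Draw random top-left corners and extents, then turn them into valid XYXY boxes by
    # adding the offsets to the second corner and clamping to the canvas.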
    boxes = torch.randint(0, min(H, W) // 2, size=(num_boxes, 4))
    boxes[:, 2:] += boxes[:, :2]
    boxes = boxes.clamp(min=0, max=min(H, W))
    boxes = tv_tensors.BoundingBoxes(boxes, format="XYXY", canvas_size=(H, W))

    masks = tv_tensors.Mask(torch.randint(0, 2, size=(num_boxes, H, W), dtype=torch.uint8))

    sample = {
        "image": image,
        "label": label,
        "boxes": boxes,
        "masks": masks,
    }

    out = t(sample)

    if isinstance(to_tensor, transforms.ToTensor) and image_type is not tv_tensors.Image:
        assert is_pure_tensor(out["image"])
    else:
        assert isinstance(out["image"], tv_tensors.Image)
    assert isinstance(out["label"], type(sample["label"]))

    num_boxes_expected = {
        # ssd and ssdlite contain RandomIoUCrop, which may "remove" some bboxes. Strictly
        # speaking it doesn't remove them; it just marks some boxes as degenerate, and
        # those boxes are later removed by SanitizeBoundingBoxes(), which we add to the
        # pipelines if the sanitize param is True.
        # Note that the values below are probably specific to the random seed
        # set above (which is fine).
        (True, "ssd"): 5,
        (True, "ssdlite"): 4,
    }.get((sanitize, data_augmentation), num_boxes)

    assert out["boxes"].shape[0] == out["masks"].shape[0] == out["label"].shape[0] == num_boxes_expected


@pytest.mark.parametrize("min_size", (1, 10))
@pytest.mark.parametrize("labels_getter", ("default", lambda inputs: inputs["labels"], None, lambda inputs: None))
@pytest.mark.parametrize("sample_type", (tuple, dict))
def test_sanitize_bounding_boxes(min_size, labels_getter, sample_type):

    if sample_type is tuple and not isinstance(labels_getter, str):
        # The "lambda inputs: inputs["labels"]" labels_getter used in this test
        # doesn't work if the input is a tuple.
        return

    H, W = 256, 128

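    # Each entry is an XYXY box together with whether SanitizeBoundingBoxes(min_size=min_size)
    # should keep it.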
    boxes_and_validity = [
        ([0, 1, 10, 1], False),  # Y1 == Y2
        ([0, 1, 0, 20], False),  # X1 == X2
        ([0, 0, min_size - 1, 10], False),  # W < min_size
        ([0, 0, 10, min_size - 1], False),  # H < min_size
        ([0, 0, 10, H + 1], False),  # Y2 > H
        ([0, 0, W + 1, 10], False),  # X2 > W
        ([-1, 1, 10, 20], False),  # any < 0
        ([0, 0, -1, 20], False),  # any < 0
        ([0, 0, -10, -1], False),  # any < 0
        ([0, 0, min_size, 10], True),  # W == min_size
        ([0, 0, 10, min_size], True),  # H == min_size
        ([0, 0, W, H], True),  # TODO: Is that actually OK?? Should it be -1?
        ([1, 1, 30, 20], True),
        ([0, 0, 10, 10], True),
        ([1, 1, 30, 20], True),
    ]

    random.shuffle(boxes_and_validity)  # For test robustness: mix order of wrong and correct cases
    boxes, is_valid_mask = zip(*boxes_and_validity)
    valid_indices = [i for (i, is_valid) in enumerate(is_valid_mask) if is_valid]

    boxes = torch.tensor(boxes)
    labels = torch.arange(boxes.shape[0])

    boxes = tv_tensors.BoundingBoxes(
        boxes,
        format=tv_tensors.BoundingBoxFormat.XYXY,
        canvas_size=(H, W),
    )

    masks = tv_tensors.Mask(torch.randint(0, 2, size=(boxes.shape[0], H, W)))
    whatever = torch.rand(10)
    input_img = torch.randint(0, 256, size=(1, 3, H, W), dtype=torch.uint8)
    sample = {
        "image": input_img,
        "labels": labels,
        "boxes": boxes,
        "whatever": whatever,
        "None": None,
        "masks": masks,
    }

    if sample_type is tuple:
        img = sample.pop("image")
        sample = (img, sample)

    out = transforms.SanitizeBoundingBoxes(min_size=min_size, labels_getter=labels_getter)(sample)

    if sample_type is tuple:
        out_image = out[0]
        out_labels = out[1]["labels"]
        out_boxes = out[1]["boxes"]
        out_masks = out[1]["masks"]
        out_whatever = out[1]["whatever"]
    else:
        out_image = out["image"]
        out_labels = out["labels"]
        out_boxes = out["boxes"]
        out_masks = out["masks"]
        out_whatever = out["whatever"]

    assert out_image is input_img
    assert out_whatever is whatever

    assert isinstance(out_boxes, tv_tensors.BoundingBoxes)
    assert isinstance(out_masks, tv_tensors.Mask)

    if labels_getter is None or (callable(labels_getter) and labels_getter({"labels": "blah"}) is None):
        assert out_labels is labels
    else:
        assert isinstance(out_labels, torch.Tensor)
        assert out_boxes.shape[0] == out_labels.shape[0] == out_masks.shape[0]
        # This works because we conveniently set labels to arange(num_boxes)
        assert out_labels.tolist() == valid_indices


def test_sanitize_bounding_boxes_no_label():
    # Non-regression test for https://github.com/pytorch/vision/issues/7878

    img = make_image()
    boxes = make_bounding_boxes()

    with pytest.raises(ValueError, match="or a two-tuple whose second item is a dict"):
        transforms.SanitizeBoundingBoxes()(img, boxes)

    out_img, out_boxes = transforms.SanitizeBoundingBoxes(labels_getter=None)(img, boxes)
    assert isinstance(out_img, tv_tensors.Image)
    assert isinstance(out_boxes, tv_tensors.BoundingBoxes)


def test_sanitize_bounding_boxes_errors():

    good_bbox = tv_tensors.BoundingBoxes(
        [[0, 0, 10, 10]],
        format=tv_tensors.BoundingBoxFormat.XYXY,
        canvas_size=(20, 20),
    )

    with pytest.raises(ValueError, match="min_size must be >= 1"):
        transforms.SanitizeBoundingBoxes(min_size=0)
    with pytest.raises(ValueError, match="labels_getter should either be 'default'"):
        transforms.SanitizeBoundingBoxes(labels_getter=12)

    with pytest.raises(ValueError, match="Could not infer where the labels are"):
        bad_labels_key = {"bbox": good_bbox, "BAD_KEY": torch.arange(good_bbox.shape[0])}
        transforms.SanitizeBoundingBoxes()(bad_labels_key)

    with pytest.raises(ValueError, match="must be a tensor"):
        not_a_tensor = {"bbox": good_bbox, "labels": torch.arange(good_bbox.shape[0]).tolist()}
        transforms.SanitizeBoundingBoxes()(not_a_tensor)

    with pytest.raises(ValueError, match="Number of boxes"):
        different_sizes = {"bbox": good_bbox, "labels": torch.arange(good_bbox.shape[0] + 3)}
        transforms.SanitizeBoundingBoxes()(different_sizes)


class TestLambda:
    inputs = pytest.mark.parametrize("input", [object(), torch.empty(()), np.empty(()), "string", 1, 0.0])

    @inputs
    def test_default(self, input):
        was_applied = False

        def was_applied_fn(input):
            nonlocal was_applied
            was_applied = True
            return input

        transform = transforms.Lambda(was_applied_fn)

        transform(input)

        assert was_applied

    @inputs
    def test_with_types(self, input):
        was_applied = False

        def was_applied_fn(input):
            nonlocal was_applied
            was_applied = True
            return input

        types = (torch.Tensor, np.ndarray)
        transform = transforms.Lambda(was_applied_fn, *types)

        transform(input)

        assert was_applied is isinstance(input, types)