import itertools
import pathlib
import pickle
import random
import warnings

import numpy as np

import PIL.Image
import pytest
import torch
import torchvision.transforms.v2 as transforms

from common_utils import assert_equal, cpu_and_cuda
from torch.utils._pytree import tree_flatten, tree_unflatten
from torchvision import tv_tensors
from torchvision.ops.boxes import box_iou
from torchvision.transforms.functional import to_pil_image
from torchvision.transforms.v2 import functional as F
from torchvision.transforms.v2._utils import check_type, is_pure_tensor, query_chw
from transforms_v2_legacy_utils import (
    make_bounding_boxes,
    make_detection_mask,
    make_image,
    make_images,
    make_multiple_bounding_boxes,
    make_segmentation_mask,
    make_video,
    make_videos,
)


def make_vanilla_tensor_images(*args, **kwargs):
    for image in make_images(*args, **kwargs):
        if image.ndim > 3:
            continue
        yield image.data


def make_pil_images(*args, **kwargs):
    for image in make_vanilla_tensor_images(*args, **kwargs):
        yield to_pil_image(image)


def make_vanilla_tensor_bounding_boxes(*args, **kwargs):
    for bounding_boxes in make_multiple_bounding_boxes(*args, **kwargs):
        yield bounding_boxes.data


def parametrize(transforms_with_inputs):
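    # The generated ids look like "AugMix-PIL.Image.Image-0", i.e.
    # "<transform class>-<input type>-<input index>".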
    return pytest.mark.parametrize(
        ("transform", "input"),
        [
            pytest.param(
                transform,
                input,
                id=f"{type(transform).__name__}-{type(input).__module__}.{type(input).__name__}-{idx}",
            )
            for transform, inputs in transforms_with_inputs
            for idx, input in enumerate(inputs)
        ],
    )


def auto_augment_adapter(transform, input, device):
    adapted_input = {}
    image_or_video_found = False
    for key, value in input.items():
        if isinstance(value, (tv_tensors.BoundingBoxes, tv_tensors.Mask)):
            # AA transforms don't support bounding boxes or masks
            continue
        elif check_type(value, (tv_tensors.Image, tv_tensors.Video, is_pure_tensor, PIL.Image.Image)):
            if image_or_video_found:
                # AA transforms only support a single image or video
                continue
            image_or_video_found = True
        adapted_input[key] = value
    return adapted_input


def linear_transformation_adapter(transform, input, device):
    flat_inputs = list(input.values())
    c, h, w = query_chw(
        [
            item
            for item, needs_transform in zip(flat_inputs, transforms.Transform()._needs_transform_list(flat_inputs))
            if needs_transform
        ]
    )
    num_elements = c * h * w
    transform.transformation_matrix = torch.randn((num_elements, num_elements), device=device)
    transform.mean_vector = torch.randn((num_elements,), device=device)
    return {key: value for key, value in input.items() if not isinstance(value, PIL.Image.Image)}


def normalize_adapter(transform, input, device):
    adapted_input = {}
    for key, value in input.items():
        if isinstance(value, PIL.Image.Image):
            # normalize doesn't support PIL images
            continue
        elif check_type(value, (tv_tensors.Image, tv_tensors.Video, is_pure_tensor)):
            # normalize doesn't support integer images
            value = F.to_dtype(value, torch.float32, scale=True)
        adapted_input[key] = value
    return adapted_input
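

# A hedged, minimal sketch of the adapter contract used by TestSmoke below (the
# helper name is ours and pytest does not collect it): an adapter takes the
# transform, the full sample dict, and the target device, and returns the
# pruned/patched sample that the transform can actually handle.
def _example_adapter_usage(device="cpu"):
    transform = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    sample = {"image": make_image(), "pil_image": next(make_pil_images())}
    # drops the PIL entry and converts integer images to scaled floats
    sample = normalize_adapter(transform, sample, device)
    return transform(sample)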


class TestSmoke:
    @pytest.mark.parametrize(
        ("transform", "adapter"),
        [
            (transforms.RandomErasing(p=1.0), None),
            (transforms.AugMix(), auto_augment_adapter),
            (transforms.AutoAugment(), auto_augment_adapter),
            (transforms.RandAugment(), auto_augment_adapter),
            (transforms.TrivialAugmentWide(), auto_augment_adapter),
            (transforms.ColorJitter(brightness=0.1, contrast=0.2, saturation=0.3, hue=0.15), None),
            (transforms.Grayscale(), None),
            (transforms.RandomAdjustSharpness(sharpness_factor=0.5, p=1.0), None),
            (transforms.RandomAutocontrast(p=1.0), None),
            (transforms.RandomEqualize(p=1.0), None),
            (transforms.RandomGrayscale(p=1.0), None),
            (transforms.RandomInvert(p=1.0), None),
            (transforms.RandomChannelPermutation(), None),
            (transforms.RandomPhotometricDistort(p=1.0), None),
            (transforms.RandomPosterize(bits=4, p=1.0), None),
            (transforms.RandomSolarize(threshold=0.5, p=1.0), None),
            (transforms.CenterCrop([16, 16]), None),
            (transforms.ElasticTransform(sigma=1.0), None),
            (transforms.Pad(4), None),
            (transforms.RandomAffine(degrees=30.0), None),
            (transforms.RandomCrop([16, 16], pad_if_needed=True), None),
            (transforms.RandomHorizontalFlip(p=1.0), None),
            (transforms.RandomPerspective(p=1.0), None),
            (transforms.RandomResize(min_size=10, max_size=20, antialias=True), None),
            (transforms.RandomResizedCrop([16, 16], antialias=True), None),
            (transforms.RandomRotation(degrees=30), None),
            (transforms.RandomShortestSize(min_size=10, antialias=True), None),
            (transforms.RandomVerticalFlip(p=1.0), None),
            (transforms.RandomZoomOut(p=1.0), None),
            (transforms.Resize([16, 16], antialias=True), None),
            (transforms.ScaleJitter((16, 16), scale_range=(0.8, 1.2), antialias=True), None),
            (transforms.ClampBoundingBoxes(), None),
            (transforms.ConvertBoundingBoxFormat(tv_tensors.BoundingBoxFormat.CXCYWH), None),
            (transforms.ConvertImageDtype(), None),
            (transforms.GaussianBlur(kernel_size=3), None),
            (
                transforms.LinearTransformation(
                    # These are just dummy values that will be filled by the adapter. We can't define them upfront,
                    # because we know neither the spatial size nor the device at this point
                    transformation_matrix=torch.empty((1, 1)),
                    mean_vector=torch.empty((1,)),
                ),
                linear_transformation_adapter,
            ),
            (transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), normalize_adapter),
            (transforms.ToDtype(torch.float64), None),
            (transforms.UniformTemporalSubsample(num_samples=2), None),
        ],
        ids=lambda transform: type(transform).__name__,
    )
    @pytest.mark.parametrize("container_type", [dict, list, tuple])
    @pytest.mark.parametrize(
        "image_or_video",
        [
            make_image(),
            make_video(),
            next(make_pil_images(color_spaces=["RGB"])),
            next(make_vanilla_tensor_images()),
        ],
    )
    @pytest.mark.parametrize("de_serialize", [lambda t: t, lambda t: pickle.loads(pickle.dumps(t))])
    @pytest.mark.parametrize("device", cpu_and_cuda())
    def test_common(self, transform, adapter, container_type, image_or_video, de_serialize, device):
        transform = de_serialize(transform)

        canvas_size = F.get_size(image_or_video)
        input = dict(
            image_or_video=image_or_video,
            image_tv_tensor=make_image(size=canvas_size),
            video_tv_tensor=make_video(size=canvas_size),
            image_pil=next(make_pil_images(sizes=[canvas_size], color_spaces=["RGB"])),
            bounding_boxes_xyxy=make_bounding_boxes(
                format=tv_tensors.BoundingBoxFormat.XYXY, canvas_size=canvas_size, batch_dims=(3,)
            ),
            bounding_boxes_xywh=make_bounding_boxes(
                format=tv_tensors.BoundingBoxFormat.XYWH, canvas_size=canvas_size, batch_dims=(4,)
            ),
            bounding_boxes_cxcywh=make_bounding_boxes(
                format=tv_tensors.BoundingBoxFormat.CXCYWH, canvas_size=canvas_size, batch_dims=(5,)
            ),
            bounding_boxes_degenerate_xyxy=tv_tensors.BoundingBoxes(
                [
                    [0, 0, 0, 0],  # no height or width
                    [0, 0, 0, 1],  # no height
                    [0, 0, 1, 0],  # no width
                    [2, 0, 1, 1],  # x1 > x2, y1 < y2
                    [0, 2, 1, 1],  # x1 < x2, y1 > y2
                    [2, 2, 1, 1],  # x1 > x2, y1 > y2
                ],
                format=tv_tensors.BoundingBoxFormat.XYXY,
                canvas_size=canvas_size,
            ),
            bounding_boxes_degenerate_xywh=tv_tensors.BoundingBoxes(
                [
                    [0, 0, 0, 0],  # no height or width
                    [0, 0, 0, 1],  # no height
                    [0, 0, 1, 0],  # no width
                    [0, 0, 1, -1],  # negative height
                    [0, 0, -1, 1],  # negative width
                    [0, 0, -1, -1],  # negative height and width
                ],
                format=tv_tensors.BoundingBoxFormat.XYWH,
                canvas_size=canvas_size,
            ),
            bounding_boxes_degenerate_cxcywh=tv_tensors.BoundingBoxes(
                [
                    [0, 0, 0, 0],  # no height or width
                    [0, 0, 0, 1],  # no height
                    [0, 0, 1, 0],  # no width
                    [0, 0, 1, -1],  # negative height
                    [0, 0, -1, 1],  # negative width
                    [0, 0, -1, -1],  # negative height and width
                ],
                format=tv_tensors.BoundingBoxFormat.CXCYWH,
                canvas_size=canvas_size,
            ),
            detection_mask=make_detection_mask(size=canvas_size),
            segmentation_mask=make_segmentation_mask(size=canvas_size),
            int=0,
            float=0.0,
            bool=True,
            none=None,
            str="str",
            path=pathlib.Path.cwd(),
            object=object(),
            tensor=torch.empty(5),
            array=np.empty(5),
        )
        if adapter is not None:
            input = adapter(transform, input, device)

        if container_type in {tuple, list}:
            input = container_type(input.values())

        input_flat, input_spec = tree_flatten(input)
        input_flat = [item.to(device) if isinstance(item, torch.Tensor) else item for item in input_flat]
        input = tree_unflatten(input_flat, input_spec)

        torch.manual_seed(0)
        output = transform(input)
        output_flat, output_spec = tree_flatten(output)

        assert output_spec == input_spec

        for output_item, input_item, should_be_transformed in zip(
            output_flat, input_flat, transforms.Transform()._needs_transform_list(input_flat)
        ):
            if should_be_transformed:
                assert type(output_item) is type(input_item)
            else:
                assert output_item is input_item

            if isinstance(input_item, tv_tensors.BoundingBoxes) and not isinstance(
                transform, transforms.ConvertBoundingBoxFormat
            ):
                assert output_item.format == input_item.format

        # Enforce that the transform does not turn a degenerate box marked by RandomIoUCrop (or any other future
        # transform that does this) back into a valid one.
        # TODO: we should test that against all degenerate boxes above
        for format in list(tv_tensors.BoundingBoxFormat):
            sample = dict(
                boxes=tv_tensors.BoundingBoxes([[0, 0, 0, 0]], format=format, canvas_size=(224, 224)),
                labels=torch.tensor([3]),
            )
            assert transforms.SanitizeBoundingBoxes()(sample)["boxes"].shape == (0, 4)

    @parametrize(
        [
            (
                transform,
                itertools.chain.from_iterable(
                    fn(
                        color_spaces=[
                            "GRAY",
                            "RGB",
                        ],
                        dtypes=[torch.uint8],
                        extra_dims=[(), (4,)],
                        **(dict(num_frames=[3]) if fn is make_videos else dict()),
                    )
                    for fn in [
                        make_images,
                        make_vanilla_tensor_images,
                        make_pil_images,
                        make_videos,
                    ]
                ),
            )
            for transform in (
                transforms.RandAugment(),
                transforms.TrivialAugmentWide(),
                transforms.AutoAugment(),
                transforms.AugMix(),
            )
        ]
    )
    def test_auto_augment(self, transform, input):
        transform(input)

    @parametrize(
        [
            (
                transforms.Normalize(mean=[0.0, 0.0, 0.0], std=[1.0, 1.0, 1.0]),
                itertools.chain.from_iterable(
                    fn(color_spaces=["RGB"], dtypes=[torch.float32])
                    for fn in [
                        make_images,
                        make_vanilla_tensor_images,
                        make_videos,
                    ]
                ),
            ),
        ]
    )
    def test_normalize(self, transform, input):
        transform(input)


@pytest.mark.parametrize(
    "flat_inputs",
    itertools.permutations(
        [
            next(make_vanilla_tensor_images()),
            next(make_vanilla_tensor_images()),
            next(make_pil_images()),
            make_image(),
            next(make_videos()),
        ],
        3,
    ),
)
def test_pure_tensor_heuristic(flat_inputs):
    def split_on_pure_tensor(to_split):
        # This takes a sequence that is structurally aligned with `flat_inputs` and splits its items into three parts:
        # 1. The first pure tensor. If none is present, this will be `None`
        # 2. A list of the remaining pure tensors
        # 3. A list of all other items
        pure_tensors = []
        others = []
        # Splitting always happens on the original `flat_inputs` so that any erroneous type changes made by the
        # transform do not affect the splitting.
        for item, inpt in zip(to_split, flat_inputs):
            (pure_tensors if is_pure_tensor(inpt) else others).append(item)
        return pure_tensors[0] if pure_tensors else None, pure_tensors[1:], others

    class CopyCloneTransform(transforms.Transform):
        def _transform(self, inpt, params):
            return inpt.clone() if isinstance(inpt, torch.Tensor) else inpt.copy()

        @staticmethod
        def was_applied(output, inpt):
            identity = output is inpt
            if identity:
                return False

            # Make sure nothing fishy is going on
            assert_equal(output, inpt)
            return True

    first_pure_tensor_input, other_pure_tensor_inputs, other_inputs = split_on_pure_tensor(flat_inputs)

    transform = CopyCloneTransform()
    transformed_sample = transform(flat_inputs)

    first_pure_tensor_output, other_pure_tensor_outputs, other_outputs = split_on_pure_tensor(transformed_sample)

    if first_pure_tensor_input is not None:
        if other_inputs:
            assert not transform.was_applied(first_pure_tensor_output, first_pure_tensor_input)
        else:
            assert transform.was_applied(first_pure_tensor_output, first_pure_tensor_input)

    for output, inpt in zip(other_pure_tensor_outputs, other_pure_tensor_inputs):
        assert not transform.was_applied(output, inpt)

    for input, output in zip(other_inputs, other_outputs):
        assert transform.was_applied(output, input)
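

# A hedged, minimal sketch of the heuristic exercised above (the helper name is
# ours and pytest does not collect it): a pure tensor is treated as an image
# only when no tv_tensors.Image, tv_tensors.Video, or PIL image is present in
# the sample; otherwise it is passed through untouched.
def _example_pure_tensor_heuristic():
    pure_tensor = torch.rand(3, 8, 8)
    image = make_image()
    flip = transforms.RandomVerticalFlip(p=1.0)

    flipped = flip(pure_tensor)  # the only image candidate, so it gets flipped
    out_tensor, out_image = flip(pure_tensor, image)  # the Image takes precedence
    assert out_tensor is pure_tensor  # the pure tensor is bypassed untouched
    return flipped, out_image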


class TestRandomZoomOut:
    def test_assertions(self):
        with pytest.raises(TypeError, match="Got inappropriate fill arg"):
            transforms.RandomZoomOut(fill="abc")

        with pytest.raises(TypeError, match="should be a sequence of length"):
            transforms.RandomZoomOut(0, side_range=0)

        with pytest.raises(ValueError, match="Invalid canvas side range"):
            transforms.RandomZoomOut(0, side_range=[4.0, 1.0])

    @pytest.mark.parametrize("fill", [0, [1, 2, 3], (2, 3, 4)])
    @pytest.mark.parametrize("side_range", [(1.0, 4.0), [2.0, 5.0]])
    def test__get_params(self, fill, side_range):
        transform = transforms.RandomZoomOut(fill=fill, side_range=side_range)

        h, w = size = (24, 32)
        image = make_image(size)

        params = transform._get_params([image])

        assert len(params["padding"]) == 4
        assert 0 <= params["padding"][0] <= (side_range[1] - 1) * w
        assert 0 <= params["padding"][1] <= (side_range[1] - 1) * h
        assert 0 <= params["padding"][2] <= (side_range[1] - 1) * w
        assert 0 <= params["padding"][3] <= (side_range[1] - 1) * h


class TestRandomPerspective:
    def test_assertions(self):
        with pytest.raises(ValueError, match="Argument distortion_scale value should be between 0 and 1"):
            transforms.RandomPerspective(distortion_scale=-1.0)

        with pytest.raises(TypeError, match="Got inappropriate fill arg"):
            transforms.RandomPerspective(0.5, fill="abc")

    def test__get_params(self):
        dscale = 0.5
        transform = transforms.RandomPerspective(dscale)

        image = make_image((24, 32))

        params = transform._get_params([image])

        assert "coefficients" in params
        assert len(params["coefficients"]) == 8


class TestElasticTransform:
    def test_assertions(self):

        with pytest.raises(TypeError, match="alpha should be a number or a sequence of numbers"):
            transforms.ElasticTransform({})

        with pytest.raises(ValueError, match="alpha is a sequence its length should be 1 or 2"):
            transforms.ElasticTransform([1.0, 2.0, 3.0])

        with pytest.raises(TypeError, match="sigma should be a number or a sequence of numbers"):
            transforms.ElasticTransform(1.0, {})

        with pytest.raises(ValueError, match="sigma is a sequence its length should be 1 or 2"):
            transforms.ElasticTransform(1.0, [1.0, 2.0, 3.0])

        with pytest.raises(TypeError, match="Got inappropriate fill arg"):
            transforms.ElasticTransform(1.0, 2.0, fill="abc")

    def test__get_params(self):
        alpha = 2.0
        sigma = 3.0
        transform = transforms.ElasticTransform(alpha, sigma)

        h, w = size = (24, 32)
        image = make_image(size)

        params = transform._get_params([image])

        displacement = params["displacement"]
        assert displacement.shape == (1, h, w, 2)
        assert (-alpha / w <= displacement[0, ..., 0]).all() and (displacement[0, ..., 0] <= alpha / w).all()
        assert (-alpha / h <= displacement[0, ..., 1]).all() and (displacement[0, ..., 1] <= alpha / h).all()


class TestTransform:
    @pytest.mark.parametrize(
        "inpt_type",
        [torch.Tensor, PIL.Image.Image, tv_tensors.Image, np.ndarray, tv_tensors.BoundingBoxes, str, int],
    )
    def test_check_transformed_types(self, inpt_type, mocker):
        # This test ensures that we correctly handle which types to transform and which to bypass
        t = transforms.Transform()
        inpt = mocker.MagicMock(spec=inpt_type)

        if inpt_type in (np.ndarray, str, int):
            output = t(inpt)
            assert output is inpt
        else:
            with pytest.raises(NotImplementedError):
                t(inpt)


class TestToImage:
    @pytest.mark.parametrize(
        "inpt_type",
        [torch.Tensor, PIL.Image.Image, tv_tensors.Image, np.ndarray, tv_tensors.BoundingBoxes, str, int],
    )
    def test__transform(self, inpt_type, mocker):
        fn = mocker.patch(
            "torchvision.transforms.v2.functional.to_image",
            return_value=torch.rand(1, 3, 8, 8),
        )

        inpt = mocker.MagicMock(spec=inpt_type)
        transform = transforms.ToImage()
        transform(inpt)
        if inpt_type in (tv_tensors.BoundingBoxes, tv_tensors.Image, str, int):
            assert fn.call_count == 0
        else:
            fn.assert_called_once_with(inpt)


class TestToPILImage:
    @pytest.mark.parametrize(
        "inpt_type",
        [torch.Tensor, PIL.Image.Image, tv_tensors.Image, np.ndarray, tv_tensors.BoundingBoxes, str, int],
    )
    def test__transform(self, inpt_type, mocker):
        fn = mocker.patch("torchvision.transforms.v2.functional.to_pil_image")

        inpt = mocker.MagicMock(spec=inpt_type)
        transform = transforms.ToPILImage()
        transform(inpt)
        if inpt_type in (PIL.Image.Image, tv_tensors.BoundingBoxes, str, int):
            assert fn.call_count == 0
        else:
            fn.assert_called_once_with(inpt, mode=transform.mode)


class TestToTensor:
    @pytest.mark.parametrize(
        "inpt_type",
        [torch.Tensor, PIL.Image.Image, tv_tensors.Image, np.ndarray, tv_tensors.BoundingBoxes, str, int],
    )
    def test__transform(self, inpt_type, mocker):
        fn = mocker.patch("torchvision.transforms.functional.to_tensor")

        inpt = mocker.MagicMock(spec=inpt_type)
        with pytest.warns(UserWarning, match="deprecated and will be removed"):
            transform = transforms.ToTensor()
        transform(inpt)
        if inpt_type in (tv_tensors.Image, torch.Tensor, tv_tensors.BoundingBoxes, str, int):
            assert fn.call_count == 0
        else:
            fn.assert_called_once_with(inpt)


class TestContainers:
    @pytest.mark.parametrize("transform_cls", [transforms.Compose, transforms.RandomChoice, transforms.RandomOrder])
    def test_assertions(self, transform_cls):
        with pytest.raises(TypeError, match="Argument transforms should be a sequence of callables"):
            transform_cls(transforms.RandomCrop(28))

    @pytest.mark.parametrize("transform_cls", [transforms.Compose, transforms.RandomChoice, transforms.RandomOrder])
    @pytest.mark.parametrize(
        "trfms",
        [
            [transforms.Pad(2), transforms.RandomCrop(28)],
            [lambda x: 2.0 * x, transforms.Pad(2), transforms.RandomCrop(28)],
            [transforms.Pad(2), lambda x: 2.0 * x, transforms.RandomCrop(28)],
        ],
    )
    def test_ctor(self, transform_cls, trfms):
        c = transform_cls(trfms)
        inpt = torch.rand(1, 3, 32, 32)
        output = c(inpt)
        assert isinstance(output, torch.Tensor)
        assert output.ndim == 4
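

# A hedged sketch of how the three containers differ (the helper name is ours
# and pytest does not collect it): Compose applies all transforms in order,
# RandomOrder applies all of them in a random order, and RandomChoice picks a
# single one, optionally weighted by ``p``.
def _example_containers():
    candidates = [transforms.Pad(2), transforms.RandomCrop(28)]
    img = torch.rand(3, 32, 32)
    composed = transforms.Compose(candidates)(img)  # pad to 36x36, then crop to 28x28
    shuffled = transforms.RandomOrder(candidates)(img)
    picked = transforms.RandomChoice(candidates, p=[0.8, 0.2])(img)
    return composed, shuffled, picked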


class TestRandomChoice:
    def test_assertions(self):
        with pytest.raises(ValueError, match="Length of p doesn't match the number of transforms"):
            transforms.RandomChoice([transforms.Pad(2), transforms.RandomCrop(28)], p=[1])


class TestRandomIoUCrop:
    @pytest.mark.parametrize("device", cpu_and_cuda())
    @pytest.mark.parametrize("options", [[0.5, 0.9], [2.0]])
    def test__get_params(self, device, options):
        orig_h, orig_w = size = (24, 32)
        image = make_image(size)
        bboxes = tv_tensors.BoundingBoxes(
            torch.tensor([[1, 1, 10, 10], [20, 20, 23, 23], [1, 20, 10, 23], [20, 1, 23, 10]]),
            format="XYXY",
            canvas_size=size,
            device=device,
        )
        sample = [image, bboxes]

        transform = transforms.RandomIoUCrop(sampler_options=options)

        n_samples = 5
        for _ in range(n_samples):

            params = transform._get_params(sample)

            if options == [2.0]:
                assert len(params) == 0
                return

            assert len(params["is_within_crop_area"]) > 0
            assert params["is_within_crop_area"].dtype == torch.bool

            assert int(transform.min_scale * orig_h) <= params["height"] <= int(transform.max_scale * orig_h)
            assert int(transform.min_scale * orig_w) <= params["width"] <= int(transform.max_scale * orig_w)

            left, top = params["left"], params["top"]
            new_h, new_w = params["height"], params["width"]
            ious = box_iou(
                bboxes,
                torch.tensor([[left, top, left + new_w, top + new_h]], dtype=bboxes.dtype, device=bboxes.device),
            )
            assert ious.max() >= options[0] or ious.max() >= options[1], f"{ious} vs {options}"

    def test__transform_empty_params(self, mocker):
        transform = transforms.RandomIoUCrop(sampler_options=[2.0])
        image = tv_tensors.Image(torch.rand(1, 3, 4, 4))
        bboxes = tv_tensors.BoundingBoxes(torch.tensor([[1, 1, 2, 2]]), format="XYXY", canvas_size=(4, 4))
        label = torch.tensor([1])
        sample = [image, bboxes, label]
        # Let's mock transform._get_params to control the output:
        transform._get_params = mocker.MagicMock(return_value={})
        output = transform(sample)
        torch.testing.assert_close(output, sample)

    def test_forward_assertion(self):
        transform = transforms.RandomIoUCrop()
        with pytest.raises(
            TypeError,
            match="requires input sample to contain tensor or PIL images and bounding boxes",
        ):
            transform(torch.tensor(0))

    def test__transform(self, mocker):
        transform = transforms.RandomIoUCrop()

        size = (32, 24)
        image = make_image(size)
        bboxes = make_bounding_boxes(format="XYXY", canvas_size=size, batch_dims=(6,))
        masks = make_detection_mask(size, num_objects=6)

        sample = [image, bboxes, masks]

        is_within_crop_area = torch.tensor([0, 1, 0, 1, 0, 1], dtype=torch.bool)

        params = dict(top=1, left=2, height=12, width=12, is_within_crop_area=is_within_crop_area)
        transform._get_params = mocker.MagicMock(return_value=params)
        output = transform(sample)

        # check number of bboxes vs number of labels:
        output_bboxes = output[1]
        assert isinstance(output_bboxes, tv_tensors.BoundingBoxes)
        assert (output_bboxes[~is_within_crop_area] == 0).all()

        output_masks = output[2]
        assert isinstance(output_masks, tv_tensors.Mask)
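

# A hedged sketch of how RandomIoUCrop is meant to be combined with
# SanitizeBoundingBoxes in a real pipeline (the helper name is ours and pytest
# does not collect it): the crop marks boxes falling outside the sampled crop
# as degenerate, and the sanitizer then drops them together with their labels.
def _example_iou_crop_with_sanitize():
    pipeline = transforms.Compose([transforms.RandomIoUCrop(), transforms.SanitizeBoundingBoxes()])
    sample = {
        "image": make_image((100, 100)),
        "boxes": make_bounding_boxes(format="XYXY", canvas_size=(100, 100), batch_dims=(4,)),
        "labels": torch.arange(4),
    }
    return pipeline(sample)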


class TestScaleJitter:
    def test__get_params(self):
        canvas_size = (24, 32)
        target_size = (16, 12)
        scale_range = (0.5, 1.5)

        transform = transforms.ScaleJitter(target_size=target_size, scale_range=scale_range)

        sample = make_image(canvas_size)

        n_samples = 5
        for _ in range(n_samples):

            params = transform._get_params([sample])

            assert "size" in params
            size = params["size"]

            assert isinstance(size, tuple) and len(size) == 2
            height, width = size

            r_min = min(target_size[1] / canvas_size[0], target_size[0] / canvas_size[1]) * scale_range[0]
            r_max = min(target_size[1] / canvas_size[0], target_size[0] / canvas_size[1]) * scale_range[1]

            assert int(canvas_size[0] * r_min) <= height <= int(canvas_size[0] * r_max)
            assert int(canvas_size[1] * r_min) <= width <= int(canvas_size[1] * r_max)


class TestRandomShortestSize:
    @pytest.mark.parametrize("min_size,max_size", [([5, 9], 20), ([5, 9], None)])
    def test__get_params(self, min_size, max_size):
        canvas_size = (3, 10)

        transform = transforms.RandomShortestSize(min_size=min_size, max_size=max_size, antialias=True)

        sample = make_image(canvas_size)
        params = transform._get_params([sample])

        assert "size" in params
        size = params["size"]

        assert isinstance(size, tuple) and len(size) == 2

        longer = max(size)
        shorter = min(size)
        if max_size is not None:
            assert longer <= max_size
            assert shorter <= max_size
        else:
            assert shorter in min_size


class TestLinearTransformation:
    def test_assertions(self):
        with pytest.raises(ValueError, match="transformation_matrix should be square"):
            transforms.LinearTransformation(torch.rand(2, 3), torch.rand(5))

        with pytest.raises(ValueError, match="mean_vector should have the same length"):
            transforms.LinearTransformation(torch.rand(3, 3), torch.rand(5))

    @pytest.mark.parametrize(
        "inpt",
        [
            122 * torch.ones(1, 3, 8, 8),
            122.0 * torch.ones(1, 3, 8, 8),
            tv_tensors.Image(122 * torch.ones(1, 3, 8, 8)),
            PIL.Image.new("RGB", (8, 8), (122, 122, 122)),
        ],
    )
    def test__transform(self, inpt):

        v = 121 * torch.ones(3 * 8 * 8)
        m = torch.ones(3 * 8 * 8, 3 * 8 * 8)
        transform = transforms.LinearTransformation(m, v)

        if isinstance(inpt, PIL.Image.Image):
            with pytest.raises(TypeError, match="does not support PIL images"):
                transform(inpt)
        else:
            output = transform(inpt)
            assert isinstance(output, torch.Tensor)
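            # (inpt - mean_vector) is all ones over the 3 * 8 * 8 = 192 flattened
            # elements, so the all-ones matrix maps every output entry to 192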
            assert output.unique() == 3 * 8 * 8
            assert output.dtype == inpt.dtype


class TestRandomResize:
    def test__get_params(self):
        min_size = 3
        max_size = 6

        transform = transforms.RandomResize(min_size=min_size, max_size=max_size, antialias=True)

        for _ in range(10):
            params = transform._get_params([])

            assert isinstance(params["size"], list) and len(params["size"]) == 1
            size = params["size"][0]

            assert min_size <= size < max_size


class TestUniformTemporalSubsample:
    @pytest.mark.parametrize(
        "inpt",
        [
            torch.zeros(10, 3, 8, 8),
            torch.zeros(1, 10, 3, 8, 8),
            tv_tensors.Video(torch.zeros(1, 10, 3, 8, 8)),
        ],
    )
    def test__transform(self, inpt):
        num_samples = 5
        transform = transforms.UniformTemporalSubsample(num_samples)

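        # num_samples frames are picked at evenly spaced indices along the temporal (dim=-4) axis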
        output = transform(inpt)
        assert type(output) is type(inpt)
        assert output.shape[-4] == num_samples
        assert output.dtype == inpt.dtype


# TODO: remove this test in 0.17 when the default of antialias changes to True
def test_antialias_warning():
    pil_img = PIL.Image.new("RGB", size=(10, 10), color=127)
    tensor_img = torch.randint(0, 256, size=(3, 10, 10), dtype=torch.uint8)
    tensor_video = torch.randint(0, 256, size=(2, 3, 10, 10), dtype=torch.uint8)

    match = "The default value of the antialias parameter"
    with pytest.warns(UserWarning, match=match):
        transforms.RandomResizedCrop((20, 20))(tensor_img)
    with pytest.warns(UserWarning, match=match):
        transforms.ScaleJitter((20, 20))(tensor_img)
    with pytest.warns(UserWarning, match=match):
        transforms.RandomShortestSize((20, 20))(tensor_img)
    with pytest.warns(UserWarning, match=match):
        transforms.RandomResize(10, 20)(tensor_img)

    with pytest.warns(UserWarning, match=match):
        F.resized_crop(tv_tensors.Image(tensor_img), 0, 0, 10, 10, (20, 20))

    with pytest.warns(UserWarning, match=match):
        F.resize(tv_tensors.Video(tensor_video), (20, 20))
    with pytest.warns(UserWarning, match=match):
        F.resized_crop(tv_tensors.Video(tensor_video), 0, 0, 10, 10, (20, 20))

    with warnings.catch_warnings():
        warnings.simplefilter("error")
        transforms.RandomResizedCrop((20, 20))(pil_img)
        transforms.ScaleJitter((20, 20))(pil_img)
        transforms.RandomShortestSize((20, 20))(pil_img)
        transforms.RandomResize(10, 20)(pil_img)

        transforms.RandomResizedCrop((20, 20), antialias=True)(tensor_img)
        transforms.ScaleJitter((20, 20), antialias=True)(tensor_img)
        transforms.RandomShortestSize((20, 20), antialias=True)(tensor_img)
        transforms.RandomResize(10, 20, antialias=True)(tensor_img)

        F.resized_crop(tv_tensors.Image(tensor_img), 0, 0, 10, 10, (20, 20), antialias=True)
        F.resized_crop(tv_tensors.Video(tensor_video), 0, 0, 10, 10, (20, 20), antialias=True)


@pytest.mark.parametrize("image_type", (PIL.Image, torch.Tensor, tv_tensors.Image))
@pytest.mark.parametrize("label_type", (torch.Tensor, int))
@pytest.mark.parametrize("dataset_return_type", (dict, tuple))
@pytest.mark.parametrize("to_tensor", (transforms.ToTensor, transforms.ToImage))
def test_classif_preset(image_type, label_type, dataset_return_type, to_tensor):

    image = tv_tensors.Image(torch.randint(0, 256, size=(1, 3, 250, 250), dtype=torch.uint8))
    if image_type is PIL.Image:
        image = to_pil_image(image[0])
    elif image_type is torch.Tensor:
        image = image.as_subclass(torch.Tensor)
        assert is_pure_tensor(image)

    label = 1 if label_type is int else torch.tensor([1])

    if dataset_return_type is dict:
        sample = {
            "image": image,
            "label": label,
        }
    else:
        sample = image, label

    if to_tensor is transforms.ToTensor:
        with pytest.warns(UserWarning, match="deprecated and will be removed"):
            to_tensor = to_tensor()
    else:
        to_tensor = to_tensor()

    t = transforms.Compose(
        [
            transforms.RandomResizedCrop((224, 224), antialias=True),
            transforms.RandomHorizontalFlip(p=1),
            transforms.RandAugment(),
            transforms.TrivialAugmentWide(),
            transforms.AugMix(),
            transforms.AutoAugment(),
            to_tensor,
            # TODO: ConvertImageDtype is a pass-through on PIL images, is that
            # intended?  This results in a failure if we convert to tensor after
            # it, because the image would still be uint8, which makes Normalize
            # fail.
            transforms.ConvertImageDtype(torch.float),
            transforms.Normalize(mean=[0, 0, 0], std=[1, 1, 1]),
            transforms.RandomErasing(p=1),
        ]
    )

    out = t(sample)

    assert type(out) == type(sample)

    if dataset_return_type is tuple:
        out_image, out_label = out
    else:
        assert out.keys() == sample.keys()
        out_image, out_label = out.values()

    assert out_image.shape[-2:] == (224, 224)
    assert out_label == label


@pytest.mark.parametrize("image_type", (PIL.Image, torch.Tensor, tv_tensors.Image))
@pytest.mark.parametrize("data_augmentation", ("hflip", "lsj", "multiscale", "ssd", "ssdlite"))
@pytest.mark.parametrize("to_tensor", (transforms.ToTensor, transforms.ToImage))
@pytest.mark.parametrize("sanitize", (True, False))
def test_detection_preset(image_type, data_augmentation, to_tensor, sanitize):
    torch.manual_seed(0)

    if to_tensor is transforms.ToTensor:
        with pytest.warns(UserWarning, match="deprecated and will be removed"):
            to_tensor = to_tensor()
    else:
        to_tensor = to_tensor()

    if data_augmentation == "hflip":
        t = [
            transforms.RandomHorizontalFlip(p=1),
            to_tensor,
            transforms.ConvertImageDtype(torch.float),
        ]
    elif data_augmentation == "lsj":
        t = [
            transforms.ScaleJitter(target_size=(1024, 1024), antialias=True),
            # Note: replaced FixedSizeCrop with RandomCrop, because we're
            # leaving FixedSizeCrop in prototype for now, and it expects Label
            # classes which we won't release yet.
            # transforms.FixedSizeCrop(
            #     size=(1024, 1024), fill=defaultdict(lambda: (123.0, 117.0, 104.0), {tv_tensors.Mask: 0})
            # ),
            transforms.RandomCrop((1024, 1024), pad_if_needed=True),
            transforms.RandomHorizontalFlip(p=1),
            to_tensor,
            transforms.ConvertImageDtype(torch.float),
        ]
    elif data_augmentation == "multiscale":
        t = [
            transforms.RandomShortestSize(
                min_size=(480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800), max_size=1333, antialias=True
            ),
            transforms.RandomHorizontalFlip(p=1),
            to_tensor,
            transforms.ConvertImageDtype(torch.float),
        ]
    elif data_augmentation == "ssd":
        t = [
            transforms.RandomPhotometricDistort(p=1),
            transforms.RandomZoomOut(fill={"others": (123.0, 117.0, 104.0), tv_tensors.Mask: 0}, p=1),
            transforms.RandomIoUCrop(),
            transforms.RandomHorizontalFlip(p=1),
            to_tensor,
            transforms.ConvertImageDtype(torch.float),
        ]
    elif data_augmentation == "ssdlite":
        t = [
            transforms.RandomIoUCrop(),
            transforms.RandomHorizontalFlip(p=1),
            to_tensor,
            transforms.ConvertImageDtype(torch.float),
        ]
    if sanitize:
        t += [transforms.SanitizeBoundingBoxes()]
    t = transforms.Compose(t)

    num_boxes = 5
    H = W = 250

    image = tv_tensors.Image(torch.randint(0, 256, size=(1, 3, H, W), dtype=torch.uint8))
    if image_type is PIL.Image:
        image = to_pil_image(image[0])
    elif image_type is torch.Tensor:
        image = image.as_subclass(torch.Tensor)
        assert is_pure_tensor(image)

    label = torch.randint(0, 10, size=(num_boxes,))

    boxes = torch.randint(0, min(H, W) // 2, size=(num_boxes, 4))
    boxes[:, 2:] += boxes[:, :2]
    boxes = boxes.clamp(min=0, max=min(H, W))
    boxes = tv_tensors.BoundingBoxes(boxes, format="XYXY", canvas_size=(H, W))

    masks = tv_tensors.Mask(torch.randint(0, 2, size=(num_boxes, H, W), dtype=torch.uint8))

    sample = {
        "image": image,
        "label": label,
        "boxes": boxes,
        "masks": masks,
    }

    out = t(sample)

    if isinstance(to_tensor, transforms.ToTensor) and image_type is not tv_tensors.Image:
        assert is_pure_tensor(out["image"])
    else:
        assert isinstance(out["image"], tv_tensors.Image)
    assert isinstance(out["label"], type(sample["label"]))

    num_boxes_expected = {
        # ssd and ssdlite contain RandomIoUCrop, which may "remove" some boxes. It
        # doesn't remove them strictly speaking, it just marks some boxes as
        # degenerate and those boxes will be later removed by
        # SanitizeBoundingBoxes(), which we add to the pipelines if the sanitize
        # param is True.
        # Note that the values below are probably specific to the random seed
        # set above (which is fine).
        (True, "ssd"): 5,
        (True, "ssdlite"): 4,
    }.get((sanitize, data_augmentation), num_boxes)

    assert out["boxes"].shape[0] == out["masks"].shape[0] == out["label"].shape[0] == num_boxes_expected


@pytest.mark.parametrize("min_size", (1, 10))
@pytest.mark.parametrize("labels_getter", ("default", lambda inputs: inputs["labels"], None, lambda inputs: None))
@pytest.mark.parametrize("sample_type", (tuple, dict))
def test_sanitize_bounding_boxes(min_size, labels_getter, sample_type):

    if sample_type is tuple and not isinstance(labels_getter, str):
        # The "lambda inputs: inputs["labels"]" labels_getter used in this test
        # doesn't work if the input is a tuple.
        return

    H, W = 256, 128

    boxes_and_validity = [
        ([0, 1, 10, 1], False),  # Y1 == Y2
        ([0, 1, 0, 20], False),  # X1 == X2
        ([0, 0, min_size - 1, 10], False),  # W < min_size
        ([0, 0, 10, min_size - 1], False),  # H < min_size
        ([0, 0, 10, H + 1], False),  # Y2 > H
        ([0, 0, W + 1, 10], False),  # X2 > W
        ([-1, 1, 10, 20], False),  # any < 0
        ([0, 0, -1, 20], False),  # any < 0
        ([0, 0, -10, -1], False),  # any < 0
        ([0, 0, min_size, 10], True),  # W == min_size
        ([0, 0, 10, min_size], True),  # H == min_size
        ([0, 0, W, H], True),  # TODO: Is that actually OK?? Should it be -1?
        ([1, 1, 30, 20], True),
        ([0, 0, 10, 10], True),
        ([1, 1, 30, 20], True),
    ]

    random.shuffle(boxes_and_validity)  # For test robustness: mix order of wrong and correct cases
    boxes, is_valid_mask = zip(*boxes_and_validity)
    valid_indices = [i for (i, is_valid) in enumerate(is_valid_mask) if is_valid]

    boxes = torch.tensor(boxes)
    labels = torch.arange(boxes.shape[0])

    boxes = tv_tensors.BoundingBoxes(
        boxes,
        format=tv_tensors.BoundingBoxFormat.XYXY,
        canvas_size=(H, W),
    )

    masks = tv_tensors.Mask(torch.randint(0, 2, size=(boxes.shape[0], H, W)))
    whatever = torch.rand(10)
    input_img = torch.randint(0, 256, size=(1, 3, H, W), dtype=torch.uint8)
    sample = {
        "image": input_img,
        "labels": labels,
        "boxes": boxes,
        "whatever": whatever,
        "None": None,
        "masks": masks,
    }

    if sample_type is tuple:
        img = sample.pop("image")
        sample = (img, sample)

    out = transforms.SanitizeBoundingBoxes(min_size=min_size, labels_getter=labels_getter)(sample)

    if sample_type is tuple:
        out_image = out[0]
        out_labels = out[1]["labels"]
        out_boxes = out[1]["boxes"]
        out_masks = out[1]["masks"]
        out_whatever = out[1]["whatever"]
    else:
        out_image = out["image"]
        out_labels = out["labels"]
        out_boxes = out["boxes"]
        out_masks = out["masks"]
        out_whatever = out["whatever"]

    assert out_image is input_img
    assert out_whatever is whatever

    assert isinstance(out_boxes, tv_tensors.BoundingBoxes)
    assert isinstance(out_masks, tv_tensors.Mask)

    if labels_getter is None or (callable(labels_getter) and labels_getter({"labels": "blah"}) is None):
        assert out_labels is labels
    else:
        assert isinstance(out_labels, torch.Tensor)
        assert out_boxes.shape[0] == out_labels.shape[0] == out_masks.shape[0]
        # This works because we conveniently set labels to arange(num_boxes)
        assert out_labels.tolist() == valid_indices


def test_sanitize_bounding_boxes_no_label():
    # Non-regression test for https://github.com/pytorch/vision/issues/7878

    img = make_image()
    boxes = make_bounding_boxes()

    with pytest.raises(ValueError, match="or a two-tuple whose second item is a dict"):
        transforms.SanitizeBoundingBoxes()(img, boxes)

    out_img, out_boxes = transforms.SanitizeBoundingBoxes(labels_getter=None)(img, boxes)
    assert isinstance(out_img, tv_tensors.Image)
    assert isinstance(out_boxes, tv_tensors.BoundingBoxes)


def test_sanitize_bounding_boxes_errors():

    good_bbox = tv_tensors.BoundingBoxes(
        [[0, 0, 10, 10]],
        format=tv_tensors.BoundingBoxFormat.XYXY,
        canvas_size=(20, 20),
    )

    with pytest.raises(ValueError, match="min_size must be >= 1"):
        transforms.SanitizeBoundingBoxes(min_size=0)
    with pytest.raises(ValueError, match="labels_getter should either be 'default'"):
        transforms.SanitizeBoundingBoxes(labels_getter=12)

    with pytest.raises(ValueError, match="Could not infer where the labels are"):
        bad_labels_key = {"bbox": good_bbox, "BAD_KEY": torch.arange(good_bbox.shape[0])}
        transforms.SanitizeBoundingBoxes()(bad_labels_key)

    with pytest.raises(ValueError, match="must be a tensor"):
        not_a_tensor = {"bbox": good_bbox, "labels": torch.arange(good_bbox.shape[0]).tolist()}
        transforms.SanitizeBoundingBoxes()(not_a_tensor)

    with pytest.raises(ValueError, match="Number of boxes"):
        different_sizes = {"bbox": good_bbox, "labels": torch.arange(good_bbox.shape[0] + 3)}
        transforms.SanitizeBoundingBoxes()(different_sizes)


class TestLambda:
    inputs = pytest.mark.parametrize("input", [object(), torch.empty(()), np.empty(()), "string", 1, 0.0])

    @inputs
    def test_default(self, input):
        was_applied = False

        def was_applied_fn(input):
            nonlocal was_applied
            was_applied = True
            return input

        transform = transforms.Lambda(was_applied_fn)

        transform(input)

        assert was_applied

    @inputs
    def test_with_types(self, input):
        was_applied = False

        def was_applied_fn(input):
            nonlocal was_applied
            was_applied = True
            return input

        types = (torch.Tensor, np.ndarray)
        transform = transforms.Lambda(was_applied_fn, *types)

        transform(input)

        assert was_applied is isinstance(input, types)