import colorsys
import itertools
import math
import os
from functools import partial
from typing import Dict, List, Sequence, Tuple

import numpy as np
import pytest
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
import torchvision.transforms.functional_pil as F_pil
import torchvision.transforms.functional_tensor as F_t
from common_utils import (
    cpu_and_gpu,
    needs_cuda,
    _create_data,
    _create_data_batch,
    _assert_equal_tensor_to_pil,
    _assert_approx_equal_tensor_to_pil,
    _test_fn_on_batch,
    assert_equal,
)
from torchvision.transforms import InterpolationMode


NEAREST, BILINEAR, BICUBIC = InterpolationMode.NEAREST, InterpolationMode.BILINEAR, InterpolationMode.BICUBIC


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("fn", [F.get_image_size, F.get_image_num_channels])
def test_image_sizes(device, fn):
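    # The tensor kernel, its scripted version, and the PIL kernel should all report the
    # same image size / number of channels; a batch reports the per-image value.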
    script_F = torch.jit.script(fn)

    img_tensor, pil_img = _create_data(16, 18, 3, device=device)
    value_img = fn(img_tensor)
    value_pil_img = fn(pil_img)
    assert value_img == value_pil_img

    value_img_script = script_F(img_tensor)
    assert value_img == value_img_script

    batch_tensors = _create_data_batch(16, 18, 3, num_samples=4, device=device)
    value_img_batch = fn(batch_tensors)
    assert value_img == value_img_batch


@needs_cuda
def test_scale_channel():
    """Make sure that _scale_channel gives the same results on CPU and GPU,
    as histc or bincount is used depending on the device.
    """
    # TODO: when https://github.com/pytorch/pytorch/issues/53194 is fixed,
    # only use bincount and remove this test.
    size = (1_000,)
    img_chan = torch.randint(0, 256, size=size).to("cpu")
    scaled_cpu = F_t._scale_channel(img_chan)
    scaled_cuda = F_t._scale_channel(img_chan.to("cuda"))
    assert_equal(scaled_cpu, scaled_cuda.to("cpu"))


class TestRotate:

    ALL_DTYPES = [None, torch.float32, torch.float64, torch.float16]
    scripted_rotate = torch.jit.script(F.rotate)
    IMG_W = 26

    @pytest.mark.parametrize("device", cpu_and_gpu())
    @pytest.mark.parametrize("height, width", [(26, IMG_W), (32, IMG_W)])
    @pytest.mark.parametrize(
        "center",
        [
            None,
            (int(IMG_W * 0.3), int(IMG_W * 0.4)),
            [int(IMG_W * 0.5), int(IMG_W * 0.6)],
        ],
    )
    @pytest.mark.parametrize("dt", ALL_DTYPES)
    @pytest.mark.parametrize("angle", range(-180, 180, 17))
    @pytest.mark.parametrize("expand", [True, False])
    @pytest.mark.parametrize(
        "fill",
        [
            None,
            [0, 0, 0],
            (1, 2, 3),
            [255, 255, 255],
            [
                1,
            ],
            (2.0,),
        ],
    )
    @pytest.mark.parametrize("fn", [F.rotate, scripted_rotate])
    def test_rotate(self, device, height, width, center, dt, angle, expand, fill, fn):
        tensor, pil_img = _create_data(height, width, device=device)

        if dt == torch.float16 and torch.device(device).type == "cpu":
            # skip float16 on CPU case
            return

        if dt is not None:
            tensor = tensor.to(dtype=dt)

        f_pil = int(fill[0]) if fill is not None and len(fill) == 1 else fill
        out_pil_img = F.rotate(pil_img, angle=angle, interpolation=NEAREST, expand=expand, center=center, fill=f_pil)
        out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1)))

        out_tensor = fn(tensor, angle=angle, interpolation=NEAREST, expand=expand, center=center, fill=fill).cpu()

        if out_tensor.dtype != torch.uint8:
            out_tensor = out_tensor.to(torch.uint8)

        assert out_tensor.shape == out_pil_tensor.shape, (
            f"{(height, width, NEAREST, dt, angle, expand, center)}: " f"{out_tensor.shape} vs {out_pil_tensor.shape}"
        )

        num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0
        ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2]
        # Tolerance : less than 3% of different pixels
        assert ratio_diff_pixels < 0.03, (
            f"{(height, width, NEAREST, dt, angle, expand, center, fill)}: "
            f"{ratio_diff_pixels}\n{out_tensor[0, :7, :7]} vs \n"
            f"{out_pil_tensor[0, :7, :7]}"
        )

    @pytest.mark.parametrize("device", cpu_and_gpu())
    @pytest.mark.parametrize("dt", ALL_DTYPES)
    def test_rotate_batch(self, device, dt):
        if dt == torch.float16 and device == "cpu":
            # skip float16 on CPU case
            return

        batch_tensors = _create_data_batch(26, 36, num_samples=4, device=device)
        if dt is not None:
            batch_tensors = batch_tensors.to(dtype=dt)

        center = (20, 22)
        _test_fn_on_batch(batch_tensors, F.rotate, angle=32, interpolation=NEAREST, expand=True, center=center)

    def test_rotate_deprecation_resample(self):
        tensor, _ = _create_data(26, 26)
        # assert deprecation warning and non-BC
        with pytest.warns(UserWarning, match=r"Argument resample is deprecated and will be removed"):
            res1 = F.rotate(tensor, 45, resample=2)
            res2 = F.rotate(tensor, 45, interpolation=BILINEAR)
            assert_equal(res1, res2)

    def test_rotate_interpolation_type(self):
        tensor, _ = _create_data(26, 26)
        # assert changed type warning
        with pytest.warns(UserWarning, match=r"Argument interpolation should be of type InterpolationMode"):
            res1 = F.rotate(tensor, 45, interpolation=2)
            res2 = F.rotate(tensor, 45, interpolation=BILINEAR)
            assert_equal(res1, res2)


class TestAffine:

    ALL_DTYPES = [None, torch.float32, torch.float64, torch.float16]
    scripted_affine = torch.jit.script(F.affine)

    @pytest.mark.parametrize("device", cpu_and_gpu())
    @pytest.mark.parametrize("height, width", [(26, 26), (32, 26)])
    @pytest.mark.parametrize("dt", ALL_DTYPES)
    def test_identity_map(self, device, height, width, dt):
        # Tests on square and rectangular images
        tensor, pil_img = _create_data(height, width, device=device)

        if dt == torch.float16 and device == "cpu":
            # skip float16 on CPU case
            return

        if dt is not None:
            tensor = tensor.to(dtype=dt)

        # 1) identity map
        out_tensor = F.affine(tensor, angle=0, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST)

        assert_equal(tensor, out_tensor, msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5]))
        out_tensor = self.scripted_affine(
            tensor, angle=0, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST
        )
        assert_equal(tensor, out_tensor, msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5]))

    @pytest.mark.parametrize("device", cpu_and_gpu())
    @pytest.mark.parametrize("height, width", [(26, 26)])
    @pytest.mark.parametrize("dt", ALL_DTYPES)
    @pytest.mark.parametrize(
        "angle, config",
        [
            (90, {"k": 1, "dims": (-1, -2)}),
            (45, None),
            (30, None),
            (-30, None),
            (-45, None),
            (-90, {"k": -1, "dims": (-1, -2)}),
            (180, {"k": 2, "dims": (-1, -2)}),
        ],
    )
    @pytest.mark.parametrize("fn", [F.affine, scripted_affine])
    def test_square_rotations(self, device, height, width, dt, angle, config, fn):
        # 2) Test rotation
        tensor, pil_img = _create_data(height, width, device=device)

        if dt == torch.float16 and device == "cpu":
            # skip float16 on CPU case
            return

        if dt is not None:
            tensor = tensor.to(dtype=dt)

        out_pil_img = F.affine(
            pil_img, angle=angle, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST
        )
        out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1))).to(device)

        out_tensor = fn(tensor, angle=angle, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST)
        if config is not None:
            assert_equal(torch.rot90(tensor, **config), out_tensor)

        if out_tensor.dtype != torch.uint8:
            out_tensor = out_tensor.to(torch.uint8)

        num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0
        ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2]
        # Tolerance : less than 6% of different pixels
        assert ratio_diff_pixels < 0.06, "{}\n{} vs \n{}".format(
            ratio_diff_pixels, out_tensor[0, :7, :7], out_pil_tensor[0, :7, :7]
        )

    @pytest.mark.parametrize("device", cpu_and_gpu())
    @pytest.mark.parametrize("height, width", [(32, 26)])
    @pytest.mark.parametrize("dt", ALL_DTYPES)
    @pytest.mark.parametrize("angle", [90, 45, 15, -30, -60, -120])
    @pytest.mark.parametrize("fn", [F.affine, scripted_affine])
    def test_rect_rotations(self, device, height, width, dt, angle, fn):
        # Tests on rectangular images
        tensor, pil_img = _create_data(height, width, device=device)

        if dt == torch.float16 and device == "cpu":
            # skip float16 on CPU case
            return

        if dt is not None:
            tensor = tensor.to(dtype=dt)

        out_pil_img = F.affine(
            pil_img, angle=angle, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST
        )
        out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1)))

        out_tensor = fn(tensor, angle=angle, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST).cpu()

        if out_tensor.dtype != torch.uint8:
            out_tensor = out_tensor.to(torch.uint8)

        num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0
        ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2]
        # Tolerance : less than 3% of different pixels
        assert ratio_diff_pixels < 0.03, "{}: {}\n{} vs \n{}".format(
            angle, ratio_diff_pixels, out_tensor[0, :7, :7], out_pil_tensor[0, :7, :7]
        )

    @pytest.mark.parametrize("device", cpu_and_gpu())
    @pytest.mark.parametrize("height, width", [(26, 26), (32, 26)])
    @pytest.mark.parametrize("dt", ALL_DTYPES)
    @pytest.mark.parametrize("t", [[10, 12], (-12, -13)])
    @pytest.mark.parametrize("fn", [F.affine, scripted_affine])
    def test_translations(self, device, height, width, dt, t, fn):
        # 3) Test translation
        tensor, pil_img = _create_data(height, width, device=device)

        if dt == torch.float16 and device == "cpu":
            # skip float16 on CPU case
            return

        if dt is not None:
            tensor = tensor.to(dtype=dt)

        out_pil_img = F.affine(pil_img, angle=0, translate=t, scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST)

        out_tensor = fn(tensor, angle=0, translate=t, scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST)

        if out_tensor.dtype != torch.uint8:
            out_tensor = out_tensor.to(torch.uint8)

        _assert_equal_tensor_to_pil(out_tensor, out_pil_img)

    @pytest.mark.parametrize("device", cpu_and_gpu())
    @pytest.mark.parametrize("height, width", [(26, 26), (32, 26)])
    @pytest.mark.parametrize("dt", ALL_DTYPES)
    @pytest.mark.parametrize(
        "a, t, s, sh, f",
        [
            (45.5, [5, 6], 1.0, [0.0, 0.0], None),
            (33, (5, -4), 1.0, [0.0, 0.0], [0, 0, 0]),
            (45, [-5, 4], 1.2, [0.0, 0.0], (1, 2, 3)),
            (33, (-4, -8), 2.0, [0.0, 0.0], [255, 255, 255]),
            (
                85,
                (10, -10),
                0.7,
                [0.0, 0.0],
                [
                    1,
                ],
            ),
            (
                0,
                [0, 0],
                1.0,
                [
                    35.0,
                ],
                (2.0,),
            ),
            (-25, [0, 0], 1.2, [0.0, 15.0], None),
            (-45, [-10, 0], 0.7, [2.0, 5.0], None),
            (-45, [-10, -10], 1.2, [4.0, 5.0], None),
            (-90, [0, 0], 1.0, [0.0, 0.0], None),
        ],
    )
    @pytest.mark.parametrize("fn", [F.affine, scripted_affine])
    def test_all_ops(self, device, height, width, dt, a, t, s, sh, f, fn):
        # 4) Test rotation + translation + scale + shear
        tensor, pil_img = _create_data(height, width, device=device)

        if dt == torch.float16 and device == "cpu":
            # skip float16 on CPU case
            return

        if dt is not None:
            tensor = tensor.to(dtype=dt)

        f_pil = int(f[0]) if f is not None and len(f) == 1 else f
        out_pil_img = F.affine(pil_img, angle=a, translate=t, scale=s, shear=sh, interpolation=NEAREST, fill=f_pil)
        out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1)))

        out_tensor = fn(tensor, angle=a, translate=t, scale=s, shear=sh, interpolation=NEAREST, fill=f).cpu()

        if out_tensor.dtype != torch.uint8:
            out_tensor = out_tensor.to(torch.uint8)

        num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0
        ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2]
        # Tolerance : less than 5% (cpu), 6% (cuda) of different pixels
        tol = 0.06 if device == "cuda" else 0.05
        assert ratio_diff_pixels < tol, "{}: {}\n{} vs \n{}".format(
            (NEAREST, a, t, s, sh, f), ratio_diff_pixels, out_tensor[0, :7, :7], out_pil_tensor[0, :7, :7]
        )

    @pytest.mark.parametrize("device", cpu_and_gpu())
    @pytest.mark.parametrize("dt", ALL_DTYPES)
    def test_batches(self, device, dt):
        if dt == torch.float16 and device == "cpu":
            # skip float16 on CPU case
            return

        batch_tensors = _create_data_batch(26, 36, num_samples=4, device=device)
        if dt is not None:
            batch_tensors = batch_tensors.to(dtype=dt)

        _test_fn_on_batch(batch_tensors, F.affine, angle=-43, translate=[-3, 4], scale=1.2, shear=[4.0, 5.0])

    @pytest.mark.parametrize("device", cpu_and_gpu())
    def test_warnings(self, device):
        tensor, pil_img = _create_data(26, 26, device=device)

        # assert deprecation warning and non-BC
        with pytest.warns(UserWarning, match=r"Argument resample is deprecated and will be removed"):
            res1 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], resample=2)
            res2 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=BILINEAR)
            assert_equal(res1, res2)

        # assert changed type warning
        with pytest.warns(UserWarning, match=r"Argument interpolation should be of type InterpolationMode"):
            res1 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=2)
            res2 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=BILINEAR)
            assert_equal(res1, res2)

        with pytest.warns(UserWarning, match=r"Argument fillcolor is deprecated and will be removed"):
            res1 = F.affine(pil_img, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], fillcolor=10)
            res2 = F.affine(pil_img, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], fill=10)
            # we convert the PIL images to numpy as assert_equal doesn't work on PIL images.
            assert_equal(np.asarray(res1), np.asarray(res2))


def _get_data_dims_and_points_for_perspective():
    # Ideally we would parametrize independently over data dims and points, but
    # we want to test on some points that also depend on the data dims.
    # Pytest doesn't support covariant parametrization, so we do it somewhat manually here.

    data_dims = [(26, 34), (26, 26)]
    points = [
        [[[0, 0], [33, 0], [33, 25], [0, 25]], [[3, 2], [32, 3], [30, 24], [2, 25]]],
        [[[3, 2], [32, 3], [30, 24], [2, 25]], [[0, 0], [33, 0], [33, 25], [0, 25]]],
        [[[3, 2], [32, 3], [30, 24], [2, 25]], [[5, 5], [30, 3], [33, 19], [4, 25]]],
    ]

    dims_and_points = list(itertools.product(data_dims, points))

    # Up to here, we could just have used 2 @parametrize calls.
    # Below is the covariant part, as the points depend on the data dims.

    n = 10
    for dim in data_dims:
        dims_and_points += [(dim, T.RandomPerspective.get_params(dim[1], dim[0], i / n)) for i in range(n)]
    return dims_and_points


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("dims_and_points", _get_data_dims_and_points_for_perspective())
@pytest.mark.parametrize("dt", [None, torch.float32, torch.float64, torch.float16])
@pytest.mark.parametrize(
    "fill",
    (
        None,
        [0, 0, 0],
        [1, 2, 3],
        [255, 255, 255],
        [
            1,
        ],
        (2.0,),
    ),
)
@pytest.mark.parametrize("fn", [F.perspective, torch.jit.script(F.perspective)])
def test_perspective_pil_vs_tensor(device, dims_and_points, dt, fill, fn):

    if dt == torch.float16 and device == "cpu":
        # skip float16 on CPU case
        return

    data_dims, (spoints, epoints) = dims_and_points

    tensor, pil_img = _create_data(*data_dims, device=device)
    if dt is not None:
        tensor = tensor.to(dtype=dt)

    interpolation = NEAREST
    fill_pil = int(fill[0]) if fill is not None and len(fill) == 1 else fill
    out_pil_img = F.perspective(
        pil_img, startpoints=spoints, endpoints=epoints, interpolation=interpolation, fill=fill_pil
    )
    out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1)))
    out_tensor = fn(tensor, startpoints=spoints, endpoints=epoints, interpolation=interpolation, fill=fill).cpu()

    if out_tensor.dtype != torch.uint8:
        out_tensor = out_tensor.to(torch.uint8)

    num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0
    ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2]
    # Tolerance : less than 5% of different pixels
    assert ratio_diff_pixels < 0.05


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("dims_and_points", _get_data_dims_and_points_for_perspective())
@pytest.mark.parametrize("dt", [None, torch.float32, torch.float64, torch.float16])
def test_perspective_batch(device, dims_and_points, dt):

    if dt == torch.float16 and device == "cpu":
        # skip float16 on CPU case
        return

    data_dims, (spoints, epoints) = dims_and_points

    batch_tensors = _create_data_batch(*data_dims, num_samples=4, device=device)
    if dt is not None:
        batch_tensors = batch_tensors.to(dtype=dt)

    # Ignore the equivalence between scripted and regular function on float16 cuda. The pixels at
    # the border may be entirely different due to small rounding errors.
    scripted_fn_atol = -1 if (dt == torch.float16 and device == "cuda") else 1e-8
    _test_fn_on_batch(
        batch_tensors,
        F.perspective,
        scripted_fn_atol=scripted_fn_atol,
        startpoints=spoints,
        endpoints=epoints,
        interpolation=NEAREST,
    )


def test_perspective_interpolation_warning():
    # assert changed type warning
    spoints = [[0, 0], [33, 0], [33, 25], [0, 25]]
    epoints = [[3, 2], [32, 3], [30, 24], [2, 25]]
    tensor = torch.randint(0, 256, (3, 26, 26))
    with pytest.warns(UserWarning, match="Argument interpolation should be of type InterpolationMode"):
        res1 = F.perspective(tensor, startpoints=spoints, endpoints=epoints, interpolation=2)
        res2 = F.perspective(tensor, startpoints=spoints, endpoints=epoints, interpolation=BILINEAR)
        assert_equal(res1, res2)


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("dt", [None, torch.float32, torch.float64, torch.float16])
@pytest.mark.parametrize(
    "size",
    [
        32,
        26,
        [
            32,
        ],
        [32, 32],
        (32, 32),
        [26, 35],
    ],
)
@pytest.mark.parametrize("max_size", [None, 34, 40, 1000])
@pytest.mark.parametrize("interpolation", [BILINEAR, BICUBIC, NEAREST])
def test_resize(device, dt, size, max_size, interpolation):

    if dt == torch.float16 and device == "cpu":
        # skip float16 on CPU case
        return

    if max_size is not None and isinstance(size, Sequence) and len(size) != 1:
        return  # unsupported

    torch.manual_seed(12)
    script_fn = torch.jit.script(F.resize)
    tensor, pil_img = _create_data(26, 36, device=device)
    batch_tensors = _create_data_batch(16, 18, num_samples=4, device=device)

    if dt is not None:
        # This is a trivial cast to float of uint8 data to test all cases
        tensor = tensor.to(dt)
        batch_tensors = batch_tensors.to(dt)

    resized_tensor = F.resize(tensor, size=size, interpolation=interpolation, max_size=max_size)
    resized_pil_img = F.resize(pil_img, size=size, interpolation=interpolation, max_size=max_size)

    assert resized_tensor.size()[1:] == resized_pil_img.size[::-1]

    if interpolation not in [
        NEAREST,
    ]:
        # We cannot check values if mode == NEAREST, as the results are different
        # E.g. resized_tensor  = [[a, a, b, c, d, d, e, ...]]
        # E.g. resized_pil_img = [[a, b, c, c, d, e, f, ...]]
        resized_tensor_f = resized_tensor
        # we need to cast the uint8 tensor to float to compare with the PIL image
        if resized_tensor_f.dtype == torch.uint8:
            resized_tensor_f = resized_tensor_f.to(torch.float)

        # Pay attention to high tolerance for MAE
        _assert_approx_equal_tensor_to_pil(resized_tensor_f, resized_pil_img, tol=8.0)

    if isinstance(size, int):
        script_size = [
            size,
        ]
    else:
        script_size = size

    resize_result = script_fn(tensor, size=script_size, interpolation=interpolation, max_size=max_size)
    assert_equal(resized_tensor, resize_result)

    _test_fn_on_batch(batch_tensors, F.resize, size=script_size, interpolation=interpolation, max_size=max_size)


@pytest.mark.parametrize("device", cpu_and_gpu())
def test_resize_asserts(device):

    tensor, pil_img = _create_data(26, 36, device=device)

    # assert changed type warning
    with pytest.warns(UserWarning, match=r"Argument interpolation should be of type InterpolationMode"):
        res1 = F.resize(tensor, size=32, interpolation=2)

    res2 = F.resize(tensor, size=32, interpolation=BILINEAR)
    assert_equal(res1, res2)

    for img in (tensor, pil_img):
        exp_msg = "max_size should only be passed if size specifies the length of the smaller edge"
        with pytest.raises(ValueError, match=exp_msg):
            F.resize(img, size=(32, 34), max_size=35)
        with pytest.raises(ValueError, match="max_size = 32 must be strictly greater"):
            F.resize(img, size=32, max_size=32)


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("dt", [None, torch.float32, torch.float64, torch.float16])
@pytest.mark.parametrize("size", [[96, 72], [96, 420], [420, 72]])
@pytest.mark.parametrize("interpolation", [BILINEAR, BICUBIC])
def test_resize_antialias(device, dt, size, interpolation):

    if dt == torch.float16 and device == "cpu":
        # skip float16 on CPU case
        return

    torch.manual_seed(12)
    script_fn = torch.jit.script(F.resize)
    tensor, pil_img = _create_data(320, 290, device=device)

    if dt is not None:
        # This is a trivial cast to float of uint8 data to test all cases
        tensor = tensor.to(dt)

    resized_tensor = F.resize(tensor, size=size, interpolation=interpolation, antialias=True)
    resized_pil_img = F.resize(pil_img, size=size, interpolation=interpolation)

    assert resized_tensor.size()[1:] == resized_pil_img.size[::-1]

    resized_tensor_f = resized_tensor
    # we need to cast the uint8 tensor to float to compare with the PIL image
    if resized_tensor_f.dtype == torch.uint8:
        resized_tensor_f = resized_tensor_f.to(torch.float)

    _assert_approx_equal_tensor_to_pil(resized_tensor_f, resized_pil_img, tol=0.5, msg=f"{size}, {interpolation}, {dt}")

    accepted_tol = 1.0 + 1e-5
    if interpolation == BICUBIC:
        # This high tolerance is needed to make the tests pass.
        # It is mostly required for test cases with downsampling and
        # upsampling, where we cannot exactly match the PIL implementation.
        accepted_tol = 15.0

    _assert_approx_equal_tensor_to_pil(
        resized_tensor_f, resized_pil_img, tol=accepted_tol, agg_method="max", msg=f"{size}, {interpolation}, {dt}"
    )

    if isinstance(size, int):
        script_size = [
            size,
        ]
    else:
        script_size = size

    resize_result = script_fn(tensor, size=script_size, interpolation=interpolation, antialias=True)
    assert_equal(resized_tensor, resize_result)


@needs_cuda
@pytest.mark.parametrize("interpolation", [BILINEAR, BICUBIC])
def test_assert_resize_antialias(interpolation):

    # Check the implementation on very large scales
    # and catch the TORCH_CHECK inside interpolate_aa_kernels.cu
    torch.manual_seed(12)
    tensor, pil_img = _create_data(1000, 1000, device="cuda")

    with pytest.raises(RuntimeError, match=r"Max supported scale factor is"):
        F.resize(tensor, size=(5, 5), interpolation=interpolation, antialias=True)


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("dt", [torch.float32, torch.float64, torch.float16])
@pytest.mark.parametrize("size", [[10, 7], [10, 42], [42, 7]])
@pytest.mark.parametrize("interpolation", [BILINEAR, BICUBIC])
def test_interpolate_antialias_backward(device, dt, size, interpolation):

    if dt == torch.float16 and device == "cpu":
        # skip float16 on CPU case
        return

    torch.manual_seed(12)
    if interpolation == BILINEAR:
        forward_op = torch.ops.torchvision._interpolate_bilinear2d_aa
        backward_op = torch.ops.torchvision._interpolate_bilinear2d_aa_backward
    elif interpolation == BICUBIC:
        forward_op = torch.ops.torchvision._interpolate_bicubic2d_aa
        backward_op = torch.ops.torchvision._interpolate_bicubic2d_aa_backward

    class F(torch.autograd.Function):
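        # Wrap the raw antialiased forward/backward ops so that gradcheck exercises the
        # custom backward kernel. Note: this local class F shadows the functional alias F
        # within this test.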
        @staticmethod
        def forward(ctx, i):
            result = forward_op(i, size, False)
            ctx.save_for_backward(i, result)
            return result

        @staticmethod
        def backward(ctx, grad_output):
            i, result = ctx.saved_tensors
            ishape = i.shape
            oshape = result.shape[2:]
            return backward_op(grad_output, oshape, ishape, False)

    x = (torch.rand(1, 32, 29, 3, dtype=torch.double, device=device).permute(0, 3, 1, 2).requires_grad_(True),)
    assert torch.autograd.gradcheck(F.apply, x, eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)

    x = (torch.rand(1, 3, 32, 29, dtype=torch.double, device=device, requires_grad=True),)
    assert torch.autograd.gradcheck(F.apply, x, eps=1e-8, atol=1e-6, rtol=1e-6, fast_mode=False)


def check_functional_vs_PIL_vs_scripted(
    fn, fn_pil, fn_t, config, device, dtype, channels=3, tol=2.0 + 1e-10, agg_method="max"
):
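    # Run the tensor kernel fn_t, the PIL kernel fn_pil and the scripted version of fn on
    # the same input: the tensor result must match PIL within `tol`, match the scripted
    # result up to a small atol, and fn must behave consistently on a batch.
    # Example call (as used by test_adjust_brightness below):
    #   check_functional_vs_PIL_vs_scripted(
    #       F.adjust_brightness, F_pil.adjust_brightness, F_t.adjust_brightness,
    #       {"brightness_factor": 1.34}, device, dtype, channels,
    #   )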

    script_fn = torch.jit.script(fn)
    torch.manual_seed(15)
    tensor, pil_img = _create_data(26, 34, channels=channels, device=device)
    batch_tensors = _create_data_batch(16, 18, num_samples=4, channels=channels, device=device)

    if dtype is not None:
        tensor = F.convert_image_dtype(tensor, dtype)
        batch_tensors = F.convert_image_dtype(batch_tensors, dtype)

    out_fn_t = fn_t(tensor, **config)
    out_pil = fn_pil(pil_img, **config)
    out_scripted = script_fn(tensor, **config)
    assert out_fn_t.dtype == out_scripted.dtype
    assert out_fn_t.size()[1:] == out_pil.size[::-1]

    rgb_tensor = out_fn_t

    if out_fn_t.dtype != torch.uint8:
        rgb_tensor = F.convert_image_dtype(out_fn_t, torch.uint8)

    # Check that the max difference does not exceed 2 in the [0, 255] range.
    # Exact matching is not possible due to the incompatibility between convert_image_dtype and PIL results.
    _assert_approx_equal_tensor_to_pil(rgb_tensor.float(), out_pil, tol=tol, agg_method=agg_method)

    atol = 1e-6
    if out_fn_t.dtype == torch.uint8 and "cuda" in torch.device(device).type:
        atol = 1.0
    assert out_fn_t.allclose(out_scripted, atol=atol)

    # FIXME: fn will be scripted again in _test_fn_on_batch. We could avoid that.
    _test_fn_on_batch(batch_tensors, fn, scripted_fn_atol=atol, **config)


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("dtype", (None, torch.float32, torch.float64))
@pytest.mark.parametrize("config", [{"brightness_factor": f} for f in (0.1, 0.5, 1.0, 1.34, 2.5)])
@pytest.mark.parametrize("channels", [1, 3])
def test_adjust_brightness(device, dtype, config, channels):
    check_functional_vs_PIL_vs_scripted(
        F.adjust_brightness,
        F_pil.adjust_brightness,
        F_t.adjust_brightness,
        config,
        device,
        dtype,
        channels,
    )


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("dtype", (None, torch.float32, torch.float64))
@pytest.mark.parametrize("channels", [1, 3])
def test_invert(device, dtype, channels):
    check_functional_vs_PIL_vs_scripted(
        F.invert, F_pil.invert, F_t.invert, {}, device, dtype, channels, tol=1.0, agg_method="max"
    )


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("config", [{"bits": bits} for bits in range(0, 8)])
@pytest.mark.parametrize("channels", [1, 3])
def test_posterize(device, config, channels):
    check_functional_vs_PIL_vs_scripted(
        F.posterize,
        F_pil.posterize,
        F_t.posterize,
        config,
        device,
        dtype=None,
        channels=channels,
        tol=1.0,
        agg_method="max",
    )


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("config", [{"threshold": threshold} for threshold in [0, 64, 128, 192, 255]])
@pytest.mark.parametrize("channels", [1, 3])
def test_solarize1(device, config, channels):
    check_functional_vs_PIL_vs_scripted(
        F.solarize,
        F_pil.solarize,
        F_t.solarize,
        config,
        device,
        dtype=None,
        channels=channels,
        tol=1.0,
        agg_method="max",
    )


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("dtype", (torch.float32, torch.float64))
@pytest.mark.parametrize("config", [{"threshold": threshold} for threshold in [0.0, 0.25, 0.5, 0.75, 1.0]])
@pytest.mark.parametrize("channels", [1, 3])
def test_solarize2(device, dtype, config, channels):
    check_functional_vs_PIL_vs_scripted(
        F.solarize,
        lambda img, threshold: F_pil.solarize(img, 255 * threshold),
        F_t.solarize,
        config,
        device,
        dtype,
        channels,
        tol=1.0,
        agg_method="max",
    )


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("dtype", (None, torch.float32, torch.float64))
@pytest.mark.parametrize("config", [{"sharpness_factor": f} for f in [0.2, 0.5, 1.0, 1.5, 2.0]])
@pytest.mark.parametrize("channels", [1, 3])
def test_adjust_sharpness(device, dtype, config, channels):
    check_functional_vs_PIL_vs_scripted(
        F.adjust_sharpness,
        F_pil.adjust_sharpness,
        F_t.adjust_sharpness,
        config,
        device,
        dtype,
        channels,
    )


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("dtype", (None, torch.float32, torch.float64))
@pytest.mark.parametrize("channels", [1, 3])
def test_autocontrast(device, dtype, channels):
    check_functional_vs_PIL_vs_scripted(
        F.autocontrast, F_pil.autocontrast, F_t.autocontrast, {}, device, dtype, channels, tol=1.0, agg_method="max"
    )


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("channels", [1, 3])
def test_equalize(device, channels):
    torch.use_deterministic_algorithms(False)
    check_functional_vs_PIL_vs_scripted(
        F.equalize,
        F_pil.equalize,
        F_t.equalize,
        {},
        device,
        dtype=None,
        channels=channels,
        tol=1.0,
        agg_method="max",
    )


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("dtype", (None, torch.float32, torch.float64))
@pytest.mark.parametrize("config", [{"contrast_factor": f} for f in [0.2, 0.5, 1.0, 1.5, 2.0]])
@pytest.mark.parametrize("channels", [1, 3])
def test_adjust_contrast(device, dtype, config, channels):
    check_functional_vs_PIL_vs_scripted(
        F.adjust_contrast, F_pil.adjust_contrast, F_t.adjust_contrast, config, device, dtype, channels
    )


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("dtype", (None, torch.float32, torch.float64))
@pytest.mark.parametrize("config", [{"saturation_factor": f} for f in [0.5, 0.75, 1.0, 1.5, 2.0]])
@pytest.mark.parametrize("channels", [1, 3])
def test_adjust_saturation(device, dtype, config, channels):
    check_functional_vs_PIL_vs_scripted(
        F.adjust_saturation, F_pil.adjust_saturation, F_t.adjust_saturation, config, device, dtype, channels
    )


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("dtype", (None, torch.float32, torch.float64))
@pytest.mark.parametrize("config", [{"hue_factor": f} for f in [-0.45, -0.25, 0.0, 0.25, 0.45]])
@pytest.mark.parametrize("channels", [1, 3])
def test_adjust_hue(device, dtype, config, channels):
    check_functional_vs_PIL_vs_scripted(
        F.adjust_hue, F_pil.adjust_hue, F_t.adjust_hue, config, device, dtype, channels, tol=16.1, agg_method="max"
    )


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("dtype", (None, torch.float32, torch.float64))
@pytest.mark.parametrize("config", [{"gamma": g1, "gain": g2} for g1, g2 in zip([0.8, 1.0, 1.2], [0.7, 1.0, 1.3])])
@pytest.mark.parametrize("channels", [1, 3])
def test_adjust_gamma(device, dtype, config, channels):
    check_functional_vs_PIL_vs_scripted(
        F.adjust_gamma,
        F_pil.adjust_gamma,
        F_t.adjust_gamma,
        config,
        device,
        dtype,
        channels,
    )


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("dt", [None, torch.float32, torch.float64, torch.float16])
@pytest.mark.parametrize(
    "pad",
    [
        2,
        [
            3,
        ],
        [0, 3],
        (3, 3),
        [4, 2, 4, 3],
    ],
)
@pytest.mark.parametrize(
    "config",
    [
        {"padding_mode": "constant", "fill": 0},
        {"padding_mode": "constant", "fill": 10},
        {"padding_mode": "constant", "fill": 20},
        {"padding_mode": "edge"},
        {"padding_mode": "reflect"},
        {"padding_mode": "symmetric"},
    ],
)
def test_pad(device, dt, pad, config):
    script_fn = torch.jit.script(F.pad)
    tensor, pil_img = _create_data(7, 8, device=device)
    batch_tensors = _create_data_batch(16, 18, num_samples=4, device=device)

    if dt == torch.float16 and device == "cpu":
        # skip float16 on CPU case
        return

    if dt is not None:
        # This is a trivial cast to float of uint8 data to test all cases
        tensor = tensor.to(dt)
        batch_tensors = batch_tensors.to(dt)

    pad_tensor = F_t.pad(tensor, pad, **config)
    pad_pil_img = F_pil.pad(pil_img, pad, **config)

    pad_tensor_8b = pad_tensor
    # we need to cast to uint8 to compare with PIL image
    if pad_tensor_8b.dtype != torch.uint8:
        pad_tensor_8b = pad_tensor_8b.to(torch.uint8)

    _assert_equal_tensor_to_pil(pad_tensor_8b, pad_pil_img, msg="{}, {}".format(pad, config))

    if isinstance(pad, int):
        script_pad = [
            pad,
        ]
    else:
        script_pad = pad
    pad_tensor_script = script_fn(tensor, script_pad, **config)
    assert_equal(pad_tensor, pad_tensor_script, msg="{}, {}".format(pad, config))

    _test_fn_on_batch(batch_tensors, F.pad, padding=script_pad, **config)


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("mode", [NEAREST, BILINEAR, BICUBIC])
def test_resized_crop(device, mode):
    # test values of F.resized_crop in several cases:
    # 1) resize to the same size, crop to the same size => should be identity
    tensor, _ = _create_data(26, 36, device=device)

    out_tensor = F.resized_crop(tensor, top=0, left=0, height=26, width=36, size=[26, 36], interpolation=mode)
    assert_equal(tensor, out_tensor, msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5]))

    # 2) resize by half and crop a TL corner
    tensor, _ = _create_data(26, 36, device=device)
    out_tensor = F.resized_crop(tensor, top=0, left=0, height=20, width=30, size=[10, 15], interpolation=NEAREST)
    expected_out_tensor = tensor[:, :20:2, :30:2]
    assert_equal(
        expected_out_tensor,
        out_tensor,
        msg="{} vs {}".format(expected_out_tensor[0, :10, :10], out_tensor[0, :10, :10]),
    )

    batch_tensors = _create_data_batch(26, 36, num_samples=4, device=device)
    _test_fn_on_batch(
        batch_tensors, F.resized_crop, top=1, left=2, height=20, width=30, size=[10, 15], interpolation=NEAREST
    )


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize(
    "func, args",
    [
        (F_t.get_image_size, ()),
        (F_t.vflip, ()),
        (F_t.hflip, ()),
        (F_t.crop, (1, 2, 4, 5)),
        (F_t.adjust_brightness, (0.0,)),
        (F_t.adjust_contrast, (1.0,)),
        (F_t.adjust_hue, (-0.5,)),
        (F_t.adjust_saturation, (2.0,)),
        (
            F_t.pad,
            (
                [
                    2,
                ],
                2,
                "constant",
            ),
        ),
        (F_t.resize, ([10, 11],)),
        (
            F_t.perspective,
            (
                [
                    0.2,
                ]
            ),
        ),
        (F_t.gaussian_blur, ((2, 2), (0.7, 0.5))),
        (F_t.invert, ()),
        (F_t.posterize, (0,)),
        (F_t.solarize, (0.3,)),
        (F_t.adjust_sharpness, (0.3,)),
        (F_t.autocontrast, ()),
        (F_t.equalize, ()),
    ],
)
def test_assert_image_tensor(device, func, args):
    shape = (100,)
    tensor = torch.rand(*shape, dtype=torch.float, device=device)
    with pytest.raises(Exception, match=r"Tensor is not a torch image."):
        func(tensor, *args)


@pytest.mark.parametrize("device", cpu_and_gpu())
def test_vflip(device):
    script_vflip = torch.jit.script(F.vflip)

    img_tensor, pil_img = _create_data(16, 18, device=device)
    vflipped_img = F.vflip(img_tensor)
    vflipped_pil_img = F.vflip(pil_img)
    _assert_equal_tensor_to_pil(vflipped_img, vflipped_pil_img)

    # scriptable function test
    vflipped_img_script = script_vflip(img_tensor)
    assert_equal(vflipped_img, vflipped_img_script)

    batch_tensors = _create_data_batch(16, 18, num_samples=4, device=device)
    _test_fn_on_batch(batch_tensors, F.vflip)


@pytest.mark.parametrize("device", cpu_and_gpu())
def test_hflip(device):
    script_hflip = torch.jit.script(F.hflip)

    img_tensor, pil_img = _create_data(16, 18, device=device)
    hflipped_img = F.hflip(img_tensor)
    hflipped_pil_img = F.hflip(pil_img)
    _assert_equal_tensor_to_pil(hflipped_img, hflipped_pil_img)

    # scriptable function test
    hflipped_img_script = script_hflip(img_tensor)
    assert_equal(hflipped_img, hflipped_img_script)

    batch_tensors = _create_data_batch(16, 18, num_samples=4, device=device)
    _test_fn_on_batch(batch_tensors, F.hflip)


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize(
    "top, left, height, width",
    [
        (1, 2, 4, 5),  # crop inside top-left corner
        (2, 12, 3, 4),  # crop inside top-right corner
        (8, 3, 5, 6),  # crop inside bottom-left corner
        (8, 11, 4, 3),  # crop inside bottom-right corner
    ],
)
def test_crop(device, top, left, height, width):
    script_crop = torch.jit.script(F.crop)

    img_tensor, pil_img = _create_data(16, 18, device=device)

    pil_img_cropped = F.crop(pil_img, top, left, height, width)

    img_tensor_cropped = F.crop(img_tensor, top, left, height, width)
    _assert_equal_tensor_to_pil(img_tensor_cropped, pil_img_cropped)

    img_tensor_cropped = script_crop(img_tensor, top, left, height, width)
    _assert_equal_tensor_to_pil(img_tensor_cropped, pil_img_cropped)

    batch_tensors = _create_data_batch(16, 18, num_samples=4, device=device)
    _test_fn_on_batch(batch_tensors, F.crop, top=top, left=left, height=height, width=width)


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("image_size", ("small", "large"))
@pytest.mark.parametrize("dt", [None, torch.float32, torch.float64, torch.float16])
@pytest.mark.parametrize("ksize", [(3, 3), [3, 5], (23, 23)])
@pytest.mark.parametrize("sigma", [[0.5, 0.5], (0.5, 0.5), (0.8, 0.8), (1.7, 1.7)])
@pytest.mark.parametrize("fn", [F.gaussian_blur, torch.jit.script(F.gaussian_blur)])
def test_gaussian_blur(device, image_size, dt, ksize, sigma, fn):

    # true_cv2_results = {
    #     # np_img = np.arange(3 * 10 * 12, dtype="uint8").reshape((10, 12, 3))
    #     # cv2.GaussianBlur(np_img, ksize=(3, 3), sigmaX=0.8)
    #     "3_3_0.8": ...
    #     # cv2.GaussianBlur(np_img, ksize=(3, 3), sigmaX=0.5)
    #     "3_3_0.5": ...
    #     # cv2.GaussianBlur(np_img, ksize=(3, 5), sigmaX=0.8)
    #     "3_5_0.8": ...
    #     # cv2.GaussianBlur(np_img, ksize=(3, 5), sigmaX=0.5)
    #     "3_5_0.5": ...
    #     # np_img2 = np.arange(26 * 28, dtype="uint8").reshape((26, 28))
    #     # cv2.GaussianBlur(np_img2, ksize=(23, 23), sigmaX=1.7)
    #     "23_23_1.7": ...
    # }
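    # The pre-computed OpenCV outputs are stored in assets/gaussian_blur_opencv_results.pt,
    # keyed as "{h}_{w}_{c}__{kernel_x}_{kernel_y}_{sigma}" (see gt_key below).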
    p = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets", "gaussian_blur_opencv_results.pt")
    true_cv2_results = torch.load(p)

    if image_size == "small":
        tensor = (
            torch.from_numpy(np.arange(3 * 10 * 12, dtype="uint8").reshape((10, 12, 3))).permute(2, 0, 1).to(device)
        )
    else:
        tensor = torch.from_numpy(np.arange(26 * 28, dtype="uint8").reshape((1, 26, 28))).to(device)

    if dt == torch.float16 and device == "cpu":
        # skip float16 on CPU case
        return

    if dt is not None:
        tensor = tensor.to(dtype=dt)

    _ksize = (ksize, ksize) if isinstance(ksize, int) else ksize
    _sigma = sigma[0] if sigma is not None else None
    shape = tensor.shape
    gt_key = "{}_{}_{}__{}_{}_{}".format(shape[-2], shape[-1], shape[-3], _ksize[0], _ksize[1], _sigma)
    if gt_key not in true_cv2_results:
        return

    true_out = (
        torch.tensor(true_cv2_results[gt_key]).reshape(shape[-2], shape[-1], shape[-3]).permute(2, 0, 1).to(tensor)
    )

    out = fn(tensor, kernel_size=ksize, sigma=sigma)
    torch.testing.assert_close(out, true_out, rtol=0.0, atol=1.0, msg="{}, {}".format(ksize, sigma))


@pytest.mark.parametrize("device", cpu_and_gpu())
def test_hsv2rgb(device):
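    # Convert random HSV images with the tensor kernel and compare the result
    # pixel-by-pixel against colorsys.hsv_to_rgb as the reference implementation.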
    scripted_fn = torch.jit.script(F_t._hsv2rgb)
    shape = (3, 100, 150)
    for _ in range(10):
        hsv_img = torch.rand(*shape, dtype=torch.float, device=device)
        rgb_img = F_t._hsv2rgb(hsv_img)
        ft_img = rgb_img.permute(1, 2, 0).flatten(0, 1)

        (
            h,
            s,
            v,
        ) = hsv_img.unbind(0)
        h = h.flatten().cpu().numpy()
        s = s.flatten().cpu().numpy()
        v = v.flatten().cpu().numpy()

        rgb = []
        for h1, s1, v1 in zip(h, s, v):
            rgb.append(colorsys.hsv_to_rgb(h1, s1, v1))
        colorsys_img = torch.tensor(rgb, dtype=torch.float32, device=device)
        torch.testing.assert_close(ft_img, colorsys_img, rtol=0.0, atol=1e-5)

        s_rgb_img = scripted_fn(hsv_img)
        torch.testing.assert_close(rgb_img, s_rgb_img)

    batch_tensors = _create_data_batch(120, 100, num_samples=4, device=device).float()
    _test_fn_on_batch(batch_tensors, F_t._hsv2rgb)


@pytest.mark.parametrize("device", cpu_and_gpu())
def test_rgb2hsv(device):
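    # Convert random RGB images with the tensor kernel and compare against
    # colorsys.rgb_to_hsv; hue is compared on the unit circle to handle wrap-around.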
    scripted_fn = torch.jit.script(F_t._rgb2hsv)
    shape = (3, 150, 100)
    for _ in range(10):
        rgb_img = torch.rand(*shape, dtype=torch.float, device=device)
        hsv_img = F_t._rgb2hsv(rgb_img)
        ft_hsv_img = hsv_img.permute(1, 2, 0).flatten(0, 1)

        (
            r,
            g,
            b,
        ) = rgb_img.unbind(dim=-3)
        r = r.flatten().cpu().numpy()
        g = g.flatten().cpu().numpy()
        b = b.flatten().cpu().numpy()

        hsv = []
        for r1, g1, b1 in zip(r, g, b):
            hsv.append(colorsys.rgb_to_hsv(r1, g1, b1))

        colorsys_img = torch.tensor(hsv, dtype=torch.float32, device=device)

        ft_hsv_img_h, ft_hsv_img_sv = torch.split(ft_hsv_img, [1, 2], dim=1)
        colorsys_img_h, colorsys_img_sv = torch.split(colorsys_img, [1, 2], dim=1)

        max_diff_h = ((colorsys_img_h * 2 * math.pi).sin() - (ft_hsv_img_h * 2 * math.pi).sin()).abs().max()
        max_diff_sv = (colorsys_img_sv - ft_hsv_img_sv).abs().max()
        max_diff = max(max_diff_h, max_diff_sv)
        assert max_diff < 1e-5

        s_hsv_img = scripted_fn(rgb_img)
        torch.testing.assert_close(hsv_img, s_hsv_img, rtol=1e-5, atol=1e-7)

    batch_tensors = _create_data_batch(120, 100, num_samples=4, device=device).float()
    _test_fn_on_batch(batch_tensors, F_t._rgb2hsv)


@pytest.mark.parametrize("device", cpu_and_gpu())
@pytest.mark.parametrize("num_output_channels", (3, 1))
def test_rgb_to_grayscale(device, num_output_channels):
    script_rgb_to_grayscale = torch.jit.script(F.rgb_to_grayscale)

    img_tensor, pil_img = _create_data(32, 34, device=device)

    gray_pil_image = F.rgb_to_grayscale(pil_img, num_output_channels=num_output_channels)
    gray_tensor = F.rgb_to_grayscale(img_tensor, num_output_channels=num_output_channels)

    _assert_approx_equal_tensor_to_pil(gray_tensor.float(), gray_pil_image, tol=1.0 + 1e-10, agg_method="max")

    s_gray_tensor = script_rgb_to_grayscale(img_tensor, num_output_channels=num_output_channels)
    assert_equal(s_gray_tensor, gray_tensor)

    batch_tensors = _create_data_batch(16, 18, num_samples=4, device=device)
    _test_fn_on_batch(batch_tensors, F.rgb_to_grayscale, num_output_channels=num_output_channels)


@pytest.mark.parametrize("device", cpu_and_gpu())
def test_center_crop(device):
    script_center_crop = torch.jit.script(F.center_crop)

    img_tensor, pil_img = _create_data(32, 34, device=device)

    cropped_pil_image = F.center_crop(pil_img, [10, 11])

    cropped_tensor = F.center_crop(img_tensor, [10, 11])
    _assert_equal_tensor_to_pil(cropped_tensor, cropped_pil_image)

    cropped_tensor = script_center_crop(img_tensor, [10, 11])
    _assert_equal_tensor_to_pil(cropped_tensor, cropped_pil_image)

    batch_tensors = _create_data_batch(16, 18, num_samples=4, device=device)
    _test_fn_on_batch(batch_tensors, F.center_crop, output_size=[10, 11])


@pytest.mark.parametrize("device", cpu_and_gpu())
def test_five_crop(device):
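    # five_crop on a batch should match applying five_crop to each image individually,
    # for both the eager and the scripted version.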
    script_five_crop = torch.jit.script(F.five_crop)

    img_tensor, pil_img = _create_data(32, 34, device=device)

    cropped_pil_images = F.five_crop(pil_img, [10, 11])

    cropped_tensors = F.five_crop(img_tensor, [10, 11])
    for i in range(5):
        _assert_equal_tensor_to_pil(cropped_tensors[i], cropped_pil_images[i])

    cropped_tensors = script_five_crop(img_tensor, [10, 11])
    for i in range(5):
        _assert_equal_tensor_to_pil(cropped_tensors[i], cropped_pil_images[i])

    batch_tensors = _create_data_batch(16, 18, num_samples=4, device=device)
    tuple_transformed_batches = F.five_crop(batch_tensors, [10, 11])
    for i in range(len(batch_tensors)):
        img_tensor = batch_tensors[i, ...]
        tuple_transformed_imgs = F.five_crop(img_tensor, [10, 11])
        assert len(tuple_transformed_imgs) == len(tuple_transformed_batches)

        for j in range(len(tuple_transformed_imgs)):
            true_transformed_img = tuple_transformed_imgs[j]
            transformed_img = tuple_transformed_batches[j][i, ...]
            assert_equal(true_transformed_img, transformed_img)

    # scriptable function test
    s_tuple_transformed_batches = script_five_crop(batch_tensors, [10, 11])
    for transformed_batch, s_transformed_batch in zip(tuple_transformed_batches, s_tuple_transformed_batches):
        assert_equal(transformed_batch, s_transformed_batch)


@pytest.mark.parametrize("device", cpu_and_gpu())
def test_ten_crop(device):
    script_ten_crop = torch.jit.script(F.ten_crop)

    img_tensor, pil_img = _create_data(32, 34, device=device)

    cropped_pil_images = F.ten_crop(pil_img, [10, 11])

    cropped_tensors = F.ten_crop(img_tensor, [10, 11])
    for i in range(10):
        _assert_equal_tensor_to_pil(cropped_tensors[i], cropped_pil_images[i])

    cropped_tensors = script_ten_crop(img_tensor, [10, 11])
    for i in range(10):
        _assert_equal_tensor_to_pil(cropped_tensors[i], cropped_pil_images[i])

    batch_tensors = _create_data_batch(16, 18, num_samples=4, device=device)
    tuple_transformed_batches = F.ten_crop(batch_tensors, [10, 11])
    for i in range(len(batch_tensors)):
        img_tensor = batch_tensors[i, ...]
        tuple_transformed_imgs = F.ten_crop(img_tensor, [10, 11])
        assert len(tuple_transformed_imgs) == len(tuple_transformed_batches)

        for j in range(len(tuple_transformed_imgs)):
            true_transformed_img = tuple_transformed_imgs[j]
            transformed_img = tuple_transformed_batches[j][i, ...]
            assert_equal(true_transformed_img, transformed_img)

    # scriptable function test
    s_tuple_transformed_batches = script_ten_crop(batch_tensors, [10, 11])
    for transformed_batch, s_transformed_batch in zip(tuple_transformed_batches, s_tuple_transformed_batches):
        assert_equal(transformed_batch, s_transformed_batch)


if __name__ == "__main__":
    pytest.main([__file__])