import unittest
import colorsys
import math

import numpy as np
from PIL.Image import NEAREST, BILINEAR, BICUBIC

import torch
import torchvision.transforms as transforms
import torchvision.transforms.functional_tensor as F_t
import torchvision.transforms.functional_pil as F_pil
import torchvision.transforms.functional as F

from common_utils import TransformsTester


class Tester(TransformsTester):
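    """Transforms tests on tensor images, run on self.device and compared against PIL reference results."""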

    def setUp(self):
        self.device = "cpu"

    def test_vflip(self):
        script_vflip = torch.jit.script(F_t.vflip)
        img_tensor = torch.randn(3, 16, 16, device=self.device)
        img_tensor_clone = img_tensor.clone()

        vflipped_img = F_t.vflip(img_tensor)
        vflipped_img_again = F_t.vflip(vflipped_img)
        self.assertEqual(vflipped_img.shape, img_tensor.shape)
        self.assertTrue(torch.equal(img_tensor, vflipped_img_again))
        self.assertTrue(torch.equal(img_tensor, img_tensor_clone))

        # scriptable function test
        vflipped_img_script = script_vflip(img_tensor)
        self.assertTrue(torch.equal(vflipped_img, vflipped_img_script))

    def test_hflip(self):
        script_hflip = torch.jit.script(F_t.hflip)
        img_tensor = torch.randn(3, 16, 16, device=self.device)
        img_tensor_clone = img_tensor.clone()

        hflipped_img = F_t.hflip(img_tensor)
        hflipped_img_again = F_t.hflip(hflipped_img)
        self.assertEqual(hflipped_img.shape, img_tensor.shape)
        self.assertTrue(torch.equal(img_tensor, hflipped_img_again))
        self.assertTrue(torch.equal(img_tensor, img_tensor_clone))

        # scriptable function test
        hflipped_img_script = script_hflip(img_tensor)
        self.assertTrue(torch.equal(hflipped_img, hflipped_img_script))

    def test_crop(self):
        script_crop = torch.jit.script(F.crop)

        img_tensor, pil_img = self._create_data(16, 18, device=self.device)

        test_configs = [
            (1, 2, 4, 5),   # crop inside top-left corner
            (2, 12, 3, 4),  # crop inside top-right corner
            (8, 3, 5, 6),   # crop inside bottom-left corner
            (8, 11, 4, 3),  # crop inside bottom-right corner
        ]

        for top, left, height, width in test_configs:
            pil_img_cropped = F.crop(pil_img, top, left, height, width)

            img_tensor_cropped = F.crop(img_tensor, top, left, height, width)
            self.compareTensorToPIL(img_tensor_cropped, pil_img_cropped)

            img_tensor_cropped = script_crop(img_tensor, top, left, height, width)
            self.compareTensorToPIL(img_tensor_cropped, pil_img_cropped)

    def test_hsv2rgb(self):
        shape = (3, 100, 150)
        for _ in range(20):
            img = torch.rand(*shape, dtype=torch.float)
            ft_img = F_t._hsv2rgb(img).permute(1, 2, 0).flatten(0, 1)

            h, s, v = img.unbind(0)
            h = h.flatten().numpy()
            s = s.flatten().numpy()
            v = v.flatten().numpy()

            rgb = []
            for h1, s1, v1 in zip(h, s, v):
                rgb.append(colorsys.hsv_to_rgb(h1, s1, v1))

            colorsys_img = torch.tensor(rgb, dtype=torch.float32)
            max_diff = (ft_img - colorsys_img).abs().max()
            self.assertLess(max_diff, 1e-5)

    def test_rgb2hsv(self):
        shape = (3, 150, 100)
        for _ in range(20):
            img = torch.rand(*shape, dtype=torch.float)
            ft_hsv_img = F_t._rgb2hsv(img).permute(1, 2, 0).flatten(0, 1)

            r, g, b = img.unbind(0)
            r = r.flatten().numpy()
            g = g.flatten().numpy()
            b = b.flatten().numpy()

            hsv = []
            for r1, g1, b1 in zip(r, g, b):
                hsv.append(colorsys.rgb_to_hsv(r1, g1, b1))

            colorsys_img = torch.tensor(hsv, dtype=torch.float32)

            ft_hsv_img_h, ft_hsv_img_sv = torch.split(ft_hsv_img, [1, 2], dim=1)
            colorsys_img_h, colorsys_img_sv = torch.split(colorsys_img, [1, 2], dim=1)

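            # Hue is cyclic, so compare sin(2 * pi * h) instead of h directly:
            # hues near 0 and 1 represent the same color and should not count as large errors.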
            max_diff_h = ((colorsys_img_h * 2 * math.pi).sin() - (ft_hsv_img_h * 2 * math.pi).sin()).abs().max()
            max_diff_sv = (colorsys_img_sv - ft_hsv_img_sv).abs().max()
            max_diff = max(max_diff_h, max_diff_sv)

            self.assertLess(max_diff, 1e-5)

    def test_rgb_to_grayscale(self):
        script_rgb_to_grayscale = torch.jit.script(F.rgb_to_grayscale)

        img_tensor, pil_img = self._create_data(32, 34, device=self.device)

        for num_output_channels in (3, 1):
            gray_pil_image = F.rgb_to_grayscale(pil_img, num_output_channels=num_output_channels)
            gray_tensor = F.rgb_to_grayscale(img_tensor, num_output_channels=num_output_channels)

            self.approxEqualTensorToPIL(gray_tensor.float(), gray_pil_image, tol=1.0 + 1e-10, agg_method="max")

            s_gray_tensor = script_rgb_to_grayscale(img_tensor, num_output_channels=num_output_channels)
            self.assertTrue(s_gray_tensor.equal(gray_tensor))

    def test_center_crop(self):
        script_center_crop = torch.jit.script(F.center_crop)

        img_tensor, pil_img = self._create_data(32, 34, device=self.device)

        cropped_pil_image = F.center_crop(pil_img, [10, 11])

        cropped_tensor = F.center_crop(img_tensor, [10, 11])
        self.compareTensorToPIL(cropped_tensor, cropped_pil_image)

        cropped_tensor = script_center_crop(img_tensor, [10, 11])
        self.compareTensorToPIL(cropped_tensor, cropped_pil_image)

    def test_five_crop(self):
        script_five_crop = torch.jit.script(F.five_crop)

        img_tensor, pil_img = self._create_data(32, 34, device=self.device)

        cropped_pil_images = F.five_crop(pil_img, [10, 11])

        cropped_tensors = F.five_crop(img_tensor, [10, 11])
        for i in range(5):
            self.compareTensorToPIL(cropped_tensors[i], cropped_pil_images[i])

        cropped_tensors = script_five_crop(img_tensor, [10, 11])
        for i in range(5):
            self.compareTensorToPIL(cropped_tensors[i], cropped_pil_images[i])

    def test_ten_crop(self):
        script_ten_crop = torch.jit.script(F.ten_crop)

        img_tensor, pil_img = self._create_data(32, 34, device=self.device)

        cropped_pil_images = F.ten_crop(pil_img, [10, 11])

        cropped_tensors = F.ten_crop(img_tensor, [10, 11])
        for i in range(10):
            self.compareTensorToPIL(cropped_tensors[i], cropped_pil_images[i])

        cropped_tensors = script_ten_crop(img_tensor, [10, 11])
        for i in range(10):
            self.compareTensorToPIL(cropped_tensors[i], cropped_pil_images[i])

    def test_pad(self):
        script_fn = torch.jit.script(F_t.pad)
        tensor, pil_img = self._create_data(7, 8, device=self.device)

        for dt in [None, torch.float32, torch.float64]:
            if dt is not None:
                # This is a trivial cast to float of uint8 data to test all cases
                tensor = tensor.to(dt)
            for pad in [2, [3, ], [0, 3], (3, 3), [4, 2, 4, 3]]:
                configs = [
                    {"padding_mode": "constant", "fill": 0},
                    {"padding_mode": "constant", "fill": 10},
                    {"padding_mode": "constant", "fill": 20},
                    {"padding_mode": "edge"},
                    {"padding_mode": "reflect"},
                    {"padding_mode": "symmetric"},
                ]
                for kwargs in configs:
                    pad_tensor = F_t.pad(tensor, pad, **kwargs)
                    pad_pil_img = F_pil.pad(pil_img, pad, **kwargs)

                    pad_tensor_8b = pad_tensor
                    # we need to cast to uint8 to compare with PIL image
                    if pad_tensor_8b.dtype != torch.uint8:
                        pad_tensor_8b = pad_tensor_8b.to(torch.uint8)

                    self.compareTensorToPIL(pad_tensor_8b, pad_pil_img, msg="{}, {}".format(pad, kwargs))

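                    # the scripted pad expects padding as a List[int], so wrap a plain int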
                    if isinstance(pad, int):
                        script_pad = [pad, ]
                    else:
                        script_pad = pad
                    pad_tensor_script = script_fn(tensor, script_pad, **kwargs)
                    self.assertTrue(pad_tensor.equal(pad_tensor_script), msg="{}, {}".format(pad, kwargs))

        with self.assertRaises(ValueError, msg="Padding can not be negative for symmetric padding_mode"):
            F_t.pad(tensor, (-2, -3), padding_mode="symmetric")

    def _test_adjust_fn(self, fn, fn_pil, fn_t, configs):
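        """Check that fn_t (tensor), fn_pil (PIL) and the scripted fn give matching results for each config."""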
        script_fn = torch.jit.script(fn)

        torch.manual_seed(15)

        tensor, pil_img = self._create_data(26, 34, device=self.device)

        for dt in [None, torch.float32, torch.float64]:

            if dt is not None:
                tensor = F.convert_image_dtype(tensor, dt)

            for config in configs:
                adjusted_tensor = fn_t(tensor, **config)
                adjusted_pil = fn_pil(pil_img, **config)
                scripted_result = script_fn(tensor, **config)
                msg = "{}, {}".format(dt, config)
                self.assertEqual(adjusted_tensor.dtype, scripted_result.dtype, msg=msg)
                self.assertEqual(adjusted_tensor.size()[1:], adjusted_pil.size[::-1], msg=msg)

                rgb_tensor = adjusted_tensor
                if adjusted_tensor.dtype != torch.uint8:
                    rgb_tensor = F.convert_image_dtype(adjusted_tensor, torch.uint8)

                # Check that the max difference does not exceed 2 in the [0, 255] range.
                # Exact matching is not possible due to the incompatibility of convert_image_dtype and PIL results.
                tol = 2.0 + 1e-10
                self.approxEqualTensorToPIL(rgb_tensor.float(), adjusted_pil, tol, msg=msg, agg_method="max")
                self.assertTrue(adjusted_tensor.allclose(scripted_result), msg=msg)

    def test_adjust_brightness(self):
        self._test_adjust_fn(
            F.adjust_brightness,
            F_pil.adjust_brightness,
            F_t.adjust_brightness,
            [{"brightness_factor": f} for f in [0.1, 0.5, 1.0, 1.34, 2.5]]
        )

    def test_adjust_contrast(self):
        self._test_adjust_fn(
            F.adjust_contrast,
            F_pil.adjust_contrast,
            F_t.adjust_contrast,
            [{"contrast_factor": f} for f in [0.2, 0.5, 1.0, 1.5, 2.0]]
        )

    def test_adjust_saturation(self):
        self._test_adjust_fn(
            F.adjust_saturation,
            F_pil.adjust_saturation,
            F_t.adjust_saturation,
            [{"saturation_factor": f} for f in [0.5, 0.75, 1.0, 1.5, 2.0]]
        )

    def test_adjust_gamma(self):
        self._test_adjust_fn(
            F.adjust_gamma,
            F_pil.adjust_gamma,
            F_t.adjust_gamma,
            [{"gamma": g1, "gain": g2} for g1, g2 in zip([0.8, 1.0, 1.2], [0.7, 1.0, 1.3])]
        )

    def test_resize(self):
        script_fn = torch.jit.script(F_t.resize)
        tensor, pil_img = self._create_data(26, 36, device=self.device)

        for dt in [None, torch.float32, torch.float64]:
            if dt is not None:
                # This is a trivial cast to float of uint8 data to test all cases
                tensor = tensor.to(dt)
            for size in [32, 26, [32, ], [32, 32], (32, 32), [26, 35]]:
                for interpolation in [BILINEAR, BICUBIC, NEAREST]:
                    resized_tensor = F_t.resize(tensor, size=size, interpolation=interpolation)
                    resized_pil_img = F_pil.resize(pil_img, size=size, interpolation=interpolation)

                    self.assertEqual(
                        resized_tensor.size()[1:], resized_pil_img.size[::-1], msg="{}, {}".format(size, interpolation)
                    )

                    if interpolation != NEAREST:
                        # We cannot check values if mode = NEAREST, as results are different
                        # E.g. resized_tensor  = [[a, a, b, c, d, d, e, ...]]
                        # E.g. resized_pil_img = [[a, b, c, c, d, e, f, ...]]
                        resized_tensor_f = resized_tensor
                        # we need to cast to float to compare with the PIL image
                        if resized_tensor_f.dtype == torch.uint8:
                            resized_tensor_f = resized_tensor_f.to(torch.float)

                        # Pay attention to high tolerance for MAE
                        self.approxEqualTensorToPIL(
                            resized_tensor_f, resized_pil_img, tol=8.0, msg="{}, {}".format(size, interpolation)
                        )

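                    # the scripted resize expects size as a List[int], so wrap a plain int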
                    if isinstance(size, int):
                        script_size = [size, ]
                    else:
                        script_size = size
                    resize_result = script_fn(tensor, size=script_size, interpolation=interpolation)
                    self.assertTrue(resized_tensor.equal(resize_result), msg="{}, {}".format(size, interpolation))

    def test_resized_crop(self):
        # test values of F.resized_crop in several cases:
        # 1) resize to the same size, crop to the same size => should be identity
        tensor, _ = self._create_data(26, 36, device=self.device)
        for i in [0, 2, 3]:
            out_tensor = F.resized_crop(tensor, top=0, left=0, height=26, width=36, size=[26, 36], interpolation=i)
            self.assertTrue(tensor.equal(out_tensor), msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5]))

        # 2) resize by half and crop a TL corner
        tensor, _ = self._create_data(26, 36, device=self.device)
        out_tensor = F.resized_crop(tensor, top=0, left=0, height=20, width=30, size=[10, 15], interpolation=0)
        expected_out_tensor = tensor[:, :20:2, :30:2]
        self.assertTrue(
            expected_out_tensor.equal(out_tensor),
            msg="{} vs {}".format(expected_out_tensor[0, :10, :10], out_tensor[0, :10, :10])
        )

    def test_affine(self):
        # Tests on square and rectangular images
        scripted_affine = torch.jit.script(F.affine)

        data = [self._create_data(26, 26, device=self.device), self._create_data(32, 26, device=self.device)]
        for tensor, pil_img in data:

            # 1) identity map
            out_tensor = F.affine(tensor, angle=0, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], resample=0)
            self.assertTrue(
                tensor.equal(out_tensor), msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5])
            )
            out_tensor = scripted_affine(tensor, angle=0, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], resample=0)
            self.assertTrue(
                tensor.equal(out_tensor), msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5])
            )

            if pil_img.size[0] == pil_img.size[1]:
                # 2) Test rotation
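                # (angle, expected tensor) pairs; a None expectation means the tensor result
                # is only compared against the PIL output below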
                test_configs = [
                    (90, torch.rot90(tensor, k=1, dims=(-1, -2))),
                    (45, None),
                    (30, None),
                    (-30, None),
                    (-45, None),
                    (-90, torch.rot90(tensor, k=-1, dims=(-1, -2))),
                    (180, torch.rot90(tensor, k=2, dims=(-1, -2))),
                ]
                for a, true_tensor in test_configs:

                    out_pil_img = F.affine(
                        pil_img, angle=a, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], resample=0
                    )
                    out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1))).to(self.device)

                    for fn in [F.affine, scripted_affine]:
                        out_tensor = fn(
                            tensor, angle=a, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], resample=0
                        )
                        if true_tensor is not None:
                            self.assertTrue(
                                true_tensor.equal(out_tensor),
                                msg="{}\n{} vs \n{}".format(a, out_tensor[0, :5, :5], true_tensor[0, :5, :5])
                            )
                        else:
                            true_tensor = out_tensor

                        num_diff_pixels = (true_tensor != out_pil_tensor).sum().item() / 3.0
                        ratio_diff_pixels = num_diff_pixels / true_tensor.shape[-1] / true_tensor.shape[-2]
                        # Tolerance : less than 6% of different pixels
                        self.assertLess(
                            ratio_diff_pixels,
                            0.06,
                            msg="{}\n{} vs \n{}".format(
                                ratio_diff_pixels, true_tensor[0, :7, :7], out_pil_tensor[0, :7, :7]
                            )
                        )
            else:
                test_configs = [
                    90, 45, 15, -30, -60, -120
                ]
                for a in test_configs:

                    out_pil_img = F.affine(
                        pil_img, angle=a, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], resample=0
                    )
                    out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1)))

                    for fn in [F.affine, scripted_affine]:
                        out_tensor = fn(
                            tensor, angle=a, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], resample=0
                        ).cpu()

                        num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0
                        ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2]
                        # Tolerance : less than 3% of different pixels
                        self.assertLess(
                            ratio_diff_pixels,
                            0.03,
                            msg="{}: {}\n{} vs \n{}".format(
                                a, ratio_diff_pixels, out_tensor[0, :7, :7], out_pil_tensor[0, :7, :7]
                            )
                        )

            # 3) Test translation
            test_configs = [
                [10, 12], (-12, -13)
            ]
            for t in test_configs:

                out_pil_img = F.affine(pil_img, angle=0, translate=t, scale=1.0, shear=[0.0, 0.0], resample=0)

                for fn in [F.affine, scripted_affine]:
                    out_tensor = fn(tensor, angle=0, translate=t, scale=1.0, shear=[0.0, 0.0], resample=0)
                    self.compareTensorToPIL(out_tensor, out_pil_img)

            # 4) Test rotation + translation + scale + shear
            test_configs = [
                (45, [5, 6], 1.0, [0.0, 0.0]),
                (33, (5, -4), 1.0, [0.0, 0.0]),
                (45, [-5, 4], 1.2, [0.0, 0.0]),
                (33, (-4, -8), 2.0, [0.0, 0.0]),
                (85, (10, -10), 0.7, [0.0, 0.0]),
                (0, [0, 0], 1.0, [35.0, ]),
                (-25, [0, 0], 1.2, [0.0, 15.0]),
                (-45, [-10, 0], 0.7, [2.0, 5.0]),
                (-45, [-10, -10], 1.2, [4.0, 5.0]),
                (-90, [0, 0], 1.0, [0.0, 0.0]),
            ]
            for r in [0, ]:
                for a, t, s, sh in test_configs:
                    out_pil_img = F.affine(pil_img, angle=a, translate=t, scale=s, shear=sh, resample=r)
                    out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1)))

                    for fn in [F.affine, scripted_affine]:
                        out_tensor = fn(tensor, angle=a, translate=t, scale=s, shear=sh, resample=r).cpu()
                        num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0
                        ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2]
                        # Tolerance : less than 5% (cpu), 6% (cuda) of different pixels
                        tol = 0.06 if self.device == "cuda" else 0.05
                        self.assertLess(
                            ratio_diff_pixels,
                            tol,
                            msg="{}: {}\n{} vs \n{}".format(
                                (r, a, t, s, sh), ratio_diff_pixels, out_tensor[0, :7, :7], out_pil_tensor[0, :7, :7]
                            )
                        )

    def test_rotate(self):
        # Tests on square and rectangular images
        scripted_rotate = torch.jit.script(F.rotate)

        data = [self._create_data(26, 26, device=self.device), self._create_data(32, 26, device=self.device)]
        for tensor, pil_img in data:

            img_size = pil_img.size
            centers = [
                None,
                (int(img_size[0] * 0.3), int(img_size[0] * 0.4)),
                [int(img_size[0] * 0.5), int(img_size[0] * 0.6)]
            ]

            for r in [0, ]:
                for a in range(-180, 180, 17):
                    for e in [True, False]:
                        for c in centers:

                            out_pil_img = F.rotate(pil_img, angle=a, resample=r, expand=e, center=c)
                            out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1)))
                            for fn in [F.rotate, scripted_rotate]:
                                out_tensor = fn(tensor, angle=a, resample=r, expand=e, center=c).cpu()

                                self.assertEqual(
                                    out_tensor.shape,
                                    out_pil_tensor.shape,
                                    msg="{}: {} vs {}".format(
                                        (img_size, r, a, e, c), out_tensor.shape, out_pil_tensor.shape
                                    )
                                )
                                num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0
                                ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2]
                                # Tolerance : less than 2% of different pixels
                                self.assertLess(
                                    ratio_diff_pixels,
                                    0.02,
                                    msg="{}: {}\n{} vs \n{}".format(
                                        (img_size, r, a, e, c),
                                        ratio_diff_pixels,
                                        out_tensor[0, :7, :7],
                                        out_pil_tensor[0, :7, :7]
                                    )
                                )

    def test_perspective(self):

        from torchvision.transforms import RandomPerspective

        data = [self._create_data(26, 34, device=self.device), self._create_data(26, 26, device=self.device)]
        for tensor, pil_img in data:

            scripted_transform = torch.jit.script(F.perspective)

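            # (startpoints, endpoints) pairs: a few hand-written quadrilaterals plus
            # random parameters from RandomPerspective.get_params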
            test_configs = [
                [[[0, 0], [33, 0], [33, 25], [0, 25]], [[3, 2], [32, 3], [30, 24], [2, 25]]],
                [[[3, 2], [32, 3], [30, 24], [2, 25]], [[0, 0], [33, 0], [33, 25], [0, 25]]],
                [[[3, 2], [32, 3], [30, 24], [2, 25]], [[5, 5], [30, 3], [33, 19], [4, 25]]],
            ]
            n = 10
            test_configs += [
                RandomPerspective.get_params(pil_img.size[0], pil_img.size[1], i / n) for i in range(n)
            ]

            for r in [0, ]:
                for spoints, epoints in test_configs:
                    out_pil_img = F.perspective(pil_img, startpoints=spoints, endpoints=epoints, interpolation=r)
                    out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1)))

                    for fn in [F.perspective, scripted_transform]:
                        out_tensor = fn(tensor, startpoints=spoints, endpoints=epoints, interpolation=r).cpu()

                        num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0
                        ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2]
                        # Tolerance : less than 5% of different pixels
                        self.assertLess(
                            ratio_diff_pixels,
                            0.05,
                            msg="{}: {}\n{} vs \n{}".format(
                                (r, spoints, epoints),
                                ratio_diff_pixels,
                                out_tensor[0, :7, :7],
                                out_pil_tensor[0, :7, :7]
                            )
                        )


@unittest.skipIf(not torch.cuda.is_available(), reason="Skip if no CUDA device")
class CUDATester(Tester):

    def setUp(self):
        self.device = "cuda"


if __name__ == '__main__':
    unittest.main()