test_transforms.py 85.7 KB
Newer Older
1
import itertools
2
import os
3
4
import torch
import torchvision.transforms as transforms
5
import torchvision.transforms.functional as F
6
import torchvision.transforms.functional_tensor as F_t
7
from torch._utils_internal import get_file_path_2
8
from numpy.testing import assert_array_almost_equal
9
import unittest
10
import math
11
import random
12
import numpy as np
13
import pytest
14
15
16
17
18
19
from PIL import Image
try:
    import accimage
except ImportError:
    accimage = None

20
21
22
23
24
try:
    from scipy import stats
except ImportError:
    stats = None

25
from common_utils import cycle_over, int_dtypes, float_dtypes
26
from _assert_utils import assert_equal
27
28


29
GRACE_HOPPER = get_file_path_2(
30
    os.path.dirname(os.path.abspath(__file__)), 'assets', 'encode_jpeg', 'grace_hopper_517x606.jpg')
31
32


33
class Tester(unittest.TestCase):
34

35
    def test_center_crop(self):
36
37
38
        height = random.randint(10, 32) * 2
        width = random.randint(10, 32) * 2
        oheight = random.randint(5, (height - 2) / 2) * 2
39
40
        owidth = random.randint(5, (width - 2) / 2) * 2

41
        img = torch.ones(3, height, width)
42
43
44
        oh1 = (height - oheight) // 2
        ow1 = (width - owidth) // 2
        imgnarrow = img[:, oh1:oh1 + oheight, ow1:ow1 + owidth]
45
46
47
48
49
50
        imgnarrow.fill_(0)
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.CenterCrop((oheight, owidth)),
            transforms.ToTensor(),
        ])(img)
51
52
        self.assertEqual(result.sum(), 0,
                         "height: {} width: {} oheight: {} owdith: {}".format(height, width, oheight, owidth))
53
54
55
56
57
58
59
60
        oheight += 1
        owidth += 1
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.CenterCrop((oheight, owidth)),
            transforms.ToTensor(),
        ])(img)
        sum1 = result.sum()
61
62
        self.assertGreater(sum1, 1,
                           "height: {} width: {} oheight: {} owdith: {}".format(height, width, oheight, owidth))
63
        oheight += 1
64
        owidth += 1
65
66
67
68
69
70
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.CenterCrop((oheight, owidth)),
            transforms.ToTensor(),
        ])(img)
        sum2 = result.sum()
71
72
73
74
        self.assertGreater(sum2, 0,
                           "height: {} width: {} oheight: {} owdith: {}".format(height, width, oheight, owidth))
        self.assertGreater(sum2, sum1,
                           "height: {} width: {} oheight: {} owdith: {}".format(height, width, oheight, owidth))
75

76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
    def test_center_crop_2(self):
        """ Tests when center crop size is larger than image size, along any dimension"""
        even_image_size = (random.randint(10, 32) * 2, random.randint(10, 32) * 2)
        odd_image_size = (even_image_size[0] + 1, even_image_size[1] + 1)

        # Since height is independent of width, we can ignore images with odd height and even width and vice-versa.
        input_image_sizes = [even_image_size, odd_image_size]

        # Get different crop sizes
        delta = random.choice((1, 3, 5))
        crop_size_delta = [-2 * delta, -delta, 0, delta, 2 * delta]
        crop_size_params = itertools.product(input_image_sizes, crop_size_delta, crop_size_delta)

        for (input_image_size, delta_height, delta_width) in crop_size_params:
            img = torch.ones(3, *input_image_size)
            crop_size = (input_image_size[0] + delta_height, input_image_size[1] + delta_width)

            # Test both transforms, one with PIL input and one with tensor
            output_pil = transforms.Compose([
                transforms.ToPILImage(),
                transforms.CenterCrop(crop_size),
                transforms.ToTensor()],
            )(img)
            self.assertEqual(output_pil.size()[1:3], crop_size,
                             "image_size: {} crop_size: {}".format(input_image_size, crop_size))

            output_tensor = transforms.CenterCrop(crop_size)(img)
            self.assertEqual(output_tensor.size()[1:3], crop_size,
                             "image_size: {} crop_size: {}".format(input_image_size, crop_size))

            # Ensure output for PIL and Tensor are equal
107
108
109
110
            assert_equal(
                output_tensor, output_pil, check_stride=False,
                msg="image_size: {} crop_size: {}".format(input_image_size, crop_size)
            )
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132

            # Check if content in center of both image and cropped output is same.
            center_size = (min(crop_size[0], input_image_size[0]), min(crop_size[1], input_image_size[1]))
            crop_center_tl, input_center_tl = [0, 0], [0, 0]
            for index in range(2):
                if crop_size[index] > input_image_size[index]:
                    crop_center_tl[index] = (crop_size[index] - input_image_size[index]) // 2
                else:
                    input_center_tl[index] = (input_image_size[index] - crop_size[index]) // 2

            output_center = output_pil[
                :,
                crop_center_tl[0]:crop_center_tl[0] + center_size[0],
                crop_center_tl[1]:crop_center_tl[1] + center_size[1]
            ]

            img_center = img[
                :,
                input_center_tl[0]:input_center_tl[0] + center_size[0],
                input_center_tl[1]:input_center_tl[1] + center_size[1]
            ]

133
134
135
136
            assert_equal(
                output_center, img_center, check_stride=False,
                msg="image_size: {} crop_size: {}".format(input_image_size, crop_size)
            )
137

138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
    def test_five_crop(self):
        to_pil_image = transforms.ToPILImage()
        h = random.randint(5, 25)
        w = random.randint(5, 25)
        for single_dim in [True, False]:
            crop_h = random.randint(1, h)
            crop_w = random.randint(1, w)
            if single_dim:
                crop_h = min(crop_h, crop_w)
                crop_w = crop_h
                transform = transforms.FiveCrop(crop_h)
            else:
                transform = transforms.FiveCrop((crop_h, crop_w))

            img = torch.FloatTensor(3, h, w).uniform_()
            results = transform(to_pil_image(img))

155
            self.assertEqual(len(results), 5)
156
            for crop in results:
157
                self.assertEqual(crop.size, (crop_w, crop_h))
158
159
160
161
162
163
164
165

            to_pil_image = transforms.ToPILImage()
            tl = to_pil_image(img[:, 0:crop_h, 0:crop_w])
            tr = to_pil_image(img[:, 0:crop_h, w - crop_w:])
            bl = to_pil_image(img[:, h - crop_h:, 0:crop_w])
            br = to_pil_image(img[:, h - crop_h:, w - crop_w:])
            center = transforms.CenterCrop((crop_h, crop_w))(to_pil_image(img))
            expected_output = (tl, tr, bl, br, center)
166
            self.assertEqual(results, expected_output)
167
168
169
170
171
172
173
174
175
176
177
178

    def test_ten_crop(self):
        to_pil_image = transforms.ToPILImage()
        h = random.randint(5, 25)
        w = random.randint(5, 25)
        for should_vflip in [True, False]:
            for single_dim in [True, False]:
                crop_h = random.randint(1, h)
                crop_w = random.randint(1, w)
                if single_dim:
                    crop_h = min(crop_h, crop_w)
                    crop_w = crop_h
179
180
                    transform = transforms.TenCrop(crop_h,
                                                   vertical_flip=should_vflip)
181
182
                    five_crop = transforms.FiveCrop(crop_h)
                else:
183
184
                    transform = transforms.TenCrop((crop_h, crop_w),
                                                   vertical_flip=should_vflip)
185
186
187
188
189
                    five_crop = transforms.FiveCrop((crop_h, crop_w))

                img = to_pil_image(torch.FloatTensor(3, h, w).uniform_())
                results = transform(img)
                expected_output = five_crop(img)
190
191
192
193
194

                # Checking if FiveCrop and TenCrop can be printed as string
                transform.__repr__()
                five_crop.__repr__()

195
196
197
198
199
200
201
                if should_vflip:
                    vflipped_img = img.transpose(Image.FLIP_TOP_BOTTOM)
                    expected_output += five_crop(vflipped_img)
                else:
                    hflipped_img = img.transpose(Image.FLIP_LEFT_RIGHT)
                    expected_output += five_crop(hflipped_img)

202
203
                self.assertEqual(len(results), 10)
                self.assertEqual(results, expected_output)
204

205
206
207
208
209
210
211
212
    def test_randomresized_params(self):
        height = random.randint(24, 32) * 2
        width = random.randint(24, 32) * 2
        img = torch.ones(3, height, width)
        to_pil_image = transforms.ToPILImage()
        img = to_pil_image(img)
        size = 100
        epsilon = 0.05
213
        min_scale = 0.25
Francisco Massa's avatar
Francisco Massa committed
214
        for _ in range(10):
215
            scale_min = max(round(random.random(), 2), min_scale)
216
            scale_range = (scale_min, scale_min + round(random.random(), 2))
217
            aspect_min = max(round(random.random(), 2), epsilon)
218
219
            aspect_ratio_range = (aspect_min, aspect_min + round(random.random(), 2))
            randresizecrop = transforms.RandomResizedCrop(size, scale_range, aspect_ratio_range)
220
            i, j, h, w = randresizecrop.get_params(img, scale_range, aspect_ratio_range)
221
            aspect_ratio_obtained = w / h
222
223
224
225
226
227
228
            self.assertTrue((min(aspect_ratio_range) - epsilon <= aspect_ratio_obtained and
                             aspect_ratio_obtained <= max(aspect_ratio_range) + epsilon) or
                            aspect_ratio_obtained == 1.0)
            self.assertIsInstance(i, int)
            self.assertIsInstance(j, int)
            self.assertIsInstance(h, int)
            self.assertIsInstance(w, int)
229

230
    def test_randomperspective(self):
Francisco Massa's avatar
Francisco Massa committed
231
        for _ in range(10):
232
233
234
235
236
237
238
239
240
241
            height = random.randint(24, 32) * 2
            width = random.randint(24, 32) * 2
            img = torch.ones(3, height, width)
            to_pil_image = transforms.ToPILImage()
            img = to_pil_image(img)
            perp = transforms.RandomPerspective()
            startpoints, endpoints = perp.get_params(width, height, 0.5)
            tr_img = F.perspective(img, startpoints, endpoints)
            tr_img2 = F.to_tensor(F.perspective(tr_img, endpoints, startpoints))
            tr_img = F.to_tensor(tr_img)
242
243
244
245
            self.assertEqual(img.size[0], width)
            self.assertEqual(img.size[1], height)
            self.assertGreater(torch.nn.functional.mse_loss(tr_img, F.to_tensor(img)) + 0.3,
                               torch.nn.functional.mse_loss(tr_img2, F.to_tensor(img)))
246

247
    def test_randomperspective_fill(self):
248
249
250
251
252
253
254
255

        # assert fill being either a Sequence or a Number
        with self.assertRaises(TypeError):
            transforms.RandomPerspective(fill={})

        t = transforms.RandomPerspective(fill=None)
        self.assertTrue(t.fill == 0)

256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
        height = 100
        width = 100
        img = torch.ones(3, height, width)
        to_pil_image = transforms.ToPILImage()
        img = to_pil_image(img)

        modes = ("L", "RGB", "F")
        nums_bands = [len(mode) for mode in modes]
        fill = 127

        for mode, num_bands in zip(modes, nums_bands):
            img_conv = img.convert(mode)
            perspective = transforms.RandomPerspective(p=1, fill=fill)
            tr_img = perspective(img_conv)
            pixel = tr_img.getpixel((0, 0))

            if not isinstance(pixel, tuple):
                pixel = (pixel,)
            self.assertTupleEqual(pixel, tuple([fill] * num_bands))

        for mode, num_bands in zip(modes, nums_bands):
            img_conv = img.convert(mode)
            startpoints, endpoints = transforms.RandomPerspective.get_params(width, height, 0.5)
            tr_img = F.perspective(img_conv, startpoints, endpoints, fill=fill)
            pixel = tr_img.getpixel((0, 0))
281

282
283
284
285
286
287
288
289
            if not isinstance(pixel, tuple):
                pixel = (pixel,)
            self.assertTupleEqual(pixel, tuple([fill] * num_bands))

            for wrong_num_bands in set(nums_bands) - {num_bands}:
                with self.assertRaises(ValueError):
                    F.perspective(img_conv, startpoints, endpoints, fill=tuple([fill] * wrong_num_bands))

290
    def test_resize(self):
291

292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
        input_sizes = [
            # height, width
            # square image
            (28, 28),
            (27, 27),
            # rectangular image: h < w
            (28, 34),
            (29, 35),
            # rectangular image: h > w
            (34, 28),
            (35, 29),
        ]
        test_output_sizes_1 = [
            # single integer
            22, 27, 28, 36,
            # single integer in tuple/list
            [22, ], (27, ),
        ]
        test_output_sizes_2 = [
            # two integers
            [22, 22], [22, 28], [22, 36],
            [27, 22], [36, 22], [28, 28],
            [28, 37], [37, 27], [37, 37]
        ]

        for height, width in input_sizes:
            img = Image.new("RGB", size=(width, height), color=127)

            for osize in test_output_sizes_1:
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
                for max_size in (None, 37, 1000):

                    t = transforms.Resize(osize, max_size=max_size)
                    result = t(img)

                    msg = "{}, {} - {} - {}".format(height, width, osize, max_size)
                    osize = osize[0] if isinstance(osize, (list, tuple)) else osize
                    # If size is an int, smaller edge of the image will be matched to this number.
                    # i.e, if height > width, then image will be rescaled to (size * height / width, size).
                    if height < width:
                        exp_w, exp_h = (int(osize * width / height), osize)  # (w, h)
                        if max_size is not None and max_size < exp_w:
                            exp_w, exp_h = max_size, int(max_size * exp_h / exp_w)
                        self.assertEqual(result.size, (exp_w, exp_h), msg=msg)
                    elif width < height:
                        exp_w, exp_h = (osize, int(osize * height / width))  # (w, h)
                        if max_size is not None and max_size < exp_h:
                            exp_w, exp_h = int(max_size * exp_w / exp_h), max_size
                        self.assertEqual(result.size, (exp_w, exp_h), msg=msg)
                    else:
                        exp_w, exp_h = (osize, osize)  # (w, h)
                        if max_size is not None and max_size < osize:
                            exp_w, exp_h = max_size, max_size
                        self.assertEqual(result.size, (exp_w, exp_h), msg=msg)
345

346
347
        for height, width in input_sizes:
            img = Image.new("RGB", size=(width, height), color=127)
348

349
350
            for osize in test_output_sizes_2:
                oheight, owidth = osize
351

352
353
                t = transforms.Resize(osize)
                result = t(img)
354

355
                self.assertEqual((owidth, oheight), result.size)
356

357
358
359
360
        with self.assertWarnsRegex(UserWarning, r"Anti-alias option is always applied for PIL Image input"):
            t = transforms.Resize(osize, antialias=False)
            t(img)

361
362
363
364
    def test_random_crop(self):
        height = random.randint(10, 32) * 2
        width = random.randint(10, 32) * 2
        oheight = random.randint(5, (height - 2) / 2) * 2
365
        owidth = random.randint(5, (width - 2) / 2) * 2
366
367
368
369
370
371
        img = torch.ones(3, height, width)
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomCrop((oheight, owidth)),
            transforms.ToTensor(),
        ])(img)
372
373
        self.assertEqual(result.size(1), oheight)
        self.assertEqual(result.size(2), owidth)
374

375
376
377
378
379
380
        padding = random.randint(1, 20)
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomCrop((oheight, owidth), padding=padding),
            transforms.ToTensor(),
        ])(img)
381
382
        self.assertEqual(result.size(1), oheight)
        self.assertEqual(result.size(2), owidth)
383

384
385
386
387
388
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomCrop((height, width)),
            transforms.ToTensor()
        ])(img)
389
390
        self.assertEqual(result.size(1), height)
        self.assertEqual(result.size(2), width)
391
        torch.testing.assert_close(result, img)
392

393
394
395
396
397
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomCrop((height + 1, width + 1), pad_if_needed=True),
            transforms.ToTensor(),
        ])(img)
398
399
        self.assertEqual(result.size(1), height + 1)
        self.assertEqual(result.size(2), width + 1)
400

vfdev's avatar
vfdev committed
401
402
403
404
405
        t = transforms.RandomCrop(48)
        img = torch.ones(3, 32, 32)
        with self.assertRaisesRegex(ValueError, r"Required crop size .+ is larger then input image size .+"):
            t(img)

Soumith Chintala's avatar
Soumith Chintala committed
406
407
408
409
    def test_lambda(self):
        trans = transforms.Lambda(lambda x: x.add(10))
        x = torch.randn(10)
        y = trans(x)
410
        assert_equal(y, torch.add(x, 10))
Soumith Chintala's avatar
Soumith Chintala committed
411
412
413
414

        trans = transforms.Lambda(lambda x: x.add_(10))
        x = torch.randn(10)
        y = trans(x)
415
        assert_equal(y, x)
416

417
418
419
        # Checking if Lambda can be printed as string
        trans.__repr__()

420
    @unittest.skipIf(stats is None, 'scipy.stats not available')
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
    def test_random_apply(self):
        random_state = random.getstate()
        random.seed(42)
        random_apply_transform = transforms.RandomApply(
            [
                transforms.RandomRotation((-45, 45)),
                transforms.RandomHorizontalFlip(),
                transforms.RandomVerticalFlip(),
            ], p=0.75
        )
        img = transforms.ToPILImage()(torch.rand(3, 10, 10))
        num_samples = 250
        num_applies = 0
        for _ in range(num_samples):
            out = random_apply_transform(img)
            if out != img:
                num_applies += 1

        p_value = stats.binom_test(num_applies, num_samples, p=0.75)
        random.setstate(random_state)
441
        self.assertGreater(p_value, 0.0001)
442
443
444
445

        # Checking if RandomApply can be printed as string
        random_apply_transform.__repr__()

446
    @unittest.skipIf(stats is None, 'scipy.stats not available')
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
    def test_random_choice(self):
        random_state = random.getstate()
        random.seed(42)
        random_choice_transform = transforms.RandomChoice(
            [
                transforms.Resize(15),
                transforms.Resize(20),
                transforms.CenterCrop(10)
            ]
        )
        img = transforms.ToPILImage()(torch.rand(3, 25, 25))
        num_samples = 250
        num_resize_15 = 0
        num_resize_20 = 0
        num_crop_10 = 0
        for _ in range(num_samples):
            out = random_choice_transform(img)
            if out.size == (15, 15):
                num_resize_15 += 1
            elif out.size == (20, 20):
                num_resize_20 += 1
            elif out.size == (10, 10):
                num_crop_10 += 1

        p_value = stats.binom_test(num_resize_15, num_samples, p=0.33333)
472
        self.assertGreater(p_value, 0.0001)
473
        p_value = stats.binom_test(num_resize_20, num_samples, p=0.33333)
474
        self.assertGreater(p_value, 0.0001)
475
        p_value = stats.binom_test(num_crop_10, num_samples, p=0.33333)
476
        self.assertGreater(p_value, 0.0001)
477
478
479
480
481

        random.setstate(random_state)
        # Checking if RandomChoice can be printed as string
        random_choice_transform.__repr__()

482
    @unittest.skipIf(stats is None, 'scipy.stats not available')
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
    def test_random_order(self):
        random_state = random.getstate()
        random.seed(42)
        random_order_transform = transforms.RandomOrder(
            [
                transforms.Resize(20),
                transforms.CenterCrop(10)
            ]
        )
        img = transforms.ToPILImage()(torch.rand(3, 25, 25))
        num_samples = 250
        num_normal_order = 0
        resize_crop_out = transforms.CenterCrop(10)(transforms.Resize(20)(img))
        for _ in range(num_samples):
            out = random_order_transform(img)
            if out == resize_crop_out:
                num_normal_order += 1

        p_value = stats.binom_test(num_normal_order, num_samples, p=0.5)
        random.setstate(random_state)
503
        self.assertGreater(p_value, 0.0001)
504
505
506
507

        # Checking if RandomOrder can be printed as string
        random_order_transform.__repr__()

508
    def test_to_tensor(self):
509
        test_channels = [1, 3, 4]
510
511
        height, width = 4, 4
        trans = transforms.ToTensor()
512

513
514
515
516
517
518
519
        with self.assertRaises(TypeError):
            trans(np.random.rand(1, height, width).tolist())

        with self.assertRaises(ValueError):
            trans(np.random.rand(height))
            trans(np.random.rand(1, 1, height, width))

520
521
522
523
        for channels in test_channels:
            input_data = torch.ByteTensor(channels, height, width).random_(0, 255).float().div_(255)
            img = transforms.ToPILImage()(input_data)
            output = trans(img)
524
            torch.testing.assert_close(output, input_data, check_stride=False)
525

526
            ndarray = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
527
528
            output = trans(ndarray)
            expected_output = ndarray.transpose((2, 0, 1)) / 255.0
529
            torch.testing.assert_close(output.numpy(), expected_output, check_stride=False, check_dtype=False)
530

531
532
533
            ndarray = np.random.rand(height, width, channels).astype(np.float32)
            output = trans(ndarray)
            expected_output = ndarray.transpose((2, 0, 1))
534
            torch.testing.assert_close(output.numpy(), expected_output, check_stride=False, check_dtype=False)
535

536
537
538
539
        # separate test for mode '1' PIL images
        input_data = torch.ByteTensor(1, height, width).bernoulli_()
        img = transforms.ToPILImage()(input_data.mul(255)).convert('1')
        output = trans(img)
540
        torch.testing.assert_close(input_data, output, check_dtype=False, check_stride=False)
541

542
543
544
545
546
547
548
549
550
551
552
553
554
555
    def test_to_tensor_with_other_default_dtypes(self):
        current_def_dtype = torch.get_default_dtype()

        t = transforms.ToTensor()
        np_arr = np.random.randint(0, 255, (32, 32, 3), dtype=np.uint8)
        img = Image.fromarray(np_arr)

        for dtype in [torch.float16, torch.float, torch.double]:
            torch.set_default_dtype(dtype)
            res = t(img)
            self.assertTrue(res.dtype == dtype, msg=f"{res.dtype} vs {dtype}")

        torch.set_default_dtype(current_def_dtype)

556
557
558
559
    def test_max_value(self):
        for dtype in int_dtypes():
            self.assertEqual(F_t._max_value(dtype), torch.iinfo(dtype).max)

560
561
562
563
        # remove float testing as it can lead to errors such as
        # runtime error: 5.7896e+76 is outside the range of representable values of type 'float'
        # for dtype in float_dtypes():
        #     self.assertGreater(F_t._max_value(dtype), torch.finfo(dtype).max)
564

565
566
567
568
569
570
    def test_convert_image_dtype_float_to_float(self):
        for input_dtype, output_dtypes in cycle_over(float_dtypes()):
            input_image = torch.tensor((0.0, 1.0), dtype=input_dtype)
            for output_dtype in output_dtypes:
                with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype):
                    transform = transforms.ConvertImageDtype(output_dtype)
571
572
                    transform_script = torch.jit.script(F.convert_image_dtype)

573
                    output_image = transform(input_image)
574
575
                    output_image_script = transform_script(input_image, output_dtype)

576
                    torch.testing.assert_close(output_image_script, output_image, rtol=0.0, atol=1e-6)
577
578
579
580
581
582
583
584
585
586
587
588
589

                    actual_min, actual_max = output_image.tolist()
                    desired_min, desired_max = 0.0, 1.0

                    self.assertAlmostEqual(actual_min, desired_min)
                    self.assertAlmostEqual(actual_max, desired_max)

    def test_convert_image_dtype_float_to_int(self):
        for input_dtype in float_dtypes():
            input_image = torch.tensor((0.0, 1.0), dtype=input_dtype)
            for output_dtype in int_dtypes():
                with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype):
                    transform = transforms.ConvertImageDtype(output_dtype)
590
                    transform_script = torch.jit.script(F.convert_image_dtype)
591
592
593
594
595
596
597
598

                    if (input_dtype == torch.float32 and output_dtype in (torch.int32, torch.int64)) or (
                            input_dtype == torch.float64 and output_dtype == torch.int64
                    ):
                        with self.assertRaises(RuntimeError):
                            transform(input_image)
                    else:
                        output_image = transform(input_image)
599
600
                        output_image_script = transform_script(input_image, output_dtype)

601
                        torch.testing.assert_close(output_image_script, output_image, rtol=0.0, atol=1e-6)
602
603
604
605
606
607
608
609
610
611
612
613
614

                        actual_min, actual_max = output_image.tolist()
                        desired_min, desired_max = 0, torch.iinfo(output_dtype).max

                        self.assertEqual(actual_min, desired_min)
                        self.assertEqual(actual_max, desired_max)

    def test_convert_image_dtype_int_to_float(self):
        for input_dtype in int_dtypes():
            input_image = torch.tensor((0, torch.iinfo(input_dtype).max), dtype=input_dtype)
            for output_dtype in float_dtypes():
                with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype):
                    transform = transforms.ConvertImageDtype(output_dtype)
615
616
                    transform_script = torch.jit.script(F.convert_image_dtype)

617
                    output_image = transform(input_image)
618
619
                    output_image_script = transform_script(input_image, output_dtype)

620
                    torch.testing.assert_close(output_image_script, output_image, rtol=0.0, atol=1e-6)
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638

                    actual_min, actual_max = output_image.tolist()
                    desired_min, desired_max = 0.0, 1.0

                    self.assertAlmostEqual(actual_min, desired_min)
                    self.assertGreaterEqual(actual_min, desired_min)
                    self.assertAlmostEqual(actual_max, desired_max)
                    self.assertLessEqual(actual_max, desired_max)

    def test_convert_image_dtype_int_to_int(self):
        for input_dtype, output_dtypes in cycle_over(int_dtypes()):
            input_max = torch.iinfo(input_dtype).max
            input_image = torch.tensor((0, input_max), dtype=input_dtype)
            for output_dtype in output_dtypes:
                output_max = torch.iinfo(output_dtype).max

                with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype):
                    transform = transforms.ConvertImageDtype(output_dtype)
639
640
                    transform_script = torch.jit.script(F.convert_image_dtype)

641
                    output_image = transform(input_image)
642
643
                    output_image_script = transform_script(input_image, output_dtype)

644
645
646
647
648
649
                    torch.testing.assert_close(
                        output_image_script,
                        output_image,
                        rtol=0.0,
                        atol=1e-6,
                        msg="{} vs {}".format(output_image_script, output_image),
650
                    )
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683

                    actual_min, actual_max = output_image.tolist()
                    desired_min, desired_max = 0, output_max

                    # see https://github.com/pytorch/vision/pull/2078#issuecomment-641036236 for details
                    if input_max >= output_max:
                        error_term = 0
                    else:
                        error_term = 1 - (torch.iinfo(output_dtype).max + 1) // (torch.iinfo(input_dtype).max + 1)

                    self.assertEqual(actual_min, desired_min)
                    self.assertEqual(actual_max, desired_max + error_term)

    def test_convert_image_dtype_int_to_int_consistency(self):
        for input_dtype, output_dtypes in cycle_over(int_dtypes()):
            input_max = torch.iinfo(input_dtype).max
            input_image = torch.tensor((0, input_max), dtype=input_dtype)
            for output_dtype in output_dtypes:
                output_max = torch.iinfo(output_dtype).max
                if output_max <= input_max:
                    continue

                with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype):
                    transform = transforms.ConvertImageDtype(output_dtype)
                    inverse_transfrom = transforms.ConvertImageDtype(input_dtype)
                    output_image = inverse_transfrom(transform(input_image))

                    actual_min, actual_max = output_image.tolist()
                    desired_min, desired_max = 0, input_max

                    self.assertEqual(actual_min, desired_min)
                    self.assertEqual(actual_max, desired_max)

684
685
686
687
688
689
690
    @unittest.skipIf(accimage is None, 'accimage not available')
    def test_accimage_to_tensor(self):
        trans = transforms.ToTensor()

        expected_output = trans(Image.open(GRACE_HOPPER).convert('RGB'))
        output = trans(accimage.Image(GRACE_HOPPER))

691
        torch.testing.assert_close(output, expected_output)
692
693
694
695
696
697
698
699
700
701
702
703
704
705

    def test_pil_to_tensor(self):
        test_channels = [1, 3, 4]
        height, width = 4, 4
        trans = transforms.PILToTensor()

        with self.assertRaises(TypeError):
            trans(np.random.rand(1, height, width).tolist())
            trans(np.random.rand(1, height, width))

        for channels in test_channels:
            input_data = torch.ByteTensor(channels, height, width).random_(0, 255)
            img = transforms.ToPILImage()(input_data)
            output = trans(img)
706
            torch.testing.assert_close(input_data, output, check_stride=False)
707
708
709
710
711

            input_data = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
            img = transforms.ToPILImage()(input_data)
            output = trans(img)
            expected_output = input_data.transpose((2, 0, 1))
712
            torch.testing.assert_close(output.numpy(), expected_output)
713
714
715
716
717

            input_data = torch.as_tensor(np.random.rand(channels, height, width).astype(np.float32))
            img = transforms.ToPILImage()(input_data)  # CHW -> HWC and (* 255).byte()
            output = trans(img)  # HWC -> CHW
            expected_output = (input_data * 255).byte()
718
            torch.testing.assert_close(output, expected_output, check_stride=False)
719
720
721
722

        # separate test for mode '1' PIL images
        input_data = torch.ByteTensor(1, height, width).bernoulli_()
        img = transforms.ToPILImage()(input_data.mul(255)).convert('1')
723
724
        output = trans(img).view(torch.uint8).bool().to(torch.uint8)
        torch.testing.assert_close(input_data, output, check_stride=False)
725
726
727
728
729
730
731
732
733

    @unittest.skipIf(accimage is None, 'accimage not available')
    def test_accimage_pil_to_tensor(self):
        trans = transforms.PILToTensor()

        expected_output = trans(Image.open(GRACE_HOPPER).convert('RGB'))
        output = trans(accimage.Image(GRACE_HOPPER))

        self.assertEqual(expected_output.size(), output.size())
734
        torch.testing.assert_close(output, expected_output, check_stride=False)
735
736
737
738

    @unittest.skipIf(accimage is None, 'accimage not available')
    def test_accimage_resize(self):
        trans = transforms.Compose([
739
            transforms.Resize(256, interpolation=Image.LINEAR),
740
741
742
            transforms.ToTensor(),
        ])

743
744
745
        # Checking if Compose, Resize and ToTensor can be printed as string
        trans.__repr__()

746
747
748
749
750
751
752
        expected_output = trans(Image.open(GRACE_HOPPER).convert('RGB'))
        output = trans(accimage.Image(GRACE_HOPPER))

        self.assertEqual(expected_output.size(), output.size())
        self.assertLess(np.abs((expected_output - output).mean()), 1e-3)
        self.assertLess((expected_output - output).var(), 1e-5)
        # note the high absolute tolerance
753
        self.assertTrue(np.allclose(output.numpy(), expected_output.numpy(), atol=5e-2))
754
755
756
757
758
759
760
761

    @unittest.skipIf(accimage is None, 'accimage not available')
    def test_accimage_crop(self):
        trans = transforms.Compose([
            transforms.CenterCrop(256),
            transforms.ToTensor(),
        ])

762
763
764
        # Checking if Compose, CenterCrop and ToTensor can be printed as string
        trans.__repr__()

765
766
767
768
        expected_output = trans(Image.open(GRACE_HOPPER).convert('RGB'))
        output = trans(accimage.Image(GRACE_HOPPER))

        self.assertEqual(expected_output.size(), output.size())
769
        torch.testing.assert_close(output, expected_output)
770

771
    def test_1_channel_tensor_to_pil_image(self):
772
773
        to_tensor = transforms.ToTensor()

774
        img_data_float = torch.Tensor(1, 4, 4).uniform_()
775
776
777
778
        img_data_byte = torch.ByteTensor(1, 4, 4).random_(0, 255)
        img_data_short = torch.ShortTensor(1, 4, 4).random_()
        img_data_int = torch.IntTensor(1, 4, 4).random_()

779
780
781
782
783
784
785
786
787
788
        inputs = [img_data_float, img_data_byte, img_data_short, img_data_int]
        expected_outputs = [img_data_float.mul(255).int().float().div(255).numpy(),
                            img_data_byte.float().div(255.0).numpy(),
                            img_data_short.numpy(),
                            img_data_int.numpy()]
        expected_modes = ['L', 'L', 'I;16', 'I']

        for img_data, expected_output, mode in zip(inputs, expected_outputs, expected_modes):
            for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]:
                img = transform(img_data)
789
                self.assertEqual(img.mode, mode)
790
                torch.testing.assert_close(expected_output, to_tensor(img).numpy(), check_stride=False)
791
792
        # 'F' mode for torch.FloatTensor
        img_F_mode = transforms.ToPILImage(mode='F')(img_data_float)
793
        self.assertEqual(img_F_mode.mode, 'F')
794
795
796
        torch.testing.assert_close(
            np.array(Image.fromarray(img_data_float.squeeze(0).numpy(), mode='F')), np.array(img_F_mode)
        )
797
798
799
800
801
802
803
804
805
806
807
808

    def test_1_channel_ndarray_to_pil_image(self):
        img_data_float = torch.Tensor(4, 4, 1).uniform_().numpy()
        img_data_byte = torch.ByteTensor(4, 4, 1).random_(0, 255).numpy()
        img_data_short = torch.ShortTensor(4, 4, 1).random_().numpy()
        img_data_int = torch.IntTensor(4, 4, 1).random_().numpy()

        inputs = [img_data_float, img_data_byte, img_data_short, img_data_int]
        expected_modes = ['F', 'L', 'I;16', 'I']
        for img_data, mode in zip(inputs, expected_modes):
            for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]:
                img = transform(img_data)
809
                self.assertEqual(img.mode, mode)
810
811
812
                # note: we explicitly convert img's dtype because pytorch doesn't support uint16
                # and otherwise assert_close wouldn't be able to construct a tensor from the uint16 array
                torch.testing.assert_close(img_data[:, :, 0], np.asarray(img).astype(img_data.dtype))
813

surgan12's avatar
surgan12 committed
814
815
816
817
    def test_2_channel_ndarray_to_pil_image(self):
        def verify_img_data(img_data, mode):
            if mode is None:
                img = transforms.ToPILImage()(img_data)
818
                self.assertEqual(img.mode, 'LA')  # default should assume LA
surgan12's avatar
surgan12 committed
819
820
            else:
                img = transforms.ToPILImage(mode=mode)(img_data)
821
                self.assertEqual(img.mode, mode)
surgan12's avatar
surgan12 committed
822
823
            split = img.split()
            for i in range(2):
824
                torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False)
surgan12's avatar
surgan12 committed
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841

        img_data = torch.ByteTensor(4, 4, 2).random_(0, 255).numpy()
        for mode in [None, 'LA']:
            verify_img_data(img_data, mode)

        transforms.ToPILImage().__repr__()

        with self.assertRaises(ValueError):
            # should raise if we try a mode for 4 or 1 or 3 channel images
            transforms.ToPILImage(mode='RGBA')(img_data)
            transforms.ToPILImage(mode='P')(img_data)
            transforms.ToPILImage(mode='RGB')(img_data)

    def test_2_channel_tensor_to_pil_image(self):
        def verify_img_data(img_data, expected_output, mode):
            if mode is None:
                img = transforms.ToPILImage()(img_data)
842
                self.assertEqual(img.mode, 'LA')  # default should assume LA
surgan12's avatar
surgan12 committed
843
844
            else:
                img = transforms.ToPILImage(mode=mode)(img_data)
845
                self.assertEqual(img.mode, mode)
surgan12's avatar
surgan12 committed
846
847
            split = img.split()
            for i in range(2):
848
                self.assertTrue(np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy()))
surgan12's avatar
surgan12 committed
849
850
851
852
853
854
855
856
857
858
859
860

        img_data = torch.Tensor(2, 4, 4).uniform_()
        expected_output = img_data.mul(255).int().float().div(255)
        for mode in [None, 'LA']:
            verify_img_data(img_data, expected_output, mode=mode)

        with self.assertRaises(ValueError):
            # should raise if we try a mode for 4 or 1 or 3 channel images
            transforms.ToPILImage(mode='RGBA')(img_data)
            transforms.ToPILImage(mode='P')(img_data)
            transforms.ToPILImage(mode='RGB')(img_data)

861
862
863
864
    def test_3_channel_tensor_to_pil_image(self):
        def verify_img_data(img_data, expected_output, mode):
            if mode is None:
                img = transforms.ToPILImage()(img_data)
865
                self.assertEqual(img.mode, 'RGB')  # default should assume RGB
866
867
            else:
                img = transforms.ToPILImage(mode=mode)(img_data)
868
                self.assertEqual(img.mode, mode)
869
870
            split = img.split()
            for i in range(3):
871
                self.assertTrue(np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy()))
872

873
874
875
876
        img_data = torch.Tensor(3, 4, 4).uniform_()
        expected_output = img_data.mul(255).int().float().div(255)
        for mode in [None, 'RGB', 'HSV', 'YCbCr']:
            verify_img_data(img_data, expected_output, mode=mode)
877

878
        with self.assertRaises(ValueError):
surgan12's avatar
surgan12 committed
879
            # should raise if we try a mode for 4 or 1 or 2 channel images
880
881
            transforms.ToPILImage(mode='RGBA')(img_data)
            transforms.ToPILImage(mode='P')(img_data)
surgan12's avatar
surgan12 committed
882
            transforms.ToPILImage(mode='LA')(img_data)
883

Varun Agrawal's avatar
Varun Agrawal committed
884
885
886
        with self.assertRaises(ValueError):
            transforms.ToPILImage()(torch.Tensor(1, 3, 4, 4).uniform_())

887
888
889
890
    def test_3_channel_ndarray_to_pil_image(self):
        def verify_img_data(img_data, mode):
            if mode is None:
                img = transforms.ToPILImage()(img_data)
891
                self.assertEqual(img.mode, 'RGB')  # default should assume RGB
892
893
            else:
                img = transforms.ToPILImage(mode=mode)(img_data)
894
                self.assertEqual(img.mode, mode)
895
896
            split = img.split()
            for i in range(3):
897
                torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False)
898

899
900
901
902
        img_data = torch.ByteTensor(4, 4, 3).random_(0, 255).numpy()
        for mode in [None, 'RGB', 'HSV', 'YCbCr']:
            verify_img_data(img_data, mode)

903
904
905
        # Checking if ToPILImage can be printed as string
        transforms.ToPILImage().__repr__()

906
        with self.assertRaises(ValueError):
surgan12's avatar
surgan12 committed
907
            # should raise if we try a mode for 4 or 1 or 2 channel images
908
909
            transforms.ToPILImage(mode='RGBA')(img_data)
            transforms.ToPILImage(mode='P')(img_data)
surgan12's avatar
surgan12 committed
910
            transforms.ToPILImage(mode='LA')(img_data)
911
912
913
914
915

    def test_4_channel_tensor_to_pil_image(self):
        def verify_img_data(img_data, expected_output, mode):
            if mode is None:
                img = transforms.ToPILImage()(img_data)
916
                self.assertEqual(img.mode, 'RGBA')  # default should assume RGBA
917
918
            else:
                img = transforms.ToPILImage(mode=mode)(img_data)
919
                self.assertEqual(img.mode, mode)
920
921
922

            split = img.split()
            for i in range(4):
923
                self.assertTrue(np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy()))
924

925
        img_data = torch.Tensor(4, 4, 4).uniform_()
926
        expected_output = img_data.mul(255).int().float().div(255)
surgan12's avatar
surgan12 committed
927
        for mode in [None, 'RGBA', 'CMYK', 'RGBX']:
928
            verify_img_data(img_data, expected_output, mode)
929

930
        with self.assertRaises(ValueError):
surgan12's avatar
surgan12 committed
931
            # should raise if we try a mode for 3 or 1 or 2 channel images
932
933
            transforms.ToPILImage(mode='RGB')(img_data)
            transforms.ToPILImage(mode='P')(img_data)
surgan12's avatar
surgan12 committed
934
            transforms.ToPILImage(mode='LA')(img_data)
935
936
937
938
939

    def test_4_channel_ndarray_to_pil_image(self):
        def verify_img_data(img_data, mode):
            if mode is None:
                img = transforms.ToPILImage()(img_data)
940
                self.assertEqual(img.mode, 'RGBA')  # default should assume RGBA
941
942
            else:
                img = transforms.ToPILImage(mode=mode)(img_data)
943
                self.assertEqual(img.mode, mode)
944
945
            split = img.split()
            for i in range(4):
946
                torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False)
947

948
        img_data = torch.ByteTensor(4, 4, 4).random_(0, 255).numpy()
surgan12's avatar
surgan12 committed
949
        for mode in [None, 'RGBA', 'CMYK', 'RGBX']:
950
            verify_img_data(img_data, mode)
951

952
        with self.assertRaises(ValueError):
surgan12's avatar
surgan12 committed
953
            # should raise if we try a mode for 3 or 1 or 2 channel images
954
955
            transforms.ToPILImage(mode='RGB')(img_data)
            transforms.ToPILImage(mode='P')(img_data)
surgan12's avatar
surgan12 committed
956
            transforms.ToPILImage(mode='LA')(img_data)
957

Varun Agrawal's avatar
Varun Agrawal committed
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
    def test_2d_tensor_to_pil_image(self):
        to_tensor = transforms.ToTensor()

        img_data_float = torch.Tensor(4, 4).uniform_()
        img_data_byte = torch.ByteTensor(4, 4).random_(0, 255)
        img_data_short = torch.ShortTensor(4, 4).random_()
        img_data_int = torch.IntTensor(4, 4).random_()

        inputs = [img_data_float, img_data_byte, img_data_short, img_data_int]
        expected_outputs = [img_data_float.mul(255).int().float().div(255).numpy(),
                            img_data_byte.float().div(255.0).numpy(),
                            img_data_short.numpy(),
                            img_data_int.numpy()]
        expected_modes = ['L', 'L', 'I;16', 'I']

        for img_data, expected_output, mode in zip(inputs, expected_outputs, expected_modes):
            for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]:
                img = transform(img_data)
976
                self.assertEqual(img.mode, mode)
977
                np.testing.assert_allclose(expected_output, to_tensor(img).numpy()[0])
Varun Agrawal's avatar
Varun Agrawal committed
978
979
980
981
982
983
984
985
986
987
988
989

    def test_2d_ndarray_to_pil_image(self):
        img_data_float = torch.Tensor(4, 4).uniform_().numpy()
        img_data_byte = torch.ByteTensor(4, 4).random_(0, 255).numpy()
        img_data_short = torch.ShortTensor(4, 4).random_().numpy()
        img_data_int = torch.IntTensor(4, 4).random_().numpy()

        inputs = [img_data_float, img_data_byte, img_data_short, img_data_int]
        expected_modes = ['F', 'L', 'I;16', 'I']
        for img_data, mode in zip(inputs, expected_modes):
            for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]:
                img = transform(img_data)
990
                self.assertEqual(img.mode, mode)
991
                np.testing.assert_allclose(img_data, img)
Varun Agrawal's avatar
Varun Agrawal committed
992
993

    def test_tensor_bad_types_to_pil_image(self):
994
        with self.assertRaisesRegex(ValueError, r'pic should be 2/3 dimensional. Got \d+ dimensions.'):
Varun Agrawal's avatar
Varun Agrawal committed
995
            transforms.ToPILImage()(torch.ones(1, 3, 4, 4))
996
997
        with self.assertRaisesRegex(ValueError, r'pic should not have > 4 channels. Got \d+ channels.'):
            transforms.ToPILImage()(torch.ones(6, 4, 4))
Varun Agrawal's avatar
Varun Agrawal committed
998

999
    def test_ndarray_bad_types_to_pil_image(self):
1000
        trans = transforms.ToPILImage()
1001
1002
        reg_msg = r'Input type \w+ is not supported'
        with self.assertRaisesRegex(TypeError, reg_msg):
1003
            trans(np.ones([4, 4, 1], np.int64))
1004
        with self.assertRaisesRegex(TypeError, reg_msg):
1005
            trans(np.ones([4, 4, 1], np.uint16))
1006
        with self.assertRaisesRegex(TypeError, reg_msg):
1007
            trans(np.ones([4, 4, 1], np.uint32))
1008
        with self.assertRaisesRegex(TypeError, reg_msg):
1009
1010
            trans(np.ones([4, 4, 1], np.float64))

1011
        with self.assertRaisesRegex(ValueError, r'pic should be 2/3 dimensional. Got \d+ dimensions.'):
Varun Agrawal's avatar
Varun Agrawal committed
1012
            transforms.ToPILImage()(np.ones([1, 4, 4, 3]))
1013
1014
        with self.assertRaisesRegex(ValueError, r'pic should not have > 4 channels. Got \d+ channels.'):
            transforms.ToPILImage()(np.ones([4, 4, 6]))
Varun Agrawal's avatar
Varun Agrawal committed
1015

1016
1017
    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_vertical_flip(self):
1018
1019
        random_state = random.getstate()
        random.seed(42)
1020
1021
1022
        img = transforms.ToPILImage()(torch.rand(3, 10, 10))
        vimg = img.transpose(Image.FLIP_TOP_BOTTOM)

1023
        num_samples = 250
1024
        num_vertical = 0
1025
        for _ in range(num_samples):
1026
1027
1028
1029
            out = transforms.RandomVerticalFlip()(img)
            if out == vimg:
                num_vertical += 1

1030
1031
        p_value = stats.binom_test(num_vertical, num_samples, p=0.5)
        random.setstate(random_state)
1032
        self.assertGreater(p_value, 0.0001)
1033

1034
1035
1036
1037
1038
1039
1040
1041
1042
        num_samples = 250
        num_vertical = 0
        for _ in range(num_samples):
            out = transforms.RandomVerticalFlip(p=0.7)(img)
            if out == vimg:
                num_vertical += 1

        p_value = stats.binom_test(num_vertical, num_samples, p=0.7)
        random.setstate(random_state)
1043
        self.assertGreater(p_value, 0.0001)
1044

1045
1046
1047
        # Checking if RandomVerticalFlip can be printed as string
        transforms.RandomVerticalFlip().__repr__()

1048
1049
    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_horizontal_flip(self):
1050
1051
        random_state = random.getstate()
        random.seed(42)
1052
1053
1054
        img = transforms.ToPILImage()(torch.rand(3, 10, 10))
        himg = img.transpose(Image.FLIP_LEFT_RIGHT)

1055
        num_samples = 250
1056
        num_horizontal = 0
1057
        for _ in range(num_samples):
1058
1059
1060
1061
            out = transforms.RandomHorizontalFlip()(img)
            if out == himg:
                num_horizontal += 1

1062
1063
        p_value = stats.binom_test(num_horizontal, num_samples, p=0.5)
        random.setstate(random_state)
1064
        self.assertGreater(p_value, 0.0001)
1065

1066
1067
1068
1069
1070
1071
1072
1073
1074
        num_samples = 250
        num_horizontal = 0
        for _ in range(num_samples):
            out = transforms.RandomHorizontalFlip(p=0.7)(img)
            if out == himg:
                num_horizontal += 1

        p_value = stats.binom_test(num_horizontal, num_samples, p=0.7)
        random.setstate(random_state)
1075
        self.assertGreater(p_value, 0.0001)
1076

1077
1078
1079
        # Checking if RandomHorizontalFlip can be printed as string
        transforms.RandomHorizontalFlip().__repr__()

1080
    @unittest.skipIf(stats is None, 'scipy.stats is not available')
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
    def test_normalize(self):
        def samples_from_standard_normal(tensor):
            p_value = stats.kstest(list(tensor.view(-1)), 'norm', args=(0, 1)).pvalue
            return p_value > 0.0001

        random_state = random.getstate()
        random.seed(42)
        for channels in [1, 3]:
            img = torch.rand(channels, 10, 10)
            mean = [img[c].mean() for c in range(channels)]
            std = [img[c].std() for c in range(channels)]
            normalized = transforms.Normalize(mean, std)(img)
1093
            self.assertTrue(samples_from_standard_normal(normalized))
1094
1095
        random.setstate(random_state)

1096
1097
1098
        # Checking if Normalize can be printed as string
        transforms.Normalize(mean, std).__repr__()

1099
1100
1101
        # Checking the optional in-place behaviour
        tensor = torch.rand((1, 16, 16))
        tensor_inplace = transforms.Normalize((0.5,), (0.5,), inplace=True)(tensor)
1102
        assert_equal(tensor, tensor_inplace)
1103

1104
1105
1106
1107
1108
1109
1110
1111
1112
    def test_normalize_different_dtype(self):
        for dtype1 in [torch.float32, torch.float64]:
            img = torch.rand(3, 10, 10, dtype=dtype1)
            for dtype2 in [torch.int64, torch.float32, torch.float64]:
                mean = torch.tensor([1, 2, 3], dtype=dtype2)
                std = torch.tensor([1, 2, 1], dtype=dtype2)
                # checks that it doesn't crash
                transforms.functional.normalize(img, mean, std)

1113
1114
1115
1116
1117
1118
1119
    def test_normalize_3d_tensor(self):
        torch.manual_seed(28)
        n_channels = 3
        img_size = 10
        mean = torch.rand(n_channels)
        std = torch.rand(n_channels)
        img = torch.rand(n_channels, img_size, img_size)
1120
        target = F.normalize(img, mean, std)
1121
1122
1123
1124
1125
1126
1127

        mean_unsqueezed = mean.view(-1, 1, 1)
        std_unsqueezed = std.view(-1, 1, 1)
        result1 = F.normalize(img, mean_unsqueezed, std_unsqueezed)
        result2 = F.normalize(img,
                              mean_unsqueezed.repeat(1, img_size, img_size),
                              std_unsqueezed.repeat(1, img_size, img_size))
1128
1129
        torch.testing.assert_close(target, result1)
        torch.testing.assert_close(target, result2)
1130

1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
    def test_color_jitter(self):
        color_jitter = transforms.ColorJitter(2, 2, 2, 0.1)

        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
        x_pil_2 = x_pil.convert('L')

        for i in range(10):
            y_pil = color_jitter(x_pil)
1142
            self.assertEqual(y_pil.mode, x_pil.mode)
1143
1144

            y_pil_2 = color_jitter(x_pil_2)
1145
            self.assertEqual(y_pil_2.mode, x_pil_2.mode)
1146

1147
1148
1149
        # Checking if ColorJitter can be printed as string
        color_jitter.__repr__()

1150
    def test_linear_transformation(self):
ekka's avatar
ekka committed
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
        num_samples = 1000
        x = torch.randn(num_samples, 3, 10, 10)
        flat_x = x.view(x.size(0), x.size(1) * x.size(2) * x.size(3))
        # compute principal components
        sigma = torch.mm(flat_x.t(), flat_x) / flat_x.size(0)
        u, s, _ = np.linalg.svd(sigma.numpy())
        zca_epsilon = 1e-10  # avoid division by 0
        d = torch.Tensor(np.diag(1. / np.sqrt(s + zca_epsilon)))
        u = torch.Tensor(u)
        principal_components = torch.mm(torch.mm(u, d), u.t())
        mean_vector = (torch.sum(flat_x, dim=0) / flat_x.size(0))
        # initialize whitening matrix
1163
        whitening = transforms.LinearTransformation(principal_components, mean_vector)
ekka's avatar
ekka committed
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
        # estimate covariance and mean using weak law of large number
        num_features = flat_x.size(1)
        cov = 0.0
        mean = 0.0
        for i in x:
            xwhite = whitening(i)
            xwhite = xwhite.view(1, -1).numpy()
            cov += np.dot(xwhite, xwhite.T) / num_features
            mean += np.sum(xwhite) / num_features
        # if rtol for std = 1e-3 then rtol for cov = 2e-3 as std**2 = cov
1174
1175
1176
1177
        torch.testing.assert_close(cov / num_samples, np.identity(1), rtol=2e-3, atol=1e-8, check_dtype=False,
                                   msg="cov not close to 1")
        torch.testing.assert_close(mean / num_samples, 0, rtol=1e-3, atol=1e-8, check_dtype=False,
                                   msg="mean not close to 0")
ekka's avatar
ekka committed
1178

1179
        # Checking if LinearTransformation can be printed as string
ekka's avatar
ekka committed
1180
1181
        whitening.__repr__()

1182
1183
1184
1185
    def test_rotate(self):
        x = np.zeros((100, 100, 3), dtype=np.uint8)
        x[40, 40] = [255, 255, 255]

vfdev's avatar
vfdev committed
1186
        with self.assertRaisesRegex(TypeError, r"img should be PIL Image"):
1187
1188
1189
1190
1191
            F.rotate(x, 10)

        img = F.to_pil_image(x)

        result = F.rotate(img, 45)
1192
        self.assertEqual(result.size, (100, 100))
1193
        r, c, ch = np.where(result)
1194
1195
1196
        self.assertTrue(all(x in r for x in [49, 50]))
        self.assertTrue(all(x in c for x in [36]))
        self.assertTrue(all(x in ch for x in [0, 1, 2]))
1197
1198

        result = F.rotate(img, 45, expand=True)
1199
        self.assertEqual(result.size, (142, 142))
1200
        r, c, ch = np.where(result)
1201
1202
1203
        self.assertTrue(all(x in r for x in [70, 71]))
        self.assertTrue(all(x in c for x in [57]))
        self.assertTrue(all(x in ch for x in [0, 1, 2]))
1204
1205

        result = F.rotate(img, 45, center=(40, 40))
1206
        self.assertEqual(result.size, (100, 100))
1207
        r, c, ch = np.where(result)
1208
1209
1210
        self.assertTrue(all(x in r for x in [40]))
        self.assertTrue(all(x in c for x in [40]))
        self.assertTrue(all(x in ch for x in [0, 1, 2]))
1211
1212
1213
1214

        result_a = F.rotate(img, 90)
        result_b = F.rotate(img, -270)

1215
        assert_equal(np.array(result_a), np.array(result_b))
1216

Philip Meier's avatar
Philip Meier committed
1217
1218
1219
    def test_rotate_fill(self):
        img = F.to_pil_image(np.ones((100, 100, 3), dtype=np.uint8) * 255, "RGB")

1220
        modes = ("L", "RGB", "F")
Philip Meier's avatar
Philip Meier committed
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
        nums_bands = [len(mode) for mode in modes]
        fill = 127

        for mode, num_bands in zip(modes, nums_bands):
            img_conv = img.convert(mode)
            img_rot = F.rotate(img_conv, 45.0, fill=fill)
            pixel = img_rot.getpixel((0, 0))

            if not isinstance(pixel, tuple):
                pixel = (pixel,)
            self.assertTupleEqual(pixel, tuple([fill] * num_bands))

            for wrong_num_bands in set(nums_bands) - {num_bands}:
                with self.assertRaises(ValueError):
                    F.rotate(img_conv, 45.0, fill=tuple([fill] * wrong_num_bands))

1237
    def test_affine(self):
Francisco Massa's avatar
Francisco Massa committed
1238
1239
1240
        input_img = np.zeros((40, 40, 3), dtype=np.uint8)
        cnt = [20, 20]
        for pt in [(16, 16), (20, 16), (20, 20)]:
1241
1242
1243
1244
            for i in range(-5, 5):
                for j in range(-5, 5):
                    input_img[pt[0] + i, pt[1] + j, :] = [255, 155, 55]

vfdev's avatar
vfdev committed
1245
1246
        with self.assertRaises(TypeError, msg="Argument translate should be a sequence"):
            F.affine(input_img, 10, translate=0, scale=1, shear=1)
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257

        pil_img = F.to_pil_image(input_img)

        def _to_3x3_inv(inv_result_matrix):
            result_matrix = np.zeros((3, 3))
            result_matrix[:2, :] = np.array(inv_result_matrix).reshape((2, 3))
            result_matrix[2, 2] = 1
            return np.linalg.inv(result_matrix)

        def _test_transformation(a, t, s, sh):
            a_rad = math.radians(a)
ptrblck's avatar
ptrblck committed
1258
            s_rad = [math.radians(sh_) for sh_ in sh]
1259
1260
1261
1262
1263
            cx, cy = cnt
            tx, ty = t
            sx, sy = s_rad
            rot = a_rad

1264
            # 1) Check transformation matrix:
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
            C = np.array([[1, 0, cx],
                          [0, 1, cy],
                          [0, 0, 1]])
            T = np.array([[1, 0, tx],
                          [0, 1, ty],
                          [0, 0, 1]])
            Cinv = np.linalg.inv(C)

            RS = np.array(
                [[s * math.cos(rot), -s * math.sin(rot), 0],
                 [s * math.sin(rot), s * math.cos(rot), 0],
                 [0, 0, 1]])

            SHx = np.array([[1, -math.tan(sx), 0],
                            [0, 1, 0],
                            [0, 0, 1]])

            SHy = np.array([[1, 0, 0],
                            [-math.tan(sy), 1, 0],
                            [0, 0, 1]])

            RSS = np.matmul(RS, np.matmul(SHy, SHx))

            true_matrix = np.matmul(T, np.matmul(C, np.matmul(RSS, Cinv)))

1290
1291
            result_matrix = _to_3x3_inv(F._get_inverse_affine_matrix(center=cnt, angle=a,
                                                                     translate=t, scale=s, shear=sh))
1292
            self.assertLess(np.sum(np.abs(true_matrix - result_matrix)), 1e-10)
1293
            # 2) Perform inverse mapping:
Francisco Massa's avatar
Francisco Massa committed
1294
            true_result = np.zeros((40, 40, 3), dtype=np.uint8)
1295
1296
1297
            inv_true_matrix = np.linalg.inv(true_matrix)
            for y in range(true_result.shape[0]):
                for x in range(true_result.shape[1]):
1298
1299
1300
1301
1302
1303
                    # Same as for PIL:
                    # https://github.com/python-pillow/Pillow/blob/71f8ec6a0cfc1008076a023c0756542539d057ab/
                    # src/libImaging/Geometry.c#L1060
                    input_pt = np.array([x + 0.5, y + 0.5, 1.0])
                    res = np.floor(np.dot(inv_true_matrix, input_pt)).astype(np.int)
                    _x, _y = res[:2]
1304
1305
1306
1307
                    if 0 <= _x < input_img.shape[1] and 0 <= _y < input_img.shape[0]:
                        true_result[y, x, :] = input_img[_y, _x, :]

            result = F.affine(pil_img, angle=a, translate=t, scale=s, shear=sh)
1308
            self.assertEqual(result.size, pil_img.size)
1309
1310
1311
1312
            # Compute number of different pixels:
            np_result = np.array(result)
            n_diff_pixels = np.sum(np_result != true_result) / 3
            # Accept 3 wrong pixels
1313
1314
            self.assertLess(n_diff_pixels, 3,
                            "a={}, t={}, s={}, sh={}\n".format(a, t, s, sh) +
1315
                            "n diff pixels={}\n".format(n_diff_pixels))
1316
1317
1318

        # Test rotation
        a = 45
ptrblck's avatar
ptrblck committed
1319
        _test_transformation(a=a, t=(0, 0), s=1.0, sh=(0.0, 0.0))
1320
1321
1322

        # Test translation
        t = [10, 15]
ptrblck's avatar
ptrblck committed
1323
        _test_transformation(a=0.0, t=t, s=1.0, sh=(0.0, 0.0))
1324
1325
1326

        # Test scale
        s = 1.2
ptrblck's avatar
ptrblck committed
1327
        _test_transformation(a=0.0, t=(0.0, 0.0), s=s, sh=(0.0, 0.0))
1328
1329

        # Test shear
ptrblck's avatar
ptrblck committed
1330
        sh = [45.0, 25.0]
1331
1332
1333
        _test_transformation(a=0.0, t=(0.0, 0.0), s=1.0, sh=sh)

        # Test rotation, scale, translation, shear
1334
        for a in range(-90, 90, 36):
1335
            for t1 in range(-10, 10, 5):
1336
                for s in [0.77, 1.0, 1.27]:
1337
                    for sh in range(-15, 15, 5):
ptrblck's avatar
ptrblck committed
1338
                        _test_transformation(a=a, t=(t1, t1), s=s, sh=(sh, sh))
1339

1340
1341
1342
1343
1344
1345
1346
    def test_random_rotation(self):

        with self.assertRaises(ValueError):
            transforms.RandomRotation(-0.7)
            transforms.RandomRotation([-0.7])
            transforms.RandomRotation([-0.7, 0, 0.7])

1347
1348
1349
1350
1351
1352
1353
        # assert fill being either a Sequence or a Number
        with self.assertRaises(TypeError):
            transforms.RandomRotation(0, fill={})

        t = transforms.RandomRotation(0, fill=None)
        self.assertTrue(t.fill == 0)

1354
1355
        t = transforms.RandomRotation(10)
        angle = t.get_params(t.degrees)
1356
        self.assertTrue(angle > -10 and angle < 10)
1357
1358
1359

        t = transforms.RandomRotation((-10, 10))
        angle = t.get_params(t.degrees)
1360
        self.assertTrue(-10 < angle < 10)
1361

1362
1363
1364
        # Checking if RandomRotation can be printed as string
        t.__repr__()

1365
1366
1367
        # assert deprecation warning and non-BC
        with self.assertWarnsRegex(UserWarning, r"Argument resample is deprecated and will be removed"):
            t = transforms.RandomRotation((-10, 10), resample=2)
1368
            self.assertEqual(t.interpolation, transforms.InterpolationMode.BILINEAR)
1369
1370

        # assert changed type warning
1371
        with self.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"):
1372
            t = transforms.RandomRotation((-10, 10), interpolation=2)
1373
            self.assertEqual(t.interpolation, transforms.InterpolationMode.BILINEAR)
1374

1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
    def test_random_affine(self):

        with self.assertRaises(ValueError):
            transforms.RandomAffine(-0.7)
            transforms.RandomAffine([-0.7])
            transforms.RandomAffine([-0.7, 0, 0.7])

            transforms.RandomAffine([-90, 90], translate=2.0)
            transforms.RandomAffine([-90, 90], translate=[-1.0, 1.0])
            transforms.RandomAffine([-90, 90], translate=[-1.0, 0.0, 1.0])

            transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.0])
            transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[-1.0, 1.0])
            transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, -0.5])
            transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, 3.0, -0.5])

            transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, 0.5], shear=-7)
            transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, 0.5], shear=[-10])
            transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, 0.5], shear=[-10, 0, 10])
ptrblck's avatar
ptrblck committed
1394
            transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, 0.5], shear=[-10, 0, 10, 0, 10])
1395

1396
1397
1398
1399
1400
1401
1402
        # assert fill being either a Sequence or a Number
        with self.assertRaises(TypeError):
            transforms.RandomAffine(0, fill={})

        t = transforms.RandomAffine(0, fill=None)
        self.assertTrue(t.fill == 0)

1403
1404
1405
        x = np.zeros((100, 100, 3), dtype=np.uint8)
        img = F.to_pil_image(x)

ptrblck's avatar
ptrblck committed
1406
        t = transforms.RandomAffine(10, translate=[0.5, 0.3], scale=[0.7, 1.3], shear=[-10, 10, 20, 40])
1407
1408
1409
        for _ in range(100):
            angle, translations, scale, shear = t.get_params(t.degrees, t.translate, t.scale, t.shear,
                                                             img_size=img.size)
1410
1411
1412
1413
1414
1415
1416
1417
            self.assertTrue(-10 < angle < 10)
            self.assertTrue(-img.size[0] * 0.5 <= translations[0] <= img.size[0] * 0.5,
                            "{} vs {}".format(translations[0], img.size[0] * 0.5))
            self.assertTrue(-img.size[1] * 0.5 <= translations[1] <= img.size[1] * 0.5,
                            "{} vs {}".format(translations[1], img.size[1] * 0.5))
            self.assertTrue(0.7 < scale < 1.3)
            self.assertTrue(-10 < shear[0] < 10)
            self.assertTrue(-20 < shear[1] < 40)
1418
1419
1420
1421

        # Checking if RandomAffine can be printed as string
        t.__repr__()

1422
        t = transforms.RandomAffine(10, interpolation=transforms.InterpolationMode.BILINEAR)
1423
1424
1425
1426
1427
        self.assertIn("bilinear", t.__repr__())

        # assert deprecation warning and non-BC
        with self.assertWarnsRegex(UserWarning, r"Argument resample is deprecated and will be removed"):
            t = transforms.RandomAffine(10, resample=2)
1428
            self.assertEqual(t.interpolation, transforms.InterpolationMode.BILINEAR)
1429
1430
1431
1432
1433
1434

        with self.assertWarnsRegex(UserWarning, r"Argument fillcolor is deprecated and will be removed"):
            t = transforms.RandomAffine(10, fillcolor=10)
            self.assertEqual(t.fill, 10)

        # assert changed type warning
1435
        with self.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"):
1436
            t = transforms.RandomAffine(10, interpolation=2)
1437
            self.assertEqual(t.interpolation, transforms.InterpolationMode.BILINEAR)
1438

1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
    def test_to_grayscale(self):
        """Unit tests for grayscale transform"""

        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
        x_pil_2 = x_pil.convert('L')
        gray_np = np.array(x_pil_2)

        # Test Set: Grayscale an image with desired number of output channels
        # Case 1: RGB -> 1 channel grayscale
        trans1 = transforms.Grayscale(num_output_channels=1)
        gray_pil_1 = trans1(x_pil)
        gray_np_1 = np.array(gray_pil_1)
1454
1455
        self.assertEqual(gray_pil_1.mode, 'L', 'mode should be L')
        self.assertEqual(gray_np_1.shape, tuple(x_shape[0:2]), 'should be 1 channel')
1456
        assert_equal(gray_np, gray_np_1)
1457
1458
1459
1460
1461

        # Case 2: RGB -> 3 channel grayscale
        trans2 = transforms.Grayscale(num_output_channels=3)
        gray_pil_2 = trans2(x_pil)
        gray_np_2 = np.array(gray_pil_2)
1462
1463
        self.assertEqual(gray_pil_2.mode, 'RGB', 'mode should be RGB')
        self.assertEqual(gray_np_2.shape, tuple(x_shape), 'should be 3 channel')
1464
1465
1466
        assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1])
        assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2])
        assert_equal(gray_np, gray_np_2[:, :, 0], check_stride=False)
1467
1468
1469
1470
1471

        # Case 3: 1 channel grayscale -> 1 channel grayscale
        trans3 = transforms.Grayscale(num_output_channels=1)
        gray_pil_3 = trans3(x_pil_2)
        gray_np_3 = np.array(gray_pil_3)
1472
1473
        self.assertEqual(gray_pil_3.mode, 'L', 'mode should be L')
        self.assertEqual(gray_np_3.shape, tuple(x_shape[0:2]), 'should be 1 channel')
1474
        assert_equal(gray_np, gray_np_3)
1475
1476
1477
1478
1479

        # Case 4: 1 channel grayscale -> 3 channel grayscale
        trans4 = transforms.Grayscale(num_output_channels=3)
        gray_pil_4 = trans4(x_pil_2)
        gray_np_4 = np.array(gray_pil_4)
1480
1481
        self.assertEqual(gray_pil_4.mode, 'RGB', 'mode should be RGB')
        self.assertEqual(gray_np_4.shape, tuple(x_shape), 'should be 3 channel')
1482
1483
1484
        assert_equal(gray_np_4[:, :, 0], gray_np_4[:, :, 1])
        assert_equal(gray_np_4[:, :, 1], gray_np_4[:, :, 2])
        assert_equal(gray_np, gray_np_4[:, :, 0], check_stride=False)
1485

1486
1487
1488
        # Checking if Grayscale can be printed as string
        trans4.__repr__()

1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_grayscale(self):
        """Unit tests for random grayscale transform"""

        # Test Set 1: RGB -> 3 channel grayscale
        random_state = random.getstate()
        random.seed(42)
        x_shape = [2, 2, 3]
        x_np = np.random.randint(0, 256, x_shape, np.uint8)
        x_pil = Image.fromarray(x_np, mode='RGB')
        x_pil_2 = x_pil.convert('L')
        gray_np = np.array(x_pil_2)

        num_samples = 250
        num_gray = 0
        for _ in range(num_samples):
            gray_pil_2 = transforms.RandomGrayscale(p=0.5)(x_pil)
            gray_np_2 = np.array(gray_pil_2)
            if np.array_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1]) and \
1508
1509
                    np.array_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2]) and \
                    np.array_equal(gray_np, gray_np_2[:, :, 0]):
1510
1511
1512
1513
                num_gray = num_gray + 1

        p_value = stats.binom_test(num_gray, num_samples, p=0.5)
        random.setstate(random_state)
1514
        self.assertGreater(p_value, 0.0001)
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534

        # Test Set 2: grayscale -> 1 channel grayscale
        random_state = random.getstate()
        random.seed(42)
        x_shape = [2, 2, 3]
        x_np = np.random.randint(0, 256, x_shape, np.uint8)
        x_pil = Image.fromarray(x_np, mode='RGB')
        x_pil_2 = x_pil.convert('L')
        gray_np = np.array(x_pil_2)

        num_samples = 250
        num_gray = 0
        for _ in range(num_samples):
            gray_pil_3 = transforms.RandomGrayscale(p=0.5)(x_pil_2)
            gray_np_3 = np.array(gray_pil_3)
            if np.array_equal(gray_np, gray_np_3):
                num_gray = num_gray + 1

        p_value = stats.binom_test(num_gray, num_samples, p=1.0)  # Note: grayscale is always unchanged
        random.setstate(random_state)
1535
        self.assertGreater(p_value, 0.0001)
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548

        # Test set 3: Explicit tests
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
        x_pil_2 = x_pil.convert('L')
        gray_np = np.array(x_pil_2)

        # Case 3a: RGB -> 3 channel grayscale (grayscaled)
        trans2 = transforms.RandomGrayscale(p=1.0)
        gray_pil_2 = trans2(x_pil)
        gray_np_2 = np.array(gray_pil_2)
1549
1550
        self.assertEqual(gray_pil_2.mode, 'RGB', 'mode should be RGB')
        self.assertEqual(gray_np_2.shape, tuple(x_shape), 'should be 3 channel')
1551
1552
1553
        assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1])
        assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2])
        assert_equal(gray_np, gray_np_2[:, :, 0], check_stride=False)
1554
1555
1556
1557
1558

        # Case 3b: RGB -> 3 channel grayscale (unchanged)
        trans2 = transforms.RandomGrayscale(p=0.0)
        gray_pil_2 = trans2(x_pil)
        gray_np_2 = np.array(gray_pil_2)
1559
1560
        self.assertEqual(gray_pil_2.mode, 'RGB', 'mode should be RGB')
        self.assertEqual(gray_np_2.shape, tuple(x_shape), 'should be 3 channel')
1561
        assert_equal(x_np, gray_np_2)
1562
1563
1564
1565
1566

        # Case 3c: 1 channel grayscale -> 1 channel grayscale (grayscaled)
        trans3 = transforms.RandomGrayscale(p=1.0)
        gray_pil_3 = trans3(x_pil_2)
        gray_np_3 = np.array(gray_pil_3)
1567
1568
        self.assertEqual(gray_pil_3.mode, 'L', 'mode should be L')
        self.assertEqual(gray_np_3.shape, tuple(x_shape[0:2]), 'should be 1 channel')
1569
        assert_equal(gray_np, gray_np_3)
1570
1571
1572
1573
1574

        # Case 3d: 1 channel grayscale -> 1 channel grayscale (unchanged)
        trans3 = transforms.RandomGrayscale(p=0.0)
        gray_pil_3 = trans3(x_pil_2)
        gray_np_3 = np.array(gray_pil_3)
1575
1576
        self.assertEqual(gray_pil_3.mode, 'L', 'mode should be L')
        self.assertEqual(gray_np_3.shape, tuple(x_shape[0:2]), 'should be 1 channel')
1577
        assert_equal(gray_np, gray_np_3)
1578

1579
1580
1581
        # Checking if RandomGrayscale can be printed as string
        trans3.__repr__()

1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
    def test_gaussian_blur_asserts(self):
        np_img = np.ones((100, 100, 3), dtype=np.uint8) * 255
        img = F.to_pil_image(np_img, "RGB")

        with self.assertRaisesRegex(ValueError, r"If kernel_size is a sequence its length should be 2"):
            F.gaussian_blur(img, [3])

        with self.assertRaisesRegex(ValueError, r"If kernel_size is a sequence its length should be 2"):
            F.gaussian_blur(img, [3, 3, 3])
        with self.assertRaisesRegex(ValueError, r"Kernel size should be a tuple/list of two integers"):
            transforms.GaussianBlur([3, 3, 3])

        with self.assertRaisesRegex(ValueError, r"kernel_size should have odd and positive integers"):
            F.gaussian_blur(img, [4, 4])
        with self.assertRaisesRegex(ValueError, r"Kernel size value should be an odd and positive number"):
            transforms.GaussianBlur([4, 4])

        with self.assertRaisesRegex(ValueError, r"kernel_size should have odd and positive integers"):
            F.gaussian_blur(img, [-3, -3])
        with self.assertRaisesRegex(ValueError, r"Kernel size value should be an odd and positive number"):
            transforms.GaussianBlur([-3, -3])

        with self.assertRaisesRegex(ValueError, r"If sigma is a sequence, its length should be 2"):
            F.gaussian_blur(img, 3, [1, 1, 1])
        with self.assertRaisesRegex(ValueError, r"sigma should be a single number or a list/tuple with length 2"):
            transforms.GaussianBlur(3, [1, 1, 1])

        with self.assertRaisesRegex(ValueError, r"sigma should have positive values"):
            F.gaussian_blur(img, 3, -1.0)
        with self.assertRaisesRegex(ValueError, r"If sigma is a single number, it must be positive"):
            transforms.GaussianBlur(3, -1.0)

        with self.assertRaisesRegex(TypeError, r"kernel_size should be int or a sequence of integers"):
            F.gaussian_blur(img, "kernel_size_string")
        with self.assertRaisesRegex(ValueError, r"Kernel size should be a tuple/list of two integers"):
            transforms.GaussianBlur("kernel_size_string")

        with self.assertRaisesRegex(TypeError, r"sigma should be either float or sequence of floats"):
            F.gaussian_blur(img, 3, "sigma_string")
        with self.assertRaisesRegex(ValueError, r"sigma should be a single number or a list/tuple with length 2"):
            transforms.GaussianBlur(3, "sigma_string")

1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
    def _test_randomness(self, fn, trans, configs):
        random_state = random.getstate()
        random.seed(42)
        img = transforms.ToPILImage()(torch.rand(3, 16, 18))

        for p in [0.5, 0.7]:
            for config in configs:
                inv_img = fn(img, **config)

                num_samples = 250
                counts = 0
                for _ in range(num_samples):
                    tranformation = trans(p=p, **config)
                    tranformation.__repr__()
                    out = tranformation(img)
                    if out == inv_img:
                        counts += 1

                p_value = stats.binom_test(counts, num_samples, p=p)
                random.setstate(random_state)
                self.assertGreater(p_value, 0.0001)

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_invert(self):
        self._test_randomness(
            F.invert,
            transforms.RandomInvert,
            [{}]
        )

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_posterize(self):
        self._test_randomness(
            F.posterize,
            transforms.RandomPosterize,
            [{"bits": 4}]
        )

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_solarize(self):
        self._test_randomness(
            F.solarize,
            transforms.RandomSolarize,
            [{"threshold": 192}]
        )

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_adjust_sharpness(self):
        self._test_randomness(
            F.adjust_sharpness,
            transforms.RandomAdjustSharpness,
            [{"sharpness_factor": 2.0}]
        )

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_autocontrast(self):
        self._test_randomness(
            F.autocontrast,
            transforms.RandomAutocontrast,
            [{}]
        )

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_equalize(self):
        self._test_randomness(
            F.equalize,
            transforms.RandomEqualize,
            [{}]
        )

    def test_autoaugment(self):
        for policy in transforms.AutoAugmentPolicy:
            for fill in [None, 85, (128, 128, 128)]:
                random.seed(42)
                img = Image.open(GRACE_HOPPER)
                transform = transforms.AutoAugment(policy=policy, fill=fill)
                for _ in range(100):
                    img = transform(img)
                transform.__repr__()

1704
    @unittest.skipIf(stats is None, 'scipy.stats not available')
1705
1706
1707
    def test_random_erasing(self):
        img = torch.ones(3, 128, 128)

1708
        t = transforms.RandomErasing(scale=(0.1, 0.1), ratio=(1 / 3, 3.))
1709
1710
        y, x, h, w, v = t.get_params(img, t.scale, t.ratio, [t.value, ])
        aspect_ratio = h / w
1711
1712
1713
        # Add some tolerance due to the rounding and int conversion used in the transform
        tol = 0.05
        self.assertTrue(1 / 3 - tol <= aspect_ratio <= 3 + tol)
1714
1715
1716
1717
1718
1719
1720
1721
1722

        aspect_ratios = []
        random.seed(42)
        trial = 1000
        for _ in range(trial):
            y, x, h, w, v = t.get_params(img, t.scale, t.ratio, [t.value, ])
            aspect_ratios.append(h / w)

        count_bigger_then_ones = len([1 for aspect_ratio in aspect_ratios if aspect_ratio > 1])
1723
1724
        p_value = stats.binom_test(count_bigger_then_ones, trial, p=0.5)
        self.assertGreater(p_value, 0.0001)
1725

1726
1727
1728
        # Checking if RandomErasing can be printed as string
        t.__repr__()

1729

1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
class TestPad:

    def test_pad(self):
        height = random.randint(10, 32) * 2
        width = random.randint(10, 32) * 2
        img = torch.ones(3, height, width)
        padding = random.randint(1, 20)
        fill = random.randint(1, 50)
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Pad(padding, fill=fill),
            transforms.ToTensor(),
        ])(img)
        assert result.size(1) == height + 2 * padding
        assert result.size(2) == width + 2 * padding
        # check that all elements in the padded region correspond
        # to the pad value
        fill_v = fill / 255
        eps = 1e-5
        h_padded = result[:, :padding, :]
        w_padded = result[:, :, :padding]
        torch.testing.assert_close(
            h_padded, torch.full_like(h_padded, fill_value=fill_v), check_stride=False, rtol=0.0, atol=eps
        )
        torch.testing.assert_close(
            w_padded, torch.full_like(w_padded, fill_value=fill_v), check_stride=False, rtol=0.0, atol=eps
        )
        pytest.raises(ValueError, transforms.Pad(padding, fill=(1, 2)),
                      transforms.ToPILImage()(img))

    def test_pad_with_tuple_of_pad_values(self):
        height = random.randint(10, 32) * 2
        width = random.randint(10, 32) * 2
        img = transforms.ToPILImage()(torch.ones(3, height, width))

        padding = tuple([random.randint(1, 20) for _ in range(2)])
        output = transforms.Pad(padding)(img)
        assert output.size == (width + padding[0] * 2, height + padding[1] * 2)

        padding = tuple([random.randint(1, 20) for _ in range(4)])
        output = transforms.Pad(padding)(img)
        assert output.size[0] == width + padding[0] + padding[2]
        assert output.size[1] == height + padding[1] + padding[3]

        # Checking if Padding can be printed as string
        transforms.Pad(padding).__repr__()

    def test_pad_with_non_constant_padding_modes(self):
        """Unit tests for edge, reflect, symmetric padding"""
        img = torch.zeros(3, 27, 27).byte()
        img[:, :, 0] = 1  # Constant value added to leftmost edge
        img = transforms.ToPILImage()(img)
        img = F.pad(img, 1, (200, 200, 200))

        # pad 3 to all sidess
        edge_padded_img = F.pad(img, 3, padding_mode='edge')
        # First 6 elements of leftmost edge in the middle of the image, values are in order:
        # edge_pad, edge_pad, edge_pad, constant_pad, constant value added to leftmost edge, 0
        edge_middle_slice = np.asarray(edge_padded_img).transpose(2, 0, 1)[0][17][:6]
        assert_equal(edge_middle_slice, np.asarray([200, 200, 200, 200, 1, 0], dtype=np.uint8), check_stride=False)
        assert transforms.ToTensor()(edge_padded_img).size() == (3, 35, 35)

        # Pad 3 to left/right, 2 to top/bottom
        reflect_padded_img = F.pad(img, (3, 2), padding_mode='reflect')
        # First 6 elements of leftmost edge in the middle of the image, values are in order:
        # reflect_pad, reflect_pad, reflect_pad, constant_pad, constant value added to leftmost edge, 0
        reflect_middle_slice = np.asarray(reflect_padded_img).transpose(2, 0, 1)[0][17][:6]
        assert_equal(reflect_middle_slice, np.asarray([0, 0, 1, 200, 1, 0], dtype=np.uint8), check_stride=False)
        assert transforms.ToTensor()(reflect_padded_img).size() == (3, 33, 35)

        # Pad 3 to left, 2 to top, 2 to right, 1 to bottom
        symmetric_padded_img = F.pad(img, (3, 2, 2, 1), padding_mode='symmetric')
        # First 6 elements of leftmost edge in the middle of the image, values are in order:
        # sym_pad, sym_pad, sym_pad, constant_pad, constant value added to leftmost edge, 0
        symmetric_middle_slice = np.asarray(symmetric_padded_img).transpose(2, 0, 1)[0][17][:6]
        assert_equal(symmetric_middle_slice, np.asarray([0, 1, 200, 200, 1, 0], dtype=np.uint8), check_stride=False)
        assert transforms.ToTensor()(symmetric_padded_img).size() == (3, 32, 34)

        # Check negative padding explicitly for symmetric case, since it is not
        # implemented for tensor case to compare to
        # Crop 1 to left, pad 2 to top, pad 3 to right, crop 3 to bottom
        symmetric_padded_img_neg = F.pad(img, (-1, 2, 3, -3), padding_mode='symmetric')
        symmetric_neg_middle_left = np.asarray(symmetric_padded_img_neg).transpose(2, 0, 1)[0][17][:3]
        symmetric_neg_middle_right = np.asarray(symmetric_padded_img_neg).transpose(2, 0, 1)[0][17][-4:]
        assert_equal(symmetric_neg_middle_left, np.asarray([1, 0, 0], dtype=np.uint8), check_stride=False)
        assert_equal(symmetric_neg_middle_right, np.asarray([200, 200, 0, 0], dtype=np.uint8), check_stride=False)
        assert transforms.ToTensor()(symmetric_padded_img_neg).size() == (3, 28, 31)

    def test_pad_raises_with_invalid_pad_sequence_len(self):
        with pytest.raises(ValueError):
            transforms.Pad(())

        with pytest.raises(ValueError):
            transforms.Pad((1, 2, 3))

        with pytest.raises(ValueError):
            transforms.Pad((1, 2, 3, 4, 5))

    def test_pad_with_mode_F_images(self):
        pad = 2
        transform = transforms.Pad(pad)

        img = Image.new("F", (10, 10))
        padded_img = transform(img)
        assert_equal(padded_img.size, [edge_size + 2 * pad for edge_size in img.size], check_stride=False)


1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
def test_adjust_brightness():
    x_shape = [2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    x_pil = Image.fromarray(x_np, mode='RGB')

    # test 0
    y_pil = F.adjust_brightness(x_pil, 1)
    y_np = np.array(y_pil)
    torch.testing.assert_close(y_np, x_np)

    # test 1
    y_pil = F.adjust_brightness(x_pil, 0.5)
    y_np = np.array(y_pil)
    y_ans = [0, 2, 6, 27, 67, 113, 18, 4, 117, 45, 127, 0]
    y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
    torch.testing.assert_close(y_np, y_ans)

    # test 2
    y_pil = F.adjust_brightness(x_pil, 2)
    y_np = np.array(y_pil)
    y_ans = [0, 10, 26, 108, 255, 255, 74, 16, 255, 180, 255, 2]
    y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
    torch.testing.assert_close(y_np, y_ans)


def test_adjust_contrast():
    x_shape = [2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    x_pil = Image.fromarray(x_np, mode='RGB')

    # test 0
    y_pil = F.adjust_contrast(x_pil, 1)
    y_np = np.array(y_pil)
    torch.testing.assert_close(y_np, x_np)

    # test 1
    y_pil = F.adjust_contrast(x_pil, 0.5)
    y_np = np.array(y_pil)
    y_ans = [43, 45, 49, 70, 110, 156, 61, 47, 160, 88, 170, 43]
    y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
    torch.testing.assert_close(y_np, y_ans)

    # test 2
    y_pil = F.adjust_contrast(x_pil, 2)
    y_np = np.array(y_pil)
    y_ans = [0, 0, 0, 22, 184, 255, 0, 0, 255, 94, 255, 0]
    y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
    torch.testing.assert_close(y_np, y_ans)


@pytest.mark.skipif(Image.__version__ >= '7', reason="Temporarily disabled")
def test_adjust_saturation():
    x_shape = [2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    x_pil = Image.fromarray(x_np, mode='RGB')

    # test 0
    y_pil = F.adjust_saturation(x_pil, 1)
    y_np = np.array(y_pil)
    torch.testing.assert_close(y_np, x_np)

    # test 1
    y_pil = F.adjust_saturation(x_pil, 0.5)
    y_np = np.array(y_pil)
    y_ans = [2, 4, 8, 87, 128, 173, 39, 25, 138, 133, 215, 88]
    y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
    torch.testing.assert_close(y_np, y_ans)

    # test 2
    y_pil = F.adjust_saturation(x_pil, 2)
    y_np = np.array(y_pil)
    y_ans = [0, 6, 22, 0, 149, 255, 32, 0, 255, 4, 255, 0]
    y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
    torch.testing.assert_close(y_np, y_ans)


def test_adjust_hue():
    x_shape = [2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    x_pil = Image.fromarray(x_np, mode='RGB')

    with pytest.raises(ValueError):
        F.adjust_hue(x_pil, -0.7)
        F.adjust_hue(x_pil, 1)

    # test 0: almost same as x_data but not exact.
    # probably because hsv <-> rgb floating point ops
    y_pil = F.adjust_hue(x_pil, 0)
    y_np = np.array(y_pil)
    y_ans = [0, 5, 13, 54, 139, 226, 35, 8, 234, 91, 255, 1]
    y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
    torch.testing.assert_close(y_np, y_ans)

    # test 1
    y_pil = F.adjust_hue(x_pil, 0.25)
    y_np = np.array(y_pil)
    y_ans = [13, 0, 12, 224, 54, 226, 234, 8, 99, 1, 222, 255]
    y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
    torch.testing.assert_close(y_np, y_ans)

    # test 2
    y_pil = F.adjust_hue(x_pil, -0.25)
    y_np = np.array(y_pil)
    y_ans = [0, 13, 2, 54, 226, 58, 8, 234, 152, 255, 43, 1]
    y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
    torch.testing.assert_close(y_np, y_ans)


def test_adjust_sharpness():
    x_shape = [4, 4, 3]
    x_data = [75, 121, 114, 105, 97, 107, 105, 32, 66, 111, 117, 114, 99, 104, 97, 0,
              0, 65, 108, 101, 120, 97, 110, 100, 101, 114, 32, 86, 114, 121, 110, 105,
              111, 116, 105, 115, 0, 0, 73, 32, 108, 111, 118, 101, 32, 121, 111, 117]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    x_pil = Image.fromarray(x_np, mode='RGB')

    # test 0
    y_pil = F.adjust_sharpness(x_pil, 1)
    y_np = np.array(y_pil)
    torch.testing.assert_close(y_np, x_np)

    # test 1
    y_pil = F.adjust_sharpness(x_pil, 0.5)
    y_np = np.array(y_pil)
    y_ans = [75, 121, 114, 105, 97, 107, 105, 32, 66, 111, 117, 114, 99, 104, 97, 30,
             30, 74, 103, 96, 114, 97, 110, 100, 101, 114, 32, 81, 103, 108, 102, 101,
             107, 116, 105, 115, 0, 0, 73, 32, 108, 111, 118, 101, 32, 121, 111, 117]
    y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
    torch.testing.assert_close(y_np, y_ans)

    # test 2
    y_pil = F.adjust_sharpness(x_pil, 2)
    y_np = np.array(y_pil)
    y_ans = [75, 121, 114, 105, 97, 107, 105, 32, 66, 111, 117, 114, 99, 104, 97, 0,
             0, 46, 118, 111, 132, 97, 110, 100, 101, 114, 32, 95, 135, 146, 126, 112,
             119, 116, 105, 115, 0, 0, 73, 32, 108, 111, 118, 101, 32, 121, 111, 117]
    y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
    torch.testing.assert_close(y_np, y_ans)

    # test 3
    x_shape = [2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    x_pil = Image.fromarray(x_np, mode='RGB')
    x_th = torch.tensor(x_np.transpose(2, 0, 1))
    y_pil = F.adjust_sharpness(x_pil, 2)
    y_np = np.array(y_pil).transpose(2, 0, 1)
    y_th = F.adjust_sharpness(x_th, 2)
    torch.testing.assert_close(y_np, y_th.numpy())


def test_adjust_gamma():
    x_shape = [2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    x_pil = Image.fromarray(x_np, mode='RGB')

    # test 0
    y_pil = F.adjust_gamma(x_pil, 1)
    y_np = np.array(y_pil)
    torch.testing.assert_close(y_np, x_np)

    # test 1
    y_pil = F.adjust_gamma(x_pil, 0.5)
    y_np = np.array(y_pil)
    y_ans = [0, 35, 57, 117, 186, 241, 97, 45, 245, 152, 255, 16]
    y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
    torch.testing.assert_close(y_np, y_ans)

    # test 2
    y_pil = F.adjust_gamma(x_pil, 2)
    y_np = np.array(y_pil)
    y_ans = [0, 0, 0, 11, 71, 201, 5, 0, 215, 31, 255, 0]
    y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
    torch.testing.assert_close(y_np, y_ans)


def test_adjusts_L_mode():
    x_shape = [2, 2, 3]
    x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
    x_rgb = Image.fromarray(x_np, mode='RGB')

    x_l = x_rgb.convert('L')
    assert F.adjust_brightness(x_l, 2).mode == 'L'
    assert F.adjust_saturation(x_l, 2).mode == 'L'
    assert F.adjust_contrast(x_l, 2).mode == 'L'
    assert F.adjust_hue(x_l, 0.4).mode == 'L'
    assert F.adjust_sharpness(x_l, 2).mode == 'L'
    assert F.adjust_gamma(x_l, 0.5).mode == 'L'


2033
2034
if __name__ == '__main__':
    unittest.main()