import itertools
import os

import torch
import torchvision.transforms as transforms
import torchvision.transforms.functional as F
import torchvision.transforms.functional_tensor as F_t
from torch._utils_internal import get_file_path_2
from numpy.testing import assert_array_almost_equal
import unittest
import math
import random
import numpy as np

from PIL import Image
try:
    import accimage
except ImportError:
    accimage = None

try:
    from scipy import stats
except ImportError:
    stats = None

from common_utils import cycle_over, int_dtypes, float_dtypes


GRACE_HOPPER = get_file_path_2(
    os.path.dirname(os.path.abspath(__file__)), 'assets', 'encode_jpeg', 'grace_hopper_517x606.jpg')


class Tester(unittest.TestCase):

    def test_center_crop(self):
        height = random.randint(10, 32) * 2
        width = random.randint(10, 32) * 2
        oheight = random.randint(5, (height - 2) // 2) * 2
        owidth = random.randint(5, (width - 2) // 2) * 2

        img = torch.ones(3, height, width)
        oh1 = (height - oheight) // 2
        ow1 = (width - owidth) // 2
        imgnarrow = img[:, oh1:oh1 + oheight, ow1:ow1 + owidth]
        imgnarrow.fill_(0)
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.CenterCrop((oheight, owidth)),
            transforms.ToTensor(),
        ])(img)
        self.assertEqual(result.sum(), 0,
                         "height: {} width: {} oheight: {} owidth: {}".format(height, width, oheight, owidth))
        oheight += 1
        owidth += 1
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.CenterCrop((oheight, owidth)),
            transforms.ToTensor(),
        ])(img)
        sum1 = result.sum()
        self.assertGreater(sum1, 1,
                           "height: {} width: {} oheight: {} owidth: {}".format(height, width, oheight, owidth))
        oheight += 1
        owidth += 1
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.CenterCrop((oheight, owidth)),
            transforms.ToTensor(),
        ])(img)
        sum2 = result.sum()
        self.assertGreater(sum2, 0,
                           "height: {} width: {} oheight: {} owidth: {}".format(height, width, oheight, owidth))
        self.assertGreater(sum2, sum1,
                           "height: {} width: {} oheight: {} owidth: {}".format(height, width, oheight, owidth))

    def test_center_crop_2(self):
        """ Tests when center crop size is larger than image size, along any dimension"""
        even_image_size = (random.randint(10, 32) * 2, random.randint(10, 32) * 2)
        odd_image_size = (even_image_size[0] + 1, even_image_size[1] + 1)

        # Since height is independent of width, we can ignore images with odd height and even width and vice-versa.
        input_image_sizes = [even_image_size, odd_image_size]

        # Get different crop sizes
        delta = random.choice((1, 3, 5))
        crop_size_delta = [-2 * delta, -delta, 0, delta, 2 * delta]
        crop_size_params = itertools.product(input_image_sizes, crop_size_delta, crop_size_delta)

        for (input_image_size, delta_height, delta_width) in crop_size_params:
            img = torch.ones(3, *input_image_size)
            crop_size = (input_image_size[0] + delta_height, input_image_size[1] + delta_width)

            # Test both transforms, one with PIL input and one with tensor
            output_pil = transforms.Compose([
                transforms.ToPILImage(),
                transforms.CenterCrop(crop_size),
                transforms.ToTensor()],
            )(img)
            self.assertEqual(output_pil.size()[1:3], crop_size,
                             "image_size: {} crop_size: {}".format(input_image_size, crop_size))

            output_tensor = transforms.CenterCrop(crop_size)(img)
            self.assertEqual(output_tensor.size()[1:3], crop_size,
                             "image_size: {} crop_size: {}".format(input_image_size, crop_size))

            # Ensure output for PIL and Tensor are equal
            self.assertEqual((output_tensor - output_pil).sum(), 0,
                             "image_size: {} crop_size: {}".format(input_image_size, crop_size))

            # Check if content in center of both image and cropped output is same.
            center_size = (min(crop_size[0], input_image_size[0]), min(crop_size[1], input_image_size[1]))
            crop_center_tl, input_center_tl = [0, 0], [0, 0]
            for index in range(2):
                if crop_size[index] > input_image_size[index]:
                    crop_center_tl[index] = (crop_size[index] - input_image_size[index]) // 2
                else:
                    input_center_tl[index] = (input_image_size[index] - crop_size[index]) // 2

            output_center = output_pil[
                :,
                crop_center_tl[0]:crop_center_tl[0] + center_size[0],
                crop_center_tl[1]:crop_center_tl[1] + center_size[1]
            ]

            img_center = img[
                :,
                input_center_tl[0]:input_center_tl[0] + center_size[0],
                input_center_tl[1]:input_center_tl[1] + center_size[1]
            ]

            self.assertEqual((output_center - img_center).sum(), 0,
                             "image_size: {} crop_size: {}".format(input_image_size, crop_size))

    def test_five_crop(self):
        to_pil_image = transforms.ToPILImage()
        h = random.randint(5, 25)
        w = random.randint(5, 25)
        for single_dim in [True, False]:
            crop_h = random.randint(1, h)
            crop_w = random.randint(1, w)
            if single_dim:
                crop_h = min(crop_h, crop_w)
                crop_w = crop_h
                transform = transforms.FiveCrop(crop_h)
            else:
                transform = transforms.FiveCrop((crop_h, crop_w))

            img = torch.FloatTensor(3, h, w).uniform_()
            results = transform(to_pil_image(img))

            self.assertEqual(len(results), 5)
            for crop in results:
                self.assertEqual(crop.size, (crop_w, crop_h))

            to_pil_image = transforms.ToPILImage()
            tl = to_pil_image(img[:, 0:crop_h, 0:crop_w])
            tr = to_pil_image(img[:, 0:crop_h, w - crop_w:])
            bl = to_pil_image(img[:, h - crop_h:, 0:crop_w])
            br = to_pil_image(img[:, h - crop_h:, w - crop_w:])
            center = transforms.CenterCrop((crop_h, crop_w))(to_pil_image(img))
            expected_output = (tl, tr, bl, br, center)
            self.assertEqual(results, expected_output)

    def test_ten_crop(self):
        to_pil_image = transforms.ToPILImage()
        h = random.randint(5, 25)
        w = random.randint(5, 25)
        for should_vflip in [True, False]:
            for single_dim in [True, False]:
                crop_h = random.randint(1, h)
                crop_w = random.randint(1, w)
                if single_dim:
                    crop_h = min(crop_h, crop_w)
                    crop_w = crop_h
                    transform = transforms.TenCrop(crop_h,
                                                   vertical_flip=should_vflip)
                    five_crop = transforms.FiveCrop(crop_h)
                else:
                    transform = transforms.TenCrop((crop_h, crop_w),
                                                   vertical_flip=should_vflip)
                    five_crop = transforms.FiveCrop((crop_h, crop_w))

                img = to_pil_image(torch.FloatTensor(3, h, w).uniform_())
                results = transform(img)
                expected_output = five_crop(img)

                # Checking if FiveCrop and TenCrop can be printed as string
                transform.__repr__()
                five_crop.__repr__()

                if should_vflip:
                    vflipped_img = img.transpose(Image.FLIP_TOP_BOTTOM)
                    expected_output += five_crop(vflipped_img)
                else:
                    hflipped_img = img.transpose(Image.FLIP_LEFT_RIGHT)
                    expected_output += five_crop(hflipped_img)

                self.assertEqual(len(results), 10)
                self.assertEqual(results, expected_output)

    def test_randomresized_params(self):
        height = random.randint(24, 32) * 2
        width = random.randint(24, 32) * 2
        img = torch.ones(3, height, width)
        to_pil_image = transforms.ToPILImage()
        img = to_pil_image(img)
        size = 100
        epsilon = 0.05
        min_scale = 0.25
        for _ in range(10):
            scale_min = max(round(random.random(), 2), min_scale)
            scale_range = (scale_min, scale_min + round(random.random(), 2))
            aspect_min = max(round(random.random(), 2), epsilon)
            aspect_ratio_range = (aspect_min, aspect_min + round(random.random(), 2))
            randresizecrop = transforms.RandomResizedCrop(size, scale_range, aspect_ratio_range)
            i, j, h, w = randresizecrop.get_params(img, scale_range, aspect_ratio_range)
            aspect_ratio_obtained = w / h
            self.assertTrue((min(aspect_ratio_range) - epsilon <= aspect_ratio_obtained and
                             aspect_ratio_obtained <= max(aspect_ratio_range) + epsilon) or
                            aspect_ratio_obtained == 1.0)
            self.assertIsInstance(i, int)
            self.assertIsInstance(j, int)
            self.assertIsInstance(h, int)
            self.assertIsInstance(w, int)

    def test_randomperspective(self):
        for _ in range(10):
            height = random.randint(24, 32) * 2
            width = random.randint(24, 32) * 2
            img = torch.ones(3, height, width)
            to_pil_image = transforms.ToPILImage()
            img = to_pil_image(img)
            perp = transforms.RandomPerspective()
            startpoints, endpoints = perp.get_params(width, height, 0.5)
            tr_img = F.perspective(img, startpoints, endpoints)
            tr_img2 = F.to_tensor(F.perspective(tr_img, endpoints, startpoints))
            tr_img = F.to_tensor(tr_img)
            self.assertEqual(img.size[0], width)
            self.assertEqual(img.size[1], height)
            self.assertGreater(torch.nn.functional.mse_loss(tr_img, F.to_tensor(img)) + 0.3,
                               torch.nn.functional.mse_loss(tr_img2, F.to_tensor(img)))

    def test_randomperspective_fill(self):

        # assert fill being either a Sequence or a Number
        with self.assertRaises(TypeError):
            transforms.RandomPerspective(fill={})

        t = transforms.RandomPerspective(fill=None)
        self.assertTrue(t.fill == 0)

        height = 100
        width = 100
        img = torch.ones(3, height, width)
        to_pil_image = transforms.ToPILImage()
        img = to_pil_image(img)

        modes = ("L", "RGB", "F")
        nums_bands = [len(mode) for mode in modes]
        fill = 127

        for mode, num_bands in zip(modes, nums_bands):
            img_conv = img.convert(mode)
            perspective = transforms.RandomPerspective(p=1, fill=fill)
            tr_img = perspective(img_conv)
            pixel = tr_img.getpixel((0, 0))

            if not isinstance(pixel, tuple):
                pixel = (pixel,)
            self.assertTupleEqual(pixel, tuple([fill] * num_bands))

        for mode, num_bands in zip(modes, nums_bands):
            img_conv = img.convert(mode)
            startpoints, endpoints = transforms.RandomPerspective.get_params(width, height, 0.5)
            tr_img = F.perspective(img_conv, startpoints, endpoints, fill=fill)
            pixel = tr_img.getpixel((0, 0))

            if not isinstance(pixel, tuple):
                pixel = (pixel,)
            self.assertTupleEqual(pixel, tuple([fill] * num_bands))

            for wrong_num_bands in set(nums_bands) - {num_bands}:
                with self.assertRaises(ValueError):
                    F.perspective(img_conv, startpoints, endpoints, fill=tuple([fill] * wrong_num_bands))

    def test_resize(self):
        input_sizes = [
            # height, width
            # square image
            (28, 28),
            (27, 27),
            # rectangular image: h < w
            (28, 34),
            (29, 35),
            # rectangular image: h > w
            (34, 28),
            (35, 29),
        ]
        test_output_sizes_1 = [
            # single integer
            22, 27, 28, 36,
            # single integer in tuple/list
            [22, ], (27, ),
        ]
        test_output_sizes_2 = [
            # two integers
            [22, 22], [22, 28], [22, 36],
            [27, 22], [36, 22], [28, 28],
            [28, 37], [37, 27], [37, 37]
        ]

        for height, width in input_sizes:
            img = Image.new("RGB", size=(width, height), color=127)

            for osize in test_output_sizes_1:
                for max_size in (None, 37, 1000):

                    t = transforms.Resize(osize, max_size=max_size)
                    result = t(img)

                    msg = "{}, {} - {} - {}".format(height, width, osize, max_size)
                    osize = osize[0] if isinstance(osize, (list, tuple)) else osize
                    # If size is an int, the smaller edge of the image will be matched to this number,
                    # i.e. if height > width, the image will be rescaled to (size * height / width, size).
                    if height < width:
                        exp_w, exp_h = (int(osize * width / height), osize)  # (w, h)
                        if max_size is not None and max_size < exp_w:
                            exp_w, exp_h = max_size, int(max_size * exp_h / exp_w)
                        self.assertEqual(result.size, (exp_w, exp_h), msg=msg)
                    elif width < height:
                        exp_w, exp_h = (osize, int(osize * height / width))  # (w, h)
                        if max_size is not None and max_size < exp_h:
                            exp_w, exp_h = int(max_size * exp_w / exp_h), max_size
                        self.assertEqual(result.size, (exp_w, exp_h), msg=msg)
                    else:
                        exp_w, exp_h = (osize, osize)  # (w, h)
                        if max_size is not None and max_size < osize:
                            exp_w, exp_h = max_size, max_size
                        self.assertEqual(result.size, (exp_w, exp_h), msg=msg)

        for height, width in input_sizes:
            img = Image.new("RGB", size=(width, height), color=127)

            for osize in test_output_sizes_2:
                oheight, owidth = osize

                t = transforms.Resize(osize)
                result = t(img)

                self.assertEqual((owidth, oheight), result.size)

        with self.assertWarnsRegex(UserWarning, r"Anti-alias option is always applied for PIL Image input"):
            t = transforms.Resize(osize, antialias=False)
            t(img)
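
    def test_resize_smaller_edge_example(self):
        # Worked instance of the rule checked above (a sketch, not part of
        # the original suite): with an int size, Resize matches the smaller
        # edge. For a 28x34 (h x w) image and size=22, h < w, so the expected
        # output is (22, int(22 * 34 / 28)) = (22, 26) in (h, w), which PIL
        # reports as size (26, 22) in (w, h).
        img = Image.new("RGB", size=(34, 28), color=127)
        result = transforms.Resize(22)(img)
        self.assertEqual(result.size, (int(22 * 34 / 28), 22))
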
    def test_random_crop(self):
        height = random.randint(10, 32) * 2
        width = random.randint(10, 32) * 2
        oheight = random.randint(5, (height - 2) // 2) * 2
        owidth = random.randint(5, (width - 2) // 2) * 2
        img = torch.ones(3, height, width)
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomCrop((oheight, owidth)),
            transforms.ToTensor(),
        ])(img)
        self.assertEqual(result.size(1), oheight)
        self.assertEqual(result.size(2), owidth)

        padding = random.randint(1, 20)
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomCrop((oheight, owidth), padding=padding),
            transforms.ToTensor(),
        ])(img)
        self.assertEqual(result.size(1), oheight)
        self.assertEqual(result.size(2), owidth)

        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomCrop((height, width)),
            transforms.ToTensor()
        ])(img)
        self.assertEqual(result.size(1), height)
        self.assertEqual(result.size(2), width)
        self.assertTrue(np.allclose(img.numpy(), result.numpy()))

        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomCrop((height + 1, width + 1), pad_if_needed=True),
            transforms.ToTensor(),
        ])(img)
        self.assertEqual(result.size(1), height + 1)
        self.assertEqual(result.size(2), width + 1)

        t = transforms.RandomCrop(48)
        img = torch.ones(3, 32, 32)
        with self.assertRaisesRegex(ValueError, r"Required crop size .+ is larger then input image size .+"):
            t(img)

    def test_pad(self):
        height = random.randint(10, 32) * 2
        width = random.randint(10, 32) * 2
        img = torch.ones(3, height, width)
        padding = random.randint(1, 20)
        fill = random.randint(1, 50)
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Pad(padding, fill=fill),
            transforms.ToTensor(),
        ])(img)
        self.assertEqual(result.size(1), height + 2 * padding)
        self.assertEqual(result.size(2), width + 2 * padding)

        # check that all elements in the padded region correspond
        # to the pad value
        fill_v = fill / 255
        eps = 1e-5
        self.assertTrue((result[:, :padding, :] - fill_v).abs().max() < eps)
        self.assertTrue((result[:, :, :padding] - fill_v).abs().max() < eps)
        self.assertRaises(ValueError, transforms.Pad(padding, fill=(1, 2)),
                          transforms.ToPILImage()(img))
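
    def test_pad_fill_scaling_example(self):
        # Sketch of the scaling noted above (added for illustration): Pad
        # writes the integer fill value into the PIL image and ToTensor then
        # divides by 255, so fill=51 shows up as 51 / 255 = 0.2 in the tensor.
        img = transforms.ToPILImage()(torch.zeros(3, 4, 4))
        result = transforms.ToTensor()(transforms.Pad(2, fill=51)(img))
        self.assertLess((result[:, :2, :] - 51 / 255).abs().max(), 1e-5)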

    def test_pad_with_tuple_of_pad_values(self):
        height = random.randint(10, 32) * 2
        width = random.randint(10, 32) * 2
        img = transforms.ToPILImage()(torch.ones(3, height, width))

        padding = tuple([random.randint(1, 20) for _ in range(2)])
        output = transforms.Pad(padding)(img)
        self.assertEqual(output.size, (width + padding[0] * 2, height + padding[1] * 2))

        padding = tuple([random.randint(1, 20) for _ in range(4)])
        output = transforms.Pad(padding)(img)
        self.assertEqual(output.size[0], width + padding[0] + padding[2])
        self.assertEqual(output.size[1], height + padding[1] + padding[3])

        # Checking if Padding can be printed as string
        transforms.Pad(padding).__repr__()

    def test_pad_with_non_constant_padding_modes(self):
        """Unit tests for edge, reflect, symmetric padding"""
        img = torch.zeros(3, 27, 27).byte()
        img[:, :, 0] = 1  # Constant value added to leftmost edge
        img = transforms.ToPILImage()(img)
        img = F.pad(img, 1, (200, 200, 200))

        # pad 3 to all sides
        edge_padded_img = F.pad(img, 3, padding_mode='edge')
        # First 6 elements of leftmost edge in the middle of the image, values are in order:
        # edge_pad, edge_pad, edge_pad, constant_pad, constant value added to leftmost edge, 0
        edge_middle_slice = np.asarray(edge_padded_img).transpose(2, 0, 1)[0][17][:6]
        self.assertTrue(np.all(edge_middle_slice == np.asarray([200, 200, 200, 200, 1, 0])))
        self.assertEqual(transforms.ToTensor()(edge_padded_img).size(), (3, 35, 35))

        # Pad 3 to left/right, 2 to top/bottom
        reflect_padded_img = F.pad(img, (3, 2), padding_mode='reflect')
        # First 6 elements of leftmost edge in the middle of the image, values are in order:
        # reflect_pad, reflect_pad, reflect_pad, constant_pad, constant value added to leftmost edge, 0
        reflect_middle_slice = np.asarray(reflect_padded_img).transpose(2, 0, 1)[0][17][:6]
        self.assertTrue(np.all(reflect_middle_slice == np.asarray([0, 0, 1, 200, 1, 0])))
        self.assertEqual(transforms.ToTensor()(reflect_padded_img).size(), (3, 33, 35))

        # Pad 3 to left, 2 to top, 2 to right, 1 to bottom
        symmetric_padded_img = F.pad(img, (3, 2, 2, 1), padding_mode='symmetric')
        # First 6 elements of leftmost edge in the middle of the image, values are in order:
        # sym_pad, sym_pad, sym_pad, constant_pad, constant value added to leftmost edge, 0
        symmetric_middle_slice = np.asarray(symmetric_padded_img).transpose(2, 0, 1)[0][17][:6]
        self.assertTrue(np.all(symmetric_middle_slice == np.asarray([0, 1, 200, 200, 1, 0])))
        self.assertEqual(transforms.ToTensor()(symmetric_padded_img).size(), (3, 32, 34))

        # Check negative padding explicitly for symmetric case, since it is not
        # implemented for tensor case to compare to
        # Crop 1 to left, pad 2 to top, pad 3 to right, crop 3 to bottom
        symmetric_padded_img_neg = F.pad(img, (-1, 2, 3, -3), padding_mode='symmetric')
        symmetric_neg_middle_left = np.asarray(symmetric_padded_img_neg).transpose(2, 0, 1)[0][17][:3]
        symmetric_neg_middle_right = np.asarray(symmetric_padded_img_neg).transpose(2, 0, 1)[0][17][-4:]
        self.assertTrue(np.all(symmetric_neg_middle_left == np.asarray([1, 0, 0])))
        self.assertTrue(np.all(symmetric_neg_middle_right == np.asarray([200, 200, 0, 0])))
        self.assertEqual(transforms.ToTensor()(symmetric_padded_img_neg).size(), (3, 28, 31))
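
    def test_padding_modes_1d_example(self):
        # Minimal illustration of the three non-constant modes exercised
        # above, shown with np.pad on a 1-D row (a sketch; torchvision's pad
        # follows the same edge/reflect/symmetric semantics):
        row = np.array([1, 2, 3])
        self.assertTrue(np.all(np.pad(row, 2, mode='edge') == [1, 1, 1, 2, 3, 3, 3]))
        self.assertTrue(np.all(np.pad(row, 2, mode='reflect') == [3, 2, 1, 2, 3, 2, 1]))
        self.assertTrue(np.all(np.pad(row, 2, mode='symmetric') == [2, 1, 1, 2, 3, 3, 2]))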

    def test_pad_raises_with_invalid_pad_sequence_len(self):
        with self.assertRaises(ValueError):
            transforms.Pad(())

        with self.assertRaises(ValueError):
            transforms.Pad((1, 2, 3))

        with self.assertRaises(ValueError):
            transforms.Pad((1, 2, 3, 4, 5))

    def test_pad_with_mode_F_images(self):
        pad = 2
        transform = transforms.Pad(pad)

        img = Image.new("F", (10, 10))
        padded_img = transform(img)
        self.assertSequenceEqual(padded_img.size, [edge_size + 2 * pad for edge_size in img.size])

    def test_lambda(self):
        trans = transforms.Lambda(lambda x: x.add(10))
        x = torch.randn(10)
        y = trans(x)
        self.assertTrue(y.equal(torch.add(x, 10)))

        trans = transforms.Lambda(lambda x: x.add_(10))
        x = torch.randn(10)
        y = trans(x)
        self.assertTrue(y.equal(x))

        # Checking if Lambda can be printed as string
        trans.__repr__()

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_apply(self):
        random_state = random.getstate()
        random.seed(42)
        random_apply_transform = transforms.RandomApply(
            [
                transforms.RandomRotation((-45, 45)),
                transforms.RandomHorizontalFlip(),
                transforms.RandomVerticalFlip(),
            ], p=0.75
        )
        img = transforms.ToPILImage()(torch.rand(3, 10, 10))
        num_samples = 250
        num_applies = 0
        for _ in range(num_samples):
            out = random_apply_transform(img)
            if out != img:
                num_applies += 1

        p_value = stats.binom_test(num_applies, num_samples, p=0.75)
        random.setstate(random_state)
        self.assertGreater(p_value, 0.0001)

        # Checking if RandomApply can be printed as string
        random_apply_transform.__repr__()
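
    # The statistical check above is reused by several Random* tests below:
    # apply the transform num_samples times, count how often it fired, and
    # require the two-sided binomial test p-value to clear a loose 1e-4
    # threshold. With p=0.75 and num_samples=250 the expected count is
    # 0.75 * 250 = 187.5, so counts near that pass while far-off counts fail.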

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_choice(self):
        random_state = random.getstate()
        random.seed(42)
        random_choice_transform = transforms.RandomChoice(
            [
                transforms.Resize(15),
                transforms.Resize(20),
                transforms.CenterCrop(10)
            ]
        )
        img = transforms.ToPILImage()(torch.rand(3, 25, 25))
        num_samples = 250
        num_resize_15 = 0
        num_resize_20 = 0
        num_crop_10 = 0
        for _ in range(num_samples):
            out = random_choice_transform(img)
            if out.size == (15, 15):
                num_resize_15 += 1
            elif out.size == (20, 20):
                num_resize_20 += 1
            elif out.size == (10, 10):
                num_crop_10 += 1

        p_value = stats.binom_test(num_resize_15, num_samples, p=0.33333)
        self.assertGreater(p_value, 0.0001)
        p_value = stats.binom_test(num_resize_20, num_samples, p=0.33333)
        self.assertGreater(p_value, 0.0001)
        p_value = stats.binom_test(num_crop_10, num_samples, p=0.33333)
        self.assertGreater(p_value, 0.0001)

        random.setstate(random_state)
        # Checking if RandomChoice can be printed as string
        random_choice_transform.__repr__()

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_order(self):
        random_state = random.getstate()
        random.seed(42)
        random_order_transform = transforms.RandomOrder(
            [
                transforms.Resize(20),
                transforms.CenterCrop(10)
            ]
        )
        img = transforms.ToPILImage()(torch.rand(3, 25, 25))
        num_samples = 250
        num_normal_order = 0
        resize_crop_out = transforms.CenterCrop(10)(transforms.Resize(20)(img))
        for _ in range(num_samples):
            out = random_order_transform(img)
            if out == resize_crop_out:
                num_normal_order += 1

        p_value = stats.binom_test(num_normal_order, num_samples, p=0.5)
        random.setstate(random_state)
        self.assertGreater(p_value, 0.0001)

        # Checking if RandomOrder can be printed as string
        random_order_transform.__repr__()

    def test_to_tensor(self):
        test_channels = [1, 3, 4]
        height, width = 4, 4
        trans = transforms.ToTensor()

        with self.assertRaises(TypeError):
            trans(np.random.rand(1, height, width).tolist())

        with self.assertRaises(ValueError):
            trans(np.random.rand(height))
            trans(np.random.rand(1, 1, height, width))

        for channels in test_channels:
            input_data = torch.ByteTensor(channels, height, width).random_(0, 255).float().div_(255)
            img = transforms.ToPILImage()(input_data)
            output = trans(img)
            self.assertTrue(np.allclose(input_data.numpy(), output.numpy()))

            ndarray = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
            output = trans(ndarray)
            expected_output = ndarray.transpose((2, 0, 1)) / 255.0
            self.assertTrue(np.allclose(output.numpy(), expected_output))

            ndarray = np.random.rand(height, width, channels).astype(np.float32)
            output = trans(ndarray)
            expected_output = ndarray.transpose((2, 0, 1))
            self.assertTrue(np.allclose(output.numpy(), expected_output))

        # separate test for mode '1' PIL images
        input_data = torch.ByteTensor(1, height, width).bernoulli_()
        img = transforms.ToPILImage()(input_data.mul(255)).convert('1')
        output = trans(img)
        self.assertTrue(np.allclose(input_data.numpy(), output.numpy()))
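
    def test_to_tensor_scaling_example(self):
        # Sketch of the two rules exercised above (added for illustration):
        # ToTensor turns an HWC uint8 array into a CHW float tensor scaled by
        # 1 / 255, while float32 arrays only get the layout change.
        ndarray = np.full((2, 2, 3), 51, dtype=np.uint8)
        output = transforms.ToTensor()(ndarray)
        self.assertEqual(output.size(), (3, 2, 2))
        self.assertTrue(np.allclose(output.numpy(), 51 / 255.0))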

    def test_to_tensor_with_other_default_dtypes(self):
        current_def_dtype = torch.get_default_dtype()

        t = transforms.ToTensor()
        np_arr = np.random.randint(0, 255, (32, 32, 3), dtype=np.uint8)
        img = Image.fromarray(np_arr)

        for dtype in [torch.float16, torch.float, torch.double]:
            torch.set_default_dtype(dtype)
            res = t(img)
            self.assertTrue(res.dtype == dtype, msg=f"{res.dtype} vs {dtype}")

        torch.set_default_dtype(current_def_dtype)

    def test_max_value(self):
        for dtype in int_dtypes():
            self.assertEqual(F_t._max_value(dtype), torch.iinfo(dtype).max)

        # remove float testing as it can lead to errors such as
        # runtime error: 5.7896e+76 is outside the range of representable values of type 'float'
        # for dtype in float_dtypes():
        #     self.assertGreater(F_t._max_value(dtype), torch.finfo(dtype).max)

    def test_convert_image_dtype_float_to_float(self):
        for input_dtype, output_dtypes in cycle_over(float_dtypes()):
            input_image = torch.tensor((0.0, 1.0), dtype=input_dtype)
            for output_dtype in output_dtypes:
                with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype):
                    transform = transforms.ConvertImageDtype(output_dtype)
                    transform_script = torch.jit.script(F.convert_image_dtype)

                    output_image = transform(input_image)
                    output_image_script = transform_script(input_image, output_dtype)

                    script_diff = output_image_script - output_image
                    self.assertLess(script_diff.abs().max(), 1e-6)

                    actual_min, actual_max = output_image.tolist()
                    desired_min, desired_max = 0.0, 1.0

                    self.assertAlmostEqual(actual_min, desired_min)
                    self.assertAlmostEqual(actual_max, desired_max)

    def test_convert_image_dtype_float_to_int(self):
        for input_dtype in float_dtypes():
            input_image = torch.tensor((0.0, 1.0), dtype=input_dtype)
            for output_dtype in int_dtypes():
                with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype):
                    transform = transforms.ConvertImageDtype(output_dtype)
                    transform_script = torch.jit.script(F.convert_image_dtype)

                    if (input_dtype == torch.float32 and output_dtype in (torch.int32, torch.int64)) or (
                            input_dtype == torch.float64 and output_dtype == torch.int64
                    ):
                        with self.assertRaises(RuntimeError):
                            transform(input_image)
                    else:
                        output_image = transform(input_image)
                        output_image_script = transform_script(input_image, output_dtype)

                        script_diff = output_image_script - output_image
                        self.assertLess(script_diff.abs().max(), 1e-6)

                        actual_min, actual_max = output_image.tolist()
                        desired_min, desired_max = 0, torch.iinfo(output_dtype).max

                        self.assertEqual(actual_min, desired_min)
                        self.assertEqual(actual_max, desired_max)
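
    # Note on the RuntimeError branch above (added for clarity): float32 has
    # a 24-bit significand and float64 a 53-bit one, so they cannot represent
    # every integer up to the int32/int64 maximum; convert_image_dtype
    # refuses these lossy float -> int combinations instead of silently
    # rounding.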

    def test_convert_image_dtype_int_to_float(self):
        for input_dtype in int_dtypes():
            input_image = torch.tensor((0, torch.iinfo(input_dtype).max), dtype=input_dtype)
            for output_dtype in float_dtypes():
                with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype):
                    transform = transforms.ConvertImageDtype(output_dtype)
                    transform_script = torch.jit.script(F.convert_image_dtype)

                    output_image = transform(input_image)
                    output_image_script = transform_script(input_image, output_dtype)

                    script_diff = output_image_script - output_image
                    self.assertLess(script_diff.abs().max(), 1e-6)

                    actual_min, actual_max = output_image.tolist()
                    desired_min, desired_max = 0.0, 1.0

                    self.assertAlmostEqual(actual_min, desired_min)
                    self.assertGreaterEqual(actual_min, desired_min)
                    self.assertAlmostEqual(actual_max, desired_max)
                    self.assertLessEqual(actual_max, desired_max)
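
    def test_convert_image_dtype_int_to_float_example(self):
        # Worked instance of the scaling checked above (a sketch): int ->
        # float divides by the input dtype's maximum, so uint8 value 51
        # becomes 51 / 255 = 0.2.
        input_image = torch.tensor((0, 51, 255), dtype=torch.uint8)
        output_image = transforms.ConvertImageDtype(torch.float32)(input_image)
        self.assertTrue(torch.allclose(output_image, torch.tensor((0.0, 0.2, 1.0))))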

    def test_convert_image_dtype_int_to_int(self):
        for input_dtype, output_dtypes in cycle_over(int_dtypes()):
            input_max = torch.iinfo(input_dtype).max
            input_image = torch.tensor((0, input_max), dtype=input_dtype)
            for output_dtype in output_dtypes:
                output_max = torch.iinfo(output_dtype).max

                with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype):
                    transform = transforms.ConvertImageDtype(output_dtype)
                    transform_script = torch.jit.script(F.convert_image_dtype)

                    output_image = transform(input_image)
                    output_image_script = transform_script(input_image, output_dtype)

                    script_diff = output_image_script.float() - output_image.float()
                    self.assertLess(
                        script_diff.abs().max(), 1e-6, msg="{} vs {}".format(output_image_script, output_image)
                    )

                    actual_min, actual_max = output_image.tolist()
                    desired_min, desired_max = 0, output_max

                    # see https://github.com/pytorch/vision/pull/2078#issuecomment-641036236 for details
                    if input_max >= output_max:
                        error_term = 0
                    else:
                        error_term = 1 - (torch.iinfo(output_dtype).max + 1) // (torch.iinfo(input_dtype).max + 1)

                    self.assertEqual(actual_min, desired_min)
                    self.assertEqual(actual_max, desired_max + error_term)
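
    def test_convert_image_dtype_error_term_example(self):
        # Worked instance of the error term above (a sketch, not part of the
        # original suite): uint8 -> int16 scales by a factor of
        # (32767 + 1) // (255 + 1) = 128, so the uint8 maximum 255 maps to
        # 255 * 128 = 32640 = 32767 + (1 - 128), i.e. error_term = -127.
        input_image = torch.tensor((0, 255), dtype=torch.uint8)
        output_image = transforms.ConvertImageDtype(torch.int16)(input_image)
        self.assertEqual(output_image.tolist(), [0, 32640])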

    def test_convert_image_dtype_int_to_int_consistency(self):
        for input_dtype, output_dtypes in cycle_over(int_dtypes()):
            input_max = torch.iinfo(input_dtype).max
            input_image = torch.tensor((0, input_max), dtype=input_dtype)
            for output_dtype in output_dtypes:
                output_max = torch.iinfo(output_dtype).max
                if output_max <= input_max:
                    continue

                with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype):
                    transform = transforms.ConvertImageDtype(output_dtype)
                    inverse_transform = transforms.ConvertImageDtype(input_dtype)
                    output_image = inverse_transform(transform(input_image))

                    actual_min, actual_max = output_image.tolist()
                    desired_min, desired_max = 0, input_max

                    self.assertEqual(actual_min, desired_min)
                    self.assertEqual(actual_max, desired_max)

    @unittest.skipIf(accimage is None, 'accimage not available')
    def test_accimage_to_tensor(self):
        trans = transforms.ToTensor()

        expected_output = trans(Image.open(GRACE_HOPPER).convert('RGB'))
        output = trans(accimage.Image(GRACE_HOPPER))

        self.assertEqual(expected_output.size(), output.size())
        self.assertTrue(np.allclose(output.numpy(), expected_output.numpy()))

    def test_pil_to_tensor(self):
        test_channels = [1, 3, 4]
        height, width = 4, 4
        trans = transforms.PILToTensor()

        with self.assertRaises(TypeError):
            trans(np.random.rand(1, height, width).tolist())
            trans(np.random.rand(1, height, width))

        for channels in test_channels:
            input_data = torch.ByteTensor(channels, height, width).random_(0, 255)
            img = transforms.ToPILImage()(input_data)
            output = trans(img)
            self.assertTrue(np.allclose(input_data.numpy(), output.numpy()))

            input_data = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
            img = transforms.ToPILImage()(input_data)
            output = trans(img)
            expected_output = input_data.transpose((2, 0, 1))
            self.assertTrue(np.allclose(output.numpy(), expected_output))

            input_data = torch.as_tensor(np.random.rand(channels, height, width).astype(np.float32))
            img = transforms.ToPILImage()(input_data)  # CHW -> HWC and (* 255).byte()
            output = trans(img)  # HWC -> CHW
            expected_output = (input_data * 255).byte()
            self.assertTrue(np.allclose(output.numpy(), expected_output.numpy()))

        # separate test for mode '1' PIL images
        input_data = torch.ByteTensor(1, height, width).bernoulli_()
        img = transforms.ToPILImage()(input_data.mul(255)).convert('1')
        output = trans(img)
        self.assertTrue(np.allclose(input_data.numpy(), output.numpy()))

    @unittest.skipIf(accimage is None, 'accimage not available')
    def test_accimage_pil_to_tensor(self):
        trans = transforms.PILToTensor()

        expected_output = trans(Image.open(GRACE_HOPPER).convert('RGB'))
        output = trans(accimage.Image(GRACE_HOPPER))

        self.assertEqual(expected_output.size(), output.size())
        self.assertTrue(np.allclose(output.numpy(), expected_output.numpy()))

    @unittest.skipIf(accimage is None, 'accimage not available')
    def test_accimage_resize(self):
        trans = transforms.Compose([
            transforms.Resize(256, interpolation=Image.LINEAR),
            transforms.ToTensor(),
        ])

        # Checking if Compose, Resize and ToTensor can be printed as string
        trans.__repr__()

        expected_output = trans(Image.open(GRACE_HOPPER).convert('RGB'))
        output = trans(accimage.Image(GRACE_HOPPER))

        self.assertEqual(expected_output.size(), output.size())
        self.assertLess(np.abs((expected_output - output).mean()), 1e-3)
        self.assertLess((expected_output - output).var(), 1e-5)
        # note the high absolute tolerance
        self.assertTrue(np.allclose(output.numpy(), expected_output.numpy(), atol=5e-2))

    @unittest.skipIf(accimage is None, 'accimage not available')
    def test_accimage_crop(self):
        trans = transforms.Compose([
            transforms.CenterCrop(256),
            transforms.ToTensor(),
        ])

        # Checking if Compose, CenterCrop and ToTensor can be printed as string
        trans.__repr__()

        expected_output = trans(Image.open(GRACE_HOPPER).convert('RGB'))
        output = trans(accimage.Image(GRACE_HOPPER))

        self.assertEqual(expected_output.size(), output.size())
        self.assertTrue(np.allclose(output.numpy(), expected_output.numpy()))

    def test_1_channel_tensor_to_pil_image(self):
        to_tensor = transforms.ToTensor()

        img_data_float = torch.Tensor(1, 4, 4).uniform_()
        img_data_byte = torch.ByteTensor(1, 4, 4).random_(0, 255)
        img_data_short = torch.ShortTensor(1, 4, 4).random_()
        img_data_int = torch.IntTensor(1, 4, 4).random_()

        inputs = [img_data_float, img_data_byte, img_data_short, img_data_int]
        expected_outputs = [img_data_float.mul(255).int().float().div(255).numpy(),
                            img_data_byte.float().div(255.0).numpy(),
                            img_data_short.numpy(),
                            img_data_int.numpy()]
        expected_modes = ['L', 'L', 'I;16', 'I']

        for img_data, expected_output, mode in zip(inputs, expected_outputs, expected_modes):
            for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]:
                img = transform(img_data)
                self.assertEqual(img.mode, mode)
                self.assertTrue(np.allclose(expected_output, to_tensor(img).numpy()))

        # 'F' mode for torch.FloatTensor
        img_F_mode = transforms.ToPILImage(mode='F')(img_data_float)
        self.assertEqual(img_F_mode.mode, 'F')
        self.assertTrue(np.allclose(np.array(Image.fromarray(img_data_float.squeeze(0).numpy(), mode='F')),
                                    np.array(img_F_mode)))

    def test_1_channel_ndarray_to_pil_image(self):
        img_data_float = torch.Tensor(4, 4, 1).uniform_().numpy()
        img_data_byte = torch.ByteTensor(4, 4, 1).random_(0, 255).numpy()
        img_data_short = torch.ShortTensor(4, 4, 1).random_().numpy()
        img_data_int = torch.IntTensor(4, 4, 1).random_().numpy()

        inputs = [img_data_float, img_data_byte, img_data_short, img_data_int]
        expected_modes = ['F', 'L', 'I;16', 'I']
        for img_data, mode in zip(inputs, expected_modes):
            for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]:
                img = transform(img_data)
                self.assertEqual(img.mode, mode)
                self.assertTrue(np.allclose(img_data[:, :, 0], img))

    def test_2_channel_ndarray_to_pil_image(self):
        def verify_img_data(img_data, mode):
            if mode is None:
                img = transforms.ToPILImage()(img_data)
                self.assertEqual(img.mode, 'LA')  # default should assume LA
            else:
                img = transforms.ToPILImage(mode=mode)(img_data)
                self.assertEqual(img.mode, mode)
            split = img.split()
            for i in range(2):
                self.assertTrue(np.allclose(img_data[:, :, i], split[i]))

        img_data = torch.ByteTensor(4, 4, 2).random_(0, 255).numpy()
        for mode in [None, 'LA']:
            verify_img_data(img_data, mode)

        transforms.ToPILImage().__repr__()

        with self.assertRaises(ValueError):
            # should raise if we try a mode for 4 or 1 or 3 channel images
            transforms.ToPILImage(mode='RGBA')(img_data)
            transforms.ToPILImage(mode='P')(img_data)
            transforms.ToPILImage(mode='RGB')(img_data)

    def test_2_channel_tensor_to_pil_image(self):
        def verify_img_data(img_data, expected_output, mode):
            if mode is None:
                img = transforms.ToPILImage()(img_data)
                self.assertEqual(img.mode, 'LA')  # default should assume LA
            else:
                img = transforms.ToPILImage(mode=mode)(img_data)
                self.assertEqual(img.mode, mode)
            split = img.split()
            for i in range(2):
                self.assertTrue(np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy()))

        img_data = torch.Tensor(2, 4, 4).uniform_()
        expected_output = img_data.mul(255).int().float().div(255)
        for mode in [None, 'LA']:
            verify_img_data(img_data, expected_output, mode=mode)

        with self.assertRaises(ValueError):
            # should raise if we try a mode for 4 or 1 or 3 channel images
            transforms.ToPILImage(mode='RGBA')(img_data)
            transforms.ToPILImage(mode='P')(img_data)
            transforms.ToPILImage(mode='RGB')(img_data)

    def test_3_channel_tensor_to_pil_image(self):
        def verify_img_data(img_data, expected_output, mode):
            if mode is None:
                img = transforms.ToPILImage()(img_data)
                self.assertEqual(img.mode, 'RGB')  # default should assume RGB
            else:
                img = transforms.ToPILImage(mode=mode)(img_data)
                self.assertEqual(img.mode, mode)
            split = img.split()
            for i in range(3):
                self.assertTrue(np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy()))

        img_data = torch.Tensor(3, 4, 4).uniform_()
        expected_output = img_data.mul(255).int().float().div(255)
        for mode in [None, 'RGB', 'HSV', 'YCbCr']:
            verify_img_data(img_data, expected_output, mode=mode)

        with self.assertRaises(ValueError):
            # should raise if we try a mode for 4 or 1 or 2 channel images
            transforms.ToPILImage(mode='RGBA')(img_data)
            transforms.ToPILImage(mode='P')(img_data)
            transforms.ToPILImage(mode='LA')(img_data)

        with self.assertRaises(ValueError):
            transforms.ToPILImage()(torch.Tensor(1, 3, 4, 4).uniform_())

    def test_3_channel_ndarray_to_pil_image(self):
        def verify_img_data(img_data, mode):
            if mode is None:
                img = transforms.ToPILImage()(img_data)
                self.assertEqual(img.mode, 'RGB')  # default should assume RGB
            else:
                img = transforms.ToPILImage(mode=mode)(img_data)
                self.assertEqual(img.mode, mode)
            split = img.split()
            for i in range(3):
                self.assertTrue(np.allclose(img_data[:, :, i], split[i]))

        img_data = torch.ByteTensor(4, 4, 3).random_(0, 255).numpy()
        for mode in [None, 'RGB', 'HSV', 'YCbCr']:
            verify_img_data(img_data, mode)

        # Checking if ToPILImage can be printed as string
        transforms.ToPILImage().__repr__()

        with self.assertRaises(ValueError):
            # should raise if we try a mode for 4 or 1 or 2 channel images
            transforms.ToPILImage(mode='RGBA')(img_data)
            transforms.ToPILImage(mode='P')(img_data)
            transforms.ToPILImage(mode='LA')(img_data)

    def test_4_channel_tensor_to_pil_image(self):
        def verify_img_data(img_data, expected_output, mode):
            if mode is None:
                img = transforms.ToPILImage()(img_data)
                self.assertEqual(img.mode, 'RGBA')  # default should assume RGBA
            else:
                img = transforms.ToPILImage(mode=mode)(img_data)
                self.assertEqual(img.mode, mode)

            split = img.split()
            for i in range(4):
                self.assertTrue(np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy()))

        img_data = torch.Tensor(4, 4, 4).uniform_()
        expected_output = img_data.mul(255).int().float().div(255)
        for mode in [None, 'RGBA', 'CMYK', 'RGBX']:
            verify_img_data(img_data, expected_output, mode)

        with self.assertRaises(ValueError):
            # should raise if we try a mode for 3 or 1 or 2 channel images
            transforms.ToPILImage(mode='RGB')(img_data)
            transforms.ToPILImage(mode='P')(img_data)
            transforms.ToPILImage(mode='LA')(img_data)

    def test_4_channel_ndarray_to_pil_image(self):
        def verify_img_data(img_data, mode):
            if mode is None:
                img = transforms.ToPILImage()(img_data)
                self.assertEqual(img.mode, 'RGBA')  # default should assume RGBA
            else:
                img = transforms.ToPILImage(mode=mode)(img_data)
                self.assertEqual(img.mode, mode)
            split = img.split()
            for i in range(4):
                self.assertTrue(np.allclose(img_data[:, :, i], split[i]))

        img_data = torch.ByteTensor(4, 4, 4).random_(0, 255).numpy()
        for mode in [None, 'RGBA', 'CMYK', 'RGBX']:
            verify_img_data(img_data, mode)

        with self.assertRaises(ValueError):
            # should raise if we try a mode for 3 or 1 or 2 channel images
            transforms.ToPILImage(mode='RGB')(img_data)
            transforms.ToPILImage(mode='P')(img_data)
            transforms.ToPILImage(mode='LA')(img_data)

    def test_2d_tensor_to_pil_image(self):
        to_tensor = transforms.ToTensor()

        img_data_float = torch.Tensor(4, 4).uniform_()
        img_data_byte = torch.ByteTensor(4, 4).random_(0, 255)
        img_data_short = torch.ShortTensor(4, 4).random_()
        img_data_int = torch.IntTensor(4, 4).random_()

        inputs = [img_data_float, img_data_byte, img_data_short, img_data_int]
        expected_outputs = [img_data_float.mul(255).int().float().div(255).numpy(),
                            img_data_byte.float().div(255.0).numpy(),
                            img_data_short.numpy(),
                            img_data_int.numpy()]
        expected_modes = ['L', 'L', 'I;16', 'I']

        for img_data, expected_output, mode in zip(inputs, expected_outputs, expected_modes):
            for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]:
                img = transform(img_data)
                self.assertEqual(img.mode, mode)
                self.assertTrue(np.allclose(expected_output, to_tensor(img).numpy()))

    def test_2d_ndarray_to_pil_image(self):
        img_data_float = torch.Tensor(4, 4).uniform_().numpy()
        img_data_byte = torch.ByteTensor(4, 4).random_(0, 255).numpy()
        img_data_short = torch.ShortTensor(4, 4).random_().numpy()
        img_data_int = torch.IntTensor(4, 4).random_().numpy()

        inputs = [img_data_float, img_data_byte, img_data_short, img_data_int]
        expected_modes = ['F', 'L', 'I;16', 'I']
        for img_data, mode in zip(inputs, expected_modes):
            for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]:
                img = transform(img_data)
                self.assertEqual(img.mode, mode)
                self.assertTrue(np.allclose(img_data, img))

    def test_tensor_bad_types_to_pil_image(self):
        with self.assertRaisesRegex(ValueError, r'pic should be 2/3 dimensional. Got \d+ dimensions.'):
            transforms.ToPILImage()(torch.ones(1, 3, 4, 4))
        with self.assertRaisesRegex(ValueError, r'pic should not have > 4 channels. Got \d+ channels.'):
            transforms.ToPILImage()(torch.ones(6, 4, 4))

    def test_ndarray_bad_types_to_pil_image(self):
        trans = transforms.ToPILImage()
        reg_msg = r'Input type \w+ is not supported'
        with self.assertRaisesRegex(TypeError, reg_msg):
            trans(np.ones([4, 4, 1], np.int64))
        with self.assertRaisesRegex(TypeError, reg_msg):
            trans(np.ones([4, 4, 1], np.uint16))
        with self.assertRaisesRegex(TypeError, reg_msg):
            trans(np.ones([4, 4, 1], np.uint32))
        with self.assertRaisesRegex(TypeError, reg_msg):
            trans(np.ones([4, 4, 1], np.float64))

        with self.assertRaisesRegex(ValueError, r'pic should be 2/3 dimensional. Got \d+ dimensions.'):
            transforms.ToPILImage()(np.ones([1, 4, 4, 3]))
        with self.assertRaisesRegex(ValueError, r'pic should not have > 4 channels. Got \d+ channels.'):
            transforms.ToPILImage()(np.ones([4, 4, 6]))

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_vertical_flip(self):
        random_state = random.getstate()
        random.seed(42)
        img = transforms.ToPILImage()(torch.rand(3, 10, 10))
        vimg = img.transpose(Image.FLIP_TOP_BOTTOM)

        num_samples = 250
        num_vertical = 0
        for _ in range(num_samples):
            out = transforms.RandomVerticalFlip()(img)
            if out == vimg:
                num_vertical += 1

        p_value = stats.binom_test(num_vertical, num_samples, p=0.5)
        random.setstate(random_state)
        self.assertGreater(p_value, 0.0001)

        num_samples = 250
        num_vertical = 0
        for _ in range(num_samples):
            out = transforms.RandomVerticalFlip(p=0.7)(img)
            if out == vimg:
                num_vertical += 1

        p_value = stats.binom_test(num_vertical, num_samples, p=0.7)
        random.setstate(random_state)
        self.assertGreater(p_value, 0.0001)

        # Checking if RandomVerticalFlip can be printed as string
        transforms.RandomVerticalFlip().__repr__()

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_horizontal_flip(self):
        random_state = random.getstate()
        random.seed(42)
        img = transforms.ToPILImage()(torch.rand(3, 10, 10))
        himg = img.transpose(Image.FLIP_LEFT_RIGHT)

        num_samples = 250
        num_horizontal = 0
        for _ in range(num_samples):
            out = transforms.RandomHorizontalFlip()(img)
            if out == himg:
                num_horizontal += 1

        p_value = stats.binom_test(num_horizontal, num_samples, p=0.5)
        random.setstate(random_state)
        self.assertGreater(p_value, 0.0001)

        num_samples = 250
        num_horizontal = 0
        for _ in range(num_samples):
            out = transforms.RandomHorizontalFlip(p=0.7)(img)
            if out == himg:
                num_horizontal += 1

        p_value = stats.binom_test(num_horizontal, num_samples, p=0.7)
        random.setstate(random_state)
        self.assertGreater(p_value, 0.0001)

        # Checking if RandomHorizontalFlip can be printed as string
        transforms.RandomHorizontalFlip().__repr__()

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_normalize(self):
        def samples_from_standard_normal(tensor):
            p_value = stats.kstest(list(tensor.view(-1)), 'norm', args=(0, 1)).pvalue
            return p_value > 0.0001

        random_state = random.getstate()
        random.seed(42)
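        # Standardizing each channel by its own mean/std should produce values
        # that pass a deliberately loose (p > 1e-4) Kolmogorov-Smirnov test
        # against the standard normal distribution.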
        for channels in [1, 3]:
            img = torch.rand(channels, 10, 10)
            mean = [img[c].mean() for c in range(channels)]
            std = [img[c].std() for c in range(channels)]
            normalized = transforms.Normalize(mean, std)(img)
            self.assertTrue(samples_from_standard_normal(normalized))
        random.setstate(random_state)

        # Checking if Normalize can be printed as string
        transforms.Normalize(mean, std).__repr__()

        # Checking the optional in-place behaviour
        tensor = torch.rand((1, 16, 16))
        tensor_inplace = transforms.Normalize((0.5,), (0.5,), inplace=True)(tensor)
        self.assertTrue(torch.equal(tensor, tensor_inplace))

    def test_normalize_different_dtype(self):
        for dtype1 in [torch.float32, torch.float64]:
            img = torch.rand(3, 10, 10, dtype=dtype1)
            for dtype2 in [torch.int64, torch.float32, torch.float64]:
                mean = torch.tensor([1, 2, 3], dtype=dtype2)
                std = torch.tensor([1, 2, 1], dtype=dtype2)
                # checks that it doesn't crash
                transforms.functional.normalize(img, mean, std)

    def test_normalize_3d_tensor(self):
        torch.manual_seed(28)
        n_channels = 3
        img_size = 10
        mean = torch.rand(n_channels)
        std = torch.rand(n_channels)
        img = torch.rand(n_channels, img_size, img_size)
        target = F.normalize(img, mean, std).numpy()

        mean_unsqueezed = mean.view(-1, 1, 1)
        std_unsqueezed = std.view(-1, 1, 1)
        result1 = F.normalize(img, mean_unsqueezed, std_unsqueezed)
        result2 = F.normalize(img,
                              mean_unsqueezed.repeat(1, img_size, img_size),
                              std_unsqueezed.repeat(1, img_size, img_size))
        assert_array_almost_equal(target, result1.numpy())
        assert_array_almost_equal(target, result2.numpy())
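        # Per-channel stats passed as 1-D tensors must broadcast the same way as
        # explicitly expanded (C, H, W) tensors, as the two checks above verify.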

    def test_adjust_brightness(self):
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
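        # adjust_brightness blends the image with black: out is approximately
        # in * factor, clipped to [0, 255].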

        # test 0
        y_pil = F.adjust_brightness(x_pil, 1)
        y_np = np.array(y_pil)
        self.assertTrue(np.allclose(y_np, x_np))

        # test 1
        y_pil = F.adjust_brightness(x_pil, 0.5)
        y_np = np.array(y_pil)
        y_ans = [0, 2, 6, 27, 67, 113, 18, 4, 117, 45, 127, 0]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

        # test 2
        y_pil = F.adjust_brightness(x_pil, 2)
        y_np = np.array(y_pil)
        y_ans = [0, 10, 26, 108, 255, 255, 74, 16, 255, 180, 255, 2]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

    def test_adjust_contrast(self):
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
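        # adjust_contrast blends with a constant gray image at the mean luminance:
        # out is approximately mean + (in - mean) * factor.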

        # test 0
        y_pil = F.adjust_contrast(x_pil, 1)
        y_np = np.array(y_pil)
        self.assertTrue(np.allclose(y_np, x_np))

        # test 1
        y_pil = F.adjust_contrast(x_pil, 0.5)
        y_np = np.array(y_pil)
        y_ans = [43, 45, 49, 70, 110, 156, 61, 47, 160, 88, 170, 43]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

        # test 2
        y_pil = F.adjust_contrast(x_pil, 2)
        y_np = np.array(y_pil)
        y_ans = [0, 0, 0, 22, 184, 255, 0, 0, 255, 94, 255, 0]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

    @unittest.skipIf(int(Image.__version__.split('.')[0]) >= 7, "Temporarily disabled")
    def test_adjust_saturation(self):
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
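        # adjust_saturation blends with the grayscale version of the image:
        # out is approximately gray + (in - gray) * factor.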

        # test 0
        y_pil = F.adjust_saturation(x_pil, 1)
        y_np = np.array(y_pil)
        self.assertTrue(np.allclose(y_np, x_np))

        # test 1
        y_pil = F.adjust_saturation(x_pil, 0.5)
        y_np = np.array(y_pil)
        y_ans = [2, 4, 8, 87, 128, 173, 39, 25, 138, 133, 215, 88]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

        # test 2
        y_pil = F.adjust_saturation(x_pil, 2)
        y_np = np.array(y_pil)
        y_ans = [0, 6, 22, 0, 149, 255, 32, 0, 255, 4, 255, 0]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

    def test_adjust_hue(self):
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
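        # hue_factor must lie in [-0.5, 0.5]; +/-0.5 corresponds to a shift of
        # half a period around the HSV hue circle.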

        # Each out-of-range hue_factor must raise on its own (a single
        # assertRaises block stops at the first call that raises).
        for bad_hue in (-0.7, 1):
            with self.assertRaises(ValueError):
                F.adjust_hue(x_pil, bad_hue)

        # test 0: output is close to x_data but not exact, most likely due to
        # rounding in the HSV <-> RGB floating point conversions
        y_pil = F.adjust_hue(x_pil, 0)
        y_np = np.array(y_pil)
        y_ans = [0, 5, 13, 54, 139, 226, 35, 8, 234, 91, 255, 1]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

        # test 1
        y_pil = F.adjust_hue(x_pil, 0.25)
        y_np = np.array(y_pil)
        y_ans = [13, 0, 12, 224, 54, 226, 234, 8, 99, 1, 222, 255]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

        # test 2
        y_pil = F.adjust_hue(x_pil, -0.25)
        y_np = np.array(y_pil)
        y_ans = [0, 13, 2, 54, 226, 58, 8, 234, 152, 255, 43, 1]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

    def test_adjust_sharpness(self):
        x_shape = [4, 4, 3]
        x_data = [75, 121, 114, 105, 97, 107, 105, 32, 66, 111, 117, 114, 99, 104, 97, 0,
                  0, 65, 108, 101, 120, 97, 110, 100, 101, 114, 32, 86, 114, 121, 110, 105,
                  111, 116, 105, 115, 0, 0, 73, 32, 108, 111, 118, 101, 32, 121, 111, 117]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')

        # test 0
        y_pil = F.adjust_sharpness(x_pil, 1)
        y_np = np.array(y_pil)
        self.assertTrue(np.allclose(y_np, x_np))

        # test 1
        y_pil = F.adjust_sharpness(x_pil, 0.5)
        y_np = np.array(y_pil)
        y_ans = [75, 121, 114, 105, 97, 107, 105, 32, 66, 111, 117, 114, 99, 104, 97, 30,
                 30, 74, 103, 96, 114, 97, 110, 100, 101, 114, 32, 81, 103, 108, 102, 101,
                 107, 116, 105, 115, 0, 0, 73, 32, 108, 111, 118, 101, 32, 121, 111, 117]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

        # test 2
        y_pil = F.adjust_sharpness(x_pil, 2)
        y_np = np.array(y_pil)
        y_ans = [75, 121, 114, 105, 97, 107, 105, 32, 66, 111, 117, 114, 99, 104, 97, 0,
                 0, 46, 118, 111, 132, 97, 110, 100, 101, 114, 32, 95, 135, 146, 126, 112,
                 119, 116, 105, 115, 0, 0, 73, 32, 108, 111, 118, 101, 32, 121, 111, 117]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

        # test 3
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
        x_th = torch.tensor(x_np.transpose(2, 0, 1))
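        # Cross-check: the PIL path and the tensor path of adjust_sharpness
        # should produce matching results on the same input.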
        y_pil = F.adjust_sharpness(x_pil, 2)
        y_np = np.array(y_pil).transpose(2, 0, 1)
        y_th = F.adjust_sharpness(x_th, 2)
        self.assertTrue(np.allclose(y_np, y_th.numpy()))

    def test_adjust_gamma(self):
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
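        # adjust_gamma applies out = 255 * gain * (in / 255) ** gamma per channel,
        # so gamma < 1 brightens and gamma > 1 darkens.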

        # test 0
        y_pil = F.adjust_gamma(x_pil, 1)
        y_np = np.array(y_pil)
        self.assertTrue(np.allclose(y_np, x_np))

        # test 1
        y_pil = F.adjust_gamma(x_pil, 0.5)
        y_np = np.array(y_pil)
        y_ans = [0, 35, 57, 117, 186, 241, 97, 45, 245, 152, 255, 16]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

        # test 2
        y_pil = F.adjust_gamma(x_pil, 2)
        y_np = np.array(y_pil)
        y_ans = [0, 0, 0, 11, 71, 201, 5, 0, 215, 31, 255, 0]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

    def test_adjusts_L_mode(self):
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_rgb = Image.fromarray(x_np, mode='RGB')

        x_l = x_rgb.convert('L')
        self.assertEqual(F.adjust_brightness(x_l, 2).mode, 'L')
        self.assertEqual(F.adjust_saturation(x_l, 2).mode, 'L')
        self.assertEqual(F.adjust_contrast(x_l, 2).mode, 'L')
        self.assertEqual(F.adjust_hue(x_l, 0.4).mode, 'L')
        self.assertEqual(F.adjust_sharpness(x_l, 2).mode, 'L')
        self.assertEqual(F.adjust_gamma(x_l, 0.5).mode, 'L')

    def test_color_jitter(self):
        color_jitter = transforms.ColorJitter(2, 2, 2, 0.1)

        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
        x_pil_2 = x_pil.convert('L')

        for i in range(10):
            y_pil = color_jitter(x_pil)
            self.assertEqual(y_pil.mode, x_pil.mode)

            y_pil_2 = color_jitter(x_pil_2)
            self.assertEqual(y_pil_2.mode, x_pil_2.mode)

        # Checking if ColorJitter can be printed as string
        color_jitter.__repr__()

    def test_linear_transformation(self):
        num_samples = 1000
        x = torch.randn(num_samples, 3, 10, 10)
        flat_x = x.view(x.size(0), x.size(1) * x.size(2) * x.size(3))
        # compute principal components
        sigma = torch.mm(flat_x.t(), flat_x) / flat_x.size(0)
        u, s, _ = np.linalg.svd(sigma.numpy())
        zca_epsilon = 1e-10  # avoid division by 0
        d = torch.Tensor(np.diag(1. / np.sqrt(s + zca_epsilon)))
        u = torch.Tensor(u)
        principal_components = torch.mm(torch.mm(u, d), u.t())
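        # ZCA whitening: W = U diag(1 / sqrt(s + eps)) U^T, so that applying W to
        # zero-mean data maps its covariance to (approximately) the identity.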
        mean_vector = (torch.sum(flat_x, dim=0) / flat_x.size(0))
        # initialize whitening matrix
        whitening = transforms.LinearTransformation(principal_components, mean_vector)
        # estimate covariance and mean using the weak law of large numbers
        num_features = flat_x.size(1)
        cov = 0.0
        mean = 0.0
        for i in x:
            xwhite = whitening(i)
            xwhite = xwhite.view(1, -1).numpy()
            cov += np.dot(xwhite, xwhite.T) / num_features
            mean += np.sum(xwhite) / num_features
        # if rtol for std = 1e-3 then rtol for cov = 2e-3 as std**2 = cov
        self.assertTrue(np.allclose(cov / num_samples, np.identity(1), rtol=2e-3),
                        "cov not close to 1")
        self.assertTrue(np.allclose(mean / num_samples, 0, rtol=1e-3),
                        "mean not close to 0")

        # Checking if LinearTransformation can be printed as string
        whitening.__repr__()

    def test_rotate(self):
        x = np.zeros((100, 100, 3), dtype=np.uint8)
        x[40, 40] = [255, 255, 255]

        with self.assertRaisesRegex(TypeError, r"img should be PIL Image"):
            F.rotate(x, 10)

        img = F.to_pil_image(x)

        result = F.rotate(img, 45)
        self.assertEqual(result.size, (100, 100))
        r, c, ch = np.where(result)
        self.assertTrue(all(x in r for x in [49, 50]))
        self.assertTrue(all(x in c for x in [36]))
        self.assertTrue(all(x in ch for x in [0, 1, 2]))

        result = F.rotate(img, 45, expand=True)
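        # With expand=True the canvas grows to the rotated bounding box:
        # ceil(100 * sqrt(2)) = 142 pixels per side for a 45 degree rotation.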
        self.assertEqual(result.size, (142, 142))
        r, c, ch = np.where(result)
        self.assertTrue(all(x in r for x in [70, 71]))
        self.assertTrue(all(x in c for x in [57]))
        self.assertTrue(all(x in ch for x in [0, 1, 2]))

        result = F.rotate(img, 45, center=(40, 40))
        self.assertEqual(result.size, (100, 100))
        r, c, ch = np.where(result)
        self.assertTrue(all(x in r for x in [40]))
        self.assertTrue(all(x in c for x in [40]))
        self.assertTrue(all(x in ch for x in [0, 1, 2]))

        result_a = F.rotate(img, 90)
        result_b = F.rotate(img, -270)

        self.assertTrue(np.all(np.array(result_a) == np.array(result_b)))

    def test_rotate_fill(self):
        img = F.to_pil_image(np.ones((100, 100, 3), dtype=np.uint8) * 255, "RGB")

        modes = ("L", "RGB", "F")
        nums_bands = [len(mode) for mode in modes]
        fill = 127

        for mode, num_bands in zip(modes, nums_bands):
            img_conv = img.convert(mode)
            img_rot = F.rotate(img_conv, 45.0, fill=fill)
            pixel = img_rot.getpixel((0, 0))

            if not isinstance(pixel, tuple):
                pixel = (pixel,)
            self.assertTupleEqual(pixel, tuple([fill] * num_bands))

            for wrong_num_bands in set(nums_bands) - {num_bands}:
                with self.assertRaises(ValueError):
                    F.rotate(img_conv, 45.0, fill=tuple([fill] * wrong_num_bands))

    def test_affine(self):
        input_img = np.zeros((40, 40, 3), dtype=np.uint8)
        cnt = [20, 20]
        for pt in [(16, 16), (20, 16), (20, 20)]:
            for i in range(-5, 5):
                for j in range(-5, 5):
                    input_img[pt[0] + i, pt[1] + j, :] = [255, 155, 55]

        with self.assertRaises(TypeError, msg="Argument translate should be a sequence"):
            F.affine(input_img, 10, translate=0, scale=1, shear=1)

        pil_img = F.to_pil_image(input_img)
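        # F._get_inverse_affine_matrix returns the inverse map as a flattened
        # 2x3 matrix; _to_3x3_inv below embeds it in a 3x3 matrix and inverts it
        # to recover the forward transform for comparison against true_matrix.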

        def _to_3x3_inv(inv_result_matrix):
            result_matrix = np.zeros((3, 3))
            result_matrix[:2, :] = np.array(inv_result_matrix).reshape((2, 3))
            result_matrix[2, 2] = 1
            return np.linalg.inv(result_matrix)

        def _test_transformation(a, t, s, sh):
            a_rad = math.radians(a)
            s_rad = [math.radians(sh_) for sh_ in sh]
            cx, cy = cnt
            tx, ty = t
            sx, sy = s_rad
            rot = a_rad

            # 1) Check transformation matrix:
            C = np.array([[1, 0, cx],
                          [0, 1, cy],
                          [0, 0, 1]])
            T = np.array([[1, 0, tx],
                          [0, 1, ty],
                          [0, 0, 1]])
            Cinv = np.linalg.inv(C)

            RS = np.array(
                [[s * math.cos(rot), -s * math.sin(rot), 0],
                 [s * math.sin(rot), s * math.cos(rot), 0],
                 [0, 0, 1]])

            SHx = np.array([[1, -math.tan(sx), 0],
                            [0, 1, 0],
                            [0, 0, 1]])

            SHy = np.array([[1, 0, 0],
                            [-math.tan(sy), 1, 0],
                            [0, 0, 1]])

            RSS = np.matmul(RS, np.matmul(SHy, SHx))

            true_matrix = np.matmul(T, np.matmul(C, np.matmul(RSS, Cinv)))
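            # Read right-to-left: shift the center to the origin (Cinv), shear
            # (SHx, SHy), rotate and scale (RS), shift back (C), then translate (T).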

            result_matrix = _to_3x3_inv(F._get_inverse_affine_matrix(center=cnt, angle=a,
                                                                     translate=t, scale=s, shear=sh))
            self.assertLess(np.sum(np.abs(true_matrix - result_matrix)), 1e-10)
            # 2) Perform inverse mapping:
            true_result = np.zeros((40, 40, 3), dtype=np.uint8)
            inv_true_matrix = np.linalg.inv(true_matrix)
            for y in range(true_result.shape[0]):
                for x in range(true_result.shape[1]):
                    # Same as for PIL:
                    # https://github.com/python-pillow/Pillow/blob/71f8ec6a0cfc1008076a023c0756542539d057ab/
                    # src/libImaging/Geometry.c#L1060
                    input_pt = np.array([x + 0.5, y + 0.5, 1.0])
                    res = np.floor(np.dot(inv_true_matrix, input_pt)).astype(int)
                    _x, _y = res[:2]
                    if 0 <= _x < input_img.shape[1] and 0 <= _y < input_img.shape[0]:
                        true_result[y, x, :] = input_img[_y, _x, :]

            result = F.affine(pil_img, angle=a, translate=t, scale=s, shear=sh)
            self.assertEqual(result.size, pil_img.size)
            # Compute number of different pixels:
            np_result = np.array(result)
            n_diff_pixels = np.sum(np_result != true_result) / 3
            # Accept fewer than 3 differing pixels (rounding at the borders)
            self.assertLess(n_diff_pixels, 3,
                            "a={}, t={}, s={}, sh={}\n".format(a, t, s, sh) +
                            "n diff pixels={}\n".format(np.sum(np.array(result)[:, :, 0] != true_result[:, :, 0])))

        # Test rotation
        a = 45
        _test_transformation(a=a, t=(0, 0), s=1.0, sh=(0.0, 0.0))

        # Test translation
        t = [10, 15]
        _test_transformation(a=0.0, t=t, s=1.0, sh=(0.0, 0.0))

        # Test scale
        s = 1.2
        _test_transformation(a=0.0, t=(0.0, 0.0), s=s, sh=(0.0, 0.0))

        # Test shear
        sh = [45.0, 25.0]
        _test_transformation(a=0.0, t=(0.0, 0.0), s=1.0, sh=sh)

        # Test rotation, scale, translation, shear
        for a in range(-90, 90, 25):
            for t1 in range(-10, 10, 5):
                for s in [0.75, 0.98, 1.0, 1.2, 1.4]:
                    for sh in range(-15, 15, 5):
                        _test_transformation(a=a, t=(t1, t1), s=s, sh=(sh, sh))

    def test_random_rotation(self):

        # Each invalid degrees spec must raise on its own (a single assertRaises
        # block stops at the first statement that raises).
        for degrees in (-0.7, [-0.7], [-0.7, 0, 0.7]):
            with self.assertRaises(ValueError):
                transforms.RandomRotation(degrees)

        # assert fill being either a Sequence or a Number
        with self.assertRaises(TypeError):
            transforms.RandomRotation(0, fill={})

        t = transforms.RandomRotation(0, fill=None)
        self.assertTrue(t.fill == 0)

        t = transforms.RandomRotation(10)
        angle = t.get_params(t.degrees)
        self.assertTrue(-10 < angle < 10)

        t = transforms.RandomRotation((-10, 10))
        angle = t.get_params(t.degrees)
        self.assertTrue(-10 < angle < 10)

        # Checking if RandomRotation can be printed as string
        t.__repr__()

        # assert deprecation warning and non-BC
        with self.assertWarnsRegex(UserWarning, r"Argument resample is deprecated and will be removed"):
            t = transforms.RandomRotation((-10, 10), resample=2)
            self.assertEqual(t.interpolation, transforms.InterpolationMode.BILINEAR)

        # assert changed type warning
        with self.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"):
            t = transforms.RandomRotation((-10, 10), interpolation=2)
            self.assertEqual(t.interpolation, transforms.InterpolationMode.BILINEAR)

    def test_random_affine(self):

        # Each invalid argument combination must raise; wrap the constructions
        # individually, since a single assertRaises block stops at the first
        # statement that raises. Non-sequence arguments (e.g. translate=2.0)
        # raise TypeError rather than ValueError, hence the tuple.
        for ctor in [
            lambda: transforms.RandomAffine(-0.7),
            lambda: transforms.RandomAffine([-0.7]),
            lambda: transforms.RandomAffine([-0.7, 0, 0.7]),
            lambda: transforms.RandomAffine([-90, 90], translate=2.0),
            lambda: transforms.RandomAffine([-90, 90], translate=[-1.0, 1.0]),
            lambda: transforms.RandomAffine([-90, 90], translate=[-1.0, 0.0, 1.0]),
            lambda: transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.0]),
            lambda: transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[-1.0, 1.0]),
            lambda: transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, -0.5]),
            lambda: transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, 3.0, -0.5]),
            lambda: transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, 0.5], shear=-7),
            lambda: transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, 0.5], shear=[-10]),
            lambda: transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, 0.5], shear=[-10, 0, 10]),
            lambda: transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, 0.5], shear=[-10, 0, 10, 0, 10]),
        ]:
            with self.assertRaises((ValueError, TypeError)):
                ctor()

        # assert fill being either a Sequence or a Number
        with self.assertRaises(TypeError):
            transforms.RandomAffine(0, fill={})

        t = transforms.RandomAffine(0, fill=None)
        self.assertTrue(t.fill == 0)

        x = np.zeros((100, 100, 3), dtype=np.uint8)
        img = F.to_pil_image(x)

        t = transforms.RandomAffine(10, translate=[0.5, 0.3], scale=[0.7, 1.3], shear=[-10, 10, 20, 40])
        for _ in range(100):
            angle, translations, scale, shear = t.get_params(t.degrees, t.translate, t.scale, t.shear,
                                                             img_size=img.size)
            self.assertTrue(-10 < angle < 10)
            self.assertTrue(-img.size[0] * 0.5 <= translations[0] <= img.size[0] * 0.5,
                            "{} vs {}".format(translations[0], img.size[0] * 0.5))
            self.assertTrue(-img.size[1] * 0.5 <= translations[1] <= img.size[1] * 0.5,
                            "{} vs {}".format(translations[1], img.size[1] * 0.5))
            self.assertTrue(0.7 < scale < 1.3)
            self.assertTrue(-10 < shear[0] < 10)
            self.assertTrue(-20 < shear[1] < 40)

        # Checking if RandomAffine can be printed as string
        t.__repr__()

        t = transforms.RandomAffine(10, interpolation=transforms.InterpolationMode.BILINEAR)
        self.assertIn("bilinear", t.__repr__())

        # assert deprecation warning and non-BC
        with self.assertWarnsRegex(UserWarning, r"Argument resample is deprecated and will be removed"):
            t = transforms.RandomAffine(10, resample=2)
            self.assertEqual(t.interpolation, transforms.InterpolationMode.BILINEAR)

        with self.assertWarnsRegex(UserWarning, r"Argument fillcolor is deprecated and will be removed"):
            t = transforms.RandomAffine(10, fillcolor=10)
            self.assertEqual(t.fill, 10)

        # assert changed type warning
        with self.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"):
            t = transforms.RandomAffine(10, interpolation=2)
            self.assertEqual(t.interpolation, transforms.InterpolationMode.BILINEAR)

    def test_to_grayscale(self):
        """Unit tests for grayscale transform"""

        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
        x_pil_2 = x_pil.convert('L')
        gray_np = np.array(x_pil_2)
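        # PIL's 'L' conversion uses the ITU-R 601-2 luma transform:
        # L = R * 299/1000 + G * 587/1000 + B * 114/1000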

        # Test Set: Grayscale an image with desired number of output channels
        # Case 1: RGB -> 1 channel grayscale
        trans1 = transforms.Grayscale(num_output_channels=1)
        gray_pil_1 = trans1(x_pil)
        gray_np_1 = np.array(gray_pil_1)
        self.assertEqual(gray_pil_1.mode, 'L', 'mode should be L')
        self.assertEqual(gray_np_1.shape, tuple(x_shape[0:2]), 'should be 1 channel')
        np.testing.assert_equal(gray_np, gray_np_1)

        # Case 2: RGB -> 3 channel grayscale
        trans2 = transforms.Grayscale(num_output_channels=3)
        gray_pil_2 = trans2(x_pil)
        gray_np_2 = np.array(gray_pil_2)
        self.assertEqual(gray_pil_2.mode, 'RGB', 'mode should be RGB')
        self.assertEqual(gray_np_2.shape, tuple(x_shape), 'should be 3 channel')
        np.testing.assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1])
        np.testing.assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2])
        np.testing.assert_equal(gray_np, gray_np_2[:, :, 0])

        # Case 3: 1 channel grayscale -> 1 channel grayscale
        trans3 = transforms.Grayscale(num_output_channels=1)
        gray_pil_3 = trans3(x_pil_2)
        gray_np_3 = np.array(gray_pil_3)
        self.assertEqual(gray_pil_3.mode, 'L', 'mode should be L')
        self.assertEqual(gray_np_3.shape, tuple(x_shape[0:2]), 'should be 1 channel')
        np.testing.assert_equal(gray_np, gray_np_3)

        # Case 4: 1 channel grayscale -> 3 channel grayscale
        trans4 = transforms.Grayscale(num_output_channels=3)
        gray_pil_4 = trans4(x_pil_2)
        gray_np_4 = np.array(gray_pil_4)
        self.assertEqual(gray_pil_4.mode, 'RGB', 'mode should be RGB')
        self.assertEqual(gray_np_4.shape, tuple(x_shape), 'should be 3 channel')
        np.testing.assert_equal(gray_np_4[:, :, 0], gray_np_4[:, :, 1])
        np.testing.assert_equal(gray_np_4[:, :, 1], gray_np_4[:, :, 2])
        np.testing.assert_equal(gray_np, gray_np_4[:, :, 0])

        # Checking if Grayscale can be printed as string
        trans4.__repr__()

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_grayscale(self):
        """Unit tests for random grayscale transform"""

        # Test Set 1: RGB -> 3 channel grayscale
        random_state = random.getstate()
        random.seed(42)
        x_shape = [2, 2, 3]
        x_np = np.random.randint(0, 256, x_shape, np.uint8)
        x_pil = Image.fromarray(x_np, mode='RGB')
        x_pil_2 = x_pil.convert('L')
        gray_np = np.array(x_pil_2)

        num_samples = 250
        num_gray = 0
        for _ in range(num_samples):
            gray_pil_2 = transforms.RandomGrayscale(p=0.5)(x_pil)
            gray_np_2 = np.array(gray_pil_2)
            if np.array_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1]) and \
                    np.array_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2]) and \
                    np.array_equal(gray_np, gray_np_2[:, :, 0]):
                num_gray = num_gray + 1

        p_value = stats.binom_test(num_gray, num_samples, p=0.5)
        random.setstate(random_state)
        self.assertGreater(p_value, 0.0001)

        # Test Set 2: grayscale -> 1 channel grayscale
        random_state = random.getstate()
        random.seed(42)
        x_shape = [2, 2, 3]
        x_np = np.random.randint(0, 256, x_shape, np.uint8)
        x_pil = Image.fromarray(x_np, mode='RGB')
        x_pil_2 = x_pil.convert('L')
        gray_np = np.array(x_pil_2)

        num_samples = 250
        num_gray = 0
        for _ in range(num_samples):
            gray_pil_3 = transforms.RandomGrayscale(p=0.5)(x_pil_2)
            gray_np_3 = np.array(gray_pil_3)
            if np.array_equal(gray_np, gray_np_3):
                num_gray = num_gray + 1

        p_value = stats.binom_test(num_gray, num_samples, p=1.0)  # Note: grayscale is always unchanged
        random.setstate(random_state)
        self.assertGreater(p_value, 0.0001)

        # Test set 3: Explicit tests
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
        x_pil_2 = x_pil.convert('L')
        gray_np = np.array(x_pil_2)

        # Case 3a: RGB -> 3 channel grayscale (grayscaled)
        trans2 = transforms.RandomGrayscale(p=1.0)
        gray_pil_2 = trans2(x_pil)
        gray_np_2 = np.array(gray_pil_2)
        self.assertEqual(gray_pil_2.mode, 'RGB', 'mode should be RGB')
        self.assertEqual(gray_np_2.shape, tuple(x_shape), 'should be 3 channel')
        np.testing.assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1])
        np.testing.assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2])
        np.testing.assert_equal(gray_np, gray_np_2[:, :, 0])

        # Case 3b: RGB -> 3 channel grayscale (unchanged)
        trans2 = transforms.RandomGrayscale(p=0.0)
        gray_pil_2 = trans2(x_pil)
        gray_np_2 = np.array(gray_pil_2)
        self.assertEqual(gray_pil_2.mode, 'RGB', 'mode should be RGB')
        self.assertEqual(gray_np_2.shape, tuple(x_shape), 'should be 3 channel')
        np.testing.assert_equal(x_np, gray_np_2)

        # Case 3c: 1 channel grayscale -> 1 channel grayscale (grayscaled)
        trans3 = transforms.RandomGrayscale(p=1.0)
        gray_pil_3 = trans3(x_pil_2)
        gray_np_3 = np.array(gray_pil_3)
        self.assertEqual(gray_pil_3.mode, 'L', 'mode should be L')
        self.assertEqual(gray_np_3.shape, tuple(x_shape[0:2]), 'should be 1 channel')
        np.testing.assert_equal(gray_np, gray_np_3)

        # Case 3d: 1 channel grayscale -> 1 channel grayscale (unchanged)
        trans3 = transforms.RandomGrayscale(p=0.0)
        gray_pil_3 = trans3(x_pil_2)
        gray_np_3 = np.array(gray_pil_3)
        self.assertEqual(gray_pil_3.mode, 'L', 'mode should be L')
        self.assertEqual(gray_np_3.shape, tuple(x_shape[0:2]), 'should be 1 channel')
        np.testing.assert_equal(gray_np, gray_np_3)

        # Checking if RandomGrayscale can be printed as string
        trans3.__repr__()

    def test_gaussian_blur_asserts(self):
        np_img = np.ones((100, 100, 3), dtype=np.uint8) * 255
        img = F.to_pil_image(np_img, "RGB")
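        # Every invalid argument is checked against both code paths: the
        # functional F.gaussian_blur validates when called, while the
        # transforms.GaussianBlur class validates at construction time.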

        with self.assertRaisesRegex(ValueError, r"If kernel_size is a sequence its length should be 2"):
            F.gaussian_blur(img, [3])

        with self.assertRaisesRegex(ValueError, r"If kernel_size is a sequence its length should be 2"):
            F.gaussian_blur(img, [3, 3, 3])
        with self.assertRaisesRegex(ValueError, r"Kernel size should be a tuple/list of two integers"):
            transforms.GaussianBlur([3, 3, 3])

        with self.assertRaisesRegex(ValueError, r"kernel_size should have odd and positive integers"):
            F.gaussian_blur(img, [4, 4])
        with self.assertRaisesRegex(ValueError, r"Kernel size value should be an odd and positive number"):
            transforms.GaussianBlur([4, 4])

        with self.assertRaisesRegex(ValueError, r"kernel_size should have odd and positive integers"):
            F.gaussian_blur(img, [-3, -3])
        with self.assertRaisesRegex(ValueError, r"Kernel size value should be an odd and positive number"):
            transforms.GaussianBlur([-3, -3])

        with self.assertRaisesRegex(ValueError, r"If sigma is a sequence, its length should be 2"):
            F.gaussian_blur(img, 3, [1, 1, 1])
        with self.assertRaisesRegex(ValueError, r"sigma should be a single number or a list/tuple with length 2"):
            transforms.GaussianBlur(3, [1, 1, 1])

        with self.assertRaisesRegex(ValueError, r"sigma should have positive values"):
            F.gaussian_blur(img, 3, -1.0)
        with self.assertRaisesRegex(ValueError, r"If sigma is a single number, it must be positive"):
            transforms.GaussianBlur(3, -1.0)

        with self.assertRaisesRegex(TypeError, r"kernel_size should be int or a sequence of integers"):
            F.gaussian_blur(img, "kernel_size_string")
        with self.assertRaisesRegex(ValueError, r"Kernel size should be a tuple/list of two integers"):
            transforms.GaussianBlur("kernel_size_string")

        with self.assertRaisesRegex(TypeError, r"sigma should be either float or sequence of floats"):
            F.gaussian_blur(img, 3, "sigma_string")
        with self.assertRaisesRegex(ValueError, r"sigma should be a single number or a list/tuple with length 2"):
            transforms.GaussianBlur(3, "sigma_string")

    def _test_randomness(self, fn, trans, configs):
        random_state = random.getstate()
        random.seed(42)
        img = transforms.ToPILImage()(torch.rand(3, 16, 18))
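        # Shared check for the p-controlled random transforms: count how often
        # the output equals the deterministically transformed image over
        # num_samples trials, then compare that rate to p with a binomial test.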

        for p in [0.5, 0.7]:
            for config in configs:
                inv_img = fn(img, **config)

                num_samples = 250
                counts = 0
                for _ in range(num_samples):
                    transformation = trans(p=p, **config)
                    transformation.__repr__()
                    out = transformation(img)
                    if out == inv_img:
                        counts += 1

                p_value = stats.binom_test(counts, num_samples, p=p)
                random.setstate(random_state)
                self.assertGreater(p_value, 0.0001)

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_invert(self):
        self._test_randomness(
            F.invert,
            transforms.RandomInvert,
            [{}]
        )

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_posterize(self):
        self._test_randomness(
            F.posterize,
            transforms.RandomPosterize,
            [{"bits": 4}]
        )

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_solarize(self):
        self._test_randomness(
            F.solarize,
            transforms.RandomSolarize,
            [{"threshold": 192}]
        )

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_adjust_sharpness(self):
        self._test_randomness(
            F.adjust_sharpness,
            transforms.RandomAdjustSharpness,
            [{"sharpness_factor": 2.0}]
        )

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_autocontrast(self):
        self._test_randomness(
            F.autocontrast,
            transforms.RandomAutocontrast,
            [{}]
        )

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_equalize(self):
        self._test_randomness(
            F.equalize,
            transforms.RandomEqualize,
            [{}]
        )

    def test_autoaugment(self):
        for policy in transforms.AutoAugmentPolicy:
            for fill in [None, 85, (128, 128, 128)]:
                random.seed(42)
                img = Image.open(GRACE_HOPPER)
                transform = transforms.AutoAugment(policy=policy, fill=fill)
                for _ in range(100):
                    img = transform(img)
                transform.__repr__()

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_erasing(self):
        img = torch.ones(3, 128, 128)

        t = transforms.RandomErasing(scale=(0.1, 0.1), ratio=(1 / 3, 3.))
        y, x, h, w, v = t.get_params(img, t.scale, t.ratio, [t.value, ])
        aspect_ratio = h / w
        # Add some tolerance due to the rounding and int conversion used in the transform
        tol = 0.05
        self.assertTrue(1 / 3 - tol <= aspect_ratio <= 3 + tol)

        aspect_ratios = []
        random.seed(42)
        trial = 1000
        for _ in range(trial):
            y, x, h, w, v = t.get_params(img, t.scale, t.ratio, [t.value, ])
            aspect_ratios.append(h / w)
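        # In this torchvision version get_params draws the aspect ratio
        # log-uniformly over (1/3, 3), so ratios above and below 1 should be
        # equally likely; verify with a two-sided binomial test.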

        count_bigger_than_one = len([1 for aspect_ratio in aspect_ratios if aspect_ratio > 1])
        p_value = stats.binom_test(count_bigger_than_one, trial, p=0.5)
        self.assertGreater(p_value, 0.0001)

        # Checking if RandomErasing can be printed as string
        t.__repr__()


if __name__ == '__main__':
    unittest.main()