import os
import torch
import torchvision.transforms as transforms
import torchvision.transforms.functional as F
from torch._utils_internal import get_file_path_2
from numpy.testing import assert_array_almost_equal
import unittest
import math
import random
import numpy as np
from PIL import Image
try:
    import accimage
except ImportError:
    accimage = None

try:
    from scipy import stats
except ImportError:
    stats = None

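# Test image shipped with the test assets (517x606 portrait of Grace Hopper)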
GRACE_HOPPER = get_file_path_2(
    os.path.dirname(os.path.abspath(__file__)), 'assets', 'grace_hopper_517x606.jpg')


def cycle_over(objs):
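    """Yield each object in turn, paired with the list of all the other objects."""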
    objs = list(objs)
    for idx, obj in enumerate(objs):
        yield obj, objs[:idx] + objs[idx + 1:]


def int_dtypes():
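    """Yield the torch integer dtypes, including aliases (torch.short, torch.int, torch.long)."""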
    yield from iter(
        (torch.uint8, torch.int8, torch.int16, torch.short, torch.int32, torch.int, torch.int64, torch.long,)
    )


def float_dtypes():
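    """Yield the torch floating-point dtypes, including aliases (torch.float, torch.double)."""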
    yield from iter((torch.float32, torch.float, torch.float64, torch.double))


class Tester(unittest.TestCase):

    def test_crop(self):
        height = random.randint(10, 32) * 2
        width = random.randint(10, 32) * 2
        # floor division keeps the randint bounds integral (float bounds are an error)
        oheight = random.randint(5, (height - 2) // 2) * 2
        owidth = random.randint(5, (width - 2) // 2) * 2

        img = torch.ones(3, height, width)
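        # Zero out the exact center region; a correct CenterCrop of the same
        # size must then produce an all-zero crop (sum == 0).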
        oh1 = (height - oheight) // 2
        ow1 = (width - owidth) // 2
        imgnarrow = img[:, oh1:oh1 + oheight, ow1:ow1 + owidth]
        imgnarrow.fill_(0)
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.CenterCrop((oheight, owidth)),
            transforms.ToTensor(),
        ])(img)
        self.assertEqual(result.sum(), 0,
                         "height: {} width: {} oheight: {} owidth: {}".format(height, width, oheight, owidth))
        oheight += 1
        owidth += 1
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.CenterCrop((oheight, owidth)),
            transforms.ToTensor(),
        ])(img)
        sum1 = result.sum()
        self.assertGreater(sum1, 1,
                           "height: {} width: {} oheight: {} owidth: {}".format(height, width, oheight, owidth))
        oheight += 1
        owidth += 1
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.CenterCrop((oheight, owidth)),
            transforms.ToTensor(),
        ])(img)
        sum2 = result.sum()
        self.assertGreater(sum2, 0,
                           "height: {} width: {} oheight: {} owidth: {}".format(height, width, oheight, owidth))
        self.assertGreater(sum2, sum1,
                           "height: {} width: {} oheight: {} owidth: {}".format(height, width, oheight, owidth))

    def test_five_crop(self):
        to_pil_image = transforms.ToPILImage()
        h = random.randint(5, 25)
        w = random.randint(5, 25)
        for single_dim in [True, False]:
            crop_h = random.randint(1, h)
            crop_w = random.randint(1, w)
            if single_dim:
                crop_h = min(crop_h, crop_w)
                crop_w = crop_h
                transform = transforms.FiveCrop(crop_h)
            else:
                transform = transforms.FiveCrop((crop_h, crop_w))

            img = torch.FloatTensor(3, h, w).uniform_()
            results = transform(to_pil_image(img))

            self.assertEqual(len(results), 5)
            for crop in results:
                self.assertEqual(crop.size, (crop_w, crop_h))

            to_pil_image = transforms.ToPILImage()
            tl = to_pil_image(img[:, 0:crop_h, 0:crop_w])
            tr = to_pil_image(img[:, 0:crop_h, w - crop_w:])
            bl = to_pil_image(img[:, h - crop_h:, 0:crop_w])
            br = to_pil_image(img[:, h - crop_h:, w - crop_w:])
            center = transforms.CenterCrop((crop_h, crop_w))(to_pil_image(img))
            expected_output = (tl, tr, bl, br, center)
            self.assertEqual(results, expected_output)

    def test_ten_crop(self):
        to_pil_image = transforms.ToPILImage()
        h = random.randint(5, 25)
        w = random.randint(5, 25)
        for should_vflip in [True, False]:
            for single_dim in [True, False]:
                crop_h = random.randint(1, h)
                crop_w = random.randint(1, w)
                if single_dim:
                    crop_h = min(crop_h, crop_w)
                    crop_w = crop_h
                    transform = transforms.TenCrop(crop_h,
                                                   vertical_flip=should_vflip)
                    five_crop = transforms.FiveCrop(crop_h)
                else:
                    transform = transforms.TenCrop((crop_h, crop_w),
                                                   vertical_flip=should_vflip)
                    five_crop = transforms.FiveCrop((crop_h, crop_w))

                img = to_pil_image(torch.FloatTensor(3, h, w).uniform_())
                results = transform(img)
                expected_output = five_crop(img)
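                # TenCrop should equal FiveCrop of the image followed by
                # FiveCrop of its flipped version (vertical or horizontal)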

                # Checking if FiveCrop and TenCrop can be printed as string
                transform.__repr__()
                five_crop.__repr__()

                if should_vflip:
                    vflipped_img = img.transpose(Image.FLIP_TOP_BOTTOM)
                    expected_output += five_crop(vflipped_img)
                else:
                    hflipped_img = img.transpose(Image.FLIP_LEFT_RIGHT)
                    expected_output += five_crop(hflipped_img)

                self.assertEqual(len(results), 10)
                self.assertEqual(results, expected_output)

    def test_randomresized_params(self):
        height = random.randint(24, 32) * 2
        width = random.randint(24, 32) * 2
        img = torch.ones(3, height, width)
        to_pil_image = transforms.ToPILImage()
        img = to_pil_image(img)
        size = 100
        epsilon = 0.05
        min_scale = 0.25
        for _ in range(10):
            scale_min = max(round(random.random(), 2), min_scale)
            scale_range = (scale_min, scale_min + round(random.random(), 2))
            aspect_min = max(round(random.random(), 2), epsilon)
            aspect_ratio_range = (aspect_min, aspect_min + round(random.random(), 2))
            randresizecrop = transforms.RandomResizedCrop(size, scale_range, aspect_ratio_range)
            i, j, h, w = randresizecrop.get_params(img, scale_range, aspect_ratio_range)
            aspect_ratio_obtained = w / h
            self.assertTrue((min(aspect_ratio_range) - epsilon <= aspect_ratio_obtained and
                             aspect_ratio_obtained <= max(aspect_ratio_range) + epsilon) or
                            aspect_ratio_obtained == 1.0)
            self.assertIsInstance(i, int)
            self.assertIsInstance(j, int)
            self.assertIsInstance(h, int)
            self.assertIsInstance(w, int)

    def test_randomperspective(self):
        for _ in range(10):
            height = random.randint(24, 32) * 2
            width = random.randint(24, 32) * 2
            img = torch.ones(3, height, width)
            to_pil_image = transforms.ToPILImage()
            img = to_pil_image(img)
            perp = transforms.RandomPerspective()
            startpoints, endpoints = perp.get_params(width, height, 0.5)
            tr_img = F.perspective(img, startpoints, endpoints)
            tr_img2 = F.to_tensor(F.perspective(tr_img, endpoints, startpoints))
            tr_img = F.to_tensor(tr_img)
            self.assertEqual(img.size[0], width)
            self.assertEqual(img.size[1], height)
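            # Warping back with the swapped point lists should roughly invert the
            # first warp, so its reconstruction MSE should be lower (0.3 slack).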
            self.assertGreater(torch.nn.functional.mse_loss(tr_img, F.to_tensor(img)) + 0.3,
                               torch.nn.functional.mse_loss(tr_img2, F.to_tensor(img)))

    def test_randomperspective_fill(self):
        height = 100
        width = 100
        img = torch.ones(3, height, width)
        to_pil_image = transforms.ToPILImage()
        img = to_pil_image(img)

        modes = ("L", "RGB", "F")
        nums_bands = [len(mode) for mode in modes]
        fill = 127

        for mode, num_bands in zip(modes, nums_bands):
            img_conv = img.convert(mode)
            perspective = transforms.RandomPerspective(p=1, fill=fill)
            tr_img = perspective(img_conv)
            pixel = tr_img.getpixel((0, 0))

            if not isinstance(pixel, tuple):
                pixel = (pixel,)
            self.assertTupleEqual(pixel, tuple([fill] * num_bands))

        for mode, num_bands in zip(modes, nums_bands):
            img_conv = img.convert(mode)
            startpoints, endpoints = transforms.RandomPerspective.get_params(width, height, 0.5)
            tr_img = F.perspective(img_conv, startpoints, endpoints, fill=fill)
            pixel = tr_img.getpixel((0, 0))

            if not isinstance(pixel, tuple):
                pixel = (pixel,)
            self.assertTupleEqual(pixel, tuple([fill] * num_bands))

            for wrong_num_bands in set(nums_bands) - {num_bands}:
                with self.assertRaises(ValueError):
                    F.perspective(img_conv, startpoints, endpoints, fill=tuple([fill] * wrong_num_bands))

    def test_resize(self):
        height = random.randint(24, 32) * 2
        width = random.randint(24, 32) * 2
        osize = random.randint(5, 12) * 2

        img = torch.ones(3, height, width)
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize(osize),
            transforms.ToTensor(),
        ])(img)
        self.assertIn(osize, result.size())
        if height < width:
            self.assertLessEqual(result.size(1), result.size(2))
        elif width < height:
            self.assertGreaterEqual(result.size(1), result.size(2))

        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize([osize, osize]),
            transforms.ToTensor(),
        ])(img)
        self.assertIn(osize, result.size())
        self.assertEqual(result.size(1), osize)
        self.assertEqual(result.size(2), osize)

        oheight = random.randint(5, 12) * 2
        owidth = random.randint(5, 12) * 2
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((oheight, owidth)),
            transforms.ToTensor(),
        ])(img)
        self.assertEqual(result.size(1), oheight)
        self.assertEqual(result.size(2), owidth)

        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize([oheight, owidth]),
            transforms.ToTensor(),
        ])(img)
        self.assertEqual(result.size(1), oheight)
        self.assertEqual(result.size(2), owidth)

    def test_random_crop(self):
        height = random.randint(10, 32) * 2
        width = random.randint(10, 32) * 2
        oheight = random.randint(5, (height - 2) // 2) * 2
        owidth = random.randint(5, (width - 2) // 2) * 2
        img = torch.ones(3, height, width)
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomCrop((oheight, owidth)),
            transforms.ToTensor(),
        ])(img)
        self.assertEqual(result.size(1), oheight)
        self.assertEqual(result.size(2), owidth)

        padding = random.randint(1, 20)
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomCrop((oheight, owidth), padding=padding),
            transforms.ToTensor(),
        ])(img)
        self.assertEqual(result.size(1), oheight)
        self.assertEqual(result.size(2), owidth)

        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomCrop((height, width)),
            transforms.ToTensor()
        ])(img)
        self.assertEqual(result.size(1), height)
        self.assertEqual(result.size(2), width)
        self.assertTrue(np.allclose(img.numpy(), result.numpy()))

        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomCrop((height + 1, width + 1), pad_if_needed=True),
            transforms.ToTensor(),
        ])(img)
        self.assertEqual(result.size(1), height + 1)
        self.assertEqual(result.size(2), width + 1)

    def test_pad(self):
        height = random.randint(10, 32) * 2
        width = random.randint(10, 32) * 2
        img = torch.ones(3, height, width)
        padding = random.randint(1, 20)
        fill = random.randint(1, 50)
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Pad(padding, fill=fill),
            transforms.ToTensor(),
        ])(img)
        self.assertEqual(result.size(1), height + 2 * padding)
        self.assertEqual(result.size(2), width + 2 * padding)
        # check that all elements in the padded region correspond
        # to the pad value
        fill_v = fill / 255
        eps = 1e-5
        self.assertTrue((result[:, :padding, :] - fill_v).abs().max() < eps)
        self.assertTrue((result[:, :, :padding] - fill_v).abs().max() < eps)
        self.assertRaises(ValueError, transforms.Pad(padding, fill=(1, 2)),
                          transforms.ToPILImage()(img))

    def test_pad_with_tuple_of_pad_values(self):
        height = random.randint(10, 32) * 2
        width = random.randint(10, 32) * 2
        img = transforms.ToPILImage()(torch.ones(3, height, width))

        padding = tuple([random.randint(1, 20) for _ in range(2)])
        output = transforms.Pad(padding)(img)
        self.assertEqual(output.size, (width + padding[0] * 2, height + padding[1] * 2))

        padding = tuple([random.randint(1, 20) for _ in range(4)])
        output = transforms.Pad(padding)(img)
        self.assertEqual(output.size[0], width + padding[0] + padding[2])
        self.assertEqual(output.size[1], height + padding[1] + padding[3])

        # Checking if Padding can be printed as string
        transforms.Pad(padding).__repr__()

    def test_pad_with_non_constant_padding_modes(self):
        """Unit tests for edge, reflect, symmetric padding"""
        img = torch.zeros(3, 27, 27).byte()
        img[:, :, 0] = 1  # Constant value added to leftmost edge
        img = transforms.ToPILImage()(img)
        img = F.pad(img, 1, (200, 200, 200))

        # pad 3 to all sides
        edge_padded_img = F.pad(img, 3, padding_mode='edge')
        # First 6 elements of leftmost edge in the middle of the image, values are in order:
        # edge_pad, edge_pad, edge_pad, constant_pad, constant value added to leftmost edge, 0
        edge_middle_slice = np.asarray(edge_padded_img).transpose(2, 0, 1)[0][17][:6]
        self.assertTrue(np.all(edge_middle_slice == np.asarray([200, 200, 200, 200, 1, 0])))
        self.assertEqual(transforms.ToTensor()(edge_padded_img).size(), (3, 35, 35))

        # Pad 3 to left/right, 2 to top/bottom
        reflect_padded_img = F.pad(img, (3, 2), padding_mode='reflect')
        # First 6 elements of leftmost edge in the middle of the image, values are in order:
        # reflect_pad, reflect_pad, reflect_pad, constant_pad, constant value added to leftmost edge, 0
        reflect_middle_slice = np.asarray(reflect_padded_img).transpose(2, 0, 1)[0][17][:6]
        self.assertTrue(np.all(reflect_middle_slice == np.asarray([0, 0, 1, 200, 1, 0])))
        self.assertEqual(transforms.ToTensor()(reflect_padded_img).size(), (3, 33, 35))

        # Pad 3 to left, 2 to top, 2 to right, 1 to bottom
        symmetric_padded_img = F.pad(img, (3, 2, 2, 1), padding_mode='symmetric')
        # First 6 elements of leftmost edge in the middle of the image, values are in order:
        # sym_pad, sym_pad, sym_pad, constant_pad, constant value added to leftmost edge, 0
        symmetric_middle_slice = np.asarray(symmetric_padded_img).transpose(2, 0, 1)[0][17][:6]
        self.assertTrue(np.all(symmetric_middle_slice == np.asarray([0, 1, 200, 200, 1, 0])))
        self.assertEqual(transforms.ToTensor()(symmetric_padded_img).size(), (3, 32, 34))

    def test_pad_raises_with_invalid_pad_sequence_len(self):
        with self.assertRaises(ValueError):
            transforms.Pad(())

        with self.assertRaises(ValueError):
            transforms.Pad((1, 2, 3))

        with self.assertRaises(ValueError):
            transforms.Pad((1, 2, 3, 4, 5))

    def test_pad_with_mode_F_images(self):
        pad = 2
        transform = transforms.Pad(pad)

        img = Image.new("F", (10, 10))
        padded_img = transform(img)
        self.assertSequenceEqual(padded_img.size, [edge_size + 2 * pad for edge_size in img.size])

    def test_lambda(self):
        trans = transforms.Lambda(lambda x: x.add(10))
        x = torch.randn(10)
        y = trans(x)
        self.assertTrue(y.equal(torch.add(x, 10)))

        trans = transforms.Lambda(lambda x: x.add_(10))
        x = torch.randn(10)
        y = trans(x)
        self.assertTrue(y.equal(x))

        # Checking if Lambda can be printed as string
        trans.__repr__()

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_apply(self):
        random_state = random.getstate()
        random.seed(42)
        random_apply_transform = transforms.RandomApply(
            [
                transforms.RandomRotation((-45, 45)),
                transforms.RandomHorizontalFlip(),
                transforms.RandomVerticalFlip(),
            ], p=0.75
        )
        img = transforms.ToPILImage()(torch.rand(3, 10, 10))
        num_samples = 250
        num_applies = 0
        for _ in range(num_samples):
            out = random_apply_transform(img)
            if out != img:
                num_applies += 1

        p_value = stats.binom_test(num_applies, num_samples, p=0.75)
        random.setstate(random_state)
        self.assertGreater(p_value, 0.0001)

        # Checking if RandomApply can be printed as string
        random_apply_transform.__repr__()

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_choice(self):
        random_state = random.getstate()
        random.seed(42)
        random_choice_transform = transforms.RandomChoice(
            [
                transforms.Resize(15),
                transforms.Resize(20),
                transforms.CenterCrop(10)
            ]
        )
        img = transforms.ToPILImage()(torch.rand(3, 25, 25))
        num_samples = 250
        num_resize_15 = 0
        num_resize_20 = 0
        num_crop_10 = 0
        for _ in range(num_samples):
            out = random_choice_transform(img)
            if out.size == (15, 15):
                num_resize_15 += 1
            elif out.size == (20, 20):
                num_resize_20 += 1
            elif out.size == (10, 10):
                num_crop_10 += 1

        p_value = stats.binom_test(num_resize_15, num_samples, p=0.33333)
        self.assertGreater(p_value, 0.0001)
        p_value = stats.binom_test(num_resize_20, num_samples, p=0.33333)
        self.assertGreater(p_value, 0.0001)
        p_value = stats.binom_test(num_crop_10, num_samples, p=0.33333)
        self.assertGreater(p_value, 0.0001)

        random.setstate(random_state)
        # Checking if RandomChoice can be printed as string
        random_choice_transform.__repr__()

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_order(self):
        random_state = random.getstate()
        random.seed(42)
        random_order_transform = transforms.RandomOrder(
            [
                transforms.Resize(20),
                transforms.CenterCrop(10)
            ]
        )
        img = transforms.ToPILImage()(torch.rand(3, 25, 25))
        num_samples = 250
        num_normal_order = 0
        resize_crop_out = transforms.CenterCrop(10)(transforms.Resize(20)(img))
        for _ in range(num_samples):
            out = random_order_transform(img)
            if out == resize_crop_out:
                num_normal_order += 1

        p_value = stats.binom_test(num_normal_order, num_samples, p=0.5)
        random.setstate(random_state)
        self.assertGreater(p_value, 0.0001)

        # Checking if RandomOrder can be printed as string
        random_order_transform.__repr__()

    def test_to_tensor(self):
        test_channels = [1, 3, 4]
        height, width = 4, 4
        trans = transforms.ToTensor()

        with self.assertRaises(TypeError):
            trans(np.random.rand(1, height, width).tolist())

        with self.assertRaises(ValueError):
            trans(np.random.rand(height))

        with self.assertRaises(ValueError):
            trans(np.random.rand(1, 1, height, width))

        for channels in test_channels:
            input_data = torch.ByteTensor(channels, height, width).random_(0, 255).float().div_(255)
            img = transforms.ToPILImage()(input_data)
            output = trans(img)
            self.assertTrue(np.allclose(input_data.numpy(), output.numpy()))

            ndarray = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
            output = trans(ndarray)
            expected_output = ndarray.transpose((2, 0, 1)) / 255.0
            self.assertTrue(np.allclose(output.numpy(), expected_output))

            ndarray = np.random.rand(height, width, channels).astype(np.float32)
            output = trans(ndarray)
            expected_output = ndarray.transpose((2, 0, 1))
            self.assertTrue(np.allclose(output.numpy(), expected_output))

        # separate test for mode '1' PIL images
        input_data = torch.ByteTensor(1, height, width).bernoulli_()
        img = transforms.ToPILImage()(input_data.mul(255)).convert('1')
        output = trans(img)
        self.assertTrue(np.allclose(input_data.numpy(), output.numpy()))

    def test_convert_image_dtype_float_to_float(self):
        for input_dtype, output_dtypes in cycle_over(float_dtypes()):
            input_image = torch.tensor((0.0, 1.0), dtype=input_dtype)
            for output_dtype in output_dtypes:
                with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype):
                    transform = transforms.ConvertImageDtype(output_dtype)
                    output_image = transform(input_image)

                    actual_min, actual_max = output_image.tolist()
                    desired_min, desired_max = 0.0, 1.0

                    self.assertAlmostEqual(actual_min, desired_min)
                    self.assertAlmostEqual(actual_max, desired_max)

    def test_convert_image_dtype_float_to_int(self):
        for input_dtype in float_dtypes():
            input_image = torch.tensor((0.0, 1.0), dtype=input_dtype)
            for output_dtype in int_dtypes():
                with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype):
                    transform = transforms.ConvertImageDtype(output_dtype)

                    if (input_dtype == torch.float32 and output_dtype in (torch.int32, torch.int64)) or (
                            input_dtype == torch.float64 and output_dtype == torch.int64
                    ):
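                        # float32 cannot exactly represent the max of int32/int64
                        # (nor can float64 represent int64's max), so this raises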
                        with self.assertRaises(RuntimeError):
                            transform(input_image)
                    else:
                        output_image = transform(input_image)

                        actual_min, actual_max = output_image.tolist()
                        desired_min, desired_max = 0, torch.iinfo(output_dtype).max

                        self.assertEqual(actual_min, desired_min)
                        self.assertEqual(actual_max, desired_max)

    def test_convert_image_dtype_int_to_float(self):
        for input_dtype in int_dtypes():
            input_image = torch.tensor((0, torch.iinfo(input_dtype).max), dtype=input_dtype)
            for output_dtype in float_dtypes():
                with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype):
                    transform = transforms.ConvertImageDtype(output_dtype)
                    output_image = transform(input_image)

                    actual_min, actual_max = output_image.tolist()
                    desired_min, desired_max = 0.0, 1.0

                    self.assertAlmostEqual(actual_min, desired_min)
                    self.assertGreaterEqual(actual_min, desired_min)
                    self.assertAlmostEqual(actual_max, desired_max)
                    self.assertLessEqual(actual_max, desired_max)

    def test_convert_image_dtype_int_to_int(self):
        for input_dtype, output_dtypes in cycle_over(int_dtypes()):
            input_max = torch.iinfo(input_dtype).max
            input_image = torch.tensor((0, input_max), dtype=input_dtype)
            for output_dtype in output_dtypes:
                output_max = torch.iinfo(output_dtype).max

                with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype):
                    transform = transforms.ConvertImageDtype(output_dtype)
                    output_image = transform(input_image)

                    actual_min, actual_max = output_image.tolist()
                    desired_min, desired_max = 0, output_max

                    # see https://github.com/pytorch/vision/pull/2078#issuecomment-641036236 for details
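                    # e.g. uint8 -> int16 scales by 128, so 255 maps to
                    # 255 * 128 = 32640 = int16 max - 127 (error_term == -127)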
                    if input_max >= output_max:
                        error_term = 0
                    else:
                        error_term = 1 - (torch.iinfo(output_dtype).max + 1) // (torch.iinfo(input_dtype).max + 1)

                    self.assertEqual(actual_min, desired_min)
                    self.assertEqual(actual_max, desired_max + error_term)

    def test_convert_image_dtype_int_to_int_consistency(self):
        for input_dtype, output_dtypes in cycle_over(int_dtypes()):
            input_max = torch.iinfo(input_dtype).max
            input_image = torch.tensor((0, input_max), dtype=input_dtype)
            for output_dtype in output_dtypes:
                output_max = torch.iinfo(output_dtype).max
                if output_max <= input_max:
                    continue

                with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype):
                    transform = transforms.ConvertImageDtype(output_dtype)
                    inverse_transform = transforms.ConvertImageDtype(input_dtype)
                    output_image = inverse_transform(transform(input_image))

                    actual_min, actual_max = output_image.tolist()
                    desired_min, desired_max = 0, input_max

                    self.assertEqual(actual_min, desired_min)
                    self.assertEqual(actual_max, desired_max)

    @unittest.skipIf(accimage is None, 'accimage not available')
    def test_accimage_to_tensor(self):
        trans = transforms.ToTensor()

        expected_output = trans(Image.open(GRACE_HOPPER).convert('RGB'))
        output = trans(accimage.Image(GRACE_HOPPER))

        self.assertEqual(expected_output.size(), output.size())
        self.assertTrue(np.allclose(output.numpy(), expected_output.numpy()))

    def test_pil_to_tensor(self):
        test_channels = [1, 3, 4]
        height, width = 4, 4
        trans = transforms.PILToTensor()

        with self.assertRaises(TypeError):
            trans(np.random.rand(1, height, width).tolist())

        with self.assertRaises(TypeError):
            trans(np.random.rand(1, height, width))

        for channels in test_channels:
            input_data = torch.ByteTensor(channels, height, width).random_(0, 255)
            img = transforms.ToPILImage()(input_data)
            output = trans(img)
            self.assertTrue(np.allclose(input_data.numpy(), output.numpy()))

            input_data = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
            img = transforms.ToPILImage()(input_data)
            output = trans(img)
            expected_output = input_data.transpose((2, 0, 1))
            self.assertTrue(np.allclose(output.numpy(), expected_output))

            input_data = torch.as_tensor(np.random.rand(channels, height, width).astype(np.float32))
            img = transforms.ToPILImage()(input_data)  # CHW -> HWC and (* 255).byte()
            output = trans(img)  # HWC -> CHW
            expected_output = (input_data * 255).byte()
            self.assertTrue(np.allclose(output.numpy(), expected_output.numpy()))

        # separate test for mode '1' PIL images
        input_data = torch.ByteTensor(1, height, width).bernoulli_()
        img = transforms.ToPILImage()(input_data.mul(255)).convert('1')
        output = trans(img)
        self.assertTrue(np.allclose(input_data.numpy(), output.numpy()))

    @unittest.skipIf(accimage is None, 'accimage not available')
    def test_accimage_pil_to_tensor(self):
        trans = transforms.PILToTensor()

        expected_output = trans(Image.open(GRACE_HOPPER).convert('RGB'))
        output = trans(accimage.Image(GRACE_HOPPER))

        self.assertEqual(expected_output.size(), output.size())
        self.assertTrue(np.allclose(output.numpy(), expected_output.numpy()))

    @unittest.skipIf(accimage is None, 'accimage not available')
    def test_accimage_resize(self):
        trans = transforms.Compose([
            transforms.Resize(256, interpolation=Image.LINEAR),
            transforms.ToTensor(),
        ])

        # Checking if Compose, Resize and ToTensor can be printed as string
        trans.__repr__()

        expected_output = trans(Image.open(GRACE_HOPPER).convert('RGB'))
        output = trans(accimage.Image(GRACE_HOPPER))

        self.assertEqual(expected_output.size(), output.size())
        self.assertLess(np.abs((expected_output - output).mean()), 1e-3)
        self.assertLess((expected_output - output).var(), 1e-5)
        # note the high absolute tolerance
        self.assertTrue(np.allclose(output.numpy(), expected_output.numpy(), atol=5e-2))

    @unittest.skipIf(accimage is None, 'accimage not available')
    def test_accimage_crop(self):
        trans = transforms.Compose([
            transforms.CenterCrop(256),
            transforms.ToTensor(),
        ])

        # Checking if Compose, CenterCrop and ToTensor can be printed as string
        trans.__repr__()

        expected_output = trans(Image.open(GRACE_HOPPER).convert('RGB'))
        output = trans(accimage.Image(GRACE_HOPPER))

        self.assertEqual(expected_output.size(), output.size())
        self.assertTrue(np.allclose(output.numpy(), expected_output.numpy()))

    def test_1_channel_tensor_to_pil_image(self):
        to_tensor = transforms.ToTensor()

        img_data_float = torch.Tensor(1, 4, 4).uniform_()
        img_data_byte = torch.ByteTensor(1, 4, 4).random_(0, 255)
        img_data_short = torch.ShortTensor(1, 4, 4).random_()
        img_data_int = torch.IntTensor(1, 4, 4).random_()

        inputs = [img_data_float, img_data_byte, img_data_short, img_data_int]
        expected_outputs = [img_data_float.mul(255).int().float().div(255).numpy(),
                            img_data_byte.float().div(255.0).numpy(),
                            img_data_short.numpy(),
                            img_data_int.numpy()]
        expected_modes = ['L', 'L', 'I;16', 'I']

        for img_data, expected_output, mode in zip(inputs, expected_outputs, expected_modes):
            for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]:
                img = transform(img_data)
                self.assertEqual(img.mode, mode)
                self.assertTrue(np.allclose(expected_output, to_tensor(img).numpy()))
        # 'F' mode for torch.FloatTensor
        img_F_mode = transforms.ToPILImage(mode='F')(img_data_float)
        self.assertEqual(img_F_mode.mode, 'F')
        self.assertTrue(np.allclose(np.array(Image.fromarray(img_data_float.squeeze(0).numpy(), mode='F')),
                                    np.array(img_F_mode)))

    def test_1_channel_ndarray_to_pil_image(self):
        img_data_float = torch.Tensor(4, 4, 1).uniform_().numpy()
        img_data_byte = torch.ByteTensor(4, 4, 1).random_(0, 255).numpy()
        img_data_short = torch.ShortTensor(4, 4, 1).random_().numpy()
        img_data_int = torch.IntTensor(4, 4, 1).random_().numpy()

        inputs = [img_data_float, img_data_byte, img_data_short, img_data_int]
        expected_modes = ['F', 'L', 'I;16', 'I']
        for img_data, mode in zip(inputs, expected_modes):
            for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]:
                img = transform(img_data)
                self.assertEqual(img.mode, mode)
                self.assertTrue(np.allclose(img_data[:, :, 0], img))

    def test_2_channel_ndarray_to_pil_image(self):
        def verify_img_data(img_data, mode):
            if mode is None:
                img = transforms.ToPILImage()(img_data)
                self.assertEqual(img.mode, 'LA')  # default should assume LA
            else:
                img = transforms.ToPILImage(mode=mode)(img_data)
                self.assertEqual(img.mode, mode)
            split = img.split()
            for i in range(2):
                self.assertTrue(np.allclose(img_data[:, :, i], split[i]))

        img_data = torch.ByteTensor(4, 4, 2).random_(0, 255).numpy()
        for mode in [None, 'LA']:
            verify_img_data(img_data, mode)

        transforms.ToPILImage().__repr__()

        # should raise if we try a mode for 4 or 1 or 3 channel images
        for mode in ['RGBA', 'P', 'RGB']:
            with self.assertRaises(ValueError):
                transforms.ToPILImage(mode=mode)(img_data)

    def test_2_channel_tensor_to_pil_image(self):
        def verify_img_data(img_data, expected_output, mode):
            if mode is None:
                img = transforms.ToPILImage()(img_data)
                self.assertEqual(img.mode, 'LA')  # default should assume LA
            else:
                img = transforms.ToPILImage(mode=mode)(img_data)
                self.assertEqual(img.mode, mode)
            split = img.split()
            for i in range(2):
                self.assertTrue(np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy()))

        img_data = torch.Tensor(2, 4, 4).uniform_()
        expected_output = img_data.mul(255).int().float().div(255)
        for mode in [None, 'LA']:
            verify_img_data(img_data, expected_output, mode=mode)

        # should raise if we try a mode for 4 or 1 or 3 channel images
        for mode in ['RGBA', 'P', 'RGB']:
            with self.assertRaises(ValueError):
                transforms.ToPILImage(mode=mode)(img_data)

    def test_3_channel_tensor_to_pil_image(self):
        def verify_img_data(img_data, expected_output, mode):
            if mode is None:
                img = transforms.ToPILImage()(img_data)
                self.assertEqual(img.mode, 'RGB')  # default should assume RGB
            else:
                img = transforms.ToPILImage(mode=mode)(img_data)
                self.assertEqual(img.mode, mode)
            split = img.split()
            for i in range(3):
                self.assertTrue(np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy()))

        img_data = torch.Tensor(3, 4, 4).uniform_()
        expected_output = img_data.mul(255).int().float().div(255)
        for mode in [None, 'RGB', 'HSV', 'YCbCr']:
            verify_img_data(img_data, expected_output, mode=mode)

        # should raise if we try a mode for 4 or 1 or 2 channel images
        for mode in ['RGBA', 'P', 'LA']:
            with self.assertRaises(ValueError):
                transforms.ToPILImage(mode=mode)(img_data)

        with self.assertRaises(ValueError):
            transforms.ToPILImage()(torch.Tensor(1, 3, 4, 4).uniform_())

    def test_3_channel_ndarray_to_pil_image(self):
        def verify_img_data(img_data, mode):
            if mode is None:
                img = transforms.ToPILImage()(img_data)
                self.assertEqual(img.mode, 'RGB')  # default should assume RGB
            else:
                img = transforms.ToPILImage(mode=mode)(img_data)
                self.assertEqual(img.mode, mode)
            split = img.split()
            for i in range(3):
                self.assertTrue(np.allclose(img_data[:, :, i], split[i]))

        img_data = torch.ByteTensor(4, 4, 3).random_(0, 255).numpy()
        for mode in [None, 'RGB', 'HSV', 'YCbCr']:
            verify_img_data(img_data, mode)

        # Checking if ToPILImage can be printed as string
        transforms.ToPILImage().__repr__()

        # should raise if we try a mode for 4 or 1 or 2 channel images
        for mode in ['RGBA', 'P', 'LA']:
            with self.assertRaises(ValueError):
                transforms.ToPILImage(mode=mode)(img_data)

    def test_4_channel_tensor_to_pil_image(self):
        def verify_img_data(img_data, expected_output, mode):
            if mode is None:
                img = transforms.ToPILImage()(img_data)
                self.assertEqual(img.mode, 'RGBA')  # default should assume RGBA
            else:
                img = transforms.ToPILImage(mode=mode)(img_data)
                self.assertEqual(img.mode, mode)

            split = img.split()
            for i in range(4):
                self.assertTrue(np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy()))

        img_data = torch.Tensor(4, 4, 4).uniform_()
        expected_output = img_data.mul(255).int().float().div(255)
        for mode in [None, 'RGBA', 'CMYK', 'RGBX']:
            verify_img_data(img_data, expected_output, mode)

        # should raise if we try a mode for 3 or 1 or 2 channel images
        for mode in ['RGB', 'P', 'LA']:
            with self.assertRaises(ValueError):
                transforms.ToPILImage(mode=mode)(img_data)

    def test_4_channel_ndarray_to_pil_image(self):
        def verify_img_data(img_data, mode):
            if mode is None:
                img = transforms.ToPILImage()(img_data)
                self.assertEqual(img.mode, 'RGBA')  # default should assume RGBA
            else:
                img = transforms.ToPILImage(mode=mode)(img_data)
                self.assertEqual(img.mode, mode)
            split = img.split()
            for i in range(4):
                self.assertTrue(np.allclose(img_data[:, :, i], split[i]))

        img_data = torch.ByteTensor(4, 4, 4).random_(0, 255).numpy()
        for mode in [None, 'RGBA', 'CMYK', 'RGBX']:
            verify_img_data(img_data, mode)

        # should raise if we try a mode for 3 or 1 or 2 channel images
        for mode in ['RGB', 'P', 'LA']:
            with self.assertRaises(ValueError):
                transforms.ToPILImage(mode=mode)(img_data)

    def test_2d_tensor_to_pil_image(self):
        to_tensor = transforms.ToTensor()

        img_data_float = torch.Tensor(4, 4).uniform_()
        img_data_byte = torch.ByteTensor(4, 4).random_(0, 255)
        img_data_short = torch.ShortTensor(4, 4).random_()
        img_data_int = torch.IntTensor(4, 4).random_()

        inputs = [img_data_float, img_data_byte, img_data_short, img_data_int]
        expected_outputs = [img_data_float.mul(255).int().float().div(255).numpy(),
                            img_data_byte.float().div(255.0).numpy(),
                            img_data_short.numpy(),
                            img_data_int.numpy()]
        expected_modes = ['L', 'L', 'I;16', 'I']

        for img_data, expected_output, mode in zip(inputs, expected_outputs, expected_modes):
            for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]:
                img = transform(img_data)
                self.assertEqual(img.mode, mode)
                self.assertTrue(np.allclose(expected_output, to_tensor(img).numpy()))

    def test_2d_ndarray_to_pil_image(self):
        img_data_float = torch.Tensor(4, 4).uniform_().numpy()
        img_data_byte = torch.ByteTensor(4, 4).random_(0, 255).numpy()
        img_data_short = torch.ShortTensor(4, 4).random_().numpy()
        img_data_int = torch.IntTensor(4, 4).random_().numpy()

        inputs = [img_data_float, img_data_byte, img_data_short, img_data_int]
        expected_modes = ['F', 'L', 'I;16', 'I']
        for img_data, mode in zip(inputs, expected_modes):
            for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]:
                img = transform(img_data)
                self.assertEqual(img.mode, mode)
                self.assertTrue(np.allclose(img_data, img))

    def test_tensor_bad_types_to_pil_image(self):
        with self.assertRaises(ValueError):
            transforms.ToPILImage()(torch.ones(1, 3, 4, 4))

    def test_ndarray_bad_types_to_pil_image(self):
        trans = transforms.ToPILImage()
        for dtype in [np.int64, np.uint16, np.uint32, np.float64]:
            with self.assertRaises(TypeError):
                trans(np.ones([4, 4, 1], dtype))

        with self.assertRaises(ValueError):
            transforms.ToPILImage()(np.ones([1, 4, 4, 3]))

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_vertical_flip(self):
        random_state = random.getstate()
        random.seed(42)
        img = transforms.ToPILImage()(torch.rand(3, 10, 10))
        vimg = img.transpose(Image.FLIP_TOP_BOTTOM)

        num_samples = 250
        num_vertical = 0
        for _ in range(num_samples):
            out = transforms.RandomVerticalFlip()(img)
            if out == vimg:
                num_vertical += 1

        p_value = stats.binom_test(num_vertical, num_samples, p=0.5)
        random.setstate(random_state)
        self.assertGreater(p_value, 0.0001)

        num_samples = 250
        num_vertical = 0
        for _ in range(num_samples):
            out = transforms.RandomVerticalFlip(p=0.7)(img)
            if out == vimg:
                num_vertical += 1

        p_value = stats.binom_test(num_vertical, num_samples, p=0.7)
        random.setstate(random_state)
        self.assertGreater(p_value, 0.0001)

        # Checking if RandomVerticalFlip can be printed as string
        transforms.RandomVerticalFlip().__repr__()

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_horizontal_flip(self):
        random_state = random.getstate()
        random.seed(42)
        img = transforms.ToPILImage()(torch.rand(3, 10, 10))
        himg = img.transpose(Image.FLIP_LEFT_RIGHT)

        num_samples = 250
        num_horizontal = 0
        for _ in range(num_samples):
            out = transforms.RandomHorizontalFlip()(img)
            if out == himg:
                num_horizontal += 1

        p_value = stats.binom_test(num_horizontal, num_samples, p=0.5)
        random.setstate(random_state)
        self.assertGreater(p_value, 0.0001)

        num_samples = 250
        num_horizontal = 0
        for _ in range(num_samples):
            out = transforms.RandomHorizontalFlip(p=0.7)(img)
            if out == himg:
                num_horizontal += 1

        p_value = stats.binom_test(num_horizontal, num_samples, p=0.7)
        random.setstate(random_state)
        self.assertGreater(p_value, 0.0001)

        # Checking if RandomHorizontalFlip can be printed as string
        transforms.RandomHorizontalFlip().__repr__()

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_normalize(self):
        def samples_from_standard_normal(tensor):
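            """Kolmogorov-Smirnov test that the flattened tensor is plausibly N(0, 1)."""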
            p_value = stats.kstest(list(tensor.view(-1)), 'norm', args=(0, 1)).pvalue
            return p_value > 0.0001

        random_state = random.getstate()
        random.seed(42)
        for channels in [1, 3]:
            img = torch.rand(channels, 10, 10)
            mean = [img[c].mean() for c in range(channels)]
            std = [img[c].std() for c in range(channels)]
            normalized = transforms.Normalize(mean, std)(img)
            self.assertTrue(samples_from_standard_normal(normalized))
        random.setstate(random_state)

        # Checking if Normalize can be printed as string
        transforms.Normalize(mean, std).__repr__()

        # Checking the optional in-place behaviour
        tensor = torch.rand((1, 16, 16))
        tensor_inplace = transforms.Normalize((0.5,), (0.5,), inplace=True)(tensor)
        self.assertTrue(torch.equal(tensor, tensor_inplace))

    def test_normalize_different_dtype(self):
        for dtype1 in [torch.float32, torch.float64]:
            img = torch.rand(3, 10, 10, dtype=dtype1)
            for dtype2 in [torch.int64, torch.float32, torch.float64]:
                mean = torch.tensor([1, 2, 3], dtype=dtype2)
                std = torch.tensor([1, 2, 1], dtype=dtype2)
                # checks that it doesn't crash
                transforms.functional.normalize(img, mean, std)

    def test_normalize_3d_tensor(self):
        torch.manual_seed(28)
        n_channels = 3
        img_size = 10
        mean = torch.rand(n_channels)
        std = torch.rand(n_channels)
        img = torch.rand(n_channels, img_size, img_size)
        target = F.normalize(img, mean, std).numpy()

        mean_unsqueezed = mean.view(-1, 1, 1)
        std_unsqueezed = std.view(-1, 1, 1)
        result1 = F.normalize(img, mean_unsqueezed, std_unsqueezed)
        result2 = F.normalize(img,
                              mean_unsqueezed.repeat(1, img_size, img_size),
                              std_unsqueezed.repeat(1, img_size, img_size))
        assert_array_almost_equal(target, result1.numpy())
        assert_array_almost_equal(target, result2.numpy())

    def test_adjust_brightness(self):
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')

        # test 0
        y_pil = F.adjust_brightness(x_pil, 1)
        y_np = np.array(y_pil)
        self.assertTrue(np.allclose(y_np, x_np))

        # test 1
        y_pil = F.adjust_brightness(x_pil, 0.5)
        y_np = np.array(y_pil)
        y_ans = [0, 2, 6, 27, 67, 113, 18, 4, 117, 45, 127, 0]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

        # test 2
        y_pil = F.adjust_brightness(x_pil, 2)
        y_np = np.array(y_pil)
        y_ans = [0, 10, 26, 108, 255, 255, 74, 16, 255, 180, 255, 2]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

    def test_adjust_contrast(self):
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')

        # test 0
        y_pil = F.adjust_contrast(x_pil, 1)
        y_np = np.array(y_pil)
        self.assertTrue(np.allclose(y_np, x_np))

        # test 1
        y_pil = F.adjust_contrast(x_pil, 0.5)
        y_np = np.array(y_pil)
        y_ans = [43, 45, 49, 70, 110, 156, 61, 47, 160, 88, 170, 43]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

        # test 2
        y_pil = F.adjust_contrast(x_pil, 2)
        y_np = np.array(y_pil)
        y_ans = [0, 0, 0, 22, 184, 255, 0, 0, 255, 94, 255, 0]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

    @unittest.skipIf(int(Image.__version__.split('.')[0]) >= 7, "Temporarily disabled")
    def test_adjust_saturation(self):
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')

        # test 0
        y_pil = F.adjust_saturation(x_pil, 1)
        y_np = np.array(y_pil)
        self.assertTrue(np.allclose(y_np, x_np))

        # test 1
        y_pil = F.adjust_saturation(x_pil, 0.5)
        y_np = np.array(y_pil)
        y_ans = [2, 4, 8, 87, 128, 173, 39, 25, 138, 133, 215, 88]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

        # test 2
        y_pil = F.adjust_saturation(x_pil, 2)
        y_np = np.array(y_pil)
        y_ans = [0, 6, 22, 0, 149, 255, 32, 0, 255, 4, 255, 0]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

    def test_adjust_hue(self):
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')

        with self.assertRaises(ValueError):
            F.adjust_hue(x_pil, -0.7)

        with self.assertRaises(ValueError):
            F.adjust_hue(x_pil, 1)

        # test 0: almost same as x_data but not exact.
        # probably because hsv <-> rgb floating point ops
        y_pil = F.adjust_hue(x_pil, 0)
        y_np = np.array(y_pil)
        y_ans = [0, 5, 13, 54, 139, 226, 35, 8, 234, 91, 255, 1]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

        # test 1
        y_pil = F.adjust_hue(x_pil, 0.25)
        y_np = np.array(y_pil)
        y_ans = [13, 0, 12, 224, 54, 226, 234, 8, 99, 1, 222, 255]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

        # test 2
        y_pil = F.adjust_hue(x_pil, -0.25)
        y_np = np.array(y_pil)
        y_ans = [0, 13, 2, 54, 226, 58, 8, 234, 152, 255, 43, 1]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

    def test_adjust_gamma(self):
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')

        # test 0
        y_pil = F.adjust_gamma(x_pil, 1)
        y_np = np.array(y_pil)
        self.assertTrue(np.allclose(y_np, x_np))

        # test 1
        y_pil = F.adjust_gamma(x_pil, 0.5)
        y_np = np.array(y_pil)
        y_ans = [0, 35, 57, 117, 186, 241, 97, 45, 245, 152, 255, 16]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

        # test 2
        y_pil = F.adjust_gamma(x_pil, 2)
        y_np = np.array(y_pil)
        y_ans = [0, 0, 0, 11, 71, 201, 5, 0, 215, 31, 255, 0]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        self.assertTrue(np.allclose(y_np, y_ans))

    def test_adjusts_L_mode(self):
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_rgb = Image.fromarray(x_np, mode='RGB')

        x_l = x_rgb.convert('L')
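        # Each adjustment should preserve the input mode, including
        # single-channel 'L' images.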
        self.assertEqual(F.adjust_brightness(x_l, 2).mode, 'L')
        self.assertEqual(F.adjust_saturation(x_l, 2).mode, 'L')
        self.assertEqual(F.adjust_contrast(x_l, 2).mode, 'L')
        self.assertEqual(F.adjust_hue(x_l, 0.4).mode, 'L')
        self.assertEqual(F.adjust_gamma(x_l, 0.5).mode, 'L')

    def test_color_jitter(self):
        color_jitter = transforms.ColorJitter(2, 2, 2, 0.1)
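        # Factors of 2 sample brightness/contrast/saturation uniformly from
        # [max(0, 1 - 2), 1 + 2] = [0, 3]; hue 0.1 samples from [-0.1, 0.1].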

        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
        x_pil_2 = x_pil.convert('L')

        for i in range(10):
            y_pil = color_jitter(x_pil)
            self.assertEqual(y_pil.mode, x_pil.mode)

            y_pil_2 = color_jitter(x_pil_2)
            self.assertEqual(y_pil_2.mode, x_pil_2.mode)

        # Checking if ColorJitter can be printed as string
        color_jitter.__repr__()

    def test_linear_transformation(self):
        num_samples = 1000
        x = torch.randn(num_samples, 3, 10, 10)
        flat_x = x.view(x.size(0), x.size(1) * x.size(2) * x.size(3))
        # compute principal components
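        # ZCA whitening: with covariance sigma = U diag(s) U^T, the whitening
        # matrix is W = U diag(1 / sqrt(s + eps)) U^T, chosen so that the
        # transformed data has (approximately) identity covariance.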
        sigma = torch.mm(flat_x.t(), flat_x) / flat_x.size(0)
        u, s, _ = np.linalg.svd(sigma.numpy())
        zca_epsilon = 1e-10  # avoid division by 0
        d = torch.Tensor(np.diag(1. / np.sqrt(s + zca_epsilon)))
        u = torch.Tensor(u)
        principal_components = torch.mm(torch.mm(u, d), u.t())
        mean_vector = (torch.sum(flat_x, dim=0) / flat_x.size(0))
        # initialize whitening matrix
        whitening = transforms.LinearTransformation(principal_components, mean_vector)
        # estimate covariance and mean using weak law of large number
        num_features = flat_x.size(1)
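        # If whitening worked, each flattened sample has ~unit variance and
        # ~zero mean, so the per-sample mean square accumulated in cov should
        # average to ~1 and the per-sample mean to ~0 over many samples.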
        cov = 0.0
        mean = 0.0
        for i in x:
            xwhite = whitening(i)
            xwhite = xwhite.view(1, -1).numpy()
            cov += np.dot(xwhite, xwhite.T) / num_features
            mean += np.sum(xwhite) / num_features
        # if rtol for std = 1e-3 then rtol for cov = 2e-3 as std**2 = cov
        self.assertTrue(np.allclose(cov / num_samples, np.identity(1), rtol=2e-3),
                        "cov not close to 1")
        self.assertTrue(np.allclose(mean / num_samples, 0, rtol=1e-3),
                        "mean not close to 0")

        # Checking if LinearTransformation can be printed as string
        whitening.__repr__()

    def test_rotate(self):
        x = np.zeros((100, 100, 3), dtype=np.uint8)
        x[40, 40] = [255, 255, 255]

        with self.assertRaisesRegex(TypeError, r"img should be PIL Image"):
            F.rotate(x, 10)

        img = F.to_pil_image(x)

        result = F.rotate(img, 45)
        self.assertEqual(result.size, (100, 100))
        r, c, ch = np.where(result)
        self.assertTrue(all(x in r for x in [49, 50]))
        self.assertTrue(all(x in c for x in [36]))
        self.assertTrue(all(x in ch for x in [0, 1, 2]))

        result = F.rotate(img, 45, expand=True)
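        # expand=True grows the canvas to fit the whole rotated image:
        # a 100x100 square rotated by 45 degrees needs ceil(100 * sqrt(2)) = 142.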
        self.assertEqual(result.size, (142, 142))
        r, c, ch = np.where(result)
        self.assertTrue(all(x in r for x in [70, 71]))
        self.assertTrue(all(x in c for x in [57]))
        self.assertTrue(all(x in ch for x in [0, 1, 2]))

        result = F.rotate(img, 45, center=(40, 40))
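        # Rotating about (40, 40), the lit pixel itself, leaves it in place.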
        self.assertEqual(result.size, (100, 100))
        r, c, ch = np.where(result)
        self.assertTrue(all(x in r for x in [40]))
        self.assertTrue(all(x in c for x in [40]))
        self.assertTrue(all(x in ch for x in [0, 1, 2]))

        result_a = F.rotate(img, 90)
        result_b = F.rotate(img, -270)
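        # A 90 degree rotation and a -270 degree rotation are the same map,
        # so the outputs must agree pixel for pixel.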

        self.assertTrue(np.all(np.array(result_a) == np.array(result_b)))

    def test_rotate_fill(self):
        img = F.to_pil_image(np.ones((100, 100, 3), dtype=np.uint8) * 255, "RGB")

        modes = ("L", "RGB", "F")
        nums_bands = [len(mode) for mode in modes]
        fill = 127
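        # fill must supply one value per band of the image mode; a tuple of
        # the wrong length should be rejected with a ValueError.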

        for mode, num_bands in zip(modes, nums_bands):
            img_conv = img.convert(mode)
            img_rot = F.rotate(img_conv, 45.0, fill=fill)
            pixel = img_rot.getpixel((0, 0))

            if not isinstance(pixel, tuple):
                pixel = (pixel,)
            self.assertTupleEqual(pixel, tuple([fill] * num_bands))

            for wrong_num_bands in set(nums_bands) - {num_bands}:
                with self.assertRaises(ValueError):
                    F.rotate(img_conv, 45.0, fill=tuple([fill] * wrong_num_bands))

    def test_affine(self):
        input_img = np.zeros((40, 40, 3), dtype=np.uint8)
        cnt = [20, 20]
        for pt in [(16, 16), (20, 16), (20, 20)]:
            for i in range(-5, 5):
                for j in range(-5, 5):
                    input_img[pt[0] + i, pt[1] + j, :] = [255, 155, 55]

        with self.assertRaises(TypeError, msg="Argument translate should be a sequence"):
            F.affine(input_img, 10, translate=0, scale=1, shear=1)

        pil_img = F.to_pil_image(input_img)

        def _to_3x3_inv(inv_result_matrix):
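            # F._get_inverse_affine_matrix returns the inverse affine map as a
            # flattened 2x3 matrix; embed it into a 3x3 homogeneous matrix and
            # invert it to recover the forward transformation under test.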
            result_matrix = np.zeros((3, 3))
            result_matrix[:2, :] = np.array(inv_result_matrix).reshape((2, 3))
            result_matrix[2, 2] = 1
            return np.linalg.inv(result_matrix)

        def _test_transformation(a, t, s, sh):
            a_rad = math.radians(a)
            s_rad = [math.radians(sh_) for sh_ in sh]
            cx, cy = cnt
            tx, ty = t
            sx, sy = s_rad
            rot = a_rad

            # 1) Check transformation matrix:
            C = np.array([[1, 0, cx],
                          [0, 1, cy],
                          [0, 0, 1]])
            T = np.array([[1, 0, tx],
                          [0, 1, ty],
                          [0, 0, 1]])
            Cinv = np.linalg.inv(C)

            RS = np.array(
                [[s * math.cos(rot), -s * math.sin(rot), 0],
                 [s * math.sin(rot), s * math.cos(rot), 0],
                 [0, 0, 1]])

            SHx = np.array([[1, -math.tan(sx), 0],
                            [0, 1, 0],
                            [0, 0, 1]])

            SHy = np.array([[1, 0, 0],
                            [-math.tan(sy), 1, 0],
                            [0, 0, 1]])

            RSS = np.matmul(RS, np.matmul(SHy, SHx))

            true_matrix = np.matmul(T, np.matmul(C, np.matmul(RSS, Cinv)))
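            # i.e. shear, then rotate-and-scale, conjugated by C so that
            # everything happens about the center cnt, then translate by T:
            #   M = T @ C @ (RS @ SHy @ SHx) @ C^{-1}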

            result_matrix = _to_3x3_inv(F._get_inverse_affine_matrix(center=cnt, angle=a,
                                                                     translate=t, scale=s, shear=sh))
            self.assertLess(np.sum(np.abs(true_matrix - result_matrix)), 1e-10)
            # 2) Perform inverse mapping:
            true_result = np.zeros((40, 40, 3), dtype=np.uint8)
            inv_true_matrix = np.linalg.inv(true_matrix)
            for y in range(true_result.shape[0]):
                for x in range(true_result.shape[1]):
                    # Same as for PIL:
                    # https://github.com/python-pillow/Pillow/blob/71f8ec6a0cfc1008076a023c0756542539d057ab/
                    # src/libImaging/Geometry.c#L1060
                    input_pt = np.array([x + 0.5, y + 0.5, 1.0])
                    res = np.floor(np.dot(inv_true_matrix, input_pt)).astype(int)
                    _x, _y = res[:2]
                    if 0 <= _x < input_img.shape[1] and 0 <= _y < input_img.shape[0]:
                        true_result[y, x, :] = input_img[_y, _x, :]

            result = F.affine(pil_img, angle=a, translate=t, scale=s, shear=sh)
            self.assertEqual(result.size, pil_img.size)
            # Compute number of different pixels:
            np_result = np.array(result)
            n_diff_pixels = np.sum(np_result != true_result) / 3
            # Tolerate fewer than 3 differing pixels (rounding at shape edges)
            self.assertLess(n_diff_pixels, 3,
                            "a={}, t={}, s={}, sh={}\n".format(a, t, s, sh) +
                            "n diff pixels={}\n".format(np.sum(np.array(result)[:, :, 0] != true_result[:, :, 0])))

        # Test rotation
        a = 45
        _test_transformation(a=a, t=(0, 0), s=1.0, sh=(0.0, 0.0))

        # Test translation
        t = [10, 15]
        _test_transformation(a=0.0, t=t, s=1.0, sh=(0.0, 0.0))

        # Test scale
        s = 1.2
        _test_transformation(a=0.0, t=(0.0, 0.0), s=s, sh=(0.0, 0.0))

        # Test shear
        sh = [45.0, 25.0]
        _test_transformation(a=0.0, t=(0.0, 0.0), s=1.0, sh=sh)

        # Test rotation, scale, translation, shear
        for a in range(-90, 90, 25):
            for t1 in range(-10, 10, 5):
                for s in [0.75, 0.98, 1.0, 1.2, 1.4]:
                    for sh in range(-15, 15, 5):
                        _test_transformation(a=a, t=(t1, t1), s=s, sh=(sh, sh))

    def test_random_rotation(self):

        # Each invalid degrees argument must raise on its own; inside a single
        # assertRaises block only the first raising statement would ever run.
        for invalid_degrees in [-0.7, [-0.7], [-0.7, 0, 0.7]]:
            with self.assertRaises(ValueError):
                transforms.RandomRotation(invalid_degrees)

        t = transforms.RandomRotation(10)
        angle = t.get_params(t.degrees)
        self.assertTrue(-10 < angle < 10)

        t = transforms.RandomRotation((-10, 10))
        angle = t.get_params(t.degrees)
        self.assertTrue(-10 < angle < 10)

        # Checking if RandomRotation can be printed as string
        t.__repr__()

    def test_random_affine(self):

        # Each invalid configuration must raise on its own; inside a single
        # assertRaises block only the first raising statement would ever run.
        # Out-of-range values raise ValueError; a non-sequence where a
        # sequence is expected may surface as a TypeError instead.
        for invalid_kwargs in [
            dict(degrees=-0.7),
            dict(degrees=[-0.7]),
            dict(degrees=[-0.7, 0, 0.7]),
            dict(degrees=[-90, 90], translate=2.0),
            dict(degrees=[-90, 90], translate=[-1.0, 1.0]),
            dict(degrees=[-90, 90], translate=[-1.0, 0.0, 1.0]),
            dict(degrees=[-90, 90], translate=[0.2, 0.2], scale=[0.0]),
            dict(degrees=[-90, 90], translate=[0.2, 0.2], scale=[-1.0, 1.0]),
            dict(degrees=[-90, 90], translate=[0.2, 0.2], scale=[0.5, -0.5]),
            dict(degrees=[-90, 90], translate=[0.2, 0.2], scale=[0.5, 3.0, -0.5]),
            dict(degrees=[-90, 90], translate=[0.2, 0.2], scale=[0.5, 0.5], shear=-7),
            dict(degrees=[-90, 90], translate=[0.2, 0.2], scale=[0.5, 0.5], shear=[-10]),
            dict(degrees=[-90, 90], translate=[0.2, 0.2], scale=[0.5, 0.5], shear=[-10, 0, 10]),
            dict(degrees=[-90, 90], translate=[0.2, 0.2], scale=[0.5, 0.5], shear=[-10, 0, 10, 0, 10]),
        ]:
            with self.assertRaises((ValueError, TypeError)):
                transforms.RandomAffine(**invalid_kwargs)

        x = np.zeros((100, 100, 3), dtype=np.uint8)
        img = F.to_pil_image(x)

        t = transforms.RandomAffine(10, translate=[0.5, 0.3], scale=[0.7, 1.3], shear=[-10, 10, 20, 40])
        for _ in range(100):
            angle, translations, scale, shear = t.get_params(t.degrees, t.translate, t.scale, t.shear,
                                                             img_size=img.size)
            self.assertTrue(-10 <= angle <= 10)
            self.assertTrue(-img.size[0] * 0.5 <= translations[0] <= img.size[0] * 0.5,
                            "{} vs {}".format(translations[0], img.size[0] * 0.5))
            # translate=[0.5, 0.3], so the vertical offset is bounded by 0.3 * height
            self.assertTrue(-img.size[1] * 0.3 <= translations[1] <= img.size[1] * 0.3,
                            "{} vs {}".format(translations[1], img.size[1] * 0.3))
            self.assertTrue(0.7 <= scale <= 1.3)
            self.assertTrue(-10 <= shear[0] <= 10)
            # shear=[-10, 10, 20, 40] draws the second shear angle from [20, 40]
            self.assertTrue(20 <= shear[1] <= 40)

        # Checking if RandomAffine can be printed as string
        t.__repr__()

        t = transforms.RandomAffine(10, resample=Image.BILINEAR)
        self.assertIn("Image.BILINEAR", t.__repr__())

    def test_to_grayscale(self):
        """Unit tests for grayscale transform"""

        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
        x_pil_2 = x_pil.convert('L')
        gray_np = np.array(x_pil_2)

        # Test Set: Grayscale an image with desired number of output channels
        # Case 1: RGB -> 1 channel grayscale
        trans1 = transforms.Grayscale(num_output_channels=1)
        gray_pil_1 = trans1(x_pil)
        gray_np_1 = np.array(gray_pil_1)
        self.assertEqual(gray_pil_1.mode, 'L', 'mode should be L')
        self.assertEqual(gray_np_1.shape, tuple(x_shape[0:2]), 'should be 1 channel')
        np.testing.assert_equal(gray_np, gray_np_1)

        # Case 2: RGB -> 3 channel grayscale
        trans2 = transforms.Grayscale(num_output_channels=3)
        gray_pil_2 = trans2(x_pil)
        gray_np_2 = np.array(gray_pil_2)
        self.assertEqual(gray_pil_2.mode, 'RGB', 'mode should be RGB')
        self.assertEqual(gray_np_2.shape, tuple(x_shape), 'should be 3 channel')
        np.testing.assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1])
        np.testing.assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2])
        np.testing.assert_equal(gray_np, gray_np_2[:, :, 0])

        # Case 3: 1 channel grayscale -> 1 channel grayscale
        trans3 = transforms.Grayscale(num_output_channels=1)
        gray_pil_3 = trans3(x_pil_2)
        gray_np_3 = np.array(gray_pil_3)
        self.assertEqual(gray_pil_3.mode, 'L', 'mode should be L')
        self.assertEqual(gray_np_3.shape, tuple(x_shape[0:2]), 'should be 1 channel')
        np.testing.assert_equal(gray_np, gray_np_3)

        # Case 4: 1 channel grayscale -> 3 channel grayscale
        trans4 = transforms.Grayscale(num_output_channels=3)
        gray_pil_4 = trans4(x_pil_2)
        gray_np_4 = np.array(gray_pil_4)
        self.assertEqual(gray_pil_4.mode, 'RGB', 'mode should be RGB')
        self.assertEqual(gray_np_4.shape, tuple(x_shape), 'should be 3 channel')
        np.testing.assert_equal(gray_np_4[:, :, 0], gray_np_4[:, :, 1])
        np.testing.assert_equal(gray_np_4[:, :, 1], gray_np_4[:, :, 2])
        np.testing.assert_equal(gray_np, gray_np_4[:, :, 0])

        # Checking if Grayscale can be printed as string
        trans4.__repr__()

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_grayscale(self):
        """Unit tests for random grayscale transform"""

        # Test Set 1: RGB -> 3 channel grayscale
        random_state = random.getstate()
        random.seed(42)
        x_shape = [2, 2, 3]
        x_np = np.random.randint(0, 256, x_shape, np.uint8)
        x_pil = Image.fromarray(x_np, mode='RGB')
        x_pil_2 = x_pil.convert('L')
        gray_np = np.array(x_pil_2)

        num_samples = 250
        num_gray = 0
        for _ in range(num_samples):
            gray_pil_2 = transforms.RandomGrayscale(p=0.5)(x_pil)
            gray_np_2 = np.array(gray_pil_2)
            if np.array_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1]) and \
                    np.array_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2]) and \
                    np.array_equal(gray_np, gray_np_2[:, :, 0]):
                num_gray = num_gray + 1

        p_value = stats.binom_test(num_gray, num_samples, p=0.5)
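        # num_gray ~ Binomial(num_samples, 0.5) if the transform flips a fair
        # coin, so a two-sided binomial test should only yield a tiny p-value
        # when that sampling is broken.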
        random.setstate(random_state)
        self.assertGreater(p_value, 0.0001)

        # Test Set 2: grayscale -> 1 channel grayscale
        random_state = random.getstate()
        random.seed(42)
        x_shape = [2, 2, 3]
        x_np = np.random.randint(0, 256, x_shape, np.uint8)
        x_pil = Image.fromarray(x_np, mode='RGB')
        x_pil_2 = x_pil.convert('L')
        gray_np = np.array(x_pil_2)

        num_samples = 250
        num_gray = 0
        for _ in range(num_samples):
            gray_pil_3 = transforms.RandomGrayscale(p=0.5)(x_pil_2)
            gray_np_3 = np.array(gray_pil_3)
            if np.array_equal(gray_np, gray_np_3):
                num_gray = num_gray + 1

        p_value = stats.binom_test(num_gray, num_samples, p=1.0)  # Note: grayscale is always unchanged
        random.setstate(random_state)
        self.assertGreater(p_value, 0.0001)

        # Test set 3: Explicit tests
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
        x_pil_2 = x_pil.convert('L')
        gray_np = np.array(x_pil_2)

        # Case 3a: RGB -> 3 channel grayscale (grayscaled)
        trans2 = transforms.RandomGrayscale(p=1.0)
        gray_pil_2 = trans2(x_pil)
        gray_np_2 = np.array(gray_pil_2)
        self.assertEqual(gray_pil_2.mode, 'RGB', 'mode should be RGB')
        self.assertEqual(gray_np_2.shape, tuple(x_shape), 'should be 3 channel')
        np.testing.assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1])
        np.testing.assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2])
        np.testing.assert_equal(gray_np, gray_np_2[:, :, 0])

        # Case 3b: RGB -> 3 channel grayscale (unchanged)
        trans2 = transforms.RandomGrayscale(p=0.0)
        gray_pil_2 = trans2(x_pil)
        gray_np_2 = np.array(gray_pil_2)
        self.assertEqual(gray_pil_2.mode, 'RGB', 'mode should be RGB')
        self.assertEqual(gray_np_2.shape, tuple(x_shape), 'should be 3 channel')
        np.testing.assert_equal(x_np, gray_np_2)

        # Case 3c: 1 channel grayscale -> 1 channel grayscale (grayscaled)
        trans3 = transforms.RandomGrayscale(p=1.0)
        gray_pil_3 = trans3(x_pil_2)
        gray_np_3 = np.array(gray_pil_3)
        self.assertEqual(gray_pil_3.mode, 'L', 'mode should be L')
        self.assertEqual(gray_np_3.shape, tuple(x_shape[0:2]), 'should be 1 channel')
        np.testing.assert_equal(gray_np, gray_np_3)

        # Case 3d: 1 channel grayscale -> 1 channel grayscale (unchanged)
        trans3 = transforms.RandomGrayscale(p=0.0)
        gray_pil_3 = trans3(x_pil_2)
        gray_np_3 = np.array(gray_pil_3)
        self.assertEqual(gray_pil_3.mode, 'L', 'mode should be L')
        self.assertEqual(gray_np_3.shape, tuple(x_shape[0:2]), 'should be 1 channel')
        np.testing.assert_equal(gray_np, gray_np_3)

        # Checking if RandomGrayscale can be printed as string
        trans3.__repr__()

    def test_random_erasing(self):
        """Unit tests for random erasing transform"""
        for is_scripted in [False, True]:
            torch.manual_seed(12)
            img = torch.rand(3, 60, 60)

            # Test Set 0: invalid value
            random_erasing = transforms.RandomErasing(value=(0.1, 0.2, 0.3, 0.4), p=1.0)
            with self.assertRaises(ValueError,
                                   msg="If value is a sequence, it should have either a single value or 3"):
                img_re = random_erasing(img)

            # Test Set 1: Erasing with int value
            random_erasing = transforms.RandomErasing(value=0.2)
            if is_scripted:
                random_erasing = torch.jit.script(random_erasing)

            i, j, h, w, v = transforms.RandomErasing.get_params(
                img, scale=random_erasing.scale, ratio=random_erasing.ratio, value=[random_erasing.value, ]
            )
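            # (i, j) is the top-left corner of the sampled erase box, (h, w)
            # its size, and v the fill value applied by F.erase below.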
            img_output = F.erase(img, i, j, h, w, v)
            self.assertEqual(img_output.size(0), 3)

            # Test Set 2: Check if the unerased region is preserved
            true_output = img.clone()
            true_output[:, i:i + h, j:j + w] = random_erasing.value
            self.assertTrue(torch.equal(true_output, img_output))

            # Test Set 3: Erasing with random value
            random_erasing = transforms.RandomErasing(value="random")
            if is_scripted:
                random_erasing = torch.jit.script(random_erasing)
            img_re = random_erasing(img)

            self.assertEqual(img_re.size(0), 3)

            # Test Set 4: Erasing with tuple value
            random_erasing = transforms.RandomErasing(value=(0.2, 0.2, 0.2))
            if is_scripted:
                random_erasing = torch.jit.script(random_erasing)
            img_re = random_erasing(img)
            self.assertEqual(img_re.size(0), 3)
            true_output = img.clone()
            true_output[:, i:i + h, j:j + w] = torch.tensor(random_erasing.value)[:, None, None]
            self.assertTrue(torch.equal(true_output, img_output))

            # Test Set 5: Testing the inplace behaviour
            random_erasing = transforms.RandomErasing(value=(0.2,), inplace=True)
            if is_scripted:
                random_erasing = torch.jit.script(random_erasing)

            img_re = random_erasing(img)
            self.assertTrue(torch.equal(img_re, img))

            # Test Set 6: Checking when no erased region is selected
            img = torch.rand([3, 300, 1])
            random_erasing = transforms.RandomErasing(ratio=(0.1, 0.2), value="random")
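            # With a 1-pixel-wide image and aspect ratios restricted to
            # (0.1, 0.2), no valid erase box fits, so the transform should
            # return the input unchanged.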
            if is_scripted:
                random_erasing = torch.jit.script(random_erasing)
            img_re = random_erasing(img)
            self.assertTrue(torch.equal(img_re, img))


if __name__ == '__main__':
    unittest.main()