import torch
import torchvision.transforms as transforms
import torchvision.transforms.functional as F
import unittest
import random
import numpy as np
from PIL import Image
try:
    import accimage
except ImportError:
    accimage = None

try:
    from scipy import stats
except ImportError:
    stats = None

GRACE_HOPPER = 'assets/grace_hopper_517x606.jpg'


class Tester(unittest.TestCase):

    def test_crop(self):
        height = random.randint(10, 32) * 2
        width = random.randint(10, 32) * 2
        oheight = random.randint(5, (height - 2) // 2) * 2
        owidth = random.randint(5, (width - 2) // 2) * 2
        img = torch.ones(3, height, width)
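        # Zero out the exact center region of a ones image; a CenterCrop of
        # the matching size should then return an all-zero tensor.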
        oh1 = (height - oheight) // 2
        ow1 = (width - owidth) // 2
        imgnarrow = img[:, oh1:oh1 + oheight, ow1:ow1 + owidth]
        imgnarrow.fill_(0)
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.CenterCrop((oheight, owidth)),
            transforms.ToTensor(),
        ])(img)
        assert result.sum() == 0, "height: " + str(height) + " width: " \
                                  + str(width) + " oheight: " + str(oheight) + " owidth: " + str(owidth)
        oheight += 1
        owidth += 1
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.CenterCrop((oheight, owidth)),
            transforms.ToTensor(),
        ])(img)
        sum1 = result.sum()
        assert sum1 > 1, "height: " + str(height) + " width: " \
                         + str(width) + " oheight: " + str(oheight) + " owidth: " + str(owidth)
        oheight += 1
        owidth += 1
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.CenterCrop((oheight, owidth)),
            transforms.ToTensor(),
        ])(img)
        sum2 = result.sum()
        assert sum2 > 0, "height: " + str(height) + " width: " \
                         + str(width) + " oheight: " + str(oheight) + " owidth: " + str(owidth)
        assert sum2 > sum1, "height: " + str(height) + " width: " \
                            + str(width) + " oheight: " + str(oheight) + " owidth: " + str(owidth)

    def test_five_crop(self):
        to_pil_image = transforms.ToPILImage()
        h = random.randint(5, 25)
        w = random.randint(5, 25)
        for single_dim in [True, False]:
            crop_h = random.randint(1, h)
            crop_w = random.randint(1, w)
            if single_dim:
                crop_h = min(crop_h, crop_w)
                crop_w = crop_h
                transform = transforms.FiveCrop(crop_h)
            else:
                transform = transforms.FiveCrop((crop_h, crop_w))

            img = torch.FloatTensor(3, h, w).uniform_()
            results = transform(to_pil_image(img))
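            # FiveCrop returns (tl, tr, bl, br, center), each crop sized
            # (crop_w, crop_h) in PIL's (width, height) convention.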

            assert len(results) == 5
            for crop in results:
                assert crop.size == (crop_w, crop_h)

            to_pil_image = transforms.ToPILImage()
            tl = to_pil_image(img[:, 0:crop_h, 0:crop_w])
            tr = to_pil_image(img[:, 0:crop_h, w - crop_w:])
            bl = to_pil_image(img[:, h - crop_h:, 0:crop_w])
            br = to_pil_image(img[:, h - crop_h:, w - crop_w:])
            center = transforms.CenterCrop((crop_h, crop_w))(to_pil_image(img))
            expected_output = (tl, tr, bl, br, center)
            assert results == expected_output

    def test_ten_crop(self):
        to_pil_image = transforms.ToPILImage()
        h = random.randint(5, 25)
        w = random.randint(5, 25)
        for should_vflip in [True, False]:
            for single_dim in [True, False]:
                crop_h = random.randint(1, h)
                crop_w = random.randint(1, w)
                if single_dim:
                    crop_h = min(crop_h, crop_w)
                    crop_w = crop_h
                    transform = transforms.TenCrop(crop_h,
                                                   vertical_flip=should_vflip)
                    five_crop = transforms.FiveCrop(crop_h)
                else:
                    transform = transforms.TenCrop((crop_h, crop_w),
                                                   vertical_flip=should_vflip)
                    five_crop = transforms.FiveCrop((crop_h, crop_w))

                img = to_pil_image(torch.FloatTensor(3, h, w).uniform_())
                results = transform(img)
                expected_output = five_crop(img)
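                # TenCrop is expected to be the FiveCrop of the image followed
                # by the FiveCrop of its flipped copy, assembled below.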

                # Checking if FiveCrop and TenCrop can be printed as string
                transform.__repr__()
                five_crop.__repr__()

                if should_vflip:
                    vflipped_img = img.transpose(Image.FLIP_TOP_BOTTOM)
                    expected_output += five_crop(vflipped_img)
                else:
                    hflipped_img = img.transpose(Image.FLIP_LEFT_RIGHT)
                    expected_output += five_crop(hflipped_img)

                assert len(results) == 10
                assert expected_output == results

    def test_resize(self):
        height = random.randint(24, 32) * 2
        width = random.randint(24, 32) * 2
        osize = random.randint(5, 12) * 2
        img = torch.ones(3, height, width)
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize(osize),
            transforms.ToTensor(),
        ])(img)
        assert osize in result.size()
        if height < width:
            assert result.size(1) <= result.size(2)
        elif width < height:
            assert result.size(1) >= result.size(2)

        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize([osize, osize]),
            transforms.ToTensor(),
        ])(img)
        assert osize in result.size()
        assert result.size(1) == osize
        assert result.size(2) == osize

        oheight = random.randint(5, 12) * 2
        owidth = random.randint(5, 12) * 2
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize((oheight, owidth)),
            transforms.ToTensor(),
        ])(img)
        assert result.size(1) == oheight
        assert result.size(2) == owidth

        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize([oheight, owidth]),
            transforms.ToTensor(),
        ])(img)
        assert result.size(1) == oheight
        assert result.size(2) == owidth

    def test_random_crop(self):
        height = random.randint(10, 32) * 2
        width = random.randint(10, 32) * 2
        oheight = random.randint(5, (height - 2) // 2) * 2
        owidth = random.randint(5, (width - 2) // 2) * 2
        img = torch.ones(3, height, width)
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomCrop((oheight, owidth)),
            transforms.ToTensor(),
        ])(img)
        assert result.size(1) == oheight
        assert result.size(2) == owidth

        padding = random.randint(1, 20)
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomCrop((oheight, owidth), padding=padding),
            transforms.ToTensor(),
        ])(img)
        assert result.size(1) == oheight
        assert result.size(2) == owidth

        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomCrop((height, width)),
            transforms.ToTensor()
        ])(img)
        assert result.size(1) == height
        assert result.size(2) == width
        assert np.allclose(img.numpy(), result.numpy())

    def test_pad(self):
        height = random.randint(10, 32) * 2
        width = random.randint(10, 32) * 2
        img = torch.ones(3, height, width)
        padding = random.randint(1, 20)
        result = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Pad(padding),
            transforms.ToTensor(),
        ])(img)
        assert result.size(1) == height + 2 * padding
        assert result.size(2) == width + 2 * padding

    def test_pad_with_tuple_of_pad_values(self):
        height = random.randint(10, 32) * 2
        width = random.randint(10, 32) * 2
        img = transforms.ToPILImage()(torch.ones(3, height, width))

        padding = tuple([random.randint(1, 20) for _ in range(2)])
        output = transforms.Pad(padding)(img)
        assert output.size == (width + padding[0] * 2, height + padding[1] * 2)

        padding = tuple([random.randint(1, 20) for _ in range(4)])
        output = transforms.Pad(padding)(img)
        assert output.size[0] == width + padding[0] + padding[2]
        assert output.size[1] == height + padding[1] + padding[3]

        # Checking if Padding can be printed as string
        transforms.Pad(padding).__repr__()

    def test_pad_raises_with_invalid_pad_sequence_len(self):
        with self.assertRaises(ValueError):
            transforms.Pad(())

        with self.assertRaises(ValueError):
            transforms.Pad((1, 2, 3))

        with self.assertRaises(ValueError):
            transforms.Pad((1, 2, 3, 4, 5))

    def test_lambda(self):
        trans = transforms.Lambda(lambda x: x.add(10))
        x = torch.randn(10)
        y = trans(x)
        assert (y.equal(torch.add(x, 10)))

        trans = transforms.Lambda(lambda x: x.add_(10))
        x = torch.randn(10)
        y = trans(x)
        assert (y.equal(x))

        # Checking if Lambda can be printed as string
        trans.__repr__()

    def test_to_tensor(self):
        test_channels = [1, 3, 4]
        height, width = 4, 4
        trans = transforms.ToTensor()
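        # ToTensor converts HWC images (PIL or uint8 ndarray, values 0-255)
        # into CHW float tensors in [0.0, 1.0]; float ndarrays are only
        # transposed, not rescaled, as the cases below check.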

        for channels in test_channels:
            input_data = torch.ByteTensor(channels, height, width).random_(0, 255).float().div_(255)
            img = transforms.ToPILImage()(input_data)
            output = trans(img)
            assert np.allclose(input_data.numpy(), output.numpy())

            ndarray = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
            output = trans(ndarray)
            expected_output = ndarray.transpose((2, 0, 1)) / 255.0
            assert np.allclose(output.numpy(), expected_output)

            ndarray = np.random.rand(height, width, channels).astype(np.float32)
            output = trans(ndarray)
            expected_output = ndarray.transpose((2, 0, 1))
            assert np.allclose(output.numpy(), expected_output)

    @unittest.skipIf(accimage is None, 'accimage not available')
    def test_accimage_to_tensor(self):
        trans = transforms.ToTensor()

        expected_output = trans(Image.open(GRACE_HOPPER).convert('RGB'))
        output = trans(accimage.Image(GRACE_HOPPER))

        self.assertEqual(expected_output.size(), output.size())
        assert np.allclose(output.numpy(), expected_output.numpy())

    @unittest.skipIf(accimage is None, 'accimage not available')
    def test_accimage_resize(self):
        trans = transforms.Compose([
            transforms.Resize(256, interpolation=Image.LINEAR),
            transforms.ToTensor(),
        ])

        # Checking if Compose, Resize and ToTensor can be printed as string
        trans.__repr__()

        expected_output = trans(Image.open(GRACE_HOPPER).convert('RGB'))
        output = trans(accimage.Image(GRACE_HOPPER))

        self.assertEqual(expected_output.size(), output.size())
        self.assertLess(np.abs((expected_output - output).mean()), 1e-3)
        self.assertLess((expected_output - output).var(), 1e-5)
        # note the high absolute tolerance
        assert np.allclose(output.numpy(), expected_output.numpy(), atol=5e-2)

    @unittest.skipIf(accimage is None, 'accimage not available')
    def test_accimage_crop(self):
        trans = transforms.Compose([
            transforms.CenterCrop(256),
            transforms.ToTensor(),
        ])

        # Checking if Compose, CenterCrop and ToTensor can be printed as string
        trans.__repr__()

        expected_output = trans(Image.open(GRACE_HOPPER).convert('RGB'))
        output = trans(accimage.Image(GRACE_HOPPER))

        self.assertEqual(expected_output.size(), output.size())
        assert np.allclose(output.numpy(), expected_output.numpy())

    def test_1_channel_tensor_to_pil_image(self):
        to_tensor = transforms.ToTensor()

        img_data_float = torch.Tensor(1, 4, 4).uniform_()
        img_data_byte = torch.ByteTensor(1, 4, 4).random_(0, 255)
        img_data_short = torch.ShortTensor(1, 4, 4).random_()
        img_data_int = torch.IntTensor(1, 4, 4).random_()

        inputs = [img_data_float, img_data_byte, img_data_short, img_data_int]
        expected_outputs = [img_data_float.mul(255).int().float().div(255).numpy(),
                            img_data_byte.float().div(255.0).numpy(),
                            img_data_short.numpy(),
                            img_data_int.numpy()]
        expected_modes = ['L', 'L', 'I;16', 'I']

        for img_data, expected_output, mode in zip(inputs, expected_outputs, expected_modes):
            for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]:
                img = transform(img_data)
                assert img.mode == mode
                assert np.allclose(expected_output, to_tensor(img).numpy())

    def test_1_channel_ndarray_to_pil_image(self):
        img_data_float = torch.Tensor(4, 4, 1).uniform_().numpy()
        img_data_byte = torch.ByteTensor(4, 4, 1).random_(0, 255).numpy()
        img_data_short = torch.ShortTensor(4, 4, 1).random_().numpy()
        img_data_int = torch.IntTensor(4, 4, 1).random_().numpy()

        inputs = [img_data_float, img_data_byte, img_data_short, img_data_int]
        expected_modes = ['F', 'L', 'I;16', 'I']
        for img_data, mode in zip(inputs, expected_modes):
            for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]:
                img = transform(img_data)
                assert img.mode == mode
                assert np.allclose(img_data[:, :, 0], img)

    def test_3_channel_tensor_to_pil_image(self):
        def verify_img_data(img_data, expected_output, mode):
            if mode is None:
                img = transforms.ToPILImage()(img_data)
                assert img.mode == 'RGB'  # default should assume RGB
            else:
                img = transforms.ToPILImage(mode=mode)(img_data)
                assert img.mode == mode
            split = img.split()
            for i in range(3):
                assert np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy())

        img_data = torch.Tensor(3, 4, 4).uniform_()
        expected_output = img_data.mul(255).int().float().div(255)
        for mode in [None, 'RGB', 'HSV', 'YCbCr']:
            verify_img_data(img_data, expected_output, mode=mode)

        with self.assertRaises(ValueError):
            # should raise if we try a mode for 4 or 1 channel images
            transforms.ToPILImage(mode='RGBA')(img_data)
        with self.assertRaises(ValueError):
            transforms.ToPILImage(mode='P')(img_data)

    def test_3_channel_ndarray_to_pil_image(self):
        def verify_img_data(img_data, mode):
            if mode is None:
                img = transforms.ToPILImage()(img_data)
                assert img.mode == 'RGB'  # default should assume RGB
            else:
                img = transforms.ToPILImage(mode=mode)(img_data)
                assert img.mode == mode
            split = img.split()
            for i in range(3):
                assert np.allclose(img_data[:, :, i], split[i])

        img_data = torch.ByteTensor(4, 4, 3).random_(0, 255).numpy()
        for mode in [None, 'RGB', 'HSV', 'YCbCr']:
            verify_img_data(img_data, mode)

        # Checking if ToPILImage can be printed as string
        transforms.ToPILImage().__repr__()

        with self.assertRaises(ValueError):
            # should raise if we try a mode for 4 or 1 channel images
            transforms.ToPILImage(mode='RGBA')(img_data)
        with self.assertRaises(ValueError):
            transforms.ToPILImage(mode='P')(img_data)

    def test_4_channel_tensor_to_pil_image(self):
        def verify_img_data(img_data, expected_output, mode):
            if mode is None:
                img = transforms.ToPILImage()(img_data)
                assert img.mode == 'RGBA'  # default should assume RGBA
            else:
                img = transforms.ToPILImage(mode=mode)(img_data)
                assert img.mode == mode

            split = img.split()
            for i in range(4):
                assert np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy())

        img_data = torch.Tensor(4, 4, 4).uniform_()
        expected_output = img_data.mul(255).int().float().div(255)
        for mode in [None, 'RGBA', 'CMYK']:
            verify_img_data(img_data, expected_output, mode)

        with self.assertRaises(ValueError):
            # should raise if we try a mode for 3 or 1 channel images
            transforms.ToPILImage(mode='RGB')(img_data)
        with self.assertRaises(ValueError):
            transforms.ToPILImage(mode='P')(img_data)

    def test_4_channel_ndarray_to_pil_image(self):
        def verify_img_data(img_data, mode):
            if mode is None:
                img = transforms.ToPILImage()(img_data)
                assert img.mode == 'RGBA'  # default should assume RGBA
            else:
                img = transforms.ToPILImage(mode=mode)(img_data)
                assert img.mode == mode
            split = img.split()
            for i in range(4):
                assert np.allclose(img_data[:, :, i], split[i])

        img_data = torch.ByteTensor(4, 4, 4).random_(0, 255).numpy()
        for mode in [None, 'RGBA', 'CMYK']:
            verify_img_data(img_data, mode)

        with self.assertRaises(ValueError):
            # should raise if we try a mode for 3 or 1 channel images
            transforms.ToPILImage(mode='RGB')(img_data)
        with self.assertRaises(ValueError):
            transforms.ToPILImage(mode='P')(img_data)

    def test_ndarray_bad_types_to_pil_image(self):
        trans = transforms.ToPILImage()
        with self.assertRaises(TypeError):
            trans(np.ones([4, 4, 1], np.int64))
        with self.assertRaises(TypeError):
            trans(np.ones([4, 4, 1], np.uint16))
        with self.assertRaises(TypeError):
            trans(np.ones([4, 4, 1], np.uint32))
        with self.assertRaises(TypeError):
            trans(np.ones([4, 4, 1], np.float64))

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_vertical_flip(self):
        random_state = random.getstate()
        random.seed(42)
        img = transforms.ToPILImage()(torch.rand(3, 10, 10))
        vimg = img.transpose(Image.FLIP_TOP_BOTTOM)

        num_samples = 250
        num_vertical = 0
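        # With p=0.5 per call, num_vertical ~ Binomial(num_samples, 0.5), so a
        # two-sided binomial test should not reject the null hypothesis.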
        for _ in range(num_samples):
            out = transforms.RandomVerticalFlip()(img)
            if out == vimg:
                num_vertical += 1

        p_value = stats.binom_test(num_vertical, num_samples, p=0.5)
        random.setstate(random_state)
        assert p_value > 0.0001

        # Checking if RandomVerticalFlip can be printed as string
        transforms.RandomVerticalFlip().__repr__()

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_horizontal_flip(self):
        random_state = random.getstate()
        random.seed(42)
        img = transforms.ToPILImage()(torch.rand(3, 10, 10))
        himg = img.transpose(Image.FLIP_LEFT_RIGHT)

        num_samples = 250
        num_horizontal = 0
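        # As above, num_horizontal ~ Binomial(num_samples, 0.5) when each
        # call flips with p=0.5.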
        for _ in range(num_samples):
            out = transforms.RandomHorizontalFlip()(img)
            if out == himg:
                num_horizontal += 1

        p_value = stats.binom_test(num_horizontal, num_samples, p=0.5)
        random.setstate(random_state)
        assert p_value > 0.0001

        # Checking if RandomHorizontalFlip can be printed as string
        transforms.RandomHorizontalFlip().__repr__()

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_normalize(self):
        def samples_from_standard_normal(tensor):
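            # Kolmogorov-Smirnov test against N(0, 1): a large p-value means
            # the normalized values are consistent with a standard normal.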
            p_value = stats.kstest(list(tensor.view(-1)), 'norm', args=(0, 1)).pvalue
            return p_value > 0.0001

        random_state = random.getstate()
        random.seed(42)
        for channels in [1, 3]:
            img = torch.rand(channels, 10, 10)
            mean = [img[c].mean() for c in range(channels)]
            std = [img[c].std() for c in range(channels)]
            normalized = transforms.Normalize(mean, std)(img)
            assert samples_from_standard_normal(normalized)
        random.setstate(random_state)

        # Checking if Normalize can be printed as string
        transforms.Normalize(mean, std).__repr__()

    def test_adjust_brightness(self):
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
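        # adjust_brightness scales each pixel by the factor and clips to
        # [0, 255]: factor 1 is a no-op, 0.5 halves, 2 doubles (saturating).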

        # test 0
        y_pil = F.adjust_brightness(x_pil, 1)
        y_np = np.array(y_pil)
        assert np.allclose(y_np, x_np)

        # test 1
        y_pil = F.adjust_brightness(x_pil, 0.5)
        y_np = np.array(y_pil)
        y_ans = [0, 2, 6, 27, 67, 113, 18, 4, 117, 45, 127, 0]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        assert np.allclose(y_np, y_ans)

        # test 2
        y_pil = F.adjust_brightness(x_pil, 2)
        y_np = np.array(y_pil)
        y_ans = [0, 10, 26, 108, 255, 255, 74, 16, 255, 180, 255, 2]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        assert np.allclose(y_np, y_ans)

    def test_adjust_contrast(self):
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
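        # adjust_contrast blends the image with a solid gray image at the
        # mean luminance (PIL ImageEnhance.Contrast semantics).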

        # test 0
        y_pil = F.adjust_contrast(x_pil, 1)
        y_np = np.array(y_pil)
        assert np.allclose(y_np, x_np)

        # test 1
        y_pil = F.adjust_contrast(x_pil, 0.5)
        y_np = np.array(y_pil)
        y_ans = [43, 45, 49, 70, 110, 156, 61, 47, 160, 88, 170, 43]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        assert np.allclose(y_np, y_ans)

        # test 2
        y_pil = F.adjust_contrast(x_pil, 2)
        y_np = np.array(y_pil)
        y_ans = [0, 0, 0, 22, 184, 255, 0, 0, 255, 94, 255, 0]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        assert np.allclose(y_np, y_ans)

    def test_adjust_saturation(self):
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
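        # adjust_saturation blends the image with its grayscale version:
        # factor 0 is fully desaturated, 1 is the original image.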

        # test 0
        y_pil = F.adjust_saturation(x_pil, 1)
        y_np = np.array(y_pil)
        assert np.allclose(y_np, x_np)

        # test 1
        y_pil = F.adjust_saturation(x_pil, 0.5)
        y_np = np.array(y_pil)
        y_ans = [2, 4, 8, 87, 128, 173, 39, 25, 138, 133, 215, 88]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        assert np.allclose(y_np, y_ans)

        # test 2
        y_pil = F.adjust_saturation(x_pil, 2)
        y_np = np.array(y_pil)
        y_ans = [0, 6, 22, 0, 149, 255, 32, 0, 255, 4, 255, 0]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        assert np.allclose(y_np, y_ans)

    def test_adjust_hue(self):
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
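        # adjust_hue shifts the hue channel in HSV space; only factors in
        # [-0.5, 0.5] (half the hue circle either way) are accepted.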

        with self.assertRaises(ValueError):
            F.adjust_hue(x_pil, -0.7)
        with self.assertRaises(ValueError):
            F.adjust_hue(x_pil, 1)

        # test 0: almost same as x_data but not exact.
        # probably because hsv <-> rgb floating point ops
        y_pil = F.adjust_hue(x_pil, 0)
        y_np = np.array(y_pil)
        y_ans = [0, 5, 13, 54, 139, 226, 35, 8, 234, 91, 255, 1]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        assert np.allclose(y_np, y_ans)

        # test 1
        y_pil = F.adjust_hue(x_pil, 0.25)
        y_np = np.array(y_pil)
        y_ans = [13, 0, 12, 224, 54, 226, 234, 8, 99, 1, 222, 255]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        assert np.allclose(y_np, y_ans)

        # test 2
        y_pil = F.adjust_hue(x_pil, -0.25)
        y_np = np.array(y_pil)
        y_ans = [0, 13, 2, 54, 226, 58, 8, 234, 152, 255, 43, 1]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        assert np.allclose(y_np, y_ans)

    def test_adjust_gamma(self):
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
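        # Gamma correction maps each channel as out = 255 * (in / 255) ** gamma
        # (with gain 1): gamma < 1 brightens, gamma > 1 darkens.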

        # test 0
        y_pil = F.adjust_gamma(x_pil, 1)
        y_np = np.array(y_pil)
        assert np.allclose(y_np, x_np)

        # test 1
        y_pil = F.adjust_gamma(x_pil, 0.5)
        y_np = np.array(y_pil)
        y_ans = [0, 35, 57, 117, 185, 240, 97, 45, 244, 151, 255, 15]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        assert np.allclose(y_np, y_ans)

        # test 2
        y_pil = F.adjust_gamma(x_pil, 2)
        y_np = np.array(y_pil)
        y_ans = [0, 0, 0, 11, 71, 200, 5, 0, 214, 31, 255, 0]
        y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape)
        assert np.allclose(y_np, y_ans)

    def test_adjusts_L_mode(self):
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_rgb = Image.fromarray(x_np, mode='RGB')

        x_l = x_rgb.convert('L')
        assert F.adjust_brightness(x_l, 2).mode == 'L'
        assert F.adjust_saturation(x_l, 2).mode == 'L'
        assert F.adjust_contrast(x_l, 2).mode == 'L'
        assert F.adjust_hue(x_l, 0.4).mode == 'L'
        assert F.adjust_gamma(x_l, 0.5).mode == 'L'

    def test_color_jitter(self):
        color_jitter = transforms.ColorJitter(2, 2, 2, 0.1)

        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
        x_pil_2 = x_pil.convert('L')

        for i in range(10):
            y_pil = color_jitter(x_pil)
            assert y_pil.mode == x_pil.mode

            y_pil_2 = color_jitter(x_pil_2)
            assert y_pil_2.mode == x_pil_2.mode

        # Checking if ColorJitter can be printed as string
        color_jitter.__repr__()

    def test_linear_transformation(self):
        x = torch.randn(250, 10, 10, 3)
        flat_x = x.view(x.size(0), x.size(1) * x.size(2) * x.size(3))
        # compute principal components
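        # ZCA whitening: with sigma = U diag(s) U^T, the whitening matrix is
        # W = U diag(1 / sqrt(s + eps)) U^T, giving ~identity covariance.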
        sigma = torch.mm(flat_x.t(), flat_x) / flat_x.size(0)
        u, s, _ = np.linalg.svd(sigma.numpy())
        zca_epsilon = 1e-10  # avoid division by 0
        d = torch.Tensor(np.diag(1. / np.sqrt(s + zca_epsilon)))
        u = torch.Tensor(u)
        principal_components = torch.mm(torch.mm(u, d), u.t())
        # initialize whitening matrix
        whitening = transforms.LinearTransformation(principal_components)
        # pass first vector
        xwhite = whitening(x[0].view(10, 10, 3))
        # estimate covariance
        xwhite = xwhite.view(1, 300).numpy()
        cov = np.dot(xwhite, xwhite.T) / x.size(0)
        assert np.allclose(cov, np.identity(1), rtol=1e-3)

        # Checking if LinearTransformation can be printed as string
        whitening.__repr__()

    def test_rotate(self):
        x = np.zeros((100, 100, 3), dtype=np.uint8)
        x[40, 40] = [255, 255, 255]

        with self.assertRaises(TypeError):
            F.rotate(x, 10)

        img = F.to_pil_image(x)

        result = F.rotate(img, 45)
        assert result.size == (100, 100)
        r, c, ch = np.where(result)
        assert all(x in r for x in [49, 50])
        assert all(x in c for x in [36])
        assert all(x in ch for x in [0, 1, 2])

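        # With expand=True the canvas grows to hold the whole rotated image:
        # for a 100x100 input at 45 degrees, 100 * sqrt(2) ~ 141.4 -> 142.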
        result = F.rotate(img, 45, expand=True)
        assert result.size == (142, 142)
        r, c, ch = np.where(result)
        assert all(x in r for x in [70, 71])
        assert all(x in c for x in [57])
        assert all(x in ch for x in [0, 1, 2])

        result = F.rotate(img, 45, center=(40, 40))
        assert result.size == (100, 100)
        r, c, ch = np.where(result)
        assert all(x in r for x in [40])
        assert all(x in c for x in [40])
        assert all(x in ch for x in [0, 1, 2])

        result_a = F.rotate(img, 90)
        result_b = F.rotate(img, -270)

        assert np.all(np.array(result_a) == np.array(result_b))

    def test_random_rotation(self):

        with self.assertRaises(ValueError):
            transforms.RandomRotation(-0.7)
        with self.assertRaises(ValueError):
            transforms.RandomRotation([-0.7])
        with self.assertRaises(ValueError):
            transforms.RandomRotation([-0.7, 0, 0.7])

        t = transforms.RandomRotation(10)
        angle = t.get_params(t.degrees)
        assert angle > -10 and angle < 10

        t = transforms.RandomRotation((-10, 10))
        angle = t.get_params(t.degrees)
        assert angle > -10 and angle < 10

        # Checking if RandomRotation can be printed as string
        t.__repr__()

    def test_to_grayscale(self):
        """Unit tests for grayscale transform"""

        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
        x_pil_2 = x_pil.convert('L')
        gray_np = np.array(x_pil_2)

        # Test Set: Grayscale an image with desired number of output channels
        # Case 1: RGB -> 1 channel grayscale
        trans1 = transforms.Grayscale(num_output_channels=1)
        gray_pil_1 = trans1(x_pil)
        gray_np_1 = np.array(gray_pil_1)
        assert gray_pil_1.mode == 'L', 'mode should be L'
        assert gray_np_1.shape == tuple(x_shape[0:2]), 'should be 1 channel'
        np.testing.assert_equal(gray_np, gray_np_1)

        # Case 2: RGB -> 3 channel grayscale
        trans2 = transforms.Grayscale(num_output_channels=3)
        gray_pil_2 = trans2(x_pil)
        gray_np_2 = np.array(gray_pil_2)
        assert gray_pil_2.mode == 'RGB', 'mode should be RGB'
        assert gray_np_2.shape == tuple(x_shape), 'should be 3 channel'
        np.testing.assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1])
        np.testing.assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2])
        np.testing.assert_equal(gray_np, gray_np_2[:, :, 0])

        # Case 3: 1 channel grayscale -> 1 channel grayscale
        trans3 = transforms.Grayscale(num_output_channels=1)
        gray_pil_3 = trans3(x_pil_2)
        gray_np_3 = np.array(gray_pil_3)
        assert gray_pil_3.mode == 'L', 'mode should be L'
        assert gray_np_3.shape == tuple(x_shape[0:2]), 'should be 1 channel'
        np.testing.assert_equal(gray_np, gray_np_3)

        # Case 4: 1 channel grayscale -> 3 channel grayscale
        trans4 = transforms.Grayscale(num_output_channels=3)
        gray_pil_4 = trans4(x_pil_2)
        gray_np_4 = np.array(gray_pil_4)
        assert gray_pil_4.mode == 'RGB', 'mode should be RGB'
        assert gray_np_4.shape == tuple(x_shape), 'should be 3 channel'
        np.testing.assert_equal(gray_np_4[:, :, 0], gray_np_4[:, :, 1])
        np.testing.assert_equal(gray_np_4[:, :, 1], gray_np_4[:, :, 2])
        np.testing.assert_equal(gray_np, gray_np_4[:, :, 0])

        # Checking if Grayscale can be printed as string
        trans4.__repr__()

    @unittest.skipIf(stats is None, 'scipy.stats not available')
    def test_random_grayscale(self):
        """Unit tests for random grayscale transform"""

        # Test Set 1: RGB -> 3 channel grayscale
        random_state = random.getstate()
        random.seed(42)
        x_shape = [2, 2, 3]
        x_np = np.random.randint(0, 256, x_shape, np.uint8)
        x_pil = Image.fromarray(x_np, mode='RGB')
        x_pil_2 = x_pil.convert('L')
        gray_np = np.array(x_pil_2)

        num_samples = 250
        num_gray = 0
        for _ in range(num_samples):
            gray_pil_2 = transforms.RandomGrayscale(p=0.5)(x_pil)
            gray_np_2 = np.array(gray_pil_2)
            if np.array_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1]) and \
               np.array_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2]) and \
               np.array_equal(gray_np, gray_np_2[:, :, 0]):
                num_gray = num_gray + 1

        p_value = stats.binom_test(num_gray, num_samples, p=0.5)
        random.setstate(random_state)
        assert p_value > 0.0001

        # Test Set 2: grayscale -> 1 channel grayscale
        random_state = random.getstate()
        random.seed(42)
        x_shape = [2, 2, 3]
        x_np = np.random.randint(0, 256, x_shape, np.uint8)
        x_pil = Image.fromarray(x_np, mode='RGB')
        x_pil_2 = x_pil.convert('L')
        gray_np = np.array(x_pil_2)

        num_samples = 250
        num_gray = 0
        for _ in range(num_samples):
            gray_pil_3 = transforms.RandomGrayscale(p=0.5)(x_pil_2)
            gray_np_3 = np.array(gray_pil_3)
            if np.array_equal(gray_np, gray_np_3):
                num_gray = num_gray + 1

        p_value = stats.binom_test(num_gray, num_samples, p=1.0)  # Note: grayscale is always unchanged
        random.setstate(random_state)
        assert p_value > 0.0001

        # Test set 3: Explicit tests
        x_shape = [2, 2, 3]
        x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
        x_pil = Image.fromarray(x_np, mode='RGB')
        x_pil_2 = x_pil.convert('L')
        gray_np = np.array(x_pil_2)

        # Case 3a: RGB -> 3 channel grayscale (grayscaled)
        trans2 = transforms.RandomGrayscale(p=1.0)
        gray_pil_2 = trans2(x_pil)
        gray_np_2 = np.array(gray_pil_2)
        assert gray_pil_2.mode == 'RGB', 'mode should be RGB'
        assert gray_np_2.shape == tuple(x_shape), 'should be 3 channel'
        np.testing.assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1])
        np.testing.assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2])
        np.testing.assert_equal(gray_np, gray_np_2[:, :, 0])

        # Case 3b: RGB -> 3 channel grayscale (unchanged)
        trans2 = transforms.RandomGrayscale(p=0.0)
        gray_pil_2 = trans2(x_pil)
        gray_np_2 = np.array(gray_pil_2)
        assert gray_pil_2.mode == 'RGB', 'mode should be RGB'
        assert gray_np_2.shape == tuple(x_shape), 'should be 3 channel'
        np.testing.assert_equal(x_np, gray_np_2)

        # Case 3c: 1 channel grayscale -> 1 channel grayscale (grayscaled)
        trans3 = transforms.RandomGrayscale(p=1.0)
        gray_pil_3 = trans3(x_pil_2)
        gray_np_3 = np.array(gray_pil_3)
        assert gray_pil_3.mode == 'L', 'mode should be L'
        assert gray_np_3.shape == tuple(x_shape[0:2]), 'should be 1 channel'
        np.testing.assert_equal(gray_np, gray_np_3)

        # Case 3d: 1 channel grayscale -> 1 channel grayscale (unchanged)
        trans3 = transforms.RandomGrayscale(p=0.0)
        gray_pil_3 = trans3(x_pil_2)
        gray_np_3 = np.array(gray_pil_3)
        assert gray_pil_3.mode == 'L', 'mode should be L'
        assert gray_np_3.shape == tuple(x_shape[0:2]), 'should be 1 channel'
        np.testing.assert_equal(gray_np, gray_np_3)

        # Checking if RandomGrayscale can be printed as string
        trans3.__repr__()


if __name__ == '__main__':
    unittest.main()