test_transforms_tensor.py 14.2 KB
Newer Older
1
2
3
import torch
from torchvision import transforms as T
from torchvision.transforms import functional as F
4

vfdev's avatar
vfdev committed
5
from PIL.Image import NEAREST, BILINEAR, BICUBIC
6
7
8
9
10

import numpy as np

import unittest

11
from common_utils import TransformsTester
12
13


14
class Tester(TransformsTester):
    """Compares transform results on tensor vs. PIL inputs and checks that
    the transform classes can be torchscripted.

    Runs on CPU; the CUDATester subclass re-runs everything on CUDA.
    """

    def setUp(self):
        # Target device for tensor test data; overridden by CUDATester.
        self.device = "cpu"
    def _test_functional_op(self, func, fn_kwargs):
        """Apply functional op ``func`` to a tensor and a PIL image, compare results.

        ``fn_kwargs`` may be None, meaning no extra keyword arguments.
        """
        kwargs = {} if fn_kwargs is None else fn_kwargs
        op = getattr(F, func)
        tensor, pil_img = self._create_data(height=10, width=10, device=self.device)
        out_tensor = op(tensor, **kwargs)
        out_pil = op(pil_img, **kwargs)
        self.compareTensorToPIL(out_tensor, out_pil)
    def _test_class_op(self, method, meth_kwargs=None, test_exact_match=True, **match_kwargs):
        """Run transform class ``method`` on tensor vs. PIL input and on a
        torchscripted copy.

        The RNG is re-seeded identically before each call so random transforms
        draw the same parameters for every input.
        """
        if meth_kwargs is None:
            meth_kwargs = {}

        tensor, pil_img = self._create_data(26, 34, device=self.device)

        # Class interface, eager and scripted.
        transform = getattr(T, method)(**meth_kwargs)
        scripted_transform = torch.jit.script(transform)

        torch.manual_seed(12)
        out_tensor = transform(tensor)
        torch.manual_seed(12)
        out_pil = transform(pil_img)

        if test_exact_match:
            self.compareTensorToPIL(out_tensor, out_pil, **match_kwargs)
        else:
            self.approxEqualTensorToPIL(out_tensor.float(), out_pil, **match_kwargs)

        # Scripted run with the same seed must reproduce the eager result.
        torch.manual_seed(12)
        out_scripted = scripted_transform(tensor)
        self.assertTrue(out_tensor.equal(out_scripted))
    def _test_op(self, func, method, fn_kwargs=None, meth_kwargs=None):
        """Exercise both the functional (``F.func``) and the class (``T.method``)
        interface of an op with the given keyword arguments."""
        self._test_functional_op(func, fn_kwargs=fn_kwargs)
        self._test_class_op(method, meth_kwargs=meth_kwargs)
    def test_random_horizontal_flip(self):
        # Checks F.hflip on tensor vs PIL, plus the RandomHorizontalFlip class
        # (eager and torchscripted) via the shared helper.
        self._test_op('hflip', 'RandomHorizontalFlip')
    def test_random_vertical_flip(self):
        # Checks F.vflip on tensor vs PIL, plus the RandomVerticalFlip class
        # (eager and torchscripted) via the shared helper.
        self._test_op('vflip', 'RandomVerticalFlip')
    def test_color_jitter(self):
        """ColorJitter, one jitter parameter at a time.

        Tensor vs PIL results are compared approximately with a max-abs
        tolerance of one intensity level (tol = 1 + eps).
        """
        tol = 1.0 + 1e-10
        # Same parameter/factor sweeps as before, expressed as data instead of
        # three copy-pasted loops.
        sweeps = [
            ("brightness", [0.1, 0.5, 1.0, 1.34]),
            ("contrast", [0.2, 0.5, 1.0, 1.5]),
            ("saturation", [0.5, 0.75, 1.0, 1.25]),
        ]
        for param, factors in sweeps:
            for f in factors:
                self._test_class_op(
                    "ColorJitter", meth_kwargs={param: f}, test_exact_match=False, tol=tol, agg_method="max"
                )
    def test_pad(self):
        """F.pad / transforms.Pad with padding given in every accepted form."""
        # Padding as a single int is only exercised through the functional
        # interface here (as in the original test layout).
        self._test_functional_op(
            "pad", fn_kwargs={"padding": 2, "fill": 0, "padding_mode": "constant"}
        )
        # Padding as [int, ], as a 2-list, and as a 4-tuple (with non-zero fill),
        # through both the functional and the class interface.
        for padding, fill in [([2, ], 0), ([4, 4], 0), ((2, 2, 2, 2), 127)]:
            kwargs = {"padding": padding, "fill": fill, "padding_mode": "constant"}
            self._test_op("pad", "Pad", fn_kwargs=kwargs, meth_kwargs=kwargs)
    def test_crop(self):
        """F.crop / transforms.RandomCrop across size and padding variants."""
        # Functional crop vs RandomCrop with tuple size and padding.
        self._test_op(
            'crop', 'RandomCrop',
            fn_kwargs={"top": 2, "left": 3, "height": 4, "width": 5},
            meth_kwargs={"size": (4, 5), "padding": (4, 4), "pad_if_needed": True},
        )

        # RandomCrop class only: every size form against every padding mode.
        padding_configs = [
            {"padding_mode": "constant", "fill": 0},
            {"padding_mode": "constant", "fill": 10},
            {"padding_mode": "constant", "fill": 20},
            {"padding_mode": "edge"},
            {"padding_mode": "reflect"},
        ]
        for padding_config in padding_configs:
            for size in (5, [5, ], [6, 6]):
                self._test_class_op("RandomCrop", {"size": size, **padding_config})
    def test_center_crop(self):
        """F.center_crop / transforms.CenterCrop for tuple and single-dim sizes,
        plus torchscript coverage of the class for int / list / tuple sizes."""
        self._test_op(
            "center_crop", "CenterCrop",
            fn_kwargs={"output_size": (4, 5)}, meth_kwargs={"size": (4, 5)}
        )
        self._test_op(
            "center_crop", "CenterCrop",
            fn_kwargs={"output_size": (5,)}, meth_kwargs={"size": (5, )}
        )

        tensor = torch.randint(0, 255, (3, 10, 10), dtype=torch.uint8, device=self.device)
        # torchscript must handle size given as int, as [int, ] and as a tuple;
        # one loop replaces three copy-pasted stanzas.
        for size in [5, [5, ], (6, 6)]:
            scripted_fn = torch.jit.script(T.CenterCrop(size=size))
            scripted_fn(tensor)
    def _test_op_list_output(self, func, method, out_length, fn_kwargs=None, meth_kwargs=None):
        """Check ops that return a sequence of images (e.g. five_crop/ten_crop).

        Verifies element-wise tensor vs PIL agreement, the expected sequence
        length, and torchscript consistency for both the functional and the
        class interface.
        """
        if fn_kwargs is None:
            fn_kwargs = {}
        if meth_kwargs is None:
            meth_kwargs = {}

        tensor, pil_img = self._create_data(height=20, width=20, device=self.device)
        op = getattr(F, func)

        tensor_results = op(tensor, **fn_kwargs)
        pil_results = op(pil_img, **fn_kwargs)
        self.assertEqual(len(tensor_results), len(pil_results))
        self.assertEqual(len(tensor_results), out_length)
        for t_res, p_res in zip(tensor_results, pil_results):
            self.compareTensorToPIL(t_res, p_res)

        # Scripted functional form must match the eager outputs exactly.
        scripted_op = torch.jit.script(op)
        scripted_results = scripted_op(tensor.detach().clone(), **fn_kwargs)
        self.assertEqual(len(tensor_results), len(scripted_results))
        self.assertEqual(len(scripted_results), out_length)
        for t_res, s_res in zip(tensor_results, scripted_results):
            self.assertTrue(t_res.equal(s_res),
                            msg="{} vs {}".format(t_res, s_res))

        # Class interface, scripted: only the output length is checked here.
        scripted_cls = torch.jit.script(getattr(T, method)(**meth_kwargs))
        output = scripted_cls(tensor)
        self.assertEqual(len(output), len(scripted_results))
    def test_five_crop(self):
        """F.five_crop / transforms.FiveCrop must return 5 crops for every
        accepted size form (1-tuple, 1-list, 2-tuple, 2-list)."""
        for size in [(5,), [5, ], (4, 5), [4, 5]]:
            kwargs = {"size": size}
            self._test_op_list_output(
                "five_crop", "FiveCrop", out_length=5, fn_kwargs=kwargs, meth_kwargs=kwargs
            )
    def test_ten_crop(self):
        """F.ten_crop / transforms.TenCrop must return 10 crops for every
        accepted size form (1-tuple, 1-list, 2-tuple, 2-list)."""
        for size in [(5,), [5, ], (4, 5), [4, 5]]:
            kwargs = {"size": size}
            self._test_op_list_output(
                "ten_crop", "TenCrop", out_length=10, fn_kwargs=kwargs, meth_kwargs=kwargs
            )
    def test_resize(self):
        """F.resize / transforms.Resize: eager vs scripted agreement across
        dtypes, size forms and interpolation modes."""
        tensor, _ = self._create_data(height=34, width=36, device=self.device)
        script_fn = torch.jit.script(F.resize)

        for dt in [None, torch.float32, torch.float64]:
            if dt is not None:
                # Trivial cast of the uint8 data so float dtypes are covered too.
                tensor = tensor.to(dt)
            for size in [32, 34, [32, ], [32, 32], (32, 32), [34, 35]]:
                for interpolation in [BILINEAR, BICUBIC, NEAREST]:
                    resized = F.resize(tensor, size=size, interpolation=interpolation)

                    # torchscript requires size as a list of ints.
                    script_size = [size, ] if isinstance(size, int) else size

                    scripted_out = script_fn(tensor, size=script_size, interpolation=interpolation)
                    self.assertTrue(scripted_out.equal(resized))

                    transform = T.Resize(size=script_size, interpolation=interpolation)
                    eager_out = transform(tensor)
                    scripted_transform = torch.jit.script(transform)
                    self.assertTrue(scripted_transform(tensor).equal(eager_out))
    def test_resized_crop(self):
        """RandomResizedCrop: eager and scripted runs agree under a fixed seed."""
        tensor = torch.randint(0, 255, size=(3, 44, 56), dtype=torch.uint8, device=self.device)

        def _check(transform):
            # Same seed before each run so the random crop parameters coincide.
            scripted = torch.jit.script(transform)
            torch.manual_seed(12)
            eager_out = transform(tensor)
            torch.manual_seed(12)
            self.assertTrue(eager_out.equal(scripted(tensor)))

        for scale in [(0.7, 1.2), [0.7, 1.2]]:
            for ratio in [(0.75, 1.333), [0.75, 1.333]]:
                for size in [(32, ), [44, ], [32, ], [32, 32], (32, 32), [44, 55]]:
                    for interpolation in [NEAREST, BILINEAR, BICUBIC]:
                        _check(T.RandomResizedCrop(
                            size=size, scale=scale, ratio=ratio, interpolation=interpolation
                        ))
    def test_random_affine(self):
        """RandomAffine: eager and scripted runs agree under a fixed seed."""
        tensor = torch.randint(0, 255, size=(3, 44, 56), dtype=torch.uint8, device=self.device)

        def _check(transform):
            # Same seed before each run so the random affine parameters coincide.
            scripted = torch.jit.script(transform)
            torch.manual_seed(12)
            eager_out = transform(tensor)
            torch.manual_seed(12)
            self.assertTrue(eager_out.equal(scripted(tensor)))

        for shear in [15, 10.0, (5.0, 10.0), [-15, 15], [-10.0, 10.0, -11.0, 11.0]]:
            for scale in [(0.7, 1.2), [0.7, 1.2]]:
                for translate in [(0.1, 0.2), [0.2, 0.1]]:
                    for degrees in [45, 35.0, (-45, 45), [-90.0, 90.0]]:
                        for interpolation in [NEAREST, BILINEAR]:
                            _check(T.RandomAffine(
                                degrees=degrees, translate=translate,
                                scale=scale, shear=shear, resample=interpolation
                            ))
    def test_random_rotate(self):
        """RandomRotation: eager and scripted runs agree under a fixed seed."""
        tensor = torch.randint(0, 255, size=(3, 44, 56), dtype=torch.uint8, device=self.device)

        def _check(transform):
            # Same seed before each run so the random angle coincides.
            scripted = torch.jit.script(transform)
            torch.manual_seed(12)
            eager_out = transform(tensor)
            torch.manual_seed(12)
            self.assertTrue(eager_out.equal(scripted(tensor)))

        for center in [(0, 0), [10, 10], None, (56, 44)]:
            for expand in [True, False]:
                for degrees in [45, 35.0, (-45, 45), [-90.0, 90.0]]:
                    for interpolation in [NEAREST, BILINEAR]:
                        _check(T.RandomRotation(
                            degrees=degrees, resample=interpolation, expand=expand, center=center
                        ))
    def test_random_perspective(self):
        """RandomPerspective: eager and scripted runs agree under a fixed seed."""
        tensor = torch.randint(0, 255, size=(3, 44, 56), dtype=torch.uint8, device=self.device)

        def _check(transform):
            # Same seed before each run so the random endpoints coincide.
            scripted = torch.jit.script(transform)
            torch.manual_seed(12)
            eager_out = transform(tensor)
            torch.manual_seed(12)
            self.assertTrue(eager_out.equal(scripted(tensor)))

        for distortion_scale in np.linspace(0.1, 1.0, num=20):
            for interpolation in [NEAREST, BILINEAR]:
                _check(T.RandomPerspective(
                    distortion_scale=distortion_scale,
                    interpolation=interpolation
                ))
    def test_to_grayscale(self):
        """Grayscale / RandomGrayscale: tensor vs PIL results compared
        approximately with a max-abs tolerance of one intensity level."""
        tol = 1.0 + 1e-10
        # Same three cases as before, expressed as data instead of copy-paste.
        cases = [
            ("Grayscale", {"num_output_channels": 1}),
            ("Grayscale", {"num_output_channels": 3}),
            ("RandomGrayscale", {}),
        ]
        for method, meth_kwargs in cases:
            self._test_class_op(
                method, meth_kwargs=meth_kwargs, test_exact_match=False, tol=tol, agg_method="max"
            )
@unittest.skipIf(not torch.cuda.is_available(), reason="Skip if no CUDA device")
class CUDATester(Tester):

    def setUp(self):
        self.device = "cuda"


342
343
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()