# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp

import cv2
import numpy as np
import pytest
from numpy.testing import assert_array_equal

import mmcv


class TestPhotometric:

    @classmethod
    def setup_class(cls):
        # the test img resolution is 400x300
        cls.img_path = osp.join(osp.dirname(__file__), '../data/color.jpg')
        cls.img = cv2.imread(cls.img_path)
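        # per-channel mean/std in RGB order (the ImageNet statistics
        # commonly used across OpenMMLab, scaled to the 0-255 range)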
        cls.mean = np.array([123.675, 116.28, 103.53], dtype=np.float32)
        cls.std = np.array([58.395, 57.12, 57.375], dtype=np.float32)

    def test_imnormalize(self):
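        # mmcv.imnormalize converts BGR to RGB by default (to_rgb=True), so
        # the expected result is computed on the RGB view of the BGR image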
        rgb_img = self.img[:, :, ::-1]
        baseline = (rgb_img - self.mean) / self.std
        img = mmcv.imnormalize(self.img, self.mean, self.std)
        assert np.allclose(img, baseline)
        assert id(img) != id(self.img)
        img = mmcv.imnormalize(rgb_img, self.mean, self.std, to_rgb=False)
        assert np.allclose(img, baseline)
        assert id(img) != id(rgb_img)

    def test_imnormalize_(self):
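        # imnormalize_ is the in-place variant: it should modify its input
        # and return the very same array object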
        img_for_normalize = np.float32(self.img)
        rgb_img_for_normalize = np.float32(self.img[:, :, ::-1])
        baseline = (rgb_img_for_normalize - self.mean) / self.std
        img = mmcv.imnormalize_(img_for_normalize, self.mean, self.std)
        assert np.allclose(img_for_normalize, baseline)
        assert id(img) == id(img_for_normalize)
        img = mmcv.imnormalize_(
            rgb_img_for_normalize, self.mean, self.std, to_rgb=False)
        assert np.allclose(img, baseline)
        assert id(img) == id(rgb_img_for_normalize)

    def test_imdenormalize(self):
        norm_img = (self.img[:, :, ::-1] - self.mean) / self.std
        rgb_baseline = (norm_img * self.std + self.mean)
        bgr_baseline = rgb_baseline[:, :, ::-1]
        img = mmcv.imdenormalize(norm_img, self.mean, self.std)
        assert np.allclose(img, bgr_baseline)
        img = mmcv.imdenormalize(norm_img, self.mean, self.std, to_bgr=False)
        assert np.allclose(img, rgb_baseline)

    def test_iminvert(self):
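        # iminvert maps every pixel value v to 255 - v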
        img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
                       dtype=np.uint8)
        img_r = np.array([[255, 127, 0], [254, 128, 1], [253, 126, 2]],
                         dtype=np.uint8)
        assert_array_equal(mmcv.iminvert(img), img_r)

    def test_solarize(self):
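        # solarize inverts every pixel value at or above the threshold
        # (128 by default)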
        img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
                       dtype=np.uint8)
        img_r = np.array([[0, 127, 0], [1, 127, 1], [2, 126, 2]],
                         dtype=np.uint8)
        assert_array_equal(mmcv.solarize(img), img_r)
        img_r = np.array([[0, 127, 0], [1, 128, 1], [2, 126, 2]],
                         dtype=np.uint8)
        assert_array_equal(mmcv.solarize(img, 100), img_r)

    def test_posterize(self):
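        # posterize keeps only the `bits` most significant bits of each
        # pixel value and zeros the rest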
        img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
                       dtype=np.uint8)
        img_r = np.array([[0, 128, 128], [0, 0, 128], [0, 128, 128]],
                         dtype=np.uint8)
        assert_array_equal(mmcv.posterize(img, 1), img_r)
        img_r = np.array([[0, 128, 224], [0, 96, 224], [0, 128, 224]],
                         dtype=np.uint8)
        assert_array_equal(mmcv.posterize(img, 3), img_r)

    def test_adjust_color(self, nb_rand_test=100):
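        # the cases below check that adjust_color blends the image with its
        # grayscale version as round(clip(alpha * img + beta * gray + gamma)),
        # with beta defaulting to (1 - alpha)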
        img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
                       dtype=np.uint8)
        img = np.stack([img, img, img], axis=-1)
        assert_array_equal(mmcv.adjust_color(img), img)
        img_gray = mmcv.bgr2gray(img)
        img_r = np.stack([img_gray, img_gray, img_gray], axis=-1)
        assert_array_equal(mmcv.adjust_color(img, 0), img_r)
        assert_array_equal(mmcv.adjust_color(img, 0, 1), img_r)
        assert_array_equal(
            mmcv.adjust_color(img, 0.5, 0.5),
            np.round(np.clip((img * 0.5 + img_r * 0.5), 0,
                             255)).astype(img.dtype))
        assert_array_equal(
            mmcv.adjust_color(img, 1, 1.5),
            np.round(np.clip(img * 1 + img_r * 1.5, 0, 255)).astype(img.dtype))
        assert_array_equal(
            mmcv.adjust_color(img, 0.8, -0.6, gamma=2),
            np.round(np.clip(img * 0.8 - 0.6 * img_r + 2, 0,
                             255)).astype(img.dtype))
        assert_array_equal(
            mmcv.adjust_color(img, 0.8, -0.6, gamma=-0.6),
            np.round(np.clip(img * 0.8 - 0.6 * img_r - 0.6, 0,
                             255)).astype(img.dtype))

        # test with a float image
        img = img.astype(np.float32)
        assert_array_equal(
            np.round(mmcv.adjust_color(img, 0.8, -0.6, gamma=-0.6)),
            np.round(np.clip(img * 0.8 - 0.6 * img_r - 0.6, 0, 255)))

        # test adjust_color with randomly sampled image.
        for _ in range(nb_rand_test):
            img = np.clip(np.random.normal(0, 1, (256, 256, 3)) * 260, 0,
                          255).astype(np.uint8)
            factor = np.random.uniform()
            cv2_img = mmcv.adjust_color(img, alpha=factor)
            pil_img = mmcv.adjust_color(img, alpha=factor, backend='pillow')
            np.testing.assert_allclose(cv2_img, pil_img, rtol=0, atol=2)

        # the input type must be uint8 for pillow backend
        with pytest.raises(AssertionError):
            mmcv.adjust_color(img.astype(np.float32), backend='pillow')

        # backend must be 'cv2' or 'pillow'
        with pytest.raises(ValueError):
            mmcv.adjust_color(img.astype(np.uint8), backend='not support')

    def test_imequalize(self, nb_rand_test=100):

        def _imequalize(img):
            # equalize the image using PIL.ImageOps.equalize
            from PIL import Image, ImageOps
            img = Image.fromarray(img)
            equalized_img = np.asarray(ImageOps.equalize(img))
            return equalized_img

        img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
                       dtype=np.uint8)
        img = np.stack([img, img, img], axis=-1)
        equalized_img = mmcv.imequalize(img)
        assert_array_equal(equalized_img, _imequalize(img))

        # test equalize for the case where step=0
        img = np.array([[0, 0, 0], [120, 120, 120], [255, 255, 255]],
                       dtype=np.uint8)
        img = np.stack([img, img, img], axis=-1)
        assert_array_equal(mmcv.imequalize(img), img)

        # test equalize with randomly sampled image.
        for _ in range(nb_rand_test):
            img = np.clip(np.random.normal(0, 1, (256, 256, 3)) * 260, 0,
                          255).astype(np.uint8)
            equalized_img = mmcv.imequalize(img)
            assert_array_equal(equalized_img, _imequalize(img))

    def test_adjust_brightness(self, nb_rand_test=100):

        img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
                       dtype=np.uint8)
        img = np.stack([img, img, img], axis=-1)
        # test case with factor 1.0
        assert_array_equal(mmcv.adjust_brightness(img, 1.), img)
        # test case with factor 0.0
        assert_array_equal(mmcv.adjust_brightness(img, 0.), np.zeros_like(img))
        # test adjust_brightness with randomly sampled images and factors.
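        # small per-pixel differences between the cv2 and pillow backends
        # are tolerated (atol=1 below)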
        for _ in range(nb_rand_test):
            img = np.clip(
                np.random.uniform(0, 1, (1000, 1200, 3)) * 260, 0,
                255).astype(np.uint8)
            factor = np.random.uniform() + np.random.choice([0, 1])
            np.testing.assert_allclose(
                mmcv.adjust_brightness(img, factor).astype(np.int32),
                mmcv.adjust_brightness(img, factor,
                                       backend='pillow').astype(np.int32),
                rtol=0,
                atol=1)

        # the input type must be uint8 for pillow backend
        with pytest.raises(AssertionError):
            mmcv.adjust_brightness(img.astype(np.float32), backend='pillow')

        # backend must be 'cv2' or 'pillow'
        with pytest.raises(ValueError):
            mmcv.adjust_brightness(img.astype(np.uint8), backend='not support')

    def test_adjust_contrast(self, nb_rand_test=100):

        img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
                       dtype=np.uint8)
        img = np.stack([img, img, img], axis=-1)
        # test case with factor 1.0
        assert_array_equal(mmcv.adjust_contrast(img, 1.), img)
        # test case with factor 0.0
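        # (factor 0 collapses the image to its mean grey level, so the two
        # backends are compared against each other rather than a constant)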
        assert_array_equal(
            mmcv.adjust_contrast(img, 0.),
            mmcv.adjust_contrast(img, 0., backend='pillow'))
        # test adjust_contrast with randomly sampled images and factors.
        for _ in range(nb_rand_test):
            img = np.clip(
                np.random.uniform(0, 1, (1200, 1000, 3)) * 260, 0,
                255).astype(np.uint8)
            factor = np.random.uniform() + np.random.choice([0, 1])
            # Note: the gap (at most 1) between PIL.ImageEnhance.Contrast
            # and mmcv.adjust_contrast comes from the different ways mmcv
            # and PIL convert a color image to grayscale.
            np.testing.assert_allclose(
                mmcv.adjust_contrast(img, factor).astype(np.int32),
                mmcv.adjust_contrast(img, factor,
                                     backend='pillow').astype(np.int32),
                rtol=0,
                atol=1)

        # the input type must be uint8 for pillow backend
        with pytest.raises(AssertionError):
            mmcv.adjust_contrast(img.astype(np.float32), backend='pillow')

        # backend must be 'cv2' or 'pillow'
        with pytest.raises(ValueError):
            mmcv.adjust_contrast(img.astype(np.uint8), backend='not support')

    def test_auto_contrast(self, nb_rand_test=100):

        def _auto_contrast(img, cutoff=0):
            from PIL import Image
            from PIL.ImageOps import autocontrast

            # Image.fromarray expects RGB by default, not BGR.
            # convert from BGR to RGB
            img = Image.fromarray(img[..., ::-1], mode='RGB')
            contrasted_img = autocontrast(img, cutoff)
            # convert from RGB to BGR
            return np.asarray(contrasted_img)[..., ::-1]

        img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
                       dtype=np.uint8)
        img = np.stack([img, img, img], axis=-1)

        # test case without cut-off
        assert_array_equal(mmcv.auto_contrast(img), _auto_contrast(img))
        # test case with cut-off as int
        assert_array_equal(
            mmcv.auto_contrast(img, 10), _auto_contrast(img, 10))
        # test case with cut-off as float
        assert_array_equal(
            mmcv.auto_contrast(img, 12.5), _auto_contrast(img, 12.5))
        # test case with cut-off as tuple
        assert_array_equal(
            mmcv.auto_contrast(img, (10, 10)), _auto_contrast(img, 10))
        # test case where the low and high cut-offs sum to over 100
        assert_array_equal(
            mmcv.auto_contrast(img, 60), _auto_contrast(img, 60))

        # test auto_contrast with randomly sampled images and factors.
        for _ in range(nb_rand_test):
            img = np.clip(
                np.random.uniform(0, 1, (1200, 1000, 3)) * 260, 0,
                255).astype(np.uint8)
            # cut-offs are not set as a tuple since `build.yml` installs
            # pillow 6.2.2, which does not support setting the low and high
            # cut-offs differently. With pillow 8.0.0 or above, cutoff can
            # be passed as a tuple.
            cutoff = np.random.rand() * 100
            assert_array_equal(
                mmcv.auto_contrast(img, cutoff), _auto_contrast(img, cutoff))

    def test_adjust_sharpness(self, nb_rand_test=100):

        def _adjust_sharpness(img, factor):
            # adjust the sharpness of the image using
            # PIL.ImageEnhance.Sharpness
            from PIL import Image
            from PIL.ImageEnhance import Sharpness
            img = Image.fromarray(img)
            sharpened_img = Sharpness(img).enhance(factor)
            return np.asarray(sharpened_img)

        img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]],
                       dtype=np.uint8)
        img = np.stack([img, img, img], axis=-1)

        # test case with invalid type of kernel
        with pytest.raises(AssertionError):
            mmcv.adjust_sharpness(img, 1., kernel=1.)
        # test case with invalid shape of kernel
        kernel = np.ones((3, 3, 3))
        with pytest.raises(AssertionError):
            mmcv.adjust_sharpness(img, 1., kernel=kernel)
        # test case with all-zero kernel, factor 0.0
        kernel = np.zeros((3, 3))
        assert_array_equal(
            mmcv.adjust_sharpness(img, 0., kernel=kernel), np.zeros_like(img))

        # test case with factor 1.0
        assert_array_equal(mmcv.adjust_sharpness(img, 1.), img)
        # test adjust_sharpness with randomly sampled images and factors.
        for _ in range(nb_rand_test):
            img = np.clip(
                np.random.uniform(0, 1, (1000, 1200, 3)) * 260, 0,
                255).astype(np.uint8)
            factor = np.random.uniform()
            # Note: the gap between PIL.ImageEnhance.Sharpness and
            # mmcv.adjust_sharpness mainly comes from the different ways
            # image edges are handled when applying the filter
            np.testing.assert_allclose(
                mmcv.adjust_sharpness(img, factor).astype(np.int32)[1:-1,
                                                                    1:-1],
                _adjust_sharpness(img, factor).astype(np.int32)[1:-1, 1:-1],
                rtol=0,
                atol=1)

    def test_adjust_lighting(self):
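        # invalid eigval/eigvec types or mismatched shapes must raise; with
        # alphastd=0 no lighting noise is added, so the image is unchanged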
        img = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.uint8)
        img = np.stack([img, img, img], axis=-1)

        # eigval and eigvec must be np.ndarray
        with pytest.raises(AssertionError):
            mmcv.adjust_lighting(img, 1, np.ones((3, 1)))
        with pytest.raises(AssertionError):
            mmcv.adjust_lighting(img, np.array([1]), (1, 1, 1))
        # the number of eigenvalues and eigenvectors must match
        with pytest.raises(AssertionError):
            mmcv.adjust_lighting(img, np.array([1]), np.eye(2))
        with pytest.raises(AssertionError):
            mmcv.adjust_lighting(img, np.array([1]), np.array([1]))

        img_adjusted = mmcv.adjust_lighting(
            img,
            np.random.normal(0, 1, 2),
            np.random.normal(0, 1, (3, 2)),
            alphastd=0.)
        assert_array_equal(img_adjusted, img)

    def test_lut_transform(self):
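        # an identity lookup table (value i maps to i); results must match
        # cv2.LUT exactly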
        lut_table = np.array(list(range(256)))

        # test assertion: image values should be between 0 and 255.
        with pytest.raises(AssertionError):
            mmcv.lut_transform(np.array([256]), lut_table)
        with pytest.raises(AssertionError):
            mmcv.lut_transform(np.array([-1]), lut_table)

        # test assertion: lut_table should be an ndarray with shape (256, )
        with pytest.raises(AssertionError):
            mmcv.lut_transform(np.array([0]), list(range(256)))
        with pytest.raises(AssertionError):
            mmcv.lut_transform(np.array([1]), np.array(list(range(257))))

        img = mmcv.lut_transform(self.img, lut_table)
        baseline = cv2.LUT(self.img, lut_table)
        assert np.allclose(img, baseline)

        input_img = np.array(
            [[[0, 128, 255], [255, 128, 0]], [[0, 128, 255], [255, 128, 0]]],
            dtype=float)
        img = mmcv.lut_transform(input_img, lut_table)
        baseline = cv2.LUT(np.array(input_img, dtype=np.uint8), lut_table)
        assert np.allclose(img, baseline)

        input_img = np.random.randint(0, 256, size=(7, 8, 9, 10, 11))
        img = mmcv.lut_transform(input_img, lut_table)
        baseline = cv2.LUT(np.array(input_img, dtype=np.uint8), lut_table)
        assert np.allclose(img, baseline)

    def test_clahe(self):

        def _clahe(img, clip_limit=40.0, tile_grid_size=(8, 8)):
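            # reference result: OpenCV's own CLAHE applied to a
            # single-channel uint8 image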
            clahe = cv2.createCLAHE(clip_limit, tile_grid_size)
            return clahe.apply(np.array(img, dtype=np.uint8))

        # test assertion: image should have the right shape
        with pytest.raises(AssertionError):
            mmcv.clahe(self.img)

        # test assertion: tile_grid_size should be a tuple of 2 integers
        with pytest.raises(AssertionError):
            mmcv.clahe(self.img[:, :, 0], tile_grid_size=(8.0, 8.0))
        with pytest.raises(AssertionError):
            mmcv.clahe(self.img[:, :, 0], tile_grid_size=(8, 8, 8))
        with pytest.raises(AssertionError):
            mmcv.clahe(self.img[:, :, 0], tile_grid_size=[8, 8])

        # test with different channels
        for i in range(self.img.shape[-1]):
            img = mmcv.clahe(self.img[:, :, i])
            img_std = _clahe(self.img[:, :, i])
            assert np.allclose(img, img_std)
            assert id(img) != id(self.img[:, :, i])
            assert id(img_std) != id(self.img[:, :, i])

        # test case with clip_limit=1.2
        for i in range(self.img.shape[-1]):
            img = mmcv.clahe(self.img[:, :, i], 1.2)
            img_std = _clahe(self.img[:, :, i], 1.2)
            assert np.allclose(img, img_std)
            assert id(img) != id(self.img[:, :, i])
            assert id(img_std) != id(self.img[:, :, i])

    def test_adjust_hue(self):
        # test case where img is not an ndarray
        from PIL import Image
        pil_img = Image.fromarray(self.img)

        with pytest.raises(TypeError):
            mmcv.adjust_hue(pil_img, hue_factor=0.0)

        # test case with hue_factor > 0.5 or hue_factor < -0.5
        with pytest.raises(ValueError):
            mmcv.adjust_hue(self.img, hue_factor=-0.6)
        with pytest.raises(ValueError):
            mmcv.adjust_hue(self.img, hue_factor=0.6)

        for i in np.arange(-0.5, 0.5, 0.2):
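            # the cv2 and pillow backends implement the hue shift slightly
            # differently, so only loose agreement (atol=10) is expected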
            pil_res = mmcv.adjust_hue(self.img, hue_factor=i, backend='pillow')
            pil_res = np.array(pil_res)
            cv2_res = mmcv.adjust_hue(self.img, hue_factor=i)
            assert np.allclose(pil_res, cv2_res, atol=10.0)

        # the input type must be uint8 for pillow backend
        with pytest.raises(AssertionError):
            mmcv.adjust_hue(
                self.img.astype(np.float32), hue_factor=0, backend='pillow')

        # backend must be 'cv2' or 'pillow'
        with pytest.raises(ValueError):
            mmcv.adjust_hue(
                self.img.astype(np.uint8), hue_factor=0, backend='not support')