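"""PIL-backed implementations of the functional image transforms.

Every helper here operates on a PIL Image and is decorated with
``@torch.jit.unused`` so the TorchScript compiler does not try to compile it.
"""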
import numbers
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union

import numpy as np
import torch
from PIL import Image, ImageOps, ImageEnhance

try:
    import accimage
except ImportError:
    accimage = None


@torch.jit.unused
def _is_pil_image(img: Any) -> bool:
    if accimage is not None:
        return isinstance(img, (Image.Image, accimage.Image))
    else:
        return isinstance(img, Image.Image)


@torch.jit.unused
def _get_image_size(img: Any) -> List[int]:
    if _is_pil_image(img):
        return img.size
    raise TypeError("Unexpected type {}".format(type(img)))


@torch.jit.unused
def _get_image_num_channels(img: Any) -> int:
    if _is_pil_image(img):
        # Count the image bands so modes such as 'RGBA', 'LA' or 'CMYK' are
        # reported correctly; accimage images expose `channels` instead.
        if hasattr(img, "getbands"):
            return len(img.getbands())
        return img.channels
    raise TypeError("Unexpected type {}".format(type(img)))


@torch.jit.unused
def hflip(img: Image.Image) -> Image.Image:
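    """Horizontally flip the given PIL Image."""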
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    return img.transpose(Image.FLIP_LEFT_RIGHT)


@torch.jit.unused
def vflip(img: Image.Image) -> Image.Image:
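    """Vertically flip the given PIL Image."""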
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    return img.transpose(Image.FLIP_TOP_BOTTOM)


@torch.jit.unused
def adjust_brightness(img: Image.Image, brightness_factor: float) -> Image.Image:
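    """Adjust image brightness; a factor of 0 gives a black image, 1 the
    original image, and larger values a brighter image.
    """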
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    enhancer = ImageEnhance.Brightness(img)
    img = enhancer.enhance(brightness_factor)
    return img


@torch.jit.unused
def adjust_contrast(img: Image.Image, contrast_factor: float) -> Image.Image:
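    """Adjust image contrast; a factor of 0 gives a solid gray image, 1 the
    original image, and larger values increase the contrast.
    """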
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    enhancer = ImageEnhance.Contrast(img)
    img = enhancer.enhance(contrast_factor)
    return img


@torch.jit.unused
def adjust_saturation(img: Image.Image, saturation_factor: float) -> Image.Image:
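    """Adjust color saturation; a factor of 0 gives a grayscale image, 1 the
    original image, and larger values increase the saturation.
    """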
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    enhancer = ImageEnhance.Color(img)
    img = enhancer.enhance(saturation_factor)
    return img


@torch.jit.unused
def adjust_hue(img: Image.Image, hue_factor: float) -> Image.Image:
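    """Shift the hue of the image.

    ``hue_factor`` must lie in [-0.5, 0.5]; the image is converted to HSV and
    the hue channel is shifted by ``hue_factor * 255`` with uint8 wraparound.
    Grayscale-like modes ('L', '1', 'I', 'F') are returned unchanged.
    """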
    if not (-0.5 <= hue_factor <= 0.5):
        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))

    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    input_mode = img.mode
    if input_mode in {'L', '1', 'I', 'F'}:
        return img

    h, s, v = img.convert('HSV').split()

    np_h = np.array(h, dtype=np.uint8)
    # uint8 overflow takes care of the hue value wrapping around the boundary
    with np.errstate(over='ignore'):
        np_h += np.uint8(hue_factor * 255)
    h = Image.fromarray(np_h, 'L')

    img = Image.merge('HSV', (h, s, v)).convert(input_mode)
    return img


@torch.jit.unused
def adjust_gamma(
    img: Image.Image,
    gamma: float,
    gain: float = 1.0,
) -> Image.Image:
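    """Apply gamma correction, mapping each RGB channel value ``v`` to
    approximately ``255 * gain * (v / 255) ** gamma`` via a lookup table.
    """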

    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    if gamma < 0:
        raise ValueError('Gamma should be a non-negative real number')

    input_mode = img.mode
    img = img.convert('RGB')
    gamma_map = [(255 + 1 - 1e-3) * gain * pow(ele / 255., gamma) for ele in range(256)] * 3
    img = img.point(gamma_map)  # use PIL's point-function to accelerate this part

    img = img.convert(input_mode)
    return img


@torch.jit.unused
def pad(
    img: Image.Image,
    padding: Union[int, List[int], Tuple[int, ...]],
    fill: Optional[Union[float, List[float], Tuple[float, ...]]] = 0,
    padding_mode: str = "constant",
) -> Image.Image:
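    """Pad the image on all sides.

    ``padding`` may be a single int, a 2-element sequence (left/right,
    top/bottom) or a 4-element sequence (left, top, right, bottom).
    ``padding_mode`` is one of "constant", "edge", "reflect" or "symmetric";
    ``fill`` is only used for constant padding.
    """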

    if not _is_pil_image(img):
        raise TypeError("img should be PIL Image. Got {}".format(type(img)))

    if not isinstance(padding, (numbers.Number, tuple, list)):
        raise TypeError("Got inappropriate padding arg")
    if not isinstance(fill, (numbers.Number, str, tuple)):
        raise TypeError("Got inappropriate fill arg")
    if not isinstance(padding_mode, str):
        raise TypeError("Got inappropriate padding_mode arg")

    if isinstance(padding, list):
        padding = tuple(padding)

    if isinstance(padding, tuple) and len(padding) not in [1, 2, 4]:
        raise ValueError("Padding must be an int or a 1, 2, or 4 element tuple, not a " +
                         "{} element tuple".format(len(padding)))

    if isinstance(padding, tuple) and len(padding) == 1:
        # Compatibility with `functional_tensor.pad`
        padding = padding[0]

    if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
        raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")

    if padding_mode == "constant":
        opts = _parse_fill(fill, img, name="fill")
        if img.mode == "P":
            palette = img.getpalette()
            image = ImageOps.expand(img, border=padding, **opts)
            image.putpalette(palette)
            return image

        return ImageOps.expand(img, border=padding, **opts)
    else:
        if isinstance(padding, int):
            pad_left = pad_right = pad_top = pad_bottom = padding
        if isinstance(padding, tuple) and len(padding) == 2:
            pad_left = pad_right = padding[0]
            pad_top = pad_bottom = padding[1]
        if isinstance(padding, tuple) and len(padding) == 4:
            pad_left = padding[0]
            pad_top = padding[1]
            pad_right = padding[2]
            pad_bottom = padding[3]

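        # Negative padding values crop from the corresponding side instead of padding.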
        p = [pad_left, pad_top, pad_right, pad_bottom]
        cropping = -np.minimum(p, 0)

        if cropping.any():
            crop_left, crop_top, crop_right, crop_bottom = cropping
            img = img.crop((crop_left, crop_top, img.width - crop_right, img.height - crop_bottom))

        pad_left, pad_top, pad_right, pad_bottom = np.maximum(p, 0)

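        # For paletted images, pad the raw index array and re-attach the palette.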
        if img.mode == 'P':
            palette = img.getpalette()
            img = np.asarray(img)
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)
            img = Image.fromarray(img)
            img.putpalette(palette)
            return img

        img = np.asarray(img)
        # RGB image
        if len(img.shape) == 3:
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode)
        # Grayscale image
        if len(img.shape) == 2:
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)

        return Image.fromarray(img)


@torch.jit.unused
def crop(
    img: Image.Image,
    top: int,
    left: int,
    height: int,
    width: int,
) -> Image.Image:
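    """Crop the rectangle with top-left corner ``(left, top)`` and the given
    ``height`` and ``width`` out of the image.
    """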

    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    return img.crop((left, top, left + width, top + height))


@torch.jit.unused
def resize(
    img: Image.Image,
    size: Union[Sequence[int], int],
    interpolation: int = Image.BILINEAR,
    max_size: Optional[int] = None,
) -> Image.Image:
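    """Resize the image.

    If ``size`` is an int (or a 1-element sequence), the smaller edge is
    matched to it while preserving the aspect ratio, with the longer edge
    optionally capped at ``max_size``; otherwise ``size`` is (height, width).
    """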

    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    if not (isinstance(size, int) or (isinstance(size, Sequence) and len(size) in (1, 2))):
        raise TypeError('Got inappropriate size arg: {}'.format(size))

    if isinstance(size, Sequence) and len(size) == 1:
        size = size[0]
    if isinstance(size, int):
        w, h = img.size

        short, long = (w, h) if w <= h else (h, w)
        if short == size:
            return img

        new_short, new_long = size, int(size * long / short)

        if max_size is not None:
            if max_size <= size:
                raise ValueError(
                    f"max_size = {max_size} must be strictly greater than the requested "
                    f"size for the smaller edge size = {size}"
                )
            if new_long > max_size:
                new_short, new_long = int(max_size * new_short / new_long), max_size

        new_w, new_h = (new_short, new_long) if w <= h else (new_long, new_short)
        return img.resize((new_w, new_h), interpolation)
    else:
        if max_size is not None:
            raise ValueError(
                "max_size should only be passed if size specifies the length of the smaller edge, "
                "i.e. size should be an int or a sequence of length 1 in torchscript mode."
            )
        return img.resize(size[::-1], interpolation)


@torch.jit.unused
def _parse_fill(
    fill: Optional[Union[float, List[float], Tuple[float, ...]]],
    img: Image.Image,
    name: str = "fillcolor",
) -> Dict[str, Optional[Union[float, List[float], Tuple[float, ...]]]]:
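    """Build the fill-color keyword-argument dict for PIL transform calls,
    broadcasting a scalar ``fill`` across all bands of ``img``.
    """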

    # Process fill color for affine transforms
    num_bands = len(img.getbands())
    if fill is None:
        fill = 0
    if isinstance(fill, (int, float)) and num_bands > 1:
        fill = tuple([fill] * num_bands)
    if isinstance(fill, (list, tuple)):
        if len(fill) != num_bands:
            msg = ("The number of elements in 'fill' does not match the number of "
                   "bands of the image ({} != {})")
            raise ValueError(msg.format(len(fill), num_bands))

        fill = tuple(fill)

    return {name: fill}


@torch.jit.unused
def affine(
    img: Image.Image,
    matrix: List[float],
    interpolation: int = Image.NEAREST,
    fill: Optional[Union[float, List[float], Tuple[float, ...]]] = 0,
) -> Image.Image:
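    """Apply an affine transformation, keeping the output size equal to the
    input size. ``matrix`` holds the 6 coefficients of the inverse transform,
    as expected by PIL's ``Image.AFFINE``.
    """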

    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    output_size = img.size
    opts = _parse_fill(fill, img)
    return img.transform(output_size, Image.AFFINE, matrix, interpolation, **opts)


@torch.jit.unused
def rotate(
    img: Image.Image,
    angle: float,
    interpolation: int = Image.NEAREST,
    expand: bool = False,
    center: Optional[Tuple[int, int]] = None,
    fill: Optional[Union[float, List[float], Tuple[float, ...]]] = 0,
) -> Image.Image:
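    """Rotate the image by ``angle`` degrees counter-clockwise, optionally
    around ``center``, expanding the output canvas if ``expand`` is True.
    """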

    if not _is_pil_image(img):
        raise TypeError("img should be PIL Image. Got {}".format(type(img)))

    opts = _parse_fill(fill, img)
    return img.rotate(angle, interpolation, expand, center, **opts)


@torch.jit.unused
def perspective(
    img: Image.Image,
    perspective_coeffs: List[float],
    interpolation: int = Image.BICUBIC,
    fill: Optional[Union[float, List[float], Tuple[float, ...]]] = 0,
) -> Image.Image:
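    """Apply a perspective transform; ``perspective_coeffs`` holds the 8
    coefficients expected by PIL's ``Image.PERSPECTIVE``.
    """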

    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    opts = _parse_fill(fill, img)

    return img.transform(img.size, Image.PERSPECTIVE, perspective_coeffs, interpolation, **opts)


@torch.jit.unused
def to_grayscale(img: Image.Image, num_output_channels: int) -> Image.Image:
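    """Convert the image to grayscale; ``num_output_channels`` must be 1 or 3
    (the 3-channel variant replicates the single gray channel into RGB).
    """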
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    if num_output_channels == 1:
        img = img.convert('L')
    elif num_output_channels == 3:
        img = img.convert('L')
        np_img = np.array(img, dtype=np.uint8)
        np_img = np.dstack([np_img, np_img, np_img])
        img = Image.fromarray(np_img, 'RGB')
    else:
        raise ValueError('num_output_channels should be either 1 or 3')

    return img


@torch.jit.unused
def invert(img: Image.Image) -> Image.Image:
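    """Invert the pixel values of the image."""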
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    return ImageOps.invert(img)


@torch.jit.unused
def posterize(img: Image.Image, bits: int) -> Image.Image:
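    """Reduce the number of bits used for each color channel to ``bits``."""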
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    return ImageOps.posterize(img, bits)


@torch.jit.unused
def solarize(img: Image.Image, threshold: int) -> Image.Image:
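    """Invert all pixel values above ``threshold``."""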
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    return ImageOps.solarize(img, threshold)


@torch.jit.unused
def adjust_sharpness(img: Image.Image, sharpness_factor: float) -> Image.Image:
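    """Adjust sharpness; a factor of 0 gives a blurred image, 1 the original
    image, and larger values a sharpened image.
    """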
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    enhancer = ImageEnhance.Sharpness(img)
    img = enhancer.enhance(sharpness_factor)
    return img


@torch.jit.unused
def autocontrast(img: Image.Image) -> Image.Image:
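    """Maximize image contrast by remapping the darkest pixels to black and
    the lightest to white.
    """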
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    return ImageOps.autocontrast(img)


@torch.jit.unused
def equalize(img: Image.Image) -> Image.Image:
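    """Equalize the image histogram."""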
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    return ImageOps.equalize(img)