import torch
import sys
import math
from PIL import Image, ImageOps, ImageEnhance, __version__ as PILLOW_VERSION
try:
    import accimage
except ImportError:
    accimage = None
import numpy as np
from numpy import sin, cos, tan
import numbers
import collections
import warnings

if sys.version_info < (3, 3):
    Sequence = collections.Sequence
    Iterable = collections.Iterable
else:
    Sequence = collections.abc.Sequence
    Iterable = collections.abc.Iterable


def _is_pil_image(img):
    if accimage is not None:
        return isinstance(img, (Image.Image, accimage.Image))
    else:
        return isinstance(img, Image.Image)


def _is_numpy(img):
    return isinstance(img, np.ndarray)


def _is_numpy_image(img):
    return img.ndim in {2, 3}


def to_tensor(pic):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.

    See ``ToTensor`` for more details.

    Args:
        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
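
    Example (a minimal sketch with a synthetic image)::

        >>> from PIL import Image
        >>> t = to_tensor(Image.new('RGB', (4, 2)))  # 4 wide, 2 high
        >>> tuple(t.shape)  # channels-first (C, H, W), values scaled to [0, 1]
        (3, 2, 4)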
    """
    if not(_is_pil_image(pic) or _is_numpy(pic)):
        raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))

    if _is_numpy(pic) and not _is_numpy_image(pic):
        raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))

    if isinstance(pic, np.ndarray):
        # handle numpy array
        if pic.ndim == 2:
            pic = pic[:, :, None]

        img = torch.from_numpy(pic.transpose((2, 0, 1)))
        # backward compatibility
        if isinstance(img, torch.ByteTensor):
            return img.float().div(255)
        else:
            return img

    if accimage is not None and isinstance(pic, accimage.Image):
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
        pic.copyto(nppic)
        return torch.from_numpy(nppic)

    # handle PIL Image
    if pic.mode == 'I':
        img = torch.from_numpy(np.array(pic, np.int32, copy=False))
    elif pic.mode == 'I;16':
        img = torch.from_numpy(np.array(pic, np.int16, copy=False))
    elif pic.mode == 'F':
        img = torch.from_numpy(np.array(pic, np.float32, copy=False))
    elif pic.mode == '1':
        img = 255 * torch.from_numpy(np.array(pic, np.uint8, copy=False))
    else:
        img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))

    img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1)).contiguous()
    if isinstance(img, torch.ByteTensor):
        return img.float().div(255)
    else:
        return img


def to_pil_image(pic, mode=None):
    """Convert a tensor or an ndarray to PIL Image.

    See :class:`~torchvision.transforms.ToPILImage` for more details.

    Args:
        pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).

    .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes

    Returns:
        PIL Image: Image converted to PIL Image.
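
    Example (a minimal sketch from a synthetic float tensor)::

        >>> import torch
        >>> img = to_pil_image(torch.zeros(3, 8, 8))
        >>> img.mode, img.size
        ('RGB', (8, 8))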
    """
    if not(isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
        raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))

    elif isinstance(pic, torch.Tensor):
        if pic.ndimension() not in {2, 3}:
            raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndimension()))

        elif pic.ndimension() == 2:
            # if 2D image, add channel dimension (CHW)
            pic = pic.unsqueeze(0)

    elif isinstance(pic, np.ndarray):
        if pic.ndim not in {2, 3}:
            raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))

        elif pic.ndim == 2:
            # if 2D image, add channel dimension (HWC)
            pic = np.expand_dims(pic, 2)

    npimg = pic
    if isinstance(pic, torch.FloatTensor) and mode != 'F':
        pic = pic.mul(255).byte()
    if isinstance(pic, torch.Tensor):
        npimg = np.transpose(pic.numpy(), (1, 2, 0))

    if not isinstance(npimg, np.ndarray):
        raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +
                        'not {}'.format(type(npimg)))

    if npimg.shape[2] == 1:
        expected_mode = None
        npimg = npimg[:, :, 0]
        if npimg.dtype == np.uint8:
            expected_mode = 'L'
        elif npimg.dtype == np.int16:
            expected_mode = 'I;16'
        elif npimg.dtype == np.int32:
            expected_mode = 'I'
        elif npimg.dtype == np.float32:
            expected_mode = 'F'
        if mode is not None and mode != expected_mode:
            raise ValueError("Incorrect mode ({}) supplied for input type {}. Should be {}"
                             .format(mode, npimg.dtype, expected_mode))
        mode = expected_mode

    elif npimg.shape[2] == 2:
        permitted_2_channel_modes = ['LA']
        if mode is not None and mode not in permitted_2_channel_modes:
            raise ValueError("Only modes {} are supported for 2-channel inputs".format(permitted_2_channel_modes))

        if mode is None and npimg.dtype == np.uint8:
            mode = 'LA'

    elif npimg.shape[2] == 4:
        permitted_4_channel_modes = ['RGBA', 'CMYK', 'RGBX']
        if mode is not None and mode not in permitted_4_channel_modes:
            raise ValueError("Only modes {} are supported for 4-channel inputs".format(permitted_4_channel_modes))

        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGBA'
    else:
        permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']
        if mode is not None and mode not in permitted_3_channel_modes:
            raise ValueError("Only modes {} are supported for 3-channel inputs".format(permitted_3_channel_modes))
        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGB'

    if mode is None:
        raise TypeError('Input type {} is not supported'.format(npimg.dtype))

    return Image.fromarray(npimg, mode=mode)


def normalize(tensor, mean, std, inplace=False):
    """Normalize a tensor image with mean and standard deviation.

    .. note::
        This transform acts out of place by default, i.e., it does not mutate the input tensor.

    See :class:`~torchvision.transforms.Normalize` for more details.

    Args:
        tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace (bool, optional): Whether to perform the operation in place.

    Returns:
        Tensor: Normalized Tensor image.
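
    Example (a minimal sketch)::

        >>> import torch
        >>> x = torch.ones(3, 2, 2)
        >>> normalize(x, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])[0, 0, 0]
        tensor(1.)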
    """
    if not torch.is_tensor(tensor):
        raise TypeError('tensor should be a torch tensor. Got {}.'.format(type(tensor)))

    if tensor.ndimension() != 3:
        raise ValueError('Expected tensor to be a tensor image of size (C, H, W). Got tensor.size() = '
                         '{}.'.format(tensor.size()))

    if not inplace:
        tensor = tensor.clone()

    dtype = tensor.dtype
    mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)
    std = torch.as_tensor(std, dtype=dtype, device=tensor.device)
    if (std == 0).any():
        raise ValueError('std evaluated to zero after conversion to {}, leading to division by zero.'.format(dtype))
    if mean.ndim == 1:
        mean = mean[:, None, None]
    if std.ndim == 1:
        std = std[:, None, None]
    tensor.sub_(mean).div_(std)
    return tensor


def resize(img, size, interpolation=Image.BILINEAR):
    r"""Resize the input PIL Image to the given size.

    Args:
        img (PIL Image): Image to be resized.
        size (sequence or int): Desired output size. If size is a sequence like
            (h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number, maintaining
            the aspect ratio, i.e., if height > width, then the image will be rescaled to
            :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`
        interpolation (int, optional): Desired interpolation. Default is
            ``PIL.Image.BILINEAR``

    Returns:
        PIL Image: Resized image.
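
    Example (a minimal sketch; an int matches the smaller edge)::

        >>> from PIL import Image
        >>> resize(Image.new('RGB', (400, 200)), 100).size
        (200, 100)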
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    if not (isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)):
        raise TypeError('Got inappropriate size arg: {}'.format(size))

    if isinstance(size, int):
        w, h = img.size
        if (w <= h and w == size) or (h <= w and h == size):
            return img
        if w < h:
            ow = size
            oh = int(size * h / w)
            return img.resize((ow, oh), interpolation)
        else:
            oh = size
            ow = int(size * w / h)
            return img.resize((ow, oh), interpolation)
    else:
        return img.resize(size[::-1], interpolation)


def scale(*args, **kwargs):
    warnings.warn("The use of the transforms.Scale transform is deprecated, " +
                  "please use transforms.Resize instead.")
    return resize(*args, **kwargs)


def pad(img, padding, fill=0, padding_mode='constant'):
    r"""Pad the given PIL Image on all sides with specified padding mode and fill value.

    Args:
        img (PIL Image): Image to be padded.
        padding (int or tuple): Padding on each border. If a single int is provided this
            is used to pad all borders. If tuple of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a tuple of length 4 is provided
            this is the padding for the left, top, right and bottom borders
            respectively.
        fill: Pixel fill value for constant fill. Default is 0. If a tuple of
            length 3, it is used to fill R, G, B channels respectively.
            This value is only used when the padding_mode is constant
        padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.

            - constant: pads with a constant value, this value is specified with fill

            - edge: pads with the last value on the edge of the image

            - reflect: pads with reflection of image (without repeating the last value on the edge)

                       padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
                       will result in [3, 2, 1, 2, 3, 4, 3, 2]

            - symmetric: pads with reflection of image (repeating the last value on the edge)

                         padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
                         will result in [2, 1, 1, 2, 3, 4, 4, 3]

    Returns:
        PIL Image: Padded image.
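
    Example (a minimal sketch)::

        >>> from PIL import Image
        >>> pad(Image.new('L', (4, 4)), (1, 2)).size  # +1 left/right, +2 top/bottom
        (6, 8)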
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    if not isinstance(padding, (numbers.Number, tuple)):
        raise TypeError('Got inappropriate padding arg')
    if not isinstance(fill, (numbers.Number, str, tuple)):
        raise TypeError('Got inappropriate fill arg')
    if not isinstance(padding_mode, str):
        raise TypeError('Got inappropriate padding_mode arg')

    if isinstance(padding, Sequence) and len(padding) not in [2, 4]:
        raise ValueError("Padding must be an int or a 2, or 4 element tuple, not a " +
                         "{} element tuple".format(len(padding)))

    assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \
        'Padding mode should be either constant, edge, reflect or symmetric'

    if padding_mode == 'constant':
        if img.mode == 'P':
            palette = img.getpalette()
            image = ImageOps.expand(img, border=padding, fill=fill)
            image.putpalette(palette)
            return image

        return ImageOps.expand(img, border=padding, fill=fill)
    else:
        if isinstance(padding, int):
            pad_left = pad_right = pad_top = pad_bottom = padding
        if isinstance(padding, Sequence) and len(padding) == 2:
            pad_left = pad_right = padding[0]
            pad_top = pad_bottom = padding[1]
        if isinstance(padding, Sequence) and len(padding) == 4:
            pad_left = padding[0]
            pad_top = padding[1]
            pad_right = padding[2]
            pad_bottom = padding[3]

        if img.mode == 'P':
            palette = img.getpalette()
            img = np.asarray(img)
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)
            img = Image.fromarray(img)
            img.putpalette(palette)
            return img

        img = np.asarray(img)
        # RGB image
        if len(img.shape) == 3:
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode)
        # Grayscale image
        if len(img.shape) == 2:
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)

        return Image.fromarray(img)


def crop(img, top, left, height, width):
    """Crop the given PIL Image.
    
    Args:
        img (PIL Image): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.
    Returns:
        PIL Image: Cropped image.
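
    Example (a minimal sketch)::

        >>> from PIL import Image
        >>> crop(Image.new('RGB', (10, 10)), top=2, left=3, height=4, width=5).size
        (5, 4)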
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    return img.crop((left, top, left + width, top + height))


def center_crop(img, output_size):
    """Crop the given PIL Image and resize it to desired size.

        Args:
            img (PIL Image): Image to be cropped. (0,0) denotes the top left corner of the image.
            output_size (sequence or int): (height, width) of the crop box. If int,
                it is used for both directions
        Returns:
            PIL Image: Cropped image.
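
    Example (a minimal sketch)::

        >>> from PIL import Image
        >>> center_crop(Image.new('RGB', (10, 8)), 4).size
        (4, 4)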
        """
    if isinstance(output_size, numbers.Number):
        output_size = (int(output_size), int(output_size))
    image_width, image_height = img.size
    crop_height, crop_width = output_size
    crop_top = int(round((image_height - crop_height) / 2.))
    crop_left = int(round((image_width - crop_width) / 2.))
    return crop(img, crop_top, crop_left, crop_height, crop_width)


def resized_crop(img, top, left, height, width, size, interpolation=Image.BILINEAR):
    """Crop the given PIL Image and resize it to desired size.

    Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.

    Args:
        img (PIL Image): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.
        size (sequence or int): Desired output size. Same semantics as ``resize``.
        interpolation (int, optional): Desired interpolation. Default is
            ``PIL.Image.BILINEAR``.
    Returns:
        PIL Image: Cropped image.
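
    Example (a minimal sketch)::

        >>> from PIL import Image
        >>> resized_crop(Image.new('RGB', (10, 10)), 0, 0, 5, 5, size=8).size
        (8, 8)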
    """
    assert _is_pil_image(img), 'img should be PIL Image'
    img = crop(img, top, left, height, width)
    img = resize(img, size, interpolation)
    return img


def hflip(img):
    """Horizontally flip the given PIL Image.

    Args:
        img (PIL Image): Image to be flipped.

    Returns:
        PIL Image: Horizontally flipped image.
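
    Example (a minimal sketch; a pixel set on the left reappears on the right)::

        >>> from PIL import Image
        >>> im = Image.new('L', (2, 1))
        >>> im.putpixel((0, 0), 255)
        >>> hflip(im).getpixel((1, 0))
        255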
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    return img.transpose(Image.FLIP_LEFT_RIGHT)


def _parse_fill(fill, img, min_pil_version):
    """Helper function to get the fill color for rotate and perspective transforms.

    Args:
        fill (n-tuple or int or float): Pixel fill value for area outside the transformed
            image. If int or float, the value is used for all bands.
            Defaults to 0 for all bands.
        img (PIL Image): Image to be filled.
        min_pil_version (str): The minimum PILLOW version for when the ``fillcolor`` option 
            was first introduced in the calling function. (e.g. rotate->5.2.0, perspective->5.0.0)

    Returns:
        dict: kwarg for ``fillcolor``
    """
    major, minor = (int(v) for v in PILLOW_VERSION.split('.')[:2])
    req_major, req_minor = (int(v) for v in min_pil_version.split('.')[:2])
    # Compare version numbers numerically; a plain string comparison misorders Pillow >= 10.
    if (major, minor) < (req_major, req_minor):
        if fill is None:
            return {}
        else:
            msg = ("The option to fill the area outside the transformed image "
                   "requires pillow>={}")
            raise RuntimeError(msg.format(min_pil_version))

    num_bands = len(img.getbands())
    if fill is None:
        fill = 0
    if isinstance(fill, (int, float)) and num_bands > 1:
        fill = tuple([fill] * num_bands)
    if not isinstance(fill, (int, float)) and len(fill) != num_bands:
        msg = ("The number of elements in 'fill' does not match the number of "
               "bands of the image ({} != {})")
        raise ValueError(msg.format(len(fill), num_bands))

    return {"fillcolor": fill}


def _get_perspective_coeffs(startpoints, endpoints):
    """Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms.

    In a perspective transform, each pixel (x, y) in the original image is transformed as,
     (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) )

    Args:
        startpoints: List containing [top-left, top-right, bottom-right, bottom-left] of the original image.
        endpoints: List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.
    Returns:
        octuple (a, b, c, d, e, f, g, h) for transforming each pixel.
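
    Example (a minimal sketch; the identity mapping yields the identity coefficients)::

        >>> import torch
        >>> pts = [[0, 0], [3, 0], [3, 3], [0, 3]]
        >>> coeffs = _get_perspective_coeffs(pts, pts)
        >>> torch.allclose(torch.tensor(coeffs), torch.tensor([1., 0., 0., 0., 1., 0., 0., 0.]), atol=1e-4)
        True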
    """
    matrix = []

    for p1, p2 in zip(endpoints, startpoints):
        matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
        matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])

    A = torch.tensor(matrix, dtype=torch.float)
    B = torch.tensor(startpoints, dtype=torch.float).view(8)
    res = torch.lstsq(B, A)[0]
    return res.squeeze_(1).tolist()


def perspective(img, startpoints, endpoints, interpolation=Image.BICUBIC, fill=None):
    """Perform perspective transform of the given PIL Image.

    Args:
        img (PIL Image): Image to be transformed.
        startpoints: List containing [top-left, top-right, bottom-right, bottom-left] of the original image.
        endpoints: List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.
        interpolation: Desired interpolation. Default is ``PIL.Image.BICUBIC``.
        fill (n-tuple or int or float): Pixel fill value for the area outside the transformed
            image. If int or float, the value is used for all bands.
            This option is only available for ``pillow>=5.0.0``.

    Returns:
        PIL Image:  Perspectively transformed Image.
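
    Example (a minimal sketch; mapping the corners onto themselves)::

        >>> from PIL import Image
        >>> pts = [[0, 0], [7, 0], [7, 7], [0, 7]]
        >>> perspective(Image.new('RGB', (8, 8)), pts, pts).size
        (8, 8)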
    """

    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    opts = _parse_fill(fill, img, '5.0.0')

    coeffs = _get_perspective_coeffs(startpoints, endpoints)
    return img.transform(img.size, Image.PERSPECTIVE, coeffs, interpolation, **opts)


def vflip(img):
    """Vertically flip the given PIL Image.

    Args:
        img (PIL Image): Image to be flipped.

    Returns:
        PIL Image:  Vertically flipped image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    return img.transpose(Image.FLIP_TOP_BOTTOM)


def five_crop(img, size):
    """Crop the given PIL Image into four corners and the central crop.

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
       img (PIL Image): Image to be cropped.
       size (sequence or int): Desired output size of the crop. If size is an
           int instead of sequence like (h, w), a square crop (size, size) is
           made.

    Returns:
       tuple: tuple (tl, tr, bl, br, center)
                Corresponding top left, top right, bottom left, bottom right and center crop.
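
    Example (a minimal sketch)::

        >>> from PIL import Image
        >>> crops = five_crop(Image.new('RGB', (8, 8)), 4)
        >>> len(crops), crops[0].size
        (5, (4, 4))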
    """
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    else:
        assert len(size) == 2, "Please provide only two dimensions (h, w) for size."

    image_width, image_height = img.size
    crop_height, crop_width = size
    if crop_width > image_width or crop_height > image_height:
        msg = "Requested crop size {} is bigger than input size {}"
        raise ValueError(msg.format(size, (image_height, image_width)))

    tl = img.crop((0, 0, crop_width, crop_height))
    tr = img.crop((image_width - crop_width, 0, image_width, crop_height))
    bl = img.crop((0, image_height - crop_height, crop_width, image_height))
    br = img.crop((image_width - crop_width, image_height - crop_height,
                   image_width, image_height))
    center = center_crop(img, (crop_height, crop_width))
    return (tl, tr, bl, br, center)


def ten_crop(img, size, vertical_flip=False):
    r"""Crop the given PIL Image into four corners and the central crop plus the
        flipped version of these (horizontal flipping is used by default).

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
       img (PIL Image): Image to be cropped.
       size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made.
       vertical_flip (bool): Use vertical flipping instead of horizontal

    Returns:
       tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
                Corresponding top left, top right, bottom left, bottom right and center crop
                and same for the flipped image.
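
    Example (a minimal sketch)::

        >>> from PIL import Image
        >>> len(ten_crop(Image.new('RGB', (8, 8)), 4))
        10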
    """
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    else:
        assert len(size) == 2, "Please provide only two dimensions (h, w) for size."

    first_five = five_crop(img, size)

    if vertical_flip:
        img = vflip(img)
    else:
        img = hflip(img)

    second_five = five_crop(img, size)
    return first_five + second_five


def adjust_brightness(img, brightness_factor):
    """Adjust brightness of an Image.

    Args:
        img (PIL Image): PIL Image to be adjusted.
        brightness_factor (float):  How much to adjust the brightness. Can be
            any non negative number. 0 gives a black image, 1 gives the
            original image while 2 increases the brightness by a factor of 2.

    Returns:
        PIL Image: Brightness adjusted image.
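
    Example (a minimal sketch; factor 0 yields a black image)::

        >>> from PIL import Image
        >>> im = Image.new('RGB', (2, 2), (100, 150, 200))
        >>> adjust_brightness(im, 0).getpixel((0, 0))
        (0, 0, 0)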
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    enhancer = ImageEnhance.Brightness(img)
    img = enhancer.enhance(brightness_factor)
    return img


def adjust_contrast(img, contrast_factor):
    """Adjust contrast of an Image.

    Args:
        img (PIL Image): PIL Image to be adjusted.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.

    Returns:
        PIL Image: Contrast adjusted image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    enhancer = ImageEnhance.Contrast(img)
    img = enhancer.enhance(contrast_factor)
    return img


def adjust_saturation(img, saturation_factor):
    """Adjust color saturation of an image.

    Args:
        img (PIL Image): PIL Image to be adjusted.
        saturation_factor (float):  How much to adjust the saturation. 0 will
            give a black and white image, 1 will give the original image while
            2 will enhance the saturation by a factor of 2.

    Returns:
        PIL Image: Saturation adjusted image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    enhancer = ImageEnhance.Color(img)
    img = enhancer.enhance(saturation_factor)
    return img


def adjust_hue(img, hue_factor):
    """Adjust hue of an image.

    The image hue is adjusted by converting the image to HSV and
    cyclically shifting the intensities in the hue channel (H).
    The image is then converted back to original image mode.

    `hue_factor` is the amount of shift in H channel and must be in the
    interval `[-0.5, 0.5]`.

    See `Hue`_ for more details.

    .. _Hue: https://en.wikipedia.org/wiki/Hue

    Args:
        img (PIL Image): PIL Image to be adjusted.
        hue_factor (float):  How much to shift the hue channel. Should be in
            [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
            HSV space in positive and negative direction respectively.
            0 means no shift. Therefore, both -0.5 and 0.5 will give an image
            with complementary colors while 0 gives the original image.

    Returns:
        PIL Image: Hue adjusted image.
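
    Example (a minimal sketch; a 0.5 shift pushes red toward cyan)::

        >>> from PIL import Image
        >>> r, g, b = adjust_hue(Image.new('RGB', (1, 1), (255, 0, 0)), 0.5).getpixel((0, 0))
        >>> r < g and r < b
        True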
    """
    if not(-0.5 <= hue_factor <= 0.5):
        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))

    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    input_mode = img.mode
    if input_mode in {'L', '1', 'I', 'F'}:
        return img

    h, s, v = img.convert('HSV').split()

    np_h = np.array(h, dtype=np.uint8)
    # uint8 addition takes care of rotation across boundaries
    with np.errstate(over='ignore'):
        np_h += np.uint8(hue_factor * 255)
    h = Image.fromarray(np_h, 'L')

    img = Image.merge('HSV', (h, s, v)).convert(input_mode)
    return img


def adjust_gamma(img, gamma, gain=1):
    r"""Perform gamma correction on an image.

    Also known as Power Law Transform. Intensities in RGB mode are adjusted
    based on the following equation:

    .. math::
        I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}

    See `Gamma Correction`_ for more details.

    .. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction

    Args:
        img (PIL Image): PIL Image to be adjusted.
        gamma (float): Non negative real number, same as :math:`\gamma` in the equation.
            gamma larger than 1 makes the shadows darker,
            while gamma smaller than 1 makes dark regions lighter.
        gain (float): The constant multiplier.
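
    Example (a minimal sketch; gamma 2 darkens midtones)::

        >>> from PIL import Image
        >>> adjust_gamma(Image.new('RGB', (1, 1), (128, 128, 128)), 2).getpixel((0, 0))
        (64, 64, 64)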
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    if gamma < 0:
        raise ValueError('Gamma should be a non-negative real number')

    input_mode = img.mode
    img = img.convert('RGB')

    gamma_map = [255 * gain * pow(ele / 255., gamma) for ele in range(256)] * 3
    img = img.point(gamma_map)  # use PIL's point-function to accelerate this part

    img = img.convert(input_mode)
    return img


def rotate(img, angle, resample=False, expand=False, center=None, fill=None):
    """Rotate the image by angle.


    Args:
        img (PIL Image): PIL Image to be rotated.
        angle (float or int): Rotation angle in degrees, counter-clockwise.
        resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):
            An optional resampling filter. See `filters`_ for more information.
            If omitted, or if the image has mode "1" or "P", it is set to ``PIL.Image.NEAREST``.
        expand (bool, optional): Optional expansion flag.
            If true, expands the output image to make it large enough to hold the entire rotated image.
            If false or omitted, make the output image the same size as the input image.
            Note that the expand flag assumes rotation around the center and no translation.
        center (2-tuple, optional): Optional center of rotation.
            Origin is the upper left corner.
            Default is the center of the image.
        fill (n-tuple or int or float): Pixel fill value for the area outside the rotated
            image. If int or float, the value is used for all bands.
            Defaults to 0 for all bands. This option is only available for ``pillow>=5.2.0``.

    .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
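
    Example (a minimal sketch; ``expand`` grows the canvas to fit)::

        >>> from PIL import Image
        >>> rotate(Image.new('RGB', (4, 8)), 90, expand=True).size
        (8, 4)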

    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    opts = _parse_fill(fill, img, '5.2.0')

    return img.rotate(angle, resample, expand, center, **opts)


def _get_inverse_affine_matrix(center, angle, translate, scale, shear):
    # Helper method to compute inverse matrix for affine transformation

    # As it is explained in PIL.Image.rotate
    # We need to compute the INVERSE of the affine transformation matrix: M = T * C * RSS * C^-1
    # where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
    #       C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
    #       RSS is rotation with scale and shear matrix
    #       RSS(a, s, (sx, sy)) =
    #       = R(a) * S(s) * SHy(sy) * SHx(sx)
    #       = [ s*cos(a - sy)/cos(sy), s*(-cos(a - sy)*tan(sx)/cos(sy) - sin(a)), 0 ]
    #         [ s*sin(a - sy)/cos(sy), s*(-sin(a - sy)*tan(sx)/cos(sy) + cos(a)), 0 ]
    #         [ 0                    , 0                                        , 1 ]
    #
    # where R is a rotation matrix, S is a scaling matrix, and SHx and SHy are the shears:
    # SHx(s) = [1, -tan(s)] and SHy(s) = [1      , 0]
    #          [0, 1      ]              [-tan(s), 1]
    #
    # Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1

    if isinstance(shear, numbers.Number):
        shear = [shear, 0]

    if not (isinstance(shear, (tuple, list)) and len(shear) == 2):
        raise ValueError(
            "Shear should be a single value or a tuple/list containing " +
            "two values. Got {}".format(shear))

    rot = math.radians(angle)
    sx, sy = [math.radians(s) for s in shear]

    cx, cy = center
    tx, ty = translate

    # RSS without scaling
    a = cos(rot - sy) / cos(sy)
    b = -cos(rot - sy) * tan(sx) / cos(sy) - sin(rot)
    c = sin(rot - sy) / cos(sy)
    d = -sin(rot - sy) * tan(sx) / cos(sy) + cos(rot)

    # Inverted rotation matrix with scale and shear
    # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
    M = [d, -b, 0,
         -c, a, 0]
    M = [x / scale for x in M]

    # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
    M[2] += M[0] * (-cx - tx) + M[1] * (-cy - ty)
    M[5] += M[3] * (-cx - tx) + M[4] * (-cy - ty)

    # Apply center translation: C * RSS^-1 * C^-1 * T^-1
    M[2] += cx
    M[5] += cy
    return M


def affine(img, angle, translate, scale, shear, resample=0, fillcolor=None):
    """Apply affine transformation on the image keeping image center invariant

    Args:
        img (PIL Image): PIL Image to be rotated.
        angle (float or int): rotation angle in degrees between -180 and 180, clockwise direction.
        translate (list or tuple of integers): horizontal and vertical translations (post-rotation translation)
        scale (float): overall scale
        shear (float or tuple or list): shear angle value in degrees between -180 and 180, clockwise direction.
            If a tuple or list is specified, the first value corresponds to a shear parallel to the x axis, while
            the second value corresponds to a shear parallel to the y axis.
        resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):
            An optional resampling filter.
            See `filters`_ for more information.
            If omitted, or if the image has mode "1" or "P", it is set to ``PIL.Image.NEAREST``.
        fillcolor (int): Optional fill color for the area outside the transform in the output image. (Pillow>=5.0.0)
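
    Example (a minimal sketch; a pure translation keeps the canvas size)::

        >>> from PIL import Image
        >>> affine(Image.new('RGB', (8, 8)), angle=0, translate=(2, 0), scale=1.0, shear=0).size
        (8, 8)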
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
        "Argument translate should be a list or tuple of length 2"

    assert scale > 0.0, "Argument scale should be positive"

    output_size = img.size
    center = (img.size[0] * 0.5 + 0.5, img.size[1] * 0.5 + 0.5)
    matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)
    # fillcolor is only supported by Pillow >= 5.0; compare the major version numerically.
    kwargs = {"fillcolor": fillcolor} if int(PILLOW_VERSION.split('.')[0]) >= 5 else {}
    return img.transform(output_size, Image.AFFINE, matrix, resample, **kwargs)


def to_grayscale(img, num_output_channels=1):
    """Convert image to grayscale version of image.

    Args:
        img (PIL Image): Image to be converted to grayscale.
        num_output_channels (int): Number of channels of the output image (1 or 3). Default is 1.

    Returns:
        PIL Image: Grayscale version of the image.
            if num_output_channels = 1 : returned image is single channel

            if num_output_channels = 3 : returned image is 3 channel with r = g = b
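
    Example (a minimal sketch)::

        >>> from PIL import Image
        >>> to_grayscale(Image.new('RGB', (2, 2))).mode
        'L'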
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    if num_output_channels == 1:
        img = img.convert('L')
    elif num_output_channels == 3:
        img = img.convert('L')
        np_img = np.array(img, dtype=np.uint8)
        np_img = np.dstack([np_img, np_img, np_img])
        img = Image.fromarray(np_img, 'RGB')
    else:
        raise ValueError('num_output_channels should be either 1 or 3')

    return img


def erase(img, i, j, h, w, v, inplace=False):
    """ Erase the input Tensor Image with given value.

    Args:
        img (Tensor Image): Tensor image of size (C, H, W) to be erased
        i (int): i in (i, j), i.e. the vertical component of the upper left corner.
        j (int): j in (i, j), i.e. the horizontal component of the upper left corner.
        h (int): Height of the erased region.
        w (int): Width of the erased region.
        v: Erasing value.
        inplace (bool, optional): Whether to perform the operation in place. Default is False.

    Returns:
        Tensor Image: Erased image.
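
    Example (a minimal sketch)::

        >>> import torch
        >>> img = torch.ones(3, 4, 4)
        >>> erase(img, 1, 1, 2, 2, v=0.)[0, 1, 1]
        tensor(0.)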
    """
    if not isinstance(img, torch.Tensor):
        raise TypeError('img should be Tensor Image. Got {}'.format(type(img)))

    if not inplace:
        img = img.clone()

    img[:, i:i + h, j:j + w] = v
    return img