import torch
import math
from PIL import Image, ImageOps, ImageEnhance, __version__ as PILLOW_VERSION
try:
    import accimage
except ImportError:
    accimage = None
import numpy as np
from numpy import sin, cos, tan
import numbers
from collections.abc import Sequence, Iterable
import warnings


def _is_pil_image(img):
    if accimage is not None:
        return isinstance(img, (Image.Image, accimage.Image))
    else:
        return isinstance(img, Image.Image)


def _is_numpy(img):
    return isinstance(img, np.ndarray)


def _is_numpy_image(img):
    return img.ndim in {2, 3}


def to_tensor(pic):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.

    See ``ToTensor`` for more details.

    Args:
        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
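
    Example:
        Illustrative sketch only; ``example.png`` is a hypothetical path and
        an 8-bit RGB image is assumed.

        >>> img = Image.open('example.png').convert('RGB')
        >>> t = to_tensor(img)   # CHW float tensor, values scaled to [0, 1]
        >>> t.shape[0], t.dtype
        (3, torch.float32)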
    """
    if not(_is_pil_image(pic) or _is_numpy(pic)):
        raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))

    if _is_numpy(pic) and not _is_numpy_image(pic):
        raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))

    if isinstance(pic, np.ndarray):
        # handle numpy array
        if pic.ndim == 2:
            pic = pic[:, :, None]

        img = torch.from_numpy(pic.transpose((2, 0, 1)))
        # backward compatibility
        if isinstance(img, torch.ByteTensor):
            return img.float().div(255)
        else:
            return img

    if accimage is not None and isinstance(pic, accimage.Image):
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
        pic.copyto(nppic)
        return torch.from_numpy(nppic)

    # handle PIL Image
    if pic.mode == 'I':
        img = torch.from_numpy(np.array(pic, np.int32, copy=False))
    elif pic.mode == 'I;16':
        img = torch.from_numpy(np.array(pic, np.int16, copy=False))
    elif pic.mode == 'F':
        img = torch.from_numpy(np.array(pic, np.float32, copy=False))
    elif pic.mode == '1':
        img = 255 * torch.from_numpy(np.array(pic, np.uint8, copy=False))
    else:
        img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))

    img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1)).contiguous()
    if isinstance(img, torch.ByteTensor):
        return img.float().div(255)
    else:
        return img


def pil_to_tensor(pic):
    """Convert a ``PIL Image`` to a tensor of the same type.

    See ``AsTensor`` for more details.

    Args:
        pic (PIL Image): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
    """
    if not(_is_pil_image(pic)):
        raise TypeError('pic should be PIL Image. Got {}'.format(type(pic)))

    if accimage is not None and isinstance(pic, accimage.Image):
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
        pic.copyto(nppic)
        return torch.as_tensor(nppic)

    # handle PIL Image
    img = torch.as_tensor(np.asarray(pic))
    img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1))
    return img


def to_pil_image(pic, mode=None):
    """Convert a tensor or an ndarray to PIL Image.

    See :class:`~torchvision.transforms.ToPILImage` for more details.

    Args:
        pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).

    .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes

    Returns:
        PIL Image: Image converted to PIL Image.
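
    Example:
        A minimal sketch using a random tensor in place of a real image.

        >>> t = torch.rand(3, 16, 16)   # CHW float tensor in [0, 1]
        >>> pil = to_pil_image(t)
        >>> pil.mode, pil.size
        ('RGB', (16, 16))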
    """
    if not(isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
        raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))

    elif isinstance(pic, torch.Tensor):
        if pic.ndimension() not in {2, 3}:
            raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndimension()))

        elif pic.ndimension() == 2:
            # if 2D image, add channel dimension (CHW)
            pic = pic.unsqueeze(0)

    elif isinstance(pic, np.ndarray):
        if pic.ndim not in {2, 3}:
            raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))

        elif pic.ndim == 2:
            # if 2D image, add channel dimension (HWC)
            pic = np.expand_dims(pic, 2)

    npimg = pic
    if isinstance(pic, torch.FloatTensor) and mode != 'F':
        pic = pic.mul(255).byte()
    if isinstance(pic, torch.Tensor):
        npimg = np.transpose(pic.numpy(), (1, 2, 0))

    if not isinstance(npimg, np.ndarray):
        raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +
                        'not {}'.format(type(npimg)))

    if npimg.shape[2] == 1:
        expected_mode = None
        npimg = npimg[:, :, 0]
        if npimg.dtype == np.uint8:
            expected_mode = 'L'
        elif npimg.dtype == np.int16:
            expected_mode = 'I;16'
        elif npimg.dtype == np.int32:
            expected_mode = 'I'
        elif npimg.dtype == np.float32:
            expected_mode = 'F'
        if mode is not None and mode != expected_mode:
            raise ValueError("Incorrect mode ({}) supplied for input type {}. Should be {}"
                             .format(mode, npimg.dtype, expected_mode))
        mode = expected_mode

    elif npimg.shape[2] == 2:
        permitted_2_channel_modes = ['LA']
        if mode is not None and mode not in permitted_2_channel_modes:
            raise ValueError("Only modes {} are supported for 2D inputs".format(permitted_2_channel_modes))

        if mode is None and npimg.dtype == np.uint8:
            mode = 'LA'

    elif npimg.shape[2] == 4:
        permitted_4_channel_modes = ['RGBA', 'CMYK', 'RGBX']
        if mode is not None and mode not in permitted_4_channel_modes:
            raise ValueError("Only modes {} are supported for 4D inputs".format(permitted_4_channel_modes))

        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGBA'
    else:
        permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']
        if mode is not None and mode not in permitted_3_channel_modes:
            raise ValueError("Only modes {} are supported for 3D inputs".format(permitted_3_channel_modes))
        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGB'

    if mode is None:
        raise TypeError('Input type {} is not supported'.format(npimg.dtype))

    return Image.fromarray(npimg, mode=mode)


def normalize(tensor, mean, std, inplace=False):
    """Normalize a tensor image with mean and standard deviation.

    .. note::
        This transform acts out of place by default, i.e., it does not mutate the input tensor.

    See :class:`~torchvision.transforms.Normalize` for more details.

    Args:
        tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace (bool, optional): Bool to make this operation in-place.

    Returns:
        Tensor: Normalized Tensor image.
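
    Example:
        A minimal sketch; the mean/std values below are the usual ImageNet
        statistics, used here purely as an illustration.

        >>> t = torch.rand(3, 8, 8)
        >>> out = normalize(t, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        >>> out.shape
        torch.Size([3, 8, 8])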
    """
    if not torch.is_tensor(tensor):
        raise TypeError('tensor should be a torch tensor. Got {}.'.format(type(tensor)))

    if tensor.ndimension() != 3:
        raise ValueError('Expected tensor to be a tensor image of size (C, H, W). Got tensor.size() = '
                         '{}.'.format(tensor.size()))

    if not inplace:
        tensor = tensor.clone()

    dtype = tensor.dtype
    mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)
    std = torch.as_tensor(std, dtype=dtype, device=tensor.device)
    if (std == 0).any():
        raise ValueError('std evaluated to zero after conversion to {}, leading to division by zero.'.format(dtype))
    if mean.ndim == 1:
        mean = mean[:, None, None]
    if std.ndim == 1:
        std = std[:, None, None]
    tensor.sub_(mean).div_(std)
    return tensor


def resize(img, size, interpolation=Image.BILINEAR):
    r"""Resize the input PIL Image to the given size.

    Args:
        img (PIL Image): Image to be resized.
        size (sequence or int): Desired output size. If size is a sequence like
            (h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number, maintaining
            the aspect ratio, i.e., if height > width, then the image will be rescaled to
            :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`
        interpolation (int, optional): Desired interpolation. Default is
            ``PIL.Image.BILINEAR``

    Returns:
        PIL Image: Resized image.
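
    Example:
        Illustrative sketch on a synthetic image: an int matches the smaller
        edge, while a (h, w) sequence is used as-is.

        >>> img = Image.new('RGB', (640, 480))
        >>> resize(img, 240).size          # smaller edge -> 240
        (320, 240)
        >>> resize(img, (100, 200)).size   # (h, w); PIL reports (w, h)
        (200, 100)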
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    if not (isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)):
        raise TypeError('Got inappropriate size arg: {}'.format(size))

    if isinstance(size, int):
        w, h = img.size
        if (w <= h and w == size) or (h <= w and h == size):
            return img
        if w < h:
            ow = size
            oh = int(size * h / w)
            return img.resize((ow, oh), interpolation)
        else:
            oh = size
            ow = int(size * w / h)
            return img.resize((ow, oh), interpolation)
    else:
        return img.resize(size[::-1], interpolation)


def scale(*args, **kwargs):
    warnings.warn("The use of the transforms.Scale transform is deprecated, " +
                  "please use transforms.Resize instead.")
    return resize(*args, **kwargs)


def pad(img, padding, fill=0, padding_mode='constant'):
    r"""Pad the given PIL Image on all sides with specified padding mode and fill value.

    Args:
        img (PIL Image): Image to be padded.
        padding (int or tuple): Padding on each border. If a single int is provided this
            is used to pad all borders. If tuple of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a tuple of length 4 is provided
            this is the padding for the left, top, right and bottom borders
            respectively.
        fill: Pixel fill value for constant fill. Default is 0. If a tuple of
            length 3, it is used to fill R, G, B channels respectively.
            This value is only used when the padding_mode is constant
        padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.

            - constant: pads with a constant value, this value is specified with fill

            - edge: pads with the last value on the edge of the image

            - reflect: pads with reflection of image (without repeating the last value on the edge)

                       padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
                       will result in [3, 2, 1, 2, 3, 4, 3, 2]

            - symmetric: pads with reflection of image (repeating the last value on the edge)

                         padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
                         will result in [2, 1, 1, 2, 3, 4, 4, 3]

    Returns:
        PIL Image: Padded image.
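
    Example:
        Illustrative sketch of the three accepted padding layouts.

        >>> img = Image.new('RGB', (10, 10))
        >>> pad(img, 2).size        # same padding on all four borders
        (14, 14)
        >>> pad(img, (1, 2)).size   # left/right=1, top/bottom=2
        (12, 14)
        >>> pad(img, (1, 2, 3, 4), padding_mode='reflect').size
        (14, 16)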
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    if not isinstance(padding, (numbers.Number, tuple)):
        raise TypeError('Got inappropriate padding arg')
    if not isinstance(fill, (numbers.Number, str, tuple)):
        raise TypeError('Got inappropriate fill arg')
    if not isinstance(padding_mode, str):
        raise TypeError('Got inappropriate padding_mode arg')

    if isinstance(padding, Sequence) and len(padding) not in [2, 4]:
        raise ValueError("Padding must be an int or a 2- or 4-element tuple, not a " +
                         "{}-element tuple".format(len(padding)))

    assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'], \
        'Padding mode should be either constant, edge, reflect or symmetric'

    if padding_mode == 'constant':
        if isinstance(fill, numbers.Number):
            fill = (fill,) * len(img.getbands())
        if len(fill) != len(img.getbands()):
            raise ValueError('fill should have the same number of elements '
                             'as the number of channels in the image '
                             '({}), got {} instead'.format(len(img.getbands()), len(fill)))
        if img.mode == 'P':
            palette = img.getpalette()
            image = ImageOps.expand(img, border=padding, fill=fill)
            image.putpalette(palette)
            return image

        return ImageOps.expand(img, border=padding, fill=fill)
    else:
        if isinstance(padding, int):
            pad_left = pad_right = pad_top = pad_bottom = padding
        if isinstance(padding, Sequence) and len(padding) == 2:
            pad_left = pad_right = padding[0]
            pad_top = pad_bottom = padding[1]
        if isinstance(padding, Sequence) and len(padding) == 4:
            pad_left = padding[0]
            pad_top = padding[1]
            pad_right = padding[2]
            pad_bottom = padding[3]

        if img.mode == 'P':
            palette = img.getpalette()
            img = np.asarray(img)
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)
            img = Image.fromarray(img)
            img.putpalette(palette)
            return img

        img = np.asarray(img)
        # RGB image
        if len(img.shape) == 3:
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode)
        # Grayscale image
        if len(img.shape) == 2:
            img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode)

        return Image.fromarray(img)


def crop(img, top, left, height, width):
    """Crop the given PIL Image.

    Args:
        img (PIL Image): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.

    Returns:
        PIL Image: Cropped image.
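
    Example:
        Illustrative sketch: a 20x30 (height x width) region whose top-left
        corner sits at row 5, column 10.

        >>> img = Image.new('RGB', (100, 100))
        >>> crop(img, 5, 10, 20, 30).size   # PIL reports (width, height)
        (30, 20)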
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    return img.crop((left, top, left + width, top + height))


def center_crop(img, output_size):
    """Crop the given PIL Image and resize it to desired size.

    Args:
        img (PIL Image): Image to be cropped. (0,0) denotes the top left corner of the image.
        output_size (sequence or int): (height, width) of the crop box. If int,
            it is used for both directions
    Returns:
        PIL Image: Cropped image.
    """
    if isinstance(output_size, numbers.Number):
        output_size = (int(output_size), int(output_size))
    image_width, image_height = img.size
    crop_height, crop_width = output_size
    crop_top = int(round((image_height - crop_height) / 2.))
    crop_left = int(round((image_width - crop_width) / 2.))
    return crop(img, crop_top, crop_left, crop_height, crop_width)


def resized_crop(img, top, left, height, width, size, interpolation=Image.BILINEAR):
    """Crop the given PIL Image and resize it to desired size.

    Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.

    Args:
        img (PIL Image): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.
        size (sequence or int): Desired output size. Same semantics as ``resize``.
        interpolation (int, optional): Desired interpolation. Default is
            ``PIL.Image.BILINEAR``.
    Returns:
        PIL Image: Cropped image.
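
    Example:
        Illustrative sketch: crop the top-left quarter, then resize it back
        to 64x64.

        >>> img = Image.new('RGB', (64, 64))
        >>> resized_crop(img, 0, 0, 32, 32, (64, 64)).size
        (64, 64)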
    """
    assert _is_pil_image(img), 'img should be PIL Image'
    img = crop(img, top, left, height, width)
    img = resize(img, size, interpolation)
    return img


def hflip(img):
    """Horizontally flip the given PIL Image.

    Args:
        img (PIL Image): Image to be flipped.

    Returns:
        PIL Image:  Horizontally flipped image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    return img.transpose(Image.FLIP_LEFT_RIGHT)


def _parse_fill(fill, img, min_pil_version):
    """Helper function to get the fill color for rotate and perspective transforms.

    Args:
        fill (n-tuple or int or float): Pixel fill value for area outside the transformed
            image. If int or float, the value is used for all bands respectively.
            Defaults to 0 for all bands.
        img (PIL Image): Image to be filled.
        min_pil_version (str): The minimum PILLOW version for when the ``fillcolor`` option
            was first introduced in the calling function. (e.g. rotate->5.2.0, perspective->5.0.0)

    Returns:
        dict: kwarg for ``fillcolor``
    """
    major_found, minor_found = (int(v) for v in PILLOW_VERSION.split('.')[:2])
    major_required, minor_required = (int(v) for v in min_pil_version.split('.')[:2])
    if major_found < major_required or (major_found == major_required and minor_found < minor_required):
        if fill is None:
            return {}
        else:
            msg = ("The option to fill background area of the transformed image "
                   "requires pillow>={}")
            raise RuntimeError(msg.format(min_pil_version))

    num_bands = len(img.getbands())
    if fill is None:
        fill = 0
    if isinstance(fill, (int, float)) and num_bands > 1:
        fill = tuple([fill] * num_bands)
    if not isinstance(fill, (int, float)) and len(fill) != num_bands:
        msg = ("The number of elements in 'fill' does not match the number of "
               "bands of the image ({} != {})")
        raise ValueError(msg.format(len(fill), num_bands))

    return {"fillcolor": fill}


def _get_perspective_coeffs(startpoints, endpoints):
    """Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms.

    In a perspective transform each pixel (x, y) in the original image gets transformed as,
     (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) )

    Args:
        startpoints: List containing [top-left, top-right, bottom-right, bottom-left] of the original image.
        endpoints: List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.
    Returns:
        octuple (a, b, c, d, e, f, g, h) for transforming each pixel.
    """
    matrix = []

    for p1, p2 in zip(endpoints, startpoints):
        matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
        matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])

    A = torch.tensor(matrix, dtype=torch.float)
    B = torch.tensor(startpoints, dtype=torch.float).view(8)
    res = torch.lstsq(B, A)[0]
    return res.squeeze_(1).tolist()


def perspective(img, startpoints, endpoints, interpolation=Image.BICUBIC, fill=None):
    """Perform perspective transform of the given PIL Image.

    Args:
        img (PIL Image): Image to be transformed.
        startpoints: List containing [top-left, top-right, bottom-right, bottom-left] of the original image
        endpoints: List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image
        interpolation: Desired interpolation. Default is ``Image.BICUBIC``.
        fill (n-tuple or int or float): Pixel fill value for area outside the transformed
            image. If int or float, the value is used for all bands respectively.
            This option is only available for ``pillow>=5.0.0``.

    Returns:
        PIL Image:  Perspectively transformed Image.
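
    Example:
        Illustrative sketch: a mild warp that pulls two corners inwards.

        >>> img = Image.new('RGB', (32, 32))
        >>> start = [(0, 0), (31, 0), (31, 31), (0, 31)]
        >>> end = [(2, 2), (29, 0), (31, 31), (0, 29)]
        >>> perspective(img, start, end).size
        (32, 32)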
    """

    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    opts = _parse_fill(fill, img, '5.0.0')

    coeffs = _get_perspective_coeffs(startpoints, endpoints)
    return img.transform(img.size, Image.PERSPECTIVE, coeffs, interpolation, **opts)


def vflip(img):
    """Vertically flip the given PIL Image.

    Args:
        img (PIL Image): Image to be flipped.

    Returns:
        PIL Image:  Vertically flipped image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    return img.transpose(Image.FLIP_TOP_BOTTOM)


def five_crop(img, size):
    """Crop the given PIL Image into four corners and the central crop.

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
       img (PIL Image): Image to be cropped.
       size (sequence or int): Desired output size of the crop. If size is an
           int instead of sequence like (h, w), a square crop (size, size) is
           made.

    Returns:
       tuple: tuple (tl, tr, bl, br, center)
                Corresponding top left, top right, bottom left, bottom right and center crop.
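
    Example:
        Illustrative sketch on a synthetic image.

        >>> img = Image.new('RGB', (64, 64))
        >>> crops = five_crop(img, 32)
        >>> len(crops), crops[0].size
        (5, (32, 32))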
    """
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    else:
        assert len(size) == 2, "Please provide only two dimensions (h, w) for size."

    image_width, image_height = img.size
    crop_height, crop_width = size
    if crop_width > image_width or crop_height > image_height:
        msg = "Requested crop size {} is bigger than input size {}"
        raise ValueError(msg.format(size, (image_height, image_width)))

    tl = img.crop((0, 0, crop_width, crop_height))
    tr = img.crop((image_width - crop_width, 0, image_width, crop_height))
    bl = img.crop((0, image_height - crop_height, crop_width, image_height))
    br = img.crop((image_width - crop_width, image_height - crop_height,
                   image_width, image_height))
    center = center_crop(img, (crop_height, crop_width))
    return (tl, tr, bl, br, center)


def ten_crop(img, size, vertical_flip=False):
    """Generate ten cropped images from the given PIL Image.
    Crop the given PIL Image into four corners and the central crop plus the
    flipped version of these (horizontal flipping is used by default).

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made.
        vertical_flip (bool): Use vertical flipping instead of horizontal

    Returns:
        tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
            Corresponding top left, top right, bottom left, bottom right and
            center crop and same for the flipped image.
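
    Example:
        Illustrative sketch on a synthetic image.

        >>> img = Image.new('RGB', (64, 64))
        >>> len(ten_crop(img, 32))
        10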
    """
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    else:
        assert len(size) == 2, "Please provide only two dimensions (h, w) for size."

    first_five = five_crop(img, size)

    if vertical_flip:
        img = vflip(img)
    else:
        img = hflip(img)

    second_five = five_crop(img, size)
    return first_five + second_five


def adjust_brightness(img, brightness_factor):
    """Adjust brightness of an Image.

    Args:
        img (PIL Image): PIL Image to be adjusted.
        brightness_factor (float):  How much to adjust the brightness. Can be
            any non negative number. 0 gives a black image, 1 gives the
            original image while 2 increases the brightness by a factor of 2.

    Returns:
        PIL Image: Brightness adjusted image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    enhancer = ImageEnhance.Brightness(img)
    img = enhancer.enhance(brightness_factor)
    return img


def adjust_contrast(img, contrast_factor):
    """Adjust contrast of an Image.

    Args:
        img (PIL Image): PIL Image to be adjusted.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.

    Returns:
        PIL Image: Contrast adjusted image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    enhancer = ImageEnhance.Contrast(img)
    img = enhancer.enhance(contrast_factor)
    return img


def adjust_saturation(img, saturation_factor):
    """Adjust color saturation of an image.

    Args:
        img (PIL Image): PIL Image to be adjusted.
        saturation_factor (float):  How much to adjust the saturation. 0 will
            give a black and white image, 1 will give the original image while
            2 will enhance the saturation by a factor of 2.

    Returns:
        PIL Image: Saturation adjusted image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    enhancer = ImageEnhance.Color(img)
    img = enhancer.enhance(saturation_factor)
    return img


def adjust_hue(img, hue_factor):
    """Adjust hue of an image.

    The image hue is adjusted by converting the image to HSV and
    cyclically shifting the intensities in the hue channel (H).
    The image is then converted back to original image mode.

    `hue_factor` is the amount of shift in H channel and must be in the
    interval `[-0.5, 0.5]`.

    See `Hue`_ for more details.

    .. _Hue: https://en.wikipedia.org/wiki/Hue

    Args:
        img (PIL Image): PIL Image to be adjusted.
        hue_factor (float):  How much to shift the hue channel. Should be in
            [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
            HSV space in positive and negative direction respectively.
            0 means no shift. Therefore, both -0.5 and 0.5 will give an image
            with complementary colors while 0 gives the original image.

    Returns:
        PIL Image: Hue adjusted image.
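
    Example:
        Illustrative sketch; a hue_factor of 0.5 maps every hue to
        (approximately) its complement.

        >>> img = Image.new('RGB', (4, 4), (255, 0, 0))   # pure red
        >>> shifted = adjust_hue(img, 0.5)                # roughly cyan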
    """
    if not(-0.5 <= hue_factor <= 0.5):
        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))

    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    input_mode = img.mode
    if input_mode in {'L', '1', 'I', 'F'}:
        return img

    h, s, v = img.convert('HSV').split()

    np_h = np.array(h, dtype=np.uint8)
    # uint8 addition takes care of rotation across boundaries
    with np.errstate(over='ignore'):
        np_h += np.uint8(hue_factor * 255)
    h = Image.fromarray(np_h, 'L')

    img = Image.merge('HSV', (h, s, v)).convert(input_mode)
    return img


def adjust_gamma(img, gamma, gain=1):
    r"""Perform gamma correction on an image.

    Also known as Power Law Transform. Intensities in RGB mode are adjusted
    based on the following equation:

    .. math::
        I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}

    See `Gamma Correction`_ for more details.

    .. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction

    Args:
        img (PIL Image): PIL Image to be adjusted.
        gamma (float): Non negative real number, same as :math:`\gamma` in the equation.
            gamma larger than 1 makes the shadows darker,
            while gamma smaller than 1 makes dark regions lighter.
        gain (float): The constant multiplier.
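
    Example:
        Illustrative sketch; gamma > 1 darkens the mid-tones.

        >>> img = Image.new('RGB', (4, 4), (128, 128, 128))
        >>> darker = adjust_gamma(img, 2.0)   # 128 -> 255 * (128/255)**2, about 64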
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    if gamma < 0:
        raise ValueError('Gamma should be a non-negative real number')

    input_mode = img.mode
    img = img.convert('RGB')

    gamma_map = [255 * gain * pow(ele / 255., gamma) for ele in range(256)] * 3
    img = img.point(gamma_map)  # use PIL's point-function to accelerate this part

    img = img.convert(input_mode)
    return img


def rotate(img, angle, resample=False, expand=False, center=None, fill=None):
    """Rotate the image by angle.


    Args:
        img (PIL Image): PIL Image to be rotated.
        angle (float or int): Rotation angle in degrees, counter-clockwise.
        resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):
            An optional resampling filter. See `filters`_ for more information.
            If omitted, or if the image has mode "1" or "P", it is set to ``PIL.Image.NEAREST``.
        expand (bool, optional): Optional expansion flag.
            If true, expands the output image to make it large enough to hold the entire rotated image.
            If false or omitted, make the output image the same size as the input image.
            Note that the expand flag assumes rotation around the center and no translation.
        center (2-tuple, optional): Optional center of rotation.
            Origin is the upper left corner.
            Default is the center of the image.
        fill (n-tuple or int or float): Pixel fill value for area outside the rotated
            image. If int or float, the value is used for all bands respectively.
            Defaults to 0 for all bands. This option is only available for ``pillow>=5.2.0``.

    .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
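
    Example:
        Illustrative sketch; with ``expand=True`` a 90 degree rotation swaps
        the output width and height.

        >>> img = Image.new('RGB', (40, 20))
        >>> rotate(img, 90, expand=True).size
        (20, 40)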

    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    opts = _parse_fill(fill, img, '5.2.0')

    return img.rotate(angle, resample, expand, center, **opts)


def _get_inverse_affine_matrix(center, angle, translate, scale, shear):
    # Helper method to compute inverse matrix for affine transformation

    # As it is explained in PIL.Image.rotate
    # We need to compute the INVERSE of the affine transformation matrix: M = T * C * RSS * C^-1
    # where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
    #       C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
    #       RSS is rotation with scale and shear matrix
    #       RSS(a, s, (sx, sy)) =
    #       = R(a) * S(s) * SHy(sy) * SHx(sx)
    #       = [ s*cos(a - sy)/cos(sy), s*(-cos(a - sy)*tan(sx)/cos(sy) - sin(a)), 0 ]
    #         [ s*sin(a - sy)/cos(sy), s*(-sin(a - sy)*tan(sx)/cos(sy) + cos(a)), 0 ]
    #         [ 0                    , 0                                        , 1 ]
    #
    # where R is a rotation matrix, S is a scaling matrix, and SHx and SHy are the shears:
    # SHx(s) = [1, -tan(s)] and SHy(s) = [1      , 0]
    #          [0, 1      ]              [-tan(s), 1]
    #
    # Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1

    if isinstance(shear, numbers.Number):
        shear = [shear, 0]

    if not (isinstance(shear, (tuple, list)) and len(shear) == 2):
        raise ValueError(
            "Shear should be a single value or a tuple/list containing " +
            "two values. Got {}".format(shear))

    rot = math.radians(angle)
    sx, sy = [math.radians(s) for s in shear]

    cx, cy = center
    tx, ty = translate

    # RSS without scaling
    a = cos(rot - sy) / cos(sy)
    b = -cos(rot - sy) * tan(sx) / cos(sy) - sin(rot)
    c = sin(rot - sy) / cos(sy)
    d = -sin(rot - sy) * tan(sx) / cos(sy) + cos(rot)

    # Inverted rotation matrix with scale and shear
    # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
    M = [d, -b, 0,
         -c, a, 0]
    M = [x / scale for x in M]

    # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
    M[2] += M[0] * (-cx - tx) + M[1] * (-cy - ty)
    M[5] += M[3] * (-cx - tx) + M[4] * (-cy - ty)

    # Apply center translation: C * RSS^-1 * C^-1 * T^-1
    M[2] += cx
    M[5] += cy
    return M


def affine(img, angle, translate, scale, shear, resample=0, fillcolor=None):
    """Apply affine transformation on the image keeping image center invariant

    Args:
        img (PIL Image): PIL Image to be rotated.
        angle (float or int): rotation angle in degrees between -180 and 180, clockwise direction.
        translate (list or tuple of integers): horizontal and vertical translations (post-rotation translation)
        scale (float): overall scale
        shear (float or tuple or list): shear angle value in degrees between -180 and 180, clockwise direction.
            If a tuple or list is specified, the first value corresponds to a shear parallel to the x axis, while
            the second value corresponds to a shear parallel to the y axis.
        resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):
            An optional resampling filter.
            See `filters`_ for more information.
            If omitted, or if the image has mode "1" or "P", it is set to ``PIL.Image.NEAREST``.
        fillcolor (int): Optional fill color for the area outside the transform in the output image. (Pillow>=5.0.0)
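
    Example:
        Illustrative sketch: a pure translation of 5 pixels right and
        10 pixels down; the output always keeps the input size.

        >>> img = Image.new('RGB', (32, 32))
        >>> affine(img, angle=0, translate=(5, 10), scale=1.0, shear=0).size
        (32, 32)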
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
        "Argument translate should be a list or tuple of length 2"

    assert scale > 0.0, "Argument scale should be positive"

    output_size = img.size
    center = (img.size[0] * 0.5 + 0.5, img.size[1] * 0.5 + 0.5)
    matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)
    kwargs = {"fillcolor": fillcolor} if int(PILLOW_VERSION.split('.')[0]) >= 5 else {}
    return img.transform(output_size, Image.AFFINE, matrix, resample, **kwargs)


def to_grayscale(img, num_output_channels=1):
    """Convert image to grayscale version of image.

    Args:
        img (PIL Image): Image to be converted to grayscale.

    Returns:
        PIL Image: Grayscale version of the image.
            if num_output_channels = 1 : returned image is single channel

            if num_output_channels = 3 : returned image is 3 channel with r = g = b
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    if num_output_channels == 1:
        img = img.convert('L')
    elif num_output_channels == 3:
        img = img.convert('L')
        np_img = np.array(img, dtype=np.uint8)
        np_img = np.dstack([np_img, np_img, np_img])
        img = Image.fromarray(np_img, 'RGB')
    else:
        raise ValueError('num_output_channels should be either 1 or 3')

    return img


def erase(img, i, j, h, w, v, inplace=False):
    """ Erase the input Tensor Image with given value.

    Args:
        img (Tensor Image): Tensor image of size (C, H, W) to be erased
        i (int): i in (i,j) i.e coordinates of the upper left corner.
        j (int): j in (i,j) i.e coordinates of the upper left corner.
        h (int): Height of the erased region.
        w (int): Width of the erased region.
        v: Erasing value.
        inplace (bool, optional): Make this operation in-place. Default is False.

    Returns:
        Tensor Image: Erased image.
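
    Example:
        Illustrative sketch: zero out an 8x8 patch whose top-left corner
        is at (i, j) = (2, 3).

        >>> img = torch.ones(3, 16, 16)
        >>> out = erase(img, 2, 3, 8, 8, v=0.)
        >>> out[0, 2, 3].item(), out[0, 0, 0].item()
        (0.0, 1.0)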
    """
    if not isinstance(img, torch.Tensor):
        raise TypeError('img should be Tensor Image. Got {}'.format(type(img)))

    if not inplace:
        img = img.clone()

    img[:, i:i + h, j:j + w] = v
    return img