import math
import numbers
import warnings
from collections.abc import Iterable
from typing import Any

import numpy as np
from numpy import sin, cos, tan
from PIL import Image, ImageOps, ImageEnhance, __version__ as PILLOW_VERSION

import torch
from torch import Tensor
from torch.jit.annotations import List, Tuple

try:
    import accimage
except ImportError:
    accimage = None

from . import functional_pil as F_pil
from . import functional_tensor as F_t


_is_pil_image = F_pil._is_pil_image


def _get_image_size(img: Tensor) -> List[int]:
    """Returns image sizea as (w, h)
    """
    if isinstance(img, torch.Tensor):
        return F_t._get_image_size(img)

    return F_pil._get_image_size(img)


@torch.jit.unused
def _is_numpy(img: Any) -> bool:
    return isinstance(img, np.ndarray)


@torch.jit.unused
def _is_numpy_image(img: Any) -> bool:
    return img.ndim in {2, 3}


def to_tensor(pic):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.

    See ``ToTensor`` for more details.

    Args:
        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
    """
    if not(F_pil._is_pil_image(pic) or _is_numpy(pic)):
        raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))

    if _is_numpy(pic) and not _is_numpy_image(pic):
        raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))

    if isinstance(pic, np.ndarray):
        # handle numpy array
        if pic.ndim == 2:
            pic = pic[:, :, None]

        img = torch.from_numpy(pic.transpose((2, 0, 1)))
        # backward compatibility
        if isinstance(img, torch.ByteTensor):
            return img.float().div(255)
        else:
            return img

    if accimage is not None and isinstance(pic, accimage.Image):
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
        pic.copyto(nppic)
        return torch.from_numpy(nppic)

    # handle PIL Image
    if pic.mode == 'I':
        img = torch.from_numpy(np.array(pic, np.int32, copy=False))
    elif pic.mode == 'I;16':
        img = torch.from_numpy(np.array(pic, np.int16, copy=False))
    elif pic.mode == 'F':
        img = torch.from_numpy(np.array(pic, np.float32, copy=False))
    elif pic.mode == '1':
        img = 255 * torch.from_numpy(np.array(pic, np.uint8, copy=False))
    else:
        img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))

    img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1)).contiguous()
    if isinstance(img, torch.ByteTensor):
        return img.float().div(255)
    else:
        return img
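
# Illustrative sketch (not part of the original module): to_tensor takes an
# HWC uint8 array and returns a CHW float tensor scaled to [0, 1]. The shape
# below is an assumption chosen for the example.
#
#   >>> arr = np.zeros((4, 6, 3), dtype=np.uint8)   # H=4, W=6, C=3
#   >>> t = to_tensor(arr)
#   >>> t.shape, t.dtype
#   (torch.Size([3, 4, 6]), torch.float32)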


def pil_to_tensor(pic):
    """Convert a ``PIL Image`` to a tensor of the same type.

    See ``AsTensor`` for more details.

    Args:
        pic (PIL Image): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
    """
    if not F_pil._is_pil_image(pic):
        raise TypeError('pic should be PIL Image. Got {}'.format(type(pic)))

    if accimage is not None and isinstance(pic, accimage.Image):
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
        pic.copyto(nppic)
        return torch.as_tensor(nppic)

    # handle PIL Image
    img = torch.as_tensor(np.asarray(pic))
    img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1))
    return img


def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor:
    """Convert a tensor image to the given ``dtype`` and scale the values accordingly

    Args:
        image (torch.Tensor): Image to be converted
        dtype (torch.dtype): Desired data type of the output

    Returns:
        (torch.Tensor): Converted image

    .. note::

        When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
        If converted back and forth, this mismatch has no effect.

    Raises:
        RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
            well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
            overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
            of the integer ``dtype``.
    """
    if image.dtype == dtype:
        return image

    if image.dtype.is_floating_point:
        # float to float
        if dtype.is_floating_point:
            return image.to(dtype)

        # float to int
        if (image.dtype == torch.float32 and dtype in (torch.int32, torch.int64)) or (
            image.dtype == torch.float64 and dtype == torch.int64
        ):
            msg = f"The cast from {image.dtype} to {dtype} cannot be performed safely."
            raise RuntimeError(msg)

        # `max + 1 - eps` maps the float range onto the integer range more
        # evenly; the small eps keeps exact 1.0 from overflowing past max.
        eps = 1e-3
        return image.mul(torch.iinfo(dtype).max + 1 - eps).to(dtype)
    else:
        # int to float
        if dtype.is_floating_point:
            max = torch.iinfo(image.dtype).max
            image = image.to(dtype)
            return image / max

        # int to int
        input_max = torch.iinfo(image.dtype).max
        output_max = torch.iinfo(dtype).max

        if input_max > output_max:
            factor = (input_max + 1) // (output_max + 1)
            image = image // factor
            return image.to(dtype)
        else:
            factor = (output_max + 1) // (input_max + 1)
            image = image.to(dtype)
            return image * factor
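
# Hedged usage sketch: uint8 -> float32 divides by the integer maximum, so 255
# maps to 1.0 and 128 to roughly 0.502. The values below are illustrative only.
#
#   >>> img = torch.tensor([[[0, 128, 255]]], dtype=torch.uint8)
#   >>> convert_image_dtype(img, torch.float32)
#   tensor([[[0.0000, 0.5020, 1.0000]]])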


def to_pil_image(pic, mode=None):
    """Convert a tensor or an ndarray to PIL Image.

    See :class:`~torchvision.transforms.ToPILImage` for more details.

    Args:
        pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).

    .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes

    Returns:
        PIL Image: Image converted to PIL Image.
    """
    if not(isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
        raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))

    elif isinstance(pic, torch.Tensor):
        if pic.ndimension() not in {2, 3}:
            raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndimension()))

        elif pic.ndimension() == 2:
            # if 2D image, add channel dimension (CHW)
            pic = pic.unsqueeze(0)

    elif isinstance(pic, np.ndarray):
        if pic.ndim not in {2, 3}:
            raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))

        elif pic.ndim == 2:
            # if 2D image, add channel dimension (HWC)
            pic = np.expand_dims(pic, 2)

    npimg = pic
    if isinstance(pic, torch.FloatTensor) and mode != 'F':
        pic = pic.mul(255).byte()
    if isinstance(pic, torch.Tensor):
        npimg = np.transpose(pic.numpy(), (1, 2, 0))

    if not isinstance(npimg, np.ndarray):
        raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +
                        'not {}'.format(type(npimg)))

    if npimg.shape[2] == 1:
        expected_mode = None
        npimg = npimg[:, :, 0]
        if npimg.dtype == np.uint8:
            expected_mode = 'L'
        elif npimg.dtype == np.int16:
            expected_mode = 'I;16'
        elif npimg.dtype == np.int32:
            expected_mode = 'I'
        elif npimg.dtype == np.float32:
            expected_mode = 'F'
        if mode is not None and mode != expected_mode:
            raise ValueError("Incorrect mode ({}) supplied for input type {}. Should be {}"
                             .format(mode, npimg.dtype, expected_mode))
        mode = expected_mode

    elif npimg.shape[2] == 2:
        permitted_2_channel_modes = ['LA']
        if mode is not None and mode not in permitted_2_channel_modes:
            raise ValueError("Only modes {} are supported for 2D inputs".format(permitted_2_channel_modes))

        if mode is None and npimg.dtype == np.uint8:
            mode = 'LA'

    elif npimg.shape[2] == 4:
        permitted_4_channel_modes = ['RGBA', 'CMYK', 'RGBX']
        if mode is not None and mode not in permitted_4_channel_modes:
            raise ValueError("Only modes {} are supported for 4D inputs".format(permitted_4_channel_modes))

        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGBA'
    else:
        permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']
        if mode is not None and mode not in permitted_3_channel_modes:
            raise ValueError("Only modes {} are supported for 3D inputs".format(permitted_3_channel_modes))
        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGB'

    if mode is None:
        raise TypeError('Input type {} is not supported'.format(npimg.dtype))

    return Image.fromarray(npimg, mode=mode)
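
# Quick sketch (assumed shape): a CHW float tensor in [0, 1] is scaled by 255,
# cast to uint8 and interpreted as an 'RGB' image; note that PIL reports the
# size as (W, H).
#
#   >>> im = to_pil_image(torch.rand(3, 8, 8))
#   >>> im.mode, im.size
#   ('RGB', (8, 8))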


def normalize(tensor, mean, std, inplace=False):
    """Normalize a tensor image with mean and standard deviation.

    .. note::
        This transform acts out of place by default, i.e., it does not mutate the input tensor.

    See :class:`~torchvision.transforms.Normalize` for more details.

    Args:
        tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace(bool, optional): Bool to make this operation inplace.

    Returns:
        Tensor: Normalized Tensor image.
    """
    if not torch.is_tensor(tensor):
        raise TypeError('tensor should be a torch tensor. Got {}.'.format(type(tensor)))

    if tensor.ndimension() != 3:
        raise ValueError('Expected tensor to be a tensor image of size (C, H, W). Got tensor.size() = '
                         '{}.'.format(tensor.size()))

    if not inplace:
        tensor = tensor.clone()

    dtype = tensor.dtype
    mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)
    std = torch.as_tensor(std, dtype=dtype, device=tensor.device)
    if (std == 0).any():
        raise ValueError('std evaluated to zero after conversion to {}, leading to division by zero.'.format(dtype))
    if mean.ndim == 1:
        mean = mean[:, None, None]
    if std.ndim == 1:
        std = std[:, None, None]
    tensor.sub_(mean).div_(std)
    return tensor
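
# Minimal sketch: the result is (x - mean) / std per channel. The statistics
# below are the common ImageNet values, used here purely as an illustration.
#
#   >>> t = torch.ones(3, 2, 2)
#   >>> out = normalize(t, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
#   >>> torch.allclose(out[0], torch.full((2, 2), (1 - 0.485) / 0.229))
#   True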


def resize(img: Tensor, size: List[int], interpolation: int = 2) -> Tensor:
    r"""Resize the input image to the given size.
    The image can be a PIL Image or a torch Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Args:
        img (PIL Image or Tensor): Image to be resized.
        size (sequence or int): Desired output size. If size is a sequence like
            (h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number maintaining
            the aspect ratio. i.e., if height > width, then image will be rescaled to
            :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`.
            In torchscript mode size as single int is not supported, use a tuple or
            list of length 1: ``[size, ]``.
        interpolation (int, optional): Desired interpolation. Default is bilinear.

    Returns:
        PIL Image or Tensor: Resized image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.resize(img, size=size, interpolation=interpolation)

    return F_t.resize(img, size=size, interpolation=interpolation)
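
# Sketch of the int-size semantics (PIL input assumed): a single int matches
# the smaller edge and preserves the aspect ratio.
#
#   >>> im = Image.new('RGB', (400, 200))   # (width, height)
#   >>> resize(im, 100).size                # smaller edge 200 -> 100
#   (200, 100)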


def scale(*args, **kwargs):
    warnings.warn("The use of the transforms.Scale transform is deprecated, " +
                  "please use transforms.Resize instead.")
    return resize(*args, **kwargs)


def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "constant") -> Tensor:
    r"""Pad the given image on all sides with the given "pad" value.
    The image can be a PIL Image or a torch Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Args:
        img (PIL Image or Tensor): Image to be padded.
        padding (int or tuple or list): Padding on each border. If a single int is provided this
            is used to pad all borders. If tuple of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a tuple of length 4 is provided
            this is the padding for the left, top, right and bottom borders respectively.
            In torchscript mode padding as single int is not supported, use a tuple or
            list of length 1: ``[padding, ]``.
        fill (int or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
            length 3, it is used to fill R, G, B channels respectively.
            This value is only used when the padding_mode is constant. Only int value is supported for Tensors.
        padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
            Mode symmetric is not yet supported for Tensor inputs.

            - constant: pads with a constant value, this value is specified with fill

            - edge: pads with the last value on the edge of the image

            - reflect: pads with reflection of image (without repeating the last value on the edge)

                       padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
                       will result in [3, 2, 1, 2, 3, 4, 3, 2]

            - symmetric: pads with reflection of image (repeating the last value on the edge)

                         padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
                         will result in [2, 1, 1, 2, 3, 4, 4, 3]

    Returns:
        PIL Image or Tensor: Padded image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)

    return F_t.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)
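
# Hedged sketch of reflect padding on a single row, mirroring the docstring
# example above; assumes the tensor backend accepts "reflect" here, per the
# note about tensor support.
#
#   >>> row = torch.tensor([[[1., 2., 3., 4.]]])       # (C=1, H=1, W=4)
#   >>> pad(row, [2, 0], padding_mode="reflect")[0, 0].tolist()
#   [3.0, 2.0, 1.0, 2.0, 3.0, 4.0, 3.0, 2.0]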


def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:
    """Crop the given image at specified location and output size.
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading
    dimensions

    Args:
        img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.

    Returns:
        PIL Image or Tensor: Cropped image.
    """

    if not isinstance(img, torch.Tensor):
        return F_pil.crop(img, top, left, height, width)

    return F_t.crop(img, top, left, height, width)


def center_crop(img: Tensor, output_size: List[int]) -> Tensor:
    """Crops the given image at the center.
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        output_size (sequence or int): (height, width) of the crop box. If int or sequence with single int
            it is used for both directions.

    Returns:
        PIL Image or Tensor: Cropped image.
    """
    if isinstance(output_size, numbers.Number):
        output_size = (int(output_size), int(output_size))
    elif isinstance(output_size, (tuple, list)) and len(output_size) == 1:
        output_size = (output_size[0], output_size[0])

    image_width, image_height = _get_image_size(img)
    crop_height, crop_width = output_size

    # crop_top = int(round((image_height - crop_height) / 2.))
    # Result can be different between python func and scripted func
    # Temporary workaround:
    crop_top = int((image_height - crop_height + 1) * 0.5)
    # crop_left = int(round((image_width - crop_width) / 2.))
    # Result can be different between python func and scripted func
    # Temporary workaround:
    crop_left = int((image_width - crop_width + 1) * 0.5)
    return crop(img, crop_top, crop_left, crop_height, crop_width)
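
# Worked check of the offset arithmetic above: for an 8x8 input and a 4x4
# crop, crop_top = crop_left = int((8 - 4 + 1) * 0.5) = 2.
#
#   >>> t = torch.arange(64.).reshape(1, 8, 8)
#   >>> center_crop(t, [4, 4]).shape
#   torch.Size([1, 4, 4])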


def resized_crop(
        img: Tensor, top: int, left: int, height: int, width: int, size: List[int], interpolation: int = Image.BILINEAR
) -> Tensor:
    """Crop the given image and resize it to desired size.
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.

    Args:
        img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.
        size (sequence or int): Desired output size. Same semantics as ``resize``.
        interpolation (int, optional): Desired interpolation. Default is ``PIL.Image.BILINEAR``.
    Returns:
        PIL Image or Tensor: Cropped image.
    """
    img = crop(img, top, left, height, width)
    img = resize(img, size, interpolation)
    return img


def hflip(img: Tensor) -> Tensor:
    """Horizontally flip the given PIL Image or Tensor.

    Args:
        img (PIL Image or Tensor): Image to be flipped. If img
            is a Tensor, it is expected to be in [..., H, W] format,
            where ... means it can have an arbitrary number of trailing
            dimensions.

    Returns:
        PIL Image or Tensor:  Horizontally flipped image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.hflip(img)

    return F_t.hflip(img)


def _parse_fill(fill, img, min_pil_version):
    """Helper function to get the fill color for rotate and perspective transforms.

    Args:
        fill (n-tuple or int or float): Pixel fill value for area outside the transformed
            image. If int or float, the value is used for all bands respectively.
            Defaults to 0 for all bands.
        img (PIL Image): Image to be filled.
        min_pil_version (str): The minimum PILLOW version for when the ``fillcolor`` option
            was first introduced in the calling function. (e.g. rotate->5.2.0, perspective->5.0.0)

    Returns:
        dict: kwarg for ``fillcolor``
    """
    major_found, minor_found = (int(v) for v in PILLOW_VERSION.split('.')[:2])
    major_required, minor_required = (int(v) for v in min_pil_version.split('.')[:2])
    if major_found < major_required or (major_found == major_required and minor_found < minor_required):
        if fill is None:
            return {}
        else:
            msg = ("The option to fill background area of the transformed image, "
                   "requires pillow>={}")
            raise RuntimeError(msg.format(min_pil_version))

    num_bands = len(img.getbands())
    if fill is None:
        fill = 0
    if isinstance(fill, (int, float)) and num_bands > 1:
        fill = tuple([fill] * num_bands)
    if not isinstance(fill, (int, float)) and len(fill) != num_bands:
        msg = ("The number of elements in 'fill' does not match the number of "
               "bands of the image ({} != {})")
        raise ValueError(msg.format(len(fill), num_bands))

    return {"fillcolor": fill}


def _get_perspective_coeffs(startpoints, endpoints):
    """Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms.

    In Perspective Transform each pixel (x, y) in the original image gets transformed as,
     (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) )

    Args:
        startpoints: List containing [top-left, top-right, bottom-right, bottom-left] of the original image,
        endpoints: List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image
    Returns:
        octuple (a, b, c, d, e, f, g, h) for transforming each pixel.
    """
    matrix = []

    for p1, p2 in zip(endpoints, startpoints):
        matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
        matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])

    A = torch.tensor(matrix, dtype=torch.float)
    B = torch.tensor(startpoints, dtype=torch.float).view(8)
    res = torch.lstsq(B, A)[0]
    return res.squeeze_(1).tolist()
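
# Sanity-check sketch for the helper above: with identical start and end
# points the least-squares solve should recover the identity transform
# (a = e = 1, all other coefficients ~0). Points below are arbitrary.
#
#   >>> pts = [[0, 0], [9, 0], [9, 9], [0, 9]]
#   >>> coeffs = _get_perspective_coeffs(pts, pts)
#   >>> all(abs(c - e) < 1e-4 for c, e in zip(coeffs, [1, 0, 0, 0, 1, 0, 0, 0]))
#   True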


def perspective(img, startpoints, endpoints, interpolation=Image.BICUBIC, fill=None):
    """Perform perspective transform of the given PIL Image.

    Args:
        img (PIL Image): Image to be transformed.
        startpoints: List containing [top-left, top-right, bottom-right, bottom-left] of the original image
        endpoints: List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image
        interpolation: Default is ``Image.BICUBIC``.
        fill (n-tuple or int or float): Pixel fill value for area outside the rotated
            image. If int or float, the value is used for all bands respectively.
            This option is only available for ``pillow>=5.0.0``.

    Returns:
        PIL Image:  Perspectively transformed Image.
    """

    if not F_pil._is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    opts = _parse_fill(fill, img, '5.0.0')

    coeffs = _get_perspective_coeffs(startpoints, endpoints)
    return img.transform(img.size, Image.PERSPECTIVE, coeffs, interpolation, **opts)


def vflip(img: Tensor) -> Tensor:
    """Vertically flip the given PIL Image or torch Tensor.

    Args:
        img (PIL Image or Tensor): Image to be flipped. If img
            is a Tensor, it is expected to be in [..., H, W] format,
            where ... means it can have an arbitrary number of trailing
            dimensions.

    Returns:
        PIL Image or Tensor: Vertically flipped image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.vflip(img)

    return F_t.vflip(img)


def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
    """Crop the given image into four corners and the central crop.
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).

    Returns:
       tuple: tuple (tl, tr, bl, br, center)
                Corresponding top left, top right, bottom left, bottom right and center crop.
    """
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and len(size) == 1:
        size = (size[0], size[0])

    if len(size) != 2:
        raise ValueError("Please provide only two dimensions (h, w) for size.")

    image_width, image_height = _get_image_size(img)
    crop_height, crop_width = size
    if crop_width > image_width or crop_height > image_height:
        msg = "Requested crop size {} is bigger than input size {}"
        raise ValueError(msg.format(size, (image_height, image_width)))

    tl = crop(img, 0, 0, crop_height, crop_width)
    tr = crop(img, 0, image_width - crop_width, crop_height, crop_width)
    bl = crop(img, image_height - crop_height, 0, crop_height, crop_width)
    br = crop(img, image_height - crop_height, image_width - crop_width, crop_height, crop_width)

    center = center_crop(img, [crop_height, crop_width])

    return tl, tr, bl, br, center
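
# Illustrative sketch (sizes assumed): five_crop returns the four corner
# crops plus the center crop, each of the requested size.
#
#   >>> im = Image.new('RGB', (10, 10))
#   >>> crops = five_crop(im, 4)
#   >>> len(crops), crops[0].size
#   (5, (4, 4))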


def ten_crop(img: Tensor, size: List[int], vertical_flip: bool = False) -> List[Tensor]:
    """Generate ten cropped images from the given image.
    Crop the given image into four corners and the central crop plus the
    flipped version of these (horizontal flipping is used by default).
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).
        vertical_flip (bool): Use vertical flipping instead of horizontal

    Returns:
        tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
            Corresponding top left, top right, bottom left, bottom right and
            center crop and same for the flipped image.
    """
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and len(size) == 1:
        size = (size[0], size[0])

    if len(size) != 2:
        raise ValueError("Please provide only two dimensions (h, w) for size.")

    first_five = five_crop(img, size)

    if vertical_flip:
        img = vflip(img)
    else:
        img = hflip(img)

    second_five = five_crop(img, size)
    return first_five + second_five
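
# Follow-on sketch: ten_crop is five_crop on the image plus five_crop on its
# flipped version, so ten crops come back in total.
#
#   >>> im = Image.new('RGB', (10, 10))
#   >>> len(ten_crop(im, 4))
#   10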


def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor:
    """Adjust brightness of an Image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
        brightness_factor (float):  How much to adjust the brightness. Can be
            any non negative number. 0 gives a black image, 1 gives the
            original image while 2 increases the brightness by a factor of 2.

    Returns:
        PIL Image or Tensor: Brightness adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_brightness(img, brightness_factor)

    return F_t.adjust_brightness(img, brightness_factor)


def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor:
    """Adjust contrast of an Image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.

    Returns:
        PIL Image or Tensor: Contrast adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_contrast(img, contrast_factor)

    return F_t.adjust_contrast(img, contrast_factor)


def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor:
    """Adjust color saturation of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
        saturation_factor (float):  How much to adjust the saturation. 0 will
            give a black and white image, 1 will give the original image while
            2 will enhance the saturation by a factor of 2.

    Returns:
        PIL Image or Tensor: Saturation adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_saturation(img, saturation_factor)

    return F_t.adjust_saturation(img, saturation_factor)


def adjust_hue(img: Tensor, hue_factor: float) -> Tensor:
    """Adjust hue of an image.

    The image hue is adjusted by converting the image to HSV and
    cyclically shifting the intensities in the hue channel (H).
    The image is then converted back to original image mode.

    `hue_factor` is the amount of shift in H channel and must be in the
    interval `[-0.5, 0.5]`.

    See `Hue`_ for more details.

    .. _Hue: https://en.wikipedia.org/wiki/Hue

    Args:
        img (PIL Image): PIL Image to be adjusted.
        hue_factor (float):  How much to shift the hue channel. Should be in
            [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
            HSV space in positive and negative direction respectively.
            0 means no shift. Therefore, both -0.5 and 0.5 will give an image
            with complementary colors while 0 gives the original image.

    Returns:
        PIL Image: Hue adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_hue(img, hue_factor)

    raise TypeError('img should be PIL Image. Got {}'.format(type(img)))


def adjust_gamma(img, gamma, gain=1):
    r"""Perform gamma correction on an image.

    Also known as Power Law Transform. Intensities in RGB mode are adjusted
    based on the following equation:

    .. math::
        I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}

    See `Gamma Correction`_ for more details.

    .. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction

    Args:
        img (PIL Image): PIL Image to be adjusted.
        gamma (float): Non negative real number, same as :math:`\gamma` in the equation.
            gamma larger than 1 makes the shadows darker,
            while gamma smaller than 1 makes dark regions lighter.
        gain (float): The constant multiplier.
    """
    if not F_pil._is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    if gamma < 0:
        raise ValueError('Gamma should be a non-negative real number')

    input_mode = img.mode
    img = img.convert('RGB')

    gamma_map = [255 * gain * pow(ele / 255., gamma) for ele in range(256)] * 3
    img = img.point(gamma_map)  # use PIL's point-function to accelerate this part

    img = img.convert(input_mode)
    return img
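
# Worked example of the power-law map (values assumed): with gamma=2 and
# gain=1, an input of 128 maps to 255 * (128 / 255) ** 2 = 64.25, which the
# lookup table truncates to 64.
#
#   >>> im = Image.new('RGB', (1, 1), (128, 128, 128))
#   >>> adjust_gamma(im, gamma=2).getpixel((0, 0))
#   (64, 64, 64)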


def rotate(img, angle, resample=False, expand=False, center=None, fill=None):
    """Rotate the image by angle.


    Args:
        img (PIL Image): PIL Image to be rotated.
        angle (float or int): Rotation angle in degrees, counter-clockwise.
        resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):
            An optional resampling filter. See `filters`_ for more information.
            If omitted, or if the image has mode "1" or "P", it is set to ``PIL.Image.NEAREST``.
        expand (bool, optional): Optional expansion flag.
            If true, expands the output image to make it large enough to hold the entire rotated image.
            If false or omitted, make the output image the same size as the input image.
            Note that the expand flag assumes rotation around the center and no translation.
        center (2-tuple, optional): Optional center of rotation.
            Origin is the upper left corner.
            Default is the center of the image.
        fill (n-tuple or int or float): Pixel fill value for area outside the rotated
            image. If int or float, the value is used for all bands respectively.
            Defaults to 0 for all bands. This option is only available for ``pillow>=5.2.0``.

    .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters

    """
    if not F_pil._is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    opts = _parse_fill(fill, img, '5.2.0')

    return img.rotate(angle, resample, expand, center, **opts)


def _get_inverse_affine_matrix(center, angle, translate, scale, shear):
    # Helper method to compute inverse matrix for affine transformation

    # As it is explained in PIL.Image.rotate
    # We need compute INVERSE of affine transformation matrix: M = T * C * RSS * C^-1
    # where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
    #       C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
    #       RSS is rotation with scale and shear matrix
    #       RSS(a, s, (sx, sy)) =
    #       = R(a) * S(s) * SHy(sy) * SHx(sx)
    #       = [ s*cos(a - sy)/cos(sy), s*(-cos(a - sy)*tan(sx)/cos(sy) - sin(a)), 0 ]
    #         [ s*sin(a - sy)/cos(sy), s*(-sin(a - sy)*tan(sx)/cos(sy) + cos(a)), 0 ]
    #         [ 0                    , 0                                      , 1 ]
    #
    # where R is a rotation matrix, S is a scaling matrix, and SHx and SHy are the shears:
    # SHx(s) = [1, -tan(s)] and SHy(s) = [1      , 0]
    #          [0, 1      ]              [-tan(s), 1]
    #
    # Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1

    if isinstance(shear, numbers.Number):
        shear = [shear, 0]

    if not (isinstance(shear, (tuple, list)) and len(shear) == 2):
        raise ValueError(
            "Shear should be a single value or a tuple/list containing " +
            "two values. Got {}".format(shear))

    rot = math.radians(angle)
    sx, sy = [math.radians(s) for s in shear]

    cx, cy = center
    tx, ty = translate

    # RSS without scaling
    a = cos(rot - sy) / cos(sy)
    b = -cos(rot - sy) * tan(sx) / cos(sy) - sin(rot)
    c = sin(rot - sy) / cos(sy)
    d = -sin(rot - sy) * tan(sx) / cos(sy) + cos(rot)

    # Inverted rotation matrix with scale and shear
    # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
    M = [d, -b, 0,
         -c, a, 0]
    M = [x / scale for x in M]

    # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
    M[2] += M[0] * (-cx - tx) + M[1] * (-cy - ty)
    M[5] += M[3] * (-cx - tx) + M[4] * (-cy - ty)

    # Apply center translation: C * RSS^-1 * C^-1 * T^-1
    M[2] += cx
    M[5] += cy
    return M
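
# Identity check for the derivation above (hypothetical values): with no
# rotation, translation, or shear, and scale 1, the inverse matrix reduces
# to [1, 0, 0, 0, 1, 0].
#
#   >>> M = _get_inverse_affine_matrix((0, 0), 0, (0, 0), 1.0, 0)
#   >>> all(abs(v - e) < 1e-9 for v, e in zip(M, [1, 0, 0, 0, 1, 0]))
#   True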


def affine(img, angle, translate, scale, shear, resample=0, fillcolor=None):
    """Apply affine transformation on the image keeping image center invariant

    Args:
        img (PIL Image): PIL Image to be rotated.
        angle (float or int): rotation angle in degrees between -180 and 180, clockwise direction.
        translate (list or tuple of integers): horizontal and vertical translations (post-rotation translation)
        scale (float): overall scale
        shear (float or tuple or list): shear angle value in degrees between -180 to 180, clockwise direction.
            If a tuple or list is specified, the first value corresponds to a shear parallel to the x axis, while
            the second value corresponds to a shear parallel to the y axis.
        resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):
            An optional resampling filter.
            See `filters`_ for more information.
            If omitted, or if the image has mode "1" or "P", it is set to ``PIL.Image.NEAREST``.
        fillcolor (int): Optional fill color for the area outside the transform in the output image. (Pillow>=5.0.0)
    """
    if not F_pil._is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
        "Argument translate should be a list or tuple of length 2"

    assert scale > 0.0, "Argument scale should be positive"

    output_size = img.size
    center = (img.size[0] * 0.5 + 0.5, img.size[1] * 0.5 + 0.5)
    matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)
    kwargs = {"fillcolor": fillcolor} if int(PILLOW_VERSION.split('.')[0]) >= 5 else {}
    return img.transform(output_size, Image.AFFINE, matrix, resample, **kwargs)


def to_grayscale(img, num_output_channels=1):
    """Convert image to grayscale version of image.

    Args:
        img (PIL Image): Image to be converted to grayscale.

    Returns:
        PIL Image: Grayscale version of the image.
            if num_output_channels = 1 : returned image is single channel

            if num_output_channels = 3 : returned image is 3 channel with r = g = b
    """
    if not F_pil._is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    if num_output_channels == 1:
        img = img.convert('L')
    elif num_output_channels == 3:
        img = img.convert('L')
        np_img = np.array(img, dtype=np.uint8)
        np_img = np.dstack([np_img, np_img, np_img])
        img = Image.fromarray(np_img, 'RGB')
    else:
        raise ValueError('num_output_channels should be either 1 or 3')

    return img


def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool = False) -> Tensor:
    """ Erase the input Tensor Image with given value.

    Args:
        img (Tensor Image): Tensor image of size (C, H, W) to be erased
        i (int): i in (i, j), i.e. the vertical component of the upper left corner.
        j (int): j in (i, j), i.e. the horizontal component of the upper left corner.
        h (int): Height of the erased region.
        w (int): Width of the erased region.
        v: Erasing value.
        inplace(bool, optional): For in-place operations. By default is set False.

    Returns:
        Tensor Image: Erased image.
    """
    if not isinstance(img, torch.Tensor):
        raise TypeError('img should be Tensor Image. Got {}'.format(type(img)))

    if not inplace:
        img = img.clone()

    img[:, i:i + h, j:j + w] = v
    return img
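
# Closing sketch: erase overwrites the (h, w) block whose top-left corner is
# (i, j) with v; shapes and values here are assumptions for the example.
#
#   >>> t = torch.ones(1, 4, 4)
#   >>> erase(t, i=1, j=1, h=2, w=2, v=0.).eq(0).sum().item()
#   4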