import math
import numbers
import warnings
from enum import Enum

import numpy as np
from PIL import Image

import torch
from torch import Tensor
from typing import List, Tuple, Any, Optional

try:
    import accimage
except ImportError:
    accimage = None

from . import functional_pil as F_pil
from . import functional_tensor as F_t


class InterpolationMode(Enum):
    """Interpolation modes
    """
    NEAREST = "nearest"
    BILINEAR = "bilinear"
    BICUBIC = "bicubic"
    # For PIL compatibility
    BOX = "box"
    HAMMING = "hamming"
    LANCZOS = "lanczos"


# TODO: Once torchscript supports Enums with staticmethod
# this can be put into InterpolationMode as staticmethod
def _interpolation_modes_from_int(i: int) -> InterpolationMode:
    inverse_modes_mapping = {
        0: InterpolationMode.NEAREST,
        2: InterpolationMode.BILINEAR,
        3: InterpolationMode.BICUBIC,
        4: InterpolationMode.BOX,
        5: InterpolationMode.HAMMING,
        1: InterpolationMode.LANCZOS,
    }
    return inverse_modes_mapping[i]


pil_modes_mapping = {
    InterpolationMode.NEAREST: 0,
    InterpolationMode.BILINEAR: 2,
    InterpolationMode.BICUBIC: 3,
    InterpolationMode.BOX: 4,
    InterpolationMode.HAMMING: 5,
    InterpolationMode.LANCZOS: 1,
}

_is_pil_image = F_pil._is_pil_image


def _get_image_size(img: Tensor) -> List[int]:
    """Returns image size as [w, h]
    """
    if isinstance(img, torch.Tensor):
        return F_t._get_image_size(img)

    return F_pil._get_image_size(img)


def _get_image_num_channels(img: Tensor) -> int:
    """Returns number of image channels
    """
    if isinstance(img, torch.Tensor):
        return F_t._get_image_num_channels(img)

    return F_pil._get_image_num_channels(img)


@torch.jit.unused
def _is_numpy(img: Any) -> bool:
    return isinstance(img, np.ndarray)


@torch.jit.unused
def _is_numpy_image(img: Any) -> bool:
    return img.ndim in {2, 3}


def to_tensor(pic):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
    This function does not support torchscript.

    See :class:`~torchvision.transforms.ToTensor` for more details.

    Args:
        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
    """
    if not(F_pil._is_pil_image(pic) or _is_numpy(pic)):
        raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))

    if _is_numpy(pic) and not _is_numpy_image(pic):
        raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))

    default_float_dtype = torch.get_default_dtype()

    if isinstance(pic, np.ndarray):
        # handle numpy array
        if pic.ndim == 2:
            pic = pic[:, :, None]

        img = torch.from_numpy(pic.transpose((2, 0, 1))).contiguous()
        # backward compatibility
        if isinstance(img, torch.ByteTensor):
            return img.to(dtype=default_float_dtype).div(255)
        else:
            return img

    if accimage is not None and isinstance(pic, accimage.Image):
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
        pic.copyto(nppic)
        return torch.from_numpy(nppic).to(dtype=default_float_dtype)

    # handle PIL Image
    if pic.mode == 'I':
        img = torch.from_numpy(np.array(pic, np.int32, copy=False))
    elif pic.mode == 'I;16':
        img = torch.from_numpy(np.array(pic, np.int16, copy=False))
    elif pic.mode == 'F':
        img = torch.from_numpy(np.array(pic, np.float32, copy=False))
    elif pic.mode == '1':
        img = 255 * torch.from_numpy(np.array(pic, np.uint8, copy=False))
    else:
        img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))

    img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1)).contiguous()
    if isinstance(img, torch.ByteTensor):
        return img.to(dtype=default_float_dtype).div(255)
    else:
        return img
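
# Usage sketch (editorial addition, not part of the original file; the image path
# below is hypothetical). ``to_tensor`` returns a CHW float tensor scaled to [0, 1]
# for 8-bit inputs:
#
#   >>> from PIL import Image
#   >>> pil_img = Image.open("example.jpg")   # hypothetical file
#   >>> t = to_tensor(pil_img)
#   >>> t.dtype                                # torch.float32, shape (C, H, W)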


def pil_to_tensor(pic):
    """Convert a ``PIL Image`` to a tensor of the same type.
    This function does not support torchscript.

    See :class:`~torchvision.transforms.PILToTensor` for more details.

    Args:
        pic (PIL Image): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
    """
    if not F_pil._is_pil_image(pic):
        raise TypeError('pic should be PIL Image. Got {}'.format(type(pic)))

    if accimage is not None and isinstance(pic, accimage.Image):
        # accimage format is always uint8 internally, so always return uint8 here
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.uint8)
        pic.copyto(nppic)
        return torch.as_tensor(nppic)

    # handle PIL Image
    img = torch.as_tensor(np.asarray(pic))
    img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1))
    return img


def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor:
    """Convert a tensor image to the given ``dtype`` and scale the values accordingly
    This function does not support PIL Image.

    Args:
        image (torch.Tensor): Image to be converted
        dtype (torch.dtype): Desired data type of the output

    Returns:
        Tensor: Converted image

    .. note::

        When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
        If converted back and forth, this mismatch has no effect.

    Raises:
        RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
            well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
            overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
            of the integer ``dtype``.
    """
    if not isinstance(image, torch.Tensor):
        raise TypeError('Input img should be Tensor Image')

    return F_t.convert_image_dtype(image, dtype)
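
# Usage sketch (editorial addition): converting a uint8 image tensor to float32
# rescales values from [0, 255] to [0.0, 1.0]; converting back restores uint8.
#
#   >>> img_u8 = torch.randint(0, 256, (3, 4, 4), dtype=torch.uint8)
#   >>> img_f = convert_image_dtype(img_u8, torch.float32)    # values in [0.0, 1.0]
#   >>> convert_image_dtype(img_f, torch.uint8).dtype
#   torch.uint8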


def to_pil_image(pic, mode=None):
    """Convert a tensor or an ndarray to PIL Image. This function does not support torchscript.

    See :class:`~torchvision.transforms.ToPILImage` for more details.

    Args:
        pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).

    .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes

    Returns:
        PIL Image: Image converted to PIL Image.
    """
    if not(isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
        raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))

    elif isinstance(pic, torch.Tensor):
        if pic.ndimension() not in {2, 3}:
            raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndimension()))

        elif pic.ndimension() == 2:
            # if 2D image, add channel dimension (CHW)
            pic = pic.unsqueeze(0)

        # check number of channels
        if pic.shape[-3] > 4:
            raise ValueError('pic should not have > 4 channels. Got {} channels.'.format(pic.shape[-3]))

    elif isinstance(pic, np.ndarray):
        if pic.ndim not in {2, 3}:
            raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))

        elif pic.ndim == 2:
            # if 2D image, add channel dimension (HWC)
            pic = np.expand_dims(pic, 2)

        # check number of channels
        if pic.shape[-1] > 4:
            raise ValueError('pic should not have > 4 channels. Got {} channels.'.format(pic.shape[-1]))

    npimg = pic
    if isinstance(pic, torch.Tensor):
        if pic.is_floating_point() and mode != 'F':
            pic = pic.mul(255).byte()
        npimg = np.transpose(pic.cpu().numpy(), (1, 2, 0))

    if not isinstance(npimg, np.ndarray):
        raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +
                        'not {}'.format(type(npimg)))

    if npimg.shape[2] == 1:
        expected_mode = None
        npimg = npimg[:, :, 0]
        if npimg.dtype == np.uint8:
            expected_mode = 'L'
        elif npimg.dtype == np.int16:
            expected_mode = 'I;16'
        elif npimg.dtype == np.int32:
            expected_mode = 'I'
        elif npimg.dtype == np.float32:
            expected_mode = 'F'
        if mode is not None and mode != expected_mode:
            raise ValueError("Incorrect mode ({}) supplied for input type {}. Should be {}"
                             .format(mode, np.dtype, expected_mode))
        mode = expected_mode

    elif npimg.shape[2] == 2:
        permitted_2_channel_modes = ['LA']
        if mode is not None and mode not in permitted_2_channel_modes:
            raise ValueError("Only modes {} are supported for 2D inputs".format(permitted_2_channel_modes))

        if mode is None and npimg.dtype == np.uint8:
            mode = 'LA'

    elif npimg.shape[2] == 4:
        permitted_4_channel_modes = ['RGBA', 'CMYK', 'RGBX']
        if mode is not None and mode not in permitted_4_channel_modes:
            raise ValueError("Only modes {} are supported for 4D inputs".format(permitted_4_channel_modes))

        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGBA'
    else:
        permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']
        if mode is not None and mode not in permitted_3_channel_modes:
            raise ValueError("Only modes {} are supported for 3D inputs".format(permitted_3_channel_modes))
        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGB'

    if mode is None:
        raise TypeError('Input type {} is not supported'.format(npimg.dtype))

    return Image.fromarray(npimg, mode=mode)
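
# Usage sketch (editorial addition): a float CHW tensor in [0, 1] becomes an 8-bit
# RGB PIL image; the mode is inferred from the number of channels and dtype.
#
#   >>> t = torch.rand(3, 32, 32)
#   >>> pil_img = to_pil_image(t)
#   >>> pil_img.mode, pil_img.size
#   ('RGB', (32, 32))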


def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool = False) -> Tensor:
    """Normalize a float tensor image with mean and standard deviation.
    This transform does not support PIL Image.

    .. note::
        This transform acts out of place by default, i.e., it does not mutate the input tensor.

    See :class:`~torchvision.transforms.Normalize` for more details.

    Args:
        tensor (Tensor): Float tensor image of size (C, H, W) or (B, C, H, W) to be normalized.
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace (bool, optional): Bool to make this operation inplace.

    Returns:
        Tensor: Normalized Tensor image.
    """
    if not isinstance(tensor, torch.Tensor):
        raise TypeError('Input tensor should be a torch tensor. Got {}.'.format(type(tensor)))

    if not tensor.is_floating_point():
        raise TypeError('Input tensor should be a float tensor. Got {}.'.format(tensor.dtype))

    if tensor.ndim < 3:
        raise ValueError('Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = '
                         '{}.'.format(tensor.size()))

    if not inplace:
        tensor = tensor.clone()

    dtype = tensor.dtype
    mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)
    std = torch.as_tensor(std, dtype=dtype, device=tensor.device)
    if (std == 0).any():
        raise ValueError('std evaluated to zero after conversion to {}, leading to division by zero.'.format(dtype))
    if mean.ndim == 1:
        mean = mean.view(-1, 1, 1)
    if std.ndim == 1:
        std = std.view(-1, 1, 1)
    tensor.sub_(mean).div_(std)
    return tensor
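
# Usage sketch (editorial addition): normalizing a float image channel-wise; the
# mean/std values below are the commonly used ImageNet statistics, chosen only as
# an example.
#
#   >>> img = torch.rand(3, 224, 224)
#   >>> out = normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
#   >>> out.shape
#   torch.Size([3, 224, 224])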


def resize(img: Tensor, size: List[int], interpolation: InterpolationMode = InterpolationMode.BILINEAR,
           max_size: Optional[int] = None) -> Tensor:
    r"""Resize the input image to the given size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. warning::
        The output image might be different depending on its type: when downsampling, the interpolation of PIL images
        and tensors is slightly different, because PIL applies antialiasing. This may lead to significant differences
        in the performance of a network. Therefore, it is preferable to train and serve a model with the same input
        types.

    Args:
        img (PIL Image or Tensor): Image to be resized.
        size (sequence or int): Desired output size. If size is a sequence like
            (h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number maintaining
            the aspect ratio, i.e., if height > width, then the image will be rescaled to
            :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`.
            In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`.
            Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
            ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        max_size (int, optional): The maximum allowed for the longer edge of
            the resized image: if the longer edge of the image is greater
            than ``max_size`` after being resized according to ``size``, then
            the image is resized again so that the longer edge is equal to
            ``max_size``. As a result, ``size`` might be overruled, i.e. the
            smaller edge may be shorter than ``size``. This is only supported
            if ``size`` is an int (or a sequence of length 1 in torchscript
            mode).

    Returns:
        PIL Image or Tensor: Resized image.
    """
    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be a InterpolationMode")

    if not isinstance(img, torch.Tensor):
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.resize(img, size=size, interpolation=pil_interpolation, max_size=max_size)

    return F_t.resize(img, size=size, interpolation=interpolation.value, max_size=max_size)
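
# Usage sketch (editorial addition): with a single-element size the smaller edge is
# matched and the aspect ratio kept; ``max_size`` then caps the longer edge.
#
#   >>> img = torch.rand(3, 300, 600)
#   >>> resize(img, [256]).shape                 # torch.Size([3, 256, 512])
#   >>> resize(img, [256], max_size=400).shape   # torch.Size([3, 200, 400])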


def scale(*args, **kwargs):
    warnings.warn("The use of the transforms.Scale transform is deprecated, " +
                  "please use transforms.Resize instead.")
    return resize(*args, **kwargs)


def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "constant") -> Tensor:
    r"""Pad the given image on all sides with the given "pad" value.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means at most 2 leading dimensions for mode reflect and symmetric,
    at most 3 leading dimensions for mode edge,
    and an arbitrary number of leading dimensions for mode constant

    Args:
        img (PIL Image or Tensor): Image to be padded.
        padding (int or sequence): Padding on each border. If a single int is provided this
            is used to pad all borders. If sequence of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a sequence of length 4 is provided
            this is the padding for the left, top, right and bottom borders respectively.
            In torchscript mode padding as single int is not supported, use a sequence of length 1: ``[padding, ]``.
        fill (number or str or tuple): Pixel fill value for constant fill. Default is 0.
            If a tuple of length 3, it is used to fill R, G, B channels respectively.
            This value is only used when the padding_mode is constant.
            Only number is supported for torch Tensor.
            Only int or str or tuple value is supported for PIL Image.
        padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.

            - constant: pads with a constant value; this value is specified with fill

            - edge: pads with the last value on the edge of the image,
                    if the input is a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2

            - reflect: pads with reflection of image (without repeating the last value on the edge)

                       padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
                       will result in [3, 2, 1, 2, 3, 4, 3, 2]

            - symmetric: pads with reflection of image (repeating the last value on the edge)

                         padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
                         will result in [2, 1, 1, 2, 3, 4, 4, 3]

    Returns:
        PIL Image or Tensor: Padded image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)

    return F_t.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)
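
# Usage sketch (editorial addition): a length-2 padding sequence pads left/right by
# the first value and top/bottom by the second, so a (3, 4, 4) image grows to (3, 8, 6).
#
#   >>> img = torch.rand(3, 4, 4)
#   >>> pad(img, [1, 2], fill=0).shape
#   torch.Size([3, 8, 6])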


def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:
    """Crop the given image at specified location and output size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Args:
        img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.

    Returns:
        PIL Image or Tensor: Cropped image.
    """

    if not isinstance(img, torch.Tensor):
        return F_pil.crop(img, top, left, height, width)

    return F_t.crop(img, top, left, height, width)


def center_crop(img: Tensor, output_size: List[int]) -> Tensor:
    """Crops the given image at the center.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
    If image size is smaller than output size along any edge, image is padded with 0 and then center cropped.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        output_size (sequence or int): (height, width) of the crop box. If int or sequence with single int,
            it is used for both directions.

    Returns:
        PIL Image or Tensor: Cropped image.
    """
    if isinstance(output_size, numbers.Number):
        output_size = (int(output_size), int(output_size))
    elif isinstance(output_size, (tuple, list)) and len(output_size) == 1:
        output_size = (output_size[0], output_size[0])

    image_width, image_height = _get_image_size(img)
    crop_height, crop_width = output_size

    if crop_width > image_width or crop_height > image_height:
        padding_ltrb = [
            (crop_width - image_width) // 2 if crop_width > image_width else 0,
            (crop_height - image_height) // 2 if crop_height > image_height else 0,
            (crop_width - image_width + 1) // 2 if crop_width > image_width else 0,
            (crop_height - image_height + 1) // 2 if crop_height > image_height else 0,
        ]
        img = pad(img, padding_ltrb, fill=0)  # PIL uses fill value 0
        image_width, image_height = _get_image_size(img)
        if crop_width == image_width and crop_height == image_height:
            return img

    crop_top = int(round((image_height - crop_height) / 2.))
    crop_left = int(round((image_width - crop_width) / 2.))
    return crop(img, crop_top, crop_left, crop_height, crop_width)
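
# Usage sketch (editorial addition): a center crop larger than the input first pads
# the image with zeros, so the requested output size is always produced.
#
#   >>> img = torch.rand(3, 10, 10)
#   >>> center_crop(img, [4, 6]).shape      # torch.Size([3, 4, 6])
#   >>> center_crop(img, [12, 12]).shape    # torch.Size([3, 12, 12])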


def resized_crop(
        img: Tensor, top: int, left: int, height: int, width: int, size: List[int],
        interpolation: InterpolationMode = InterpolationMode.BILINEAR
) -> Tensor:
    """Crop the given image and resize it to desired size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.

    Args:
        img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.
        size (sequence or int): Desired output size. Same semantics as ``resize``.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`.
            Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
            ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.

    Returns:
        PIL Image or Tensor: Cropped image.
    """
    img = crop(img, top, left, height, width)
    img = resize(img, size, interpolation)
    return img


def hflip(img: Tensor) -> Tensor:
    """Horizontally flip the given image.

    Args:
        img (PIL Image or Tensor): Image to be flipped. If img
            is a Tensor, it is expected to be in [..., H, W] format,
            where ... means it can have an arbitrary number of leading
            dimensions.

    Returns:
        PIL Image or Tensor:  Horizontally flipped image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.hflip(img)

    return F_t.hflip(img)


def _get_perspective_coeffs(
        startpoints: List[List[int]], endpoints: List[List[int]]
) -> List[float]:
    """Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms.

    In Perspective Transform each pixel (x, y) in the original image gets transformed as,
     (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) )

    Args:
        startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
        endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.

    Returns:
        octuple (a, b, c, d, e, f, g, h) for transforming each pixel.
    """
    a_matrix = torch.zeros(2 * len(startpoints), 8, dtype=torch.float)

    for i, (p1, p2) in enumerate(zip(endpoints, startpoints)):
        a_matrix[2 * i, :] = torch.tensor([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
        a_matrix[2 * i + 1, :] = torch.tensor([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])

    b_matrix = torch.tensor(startpoints, dtype=torch.float).view(8)
    res = torch.lstsq(b_matrix, a_matrix)[0]

    output: List[float] = res.squeeze(1).tolist()
    return output


def perspective(
        img: Tensor,
        startpoints: List[List[int]],
        endpoints: List[List[int]],
        interpolation: InterpolationMode = InterpolationMode.BILINEAR,
        fill: Optional[List[float]] = None
) -> Tensor:
    """Perform perspective transform of the given image.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): Image to be transformed.
        startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
        endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.
            In torchscript mode single int/float value is not supported, please use a sequence
            of length 1: ``[value, ]``.
            If input is PIL Image, this option is only available for ``Pillow>=5.0.0``.

    Returns:
        PIL Image or Tensor: transformed Image.
    """

    coeffs = _get_perspective_coeffs(startpoints, endpoints)

    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be a InterpolationMode")

    if not isinstance(img, torch.Tensor):
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.perspective(img, coeffs, interpolation=pil_interpolation, fill=fill)

    return F_t.perspective(img, coeffs, interpolation=interpolation.value, fill=fill)
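
# Usage sketch (editorial addition): warping the corners of a 32x32 image inwards by
# a few pixels; the corner lists follow the documented
# [top-left, top-right, bottom-right, bottom-left] order.
#
#   >>> img = torch.rand(3, 32, 32)
#   >>> startpoints = [[0, 0], [31, 0], [31, 31], [0, 31]]
#   >>> endpoints = [[2, 1], [29, 3], [30, 30], [1, 28]]
#   >>> perspective(img, startpoints, endpoints).shape
#   torch.Size([3, 32, 32])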


def vflip(img: Tensor) -> Tensor:
    """Vertically flip the given image.

    Args:
        img (PIL Image or Tensor): Image to be flipped. If img
            is a Tensor, it is expected to be in [..., H, W] format,
            where ... means it can have an arbitrary number of leading
            dimensions.

    Returns:
        PIL Image or Tensor:  Vertically flipped image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.vflip(img)

    return F_t.vflip(img)


def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
    """Crop the given image into four corners and the central crop.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).

    Returns:
       tuple: tuple (tl, tr, bl, br, center)
                Corresponding top left, top right, bottom left, bottom right and center crop.
    """
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and len(size) == 1:
        size = (size[0], size[0])

    if len(size) != 2:
        raise ValueError("Please provide only two dimensions (h, w) for size.")

    image_width, image_height = _get_image_size(img)
    crop_height, crop_width = size
    if crop_width > image_width or crop_height > image_height:
        msg = "Requested crop size {} is bigger than input size {}"
        raise ValueError(msg.format(size, (image_height, image_width)))

    tl = crop(img, 0, 0, crop_height, crop_width)
    tr = crop(img, 0, image_width - crop_width, crop_height, crop_width)
    bl = crop(img, image_height - crop_height, 0, crop_height, crop_width)
    br = crop(img, image_height - crop_height, image_width - crop_width, crop_height, crop_width)

    center = center_crop(img, [crop_height, crop_width])

    return tl, tr, bl, br, center


def ten_crop(img: Tensor, size: List[int], vertical_flip: bool = False) -> List[Tensor]:
    """Generate ten cropped images from the given image.
    Crop the given image into four corners and the central crop plus the
    flipped version of these (horizontal flipping is used by default).
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
        vertical_flip (bool): Use vertical flipping instead of horizontal

    Returns:
        tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
            Corresponding top left, top right, bottom left, bottom right and
            center crop and same for the flipped image.
    """
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and len(size) == 1:
        size = (size[0], size[0])

    if len(size) != 2:
        raise ValueError("Please provide only two dimensions (h, w) for size.")

    first_five = five_crop(img, size)

    if vertical_flip:
        img = vflip(img)
    else:
        img = hflip(img)

    second_five = five_crop(img, size)
    return first_five + second_five
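
# Usage sketch (editorial addition): ``five_crop``/``ten_crop`` return tuples of
# crops, which downstream code typically stacks into a batch dimension.
#
#   >>> img = torch.rand(3, 64, 64)
#   >>> crops = ten_crop(img, [32, 32])
#   >>> len(crops), crops[0].shape
#   (10, torch.Size([3, 32, 32]))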


def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor:
    """Adjust brightness of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
        If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
        where ... means it can have an arbitrary number of leading dimensions.
        brightness_factor (float):  How much to adjust the brightness. Can be
            any non negative number. 0 gives a black image, 1 gives the
            original image while 2 increases the brightness by a factor of 2.

    Returns:
        PIL Image or Tensor: Brightness adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_brightness(img, brightness_factor)

    return F_t.adjust_brightness(img, brightness_factor)


def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor:
    """Adjust contrast of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
        If img is torch Tensor, it is expected to be in [..., 3, H, W] format,
        where ... means it can have an arbitrary number of leading dimensions.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.

    Returns:
        PIL Image or Tensor: Contrast adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_contrast(img, contrast_factor)

    return F_t.adjust_contrast(img, contrast_factor)


def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor:
    """Adjust color saturation of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
        If img is torch Tensor, it is expected to be in [..., 3, H, W] format,
        where ... means it can have an arbitrary number of leading dimensions.
        saturation_factor (float):  How much to adjust the saturation. 0 will
            give a black and white image, 1 will give the original image while
            2 will enhance the saturation by a factor of 2.

    Returns:
        PIL Image or Tensor: Saturation adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_saturation(img, saturation_factor)

    return F_t.adjust_saturation(img, saturation_factor)


def adjust_hue(img: Tensor, hue_factor: float) -> Tensor:
    """Adjust hue of an image.

    The image hue is adjusted by converting the image to HSV and
    cyclically shifting the intensities in the hue channel (H).
    The image is then converted back to original image mode.

    `hue_factor` is the amount of shift in H channel and must be in the
    interval `[-0.5, 0.5]`.

    See `Hue`_ for more details.

    .. _Hue: https://en.wikipedia.org/wiki/Hue

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
        If img is torch Tensor, it is expected to be in [..., 3, H, W] format,
        where ... means it can have an arbitrary number of leading dimensions.
        If img is PIL Image mode "1", "L", "I", "F" and modes with transparency (alpha channel) are not supported.
        hue_factor (float):  How much to shift the hue channel. Should be in
            [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
            HSV space in positive and negative direction respectively.
            0 means no shift. Therefore, both -0.5 and 0.5 will give an image
            with complementary colors while 0 gives the original image.

    Returns:
        PIL Image or Tensor: Hue adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_hue(img, hue_factor)

    return F_t.adjust_hue(img, hue_factor)
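
# Usage sketch (editorial addition): the adjust_* functions share one calling
# convention; a hue shift of 0 (or a factor of 1 for brightness/contrast/saturation)
# returns the input unchanged.
#
#   >>> img = torch.rand(3, 16, 16)
#   >>> out = adjust_hue(img, 0.1)
#   >>> out = adjust_brightness(out, 1.5)
#   >>> out.shape
#   torch.Size([3, 16, 16])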


def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:
    r"""Perform gamma correction on an image.

    Also known as Power Law Transform. Intensities in RGB mode are adjusted
    based on the following equation:

    .. math::
        I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}

    See `Gamma Correction`_ for more details.

    .. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction

    Args:
        img (PIL Image or Tensor): PIL Image to be adjusted.
        If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
        where ... means it can have an arbitrary number of leading dimensions.
        If img is PIL Image, modes with transparency (alpha channel) are not supported.
        gamma (float): Non negative real number, same as :math:`\gamma` in the equation.
            gamma larger than 1 makes the shadows darker,
            while gamma smaller than 1 makes dark regions lighter.
        gain (float): The constant multiplier.
    Returns:
        PIL Image or Tensor: Gamma correction adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_gamma(img, gamma, gain)

    return F_t.adjust_gamma(img, gamma, gain)


def _get_inverse_affine_matrix(
        center: List[float], angle: float, translate: List[float], scale: float, shear: List[float]
) -> List[float]:
    # Helper method to compute inverse matrix for affine transformation

    # As it is explained in PIL.Image.rotate
    # We need to compute the INVERSE of the affine transformation matrix: M = T * C * RSS * C^-1
    # where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
    #       C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
    #       RSS is rotation with scale and shear matrix
    #       RSS(a, s, (sx, sy)) =
    #       = R(a) * S(s) * SHy(sy) * SHx(sx)
    #       = [ s*cos(a - sy)/cos(sy), s*(-cos(a - sy)*tan(sx)/cos(sy) - sin(a)), 0 ]
    #         [ s*sin(a - sy)/cos(sy), s*(-sin(a - sy)*tan(sx)/cos(sy) + cos(a)), 0 ]
    #         [ 0                    , 0                                      , 1 ]
    #
    # where R is a rotation matrix, S is a scaling matrix, and SHx and SHy are the shears:
    # SHx(s) = [1, -tan(s)] and SHy(s) = [1      , 0]
    #          [0, 1      ]              [-tan(s), 1]
    #
    # Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1

    rot = math.radians(angle)
    sx, sy = [math.radians(s) for s in shear]

    cx, cy = center
    tx, ty = translate

    # RSS without scaling
    a = math.cos(rot - sy) / math.cos(sy)
    b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot)
    c = math.sin(rot - sy) / math.cos(sy)
    d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot)

    # Inverted rotation matrix with scale and shear
    # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
    matrix = [d, -b, 0.0, -c, a, 0.0]
    matrix = [x / scale for x in matrix]

    # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
    matrix[2] += matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty)
    matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty)

    # Apply center translation: C * RSS^-1 * C^-1 * T^-1
    matrix[2] += cx
    matrix[5] += cy

    return matrix


def rotate(
        img: Tensor, angle: float, interpolation: InterpolationMode = InterpolationMode.NEAREST,
        expand: bool = False, center: Optional[List[int]] = None,
        fill: Optional[List[float]] = None, resample: Optional[int] = None
) -> Tensor:
    """Rotate the image by angle.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): image to be rotated.
        angle (number): rotation angle value in degrees, counter-clockwise.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        expand (bool, optional): Optional expansion flag.
            If true, expands the output image to make it large enough to hold the entire rotated image.
            If false or omitted, make the output image the same size as the input image.
            Note that the expand flag assumes rotation around the center and no translation.
        center (sequence, optional): Optional center of rotation. Origin is the upper left corner.
            Default is the center of the image.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.
            In torchscript mode single int/float value is not supported, please use a sequence
            of length 1: ``[value, ]``.
            If input is PIL Image, this option is only available for ``Pillow>=5.2.0``.

    Returns:
        PIL Image or Tensor: Rotated image.

    .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters

    """
    if resample is not None:
        warnings.warn(
            "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead"
        )
        interpolation = _interpolation_modes_from_int(resample)

    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if not isinstance(angle, (int, float)):
        raise TypeError("Argument angle should be int or float")

    if center is not None and not isinstance(center, (list, tuple)):
        raise TypeError("Argument center should be a sequence")

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be a InterpolationMode")

    if not isinstance(img, torch.Tensor):
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.rotate(img, angle=angle, interpolation=pil_interpolation, expand=expand, center=center, fill=fill)

    center_f = [0.0, 0.0]
    if center is not None:
        img_size = _get_image_size(img)
        # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
        center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, img_size)]

    # due to current incoherence of rotation angle direction between affine and rotate implementations
    # we need to set -angle.
    matrix = _get_inverse_affine_matrix(center_f, -angle, [0.0, 0.0], 1.0, [0.0, 0.0])
    return F_t.rotate(img, matrix=matrix, interpolation=interpolation.value, expand=expand, fill=fill)
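
# Usage sketch (editorial addition): rotating by 45 degrees; with ``expand=True`` the
# output canvas is enlarged so the whole rotated image fits.
#
#   >>> img = torch.rand(3, 32, 32)
#   >>> rotate(img, 45.0).shape                       # torch.Size([3, 32, 32])
#   >>> rotate(img, 45.0, expand=True).shape[-1] > 32
#   True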


def affine(
        img: Tensor, angle: float, translate: List[int], scale: float, shear: List[float],
        interpolation: InterpolationMode = InterpolationMode.NEAREST, fill: Optional[List[float]] = None,
        resample: Optional[int] = None, fillcolor: Optional[List[float]] = None
) -> Tensor:
    """Apply affine transformation on the image keeping image center invariant.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): image to transform.
        angle (number): rotation angle in degrees between -180 and 180, clockwise direction.
        translate (sequence of integers): horizontal and vertical translations (post-rotation translation)
        scale (float): overall scale
        shear (float or sequence): shear angle value in degrees between -180 to 180, clockwise direction.
            If a sequence is specified, the first value corresponds to a shear parallel to the x axis, while
            the second value corresponds to a shear parallel to the y axis.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.
            In torchscript mode single int/float value is not supported, please use a sequence
            of length 1: ``[value, ]``.
            If input is PIL Image, this option is only available for ``Pillow>=5.0.0``.
        fillcolor (sequence, int, float): deprecated argument and will be removed since v0.10.0.
            Please use the ``fill`` parameter instead.
        resample (int, optional): deprecated argument and will be removed since v0.10.0.
            Please use the ``interpolation`` parameter instead.

    Returns:
        PIL Image or Tensor: Transformed image.
    """
    if resample is not None:
        warnings.warn(
            "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead"
        )
        interpolation = _interpolation_modes_from_int(resample)

    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if fillcolor is not None:
        warnings.warn(
            "Argument fillcolor is deprecated and will be removed since v0.10.0. Please, use fill instead"
        )
        fill = fillcolor

    if not isinstance(angle, (int, float)):
        raise TypeError("Argument angle should be int or float")

    if not isinstance(translate, (list, tuple)):
        raise TypeError("Argument translate should be a sequence")

    if len(translate) != 2:
        raise ValueError("Argument translate should be a sequence of length 2")

    if scale <= 0.0:
        raise ValueError("Argument scale should be positive")

    if not isinstance(shear, (numbers.Number, (list, tuple))):
        raise TypeError("Shear should be either a single value or a sequence of two values")

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be a InterpolationMode")

    if isinstance(angle, int):
        angle = float(angle)

    if isinstance(translate, tuple):
        translate = list(translate)

    if isinstance(shear, numbers.Number):
        shear = [shear, 0.0]

    if isinstance(shear, tuple):
        shear = list(shear)

    if len(shear) == 1:
        shear = [shear[0], shear[0]]

    if len(shear) != 2:
        raise ValueError("Shear should be a sequence containing two values. Got {}".format(shear))

    img_size = _get_image_size(img)
    if not isinstance(img, torch.Tensor):
        # center = (img_size[0] * 0.5 + 0.5, img_size[1] * 0.5 + 0.5)
        # it is visually better to estimate the center without 0.5 offset
        # otherwise image rotated by 90 degrees is shifted vs output image of torch.rot90 or F_t.affine
        center = [img_size[0] * 0.5, img_size[1] * 0.5]
        matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.affine(img, matrix=matrix, interpolation=pil_interpolation, fill=fill)

    translate_f = [1.0 * t for t in translate]
    matrix = _get_inverse_affine_matrix([0.0, 0.0], angle, translate_f, scale, shear)
    return F_t.affine(img, matrix=matrix, interpolation=interpolation.value, fill=fill)
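
# Usage sketch (editorial addition): a pure translation by (5, 10) pixels with no
# rotation, scaling, or shear; unlike ``rotate``, the output size always matches the input.
#
#   >>> img = torch.rand(3, 32, 32)
#   >>> affine(img, angle=0.0, translate=[5, 10], scale=1.0, shear=[0.0]).shape
#   torch.Size([3, 32, 32])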


@torch.jit.unused
def to_grayscale(img, num_output_channels=1):
    """Convert PIL image of any mode (RGB, HSV, LAB, etc) to grayscale version of image.
    This transform does not support torch Tensor.

    Args:
        img (PIL Image): PIL Image to be converted to grayscale.
        num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default is 1.

    Returns:
        PIL Image: Grayscale version of the image.
            if num_output_channels = 1 : returned image is single channel

            if num_output_channels = 3 : returned image is 3 channel with r = g = b
    """
    if isinstance(img, Image.Image):
        return F_pil.to_grayscale(img, num_output_channels)

    raise TypeError("Input should be PIL Image")


def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor:
    """Convert RGB image to grayscale version of image.
    If the image is torch Tensor, it is expected
    to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions

    Note:
        Please, note that this method supports only RGB images as input. For inputs in other color spaces,
        please, consider using :meth:`~torchvision.transforms.functional.to_grayscale` with PIL Image.

    Args:
        img (PIL Image or Tensor): RGB Image to be converted to grayscale.
        num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default, 1.

    Returns:
        PIL Image or Tensor: Grayscale version of the image.
            if num_output_channels = 1 : returned image is single channel

            if num_output_channels = 3 : returned image is 3 channel with r = g = b
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.to_grayscale(img, num_output_channels)

    return F_t.rgb_to_grayscale(img, num_output_channels)


def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool = False) -> Tensor:
    """ Erase the input Tensor Image with given value.
    This transform does not support PIL Image.

    Args:
        img (Tensor Image): Tensor image of size (C, H, W) to be erased
        i (int): i in (i,j) i.e. coordinates of the upper left corner.
        j (int): j in (i,j) i.e. coordinates of the upper left corner.
        h (int): Height of the erased region.
        w (int): Width of the erased region.
        v: Erasing value.
        inplace(bool, optional): For in-place operations. By default is set False.

    Returns:
        Tensor Image: Erased image.
    """
    if not isinstance(img, torch.Tensor):
        raise TypeError('img should be Tensor Image. Got {}'.format(type(img)))

    if not inplace:
        img = img.clone()

    img[..., i:i + h, j:j + w] = v
    return img
1165
1166
1167
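# Illustrative usage sketch, not part of the public module code; the coordinates and
# patch size below are arbitrary examples:
#
#     img = torch.rand(3, 32, 32)
#     out = erase(img, i=5, j=8, h=10, w=10, v=torch.tensor(0.0))
#     # out[..., 5:15, 8:18] is zeroed; img itself is untouched since inplace=False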


def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Tensor:
    """Performs Gaussian blurring on the image by given kernel.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): Image to be blurred
        kernel_size (sequence of ints or int): Gaussian kernel size. Can be a sequence of integers
            like ``(kx, ky)`` or a single integer for square kernels.
            In torchscript mode kernel_size as single int is not supported, use a sequence of length 1: ``[ksize, ]``.
        sigma (sequence of floats or float, optional): Gaussian kernel standard deviation. Can be a
            sequence of floats like ``(sigma_x, sigma_y)`` or a single float to define the
            same sigma in both X/Y directions. If None, then it is computed using
            ``kernel_size`` as ``sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8``.
            Default, None. In torchscript mode sigma as single float is
            not supported, use a sequence of length 1: ``[sigma, ]``.

    Returns:
        PIL Image or Tensor: Gaussian Blurred version of the image.
    """
    if not isinstance(kernel_size, (int, list, tuple)):
        raise TypeError('kernel_size should be int or a sequence of integers. Got {}'.format(type(kernel_size)))
    if isinstance(kernel_size, int):
        kernel_size = [kernel_size, kernel_size]
    if len(kernel_size) != 2:
        raise ValueError('If kernel_size is a sequence its length should be 2. Got {}'.format(len(kernel_size)))
    for ksize in kernel_size:
        if ksize % 2 == 0 or ksize < 0:
            raise ValueError('kernel_size should have odd and positive integers. Got {}'.format(kernel_size))

    if sigma is None:
        sigma = [ksize * 0.15 + 0.35 for ksize in kernel_size]

    if sigma is not None and not isinstance(sigma, (int, float, list, tuple)):
        raise TypeError('sigma should be either float or sequence of floats. Got {}'.format(type(sigma)))
    if isinstance(sigma, (int, float)):
        sigma = [float(sigma), float(sigma)]
    if isinstance(sigma, (list, tuple)) and len(sigma) == 1:
        sigma = [sigma[0], sigma[0]]
    if len(sigma) != 2:
        raise ValueError('If sigma is a sequence, its length should be 2. Got {}'.format(len(sigma)))
    for s in sigma:
        if s <= 0.:
            raise ValueError('sigma should have positive values. Got {}'.format(sigma))

    t_img = img
    if not isinstance(img, torch.Tensor):
        if not F_pil._is_pil_image(img):
            raise TypeError('img should be PIL Image or Tensor. Got {}'.format(type(img)))

        t_img = to_tensor(img)

    output = F_t.gaussian_blur(t_img, kernel_size, sigma)

    if not isinstance(img, torch.Tensor):
        output = to_pil_image(output)
    return output
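# Illustrative usage sketch, not part of the public module code; the kernel size and
# sigma below are arbitrary examples (kernel sizes must be odd and positive):
#
#     img = torch.rand(3, 64, 64)
#     blurred = gaussian_blur(img, kernel_size=[5, 5], sigma=[1.5, 1.5])
#     blurred_auto = gaussian_blur(img, kernel_size=[5, 5])   # sigma derived from kernel_size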


def invert(img: Tensor) -> Tensor:
    """Invert the colors of an RGB/grayscale image.

    Args:
        img (PIL Image or Tensor): Image to have its colors inverted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Returns:
        PIL Image or Tensor: Color inverted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.invert(img)

    return F_t.invert(img)
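# Illustrative usage sketch, not part of the public module code; the uint8 input below
# is an arbitrary example (for uint8 tensors each value v is mapped to 255 - v):
#
#     img = torch.randint(0, 256, (3, 16, 16), dtype=torch.uint8)
#     inv = invert(img)   # inv == 255 - img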


def posterize(img: Tensor, bits: int) -> Tensor:
    """Posterize an image by reducing the number of bits for each color channel.

    Args:
        img (PIL Image or Tensor): Image to have its colors posterized.
            If img is torch Tensor, it should be of type torch.uint8 and
            it is expected to be in [..., 1 or 3, H, W] format, where ... means
            it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".
        bits (int): The number of bits to keep for each channel (0-8).
    Returns:
        PIL Image or Tensor: Posterized image.
    """
    if not (0 <= bits <= 8):
        raise ValueError('The number of bits should be between 0 and 8. Got {}'.format(bits))

    if not isinstance(img, torch.Tensor):
        return F_pil.posterize(img, bits)

    return F_t.posterize(img, bits)
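# Illustrative usage sketch, not part of the public module code; bits=3 is an arbitrary
# example (only the 3 most significant bits of each uint8 channel are kept):
#
#     img = torch.randint(0, 256, (3, 16, 16), dtype=torch.uint8)
#     poster = posterize(img, bits=3)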


def solarize(img: Tensor, threshold: float) -> Tensor:
    """Solarize an RGB/grayscale image by inverting all pixel values above a threshold.

    Args:
        img (PIL Image or Tensor): Image to be solarized.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".
        threshold (float): All pixels equal or above this value are inverted.
    Returns:
        PIL Image or Tensor: Solarized image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.solarize(img, threshold)

    return F_t.solarize(img, threshold)
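# Illustrative usage sketch, not part of the public module code; the threshold of 128
# is an arbitrary example (pixels at or above it are inverted, the rest are unchanged):
#
#     img = torch.randint(0, 256, (3, 16, 16), dtype=torch.uint8)
#     sol = solarize(img, threshold=128)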


def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor:
    """Adjust the sharpness of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
        If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
        where ... means it can have an arbitrary number of leading dimensions.
        sharpness_factor (float):  How much to adjust the sharpness. Can be
            any non negative number. 0 gives a blurred image, 1 gives the
            original image while 2 increases the sharpness by a factor of 2.

    Returns:
        PIL Image or Tensor: Sharpness adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_sharpness(img, sharpness_factor)

    return F_t.adjust_sharpness(img, sharpness_factor)
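# Illustrative usage sketch, not part of the public module code; the factor values below
# are arbitrary examples (0 blurs, 1 returns the original, values above 1 sharpen):
#
#     img = torch.rand(3, 32, 32)
#     softer = adjust_sharpness(img, 0.5)
#     sharper = adjust_sharpness(img, 2.0)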


def autocontrast(img: Tensor) -> Tensor:
    """Maximize contrast of an image by remapping its
    pixels per channel so that the lowest becomes black and the lightest
    becomes white.

    Args:
        img (PIL Image or Tensor): Image on which autocontrast is applied.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Returns:
        PIL Image or Tensor: An image that was autocontrasted.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.autocontrast(img)

    return F_t.autocontrast(img)
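# Illustrative usage sketch, not part of the public module code; the narrow value range
# below is an arbitrary example chosen to make the per-channel stretching visible:
#
#     img = torch.randint(30, 200, (3, 16, 16), dtype=torch.uint8)
#     stretched = autocontrast(img)   # per-channel min maps to 0, max maps to 255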


def equalize(img: Tensor) -> Tensor:
    """Equalize the histogram of an image by applying
    a non-linear mapping to the input in order to create a uniform
    distribution of grayscale values in the output.

    Args:
        img (PIL Image or Tensor): Image on which equalize is applied.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "P", "L" or "RGB".

    Returns:
        PIL Image or Tensor: An image that was equalized.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.equalize(img)

    return F_t.equalize(img)
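# Illustrative usage sketch, not part of the public module code; a uint8 tensor is used
# below because histogram equalization operates on discrete intensity levels:
#
#     img = torch.randint(0, 256, (3, 16, 16), dtype=torch.uint8)
#     eq = equalize(img)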