import math
import numbers
import warnings
from enum import Enum

import numpy as np
from PIL import Image

import torch
from torch import Tensor
from typing import List, Tuple, Any, Optional

try:
    import accimage
except ImportError:
    accimage = None

from . import functional_pil as F_pil
from . import functional_tensor as F_t


class InterpolationMode(Enum):
    """Interpolation modes
    Available interpolation methods are ``nearest``, ``bilinear``, ``bicubic``, ``box``, ``hamming``, and ``lanczos``.
    """
    NEAREST = "nearest"
    BILINEAR = "bilinear"
    BICUBIC = "bicubic"
    # For PIL compatibility
    BOX = "box"
    HAMMING = "hamming"
    LANCZOS = "lanczos"


# TODO: Once torchscript supports Enums with staticmethod
# this can be put into InterpolationMode as staticmethod
def _interpolation_modes_from_int(i: int) -> InterpolationMode:
    inverse_modes_mapping = {
        0: InterpolationMode.NEAREST,
        2: InterpolationMode.BILINEAR,
        3: InterpolationMode.BICUBIC,
        4: InterpolationMode.BOX,
        5: InterpolationMode.HAMMING,
        1: InterpolationMode.LANCZOS,
    }
    return inverse_modes_mapping[i]
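
# For example, _interpolation_modes_from_int(2) returns InterpolationMode.BILINEAR,
# matching PIL.Image.BILINEAR (2); ``pil_modes_mapping`` below is the inverse mapping.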


pil_modes_mapping = {
    InterpolationMode.NEAREST: 0,
    InterpolationMode.BILINEAR: 2,
    InterpolationMode.BICUBIC: 3,
    InterpolationMode.BOX: 4,
    InterpolationMode.HAMMING: 5,
    InterpolationMode.LANCZOS: 1,
}

_is_pil_image = F_pil._is_pil_image


def _get_image_size(img: Tensor) -> List[int]:
    """Returns image size as [w, h]
    """
    if isinstance(img, torch.Tensor):
        return F_t._get_image_size(img)

    return F_pil._get_image_size(img)


def _get_image_num_channels(img: Tensor) -> int:
    """Returns number of image channels
    """
    if isinstance(img, torch.Tensor):
        return F_t._get_image_num_channels(img)

    return F_pil._get_image_num_channels(img)


@torch.jit.unused
def _is_numpy(img: Any) -> bool:
    return isinstance(img, np.ndarray)


@torch.jit.unused
def _is_numpy_image(img: Any) -> bool:
    return img.ndim in {2, 3}


def to_tensor(pic):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
    This function does not support torchscript.

    See :class:`~torchvision.transforms.ToTensor` for more details.

    Args:
        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
    """
    if not (F_pil._is_pil_image(pic) or _is_numpy(pic)):
        raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))

    if _is_numpy(pic) and not _is_numpy_image(pic):
        raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))

    default_float_dtype = torch.get_default_dtype()

    if isinstance(pic, np.ndarray):
        # handle numpy array
        if pic.ndim == 2:
            pic = pic[:, :, None]

        img = torch.from_numpy(pic.transpose((2, 0, 1))).contiguous()
        # backward compatibility
        if isinstance(img, torch.ByteTensor):
            return img.to(dtype=default_float_dtype).div(255)
        else:
            return img

    if accimage is not None and isinstance(pic, accimage.Image):
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
        pic.copyto(nppic)
        return torch.from_numpy(nppic).to(dtype=default_float_dtype)

    # handle PIL Image
    mode_to_nptype = {'I': np.int32, 'I;16': np.int16, 'F': np.float32}
    img = torch.from_numpy(
        np.array(pic, mode_to_nptype.get(pic.mode, np.uint8), copy=True)
    )

    if pic.mode == '1':
        img = 255 * img
    img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1)).contiguous()
    if isinstance(img, torch.ByteTensor):
        return img.to(dtype=default_float_dtype).div(255)
    else:
        return img
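
# Usage sketch (illustrative only; "dog.jpg" is a hypothetical file):
#
#   from PIL import Image
#   t = to_tensor(Image.open("dog.jpg"))  # uint8 HWC image -> float32 CHW tensor in [0.0, 1.0]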


def pil_to_tensor(pic):
    """Convert a ``PIL Image`` to a tensor of the same type.
    This function does not support torchscript.

    See :class:`~torchvision.transforms.PILToTensor` for more details.

    Args:
        pic (PIL Image): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
    """
    if not F_pil._is_pil_image(pic):
        raise TypeError('pic should be PIL Image. Got {}'.format(type(pic)))

    if accimage is not None and isinstance(pic, accimage.Image):
        # accimage format is always uint8 internally, so always return uint8 here
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.uint8)
        pic.copyto(nppic)
        return torch.as_tensor(nppic)

    # handle PIL Image
    img = torch.as_tensor(np.asarray(pic))
    img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1))
    return img


def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor:
    """Convert a tensor image to the given ``dtype`` and scale the values accordingly
    This function does not support PIL Image.

    Args:
        image (torch.Tensor): Image to be converted
        dtype (torch.dtype): Desired data type of the output

    Returns:
        Tensor: Converted image

    .. note::

        When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
        If converted back and forth, this mismatch has no effect.

    Raises:
        RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
            well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
            overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
            of the integer ``dtype``.
    """
    if not isinstance(image, torch.Tensor):
        raise TypeError('Input img should be Tensor Image')

    return F_t.convert_image_dtype(image, dtype)
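
# Sketch of the value scaling (illustrative): a uint8 tensor of 255s maps to 1.0
# in float32, and converting back recovers 255:
#
#   t8 = torch.full((3, 4, 4), 255, dtype=torch.uint8)
#   tf = convert_image_dtype(t8, torch.float32)  # all values == 1.0
#   convert_image_dtype(tf, torch.uint8)         # all values == 255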


def to_pil_image(pic, mode=None):
    """Convert a tensor or an ndarray to PIL Image. This function does not support torchscript.

    See :class:`~torchvision.transforms.ToPILImage` for more details.

    Args:
        pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).

    .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes

    Returns:
        PIL Image: Image converted to PIL Image.
    """
    if not isinstance(pic, (torch.Tensor, np.ndarray)):
        raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))

    elif isinstance(pic, torch.Tensor):
        if pic.ndimension() not in {2, 3}:
            raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndimension()))

        elif pic.ndimension() == 2:
            # if 2D image, add channel dimension (CHW)
            pic = pic.unsqueeze(0)

        # check number of channels
        if pic.shape[-3] > 4:
            raise ValueError('pic should not have > 4 channels. Got {} channels.'.format(pic.shape[-3]))

    elif isinstance(pic, np.ndarray):
        if pic.ndim not in {2, 3}:
            raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))

        elif pic.ndim == 2:
            # if 2D image, add channel dimension (HWC)
            pic = np.expand_dims(pic, 2)

        # check number of channels
        if pic.shape[-1] > 4:
            raise ValueError('pic should not have > 4 channels. Got {} channels.'.format(pic.shape[-1]))

    npimg = pic
    if isinstance(pic, torch.Tensor):
        if pic.is_floating_point() and mode != 'F':
            pic = pic.mul(255).byte()
        npimg = np.transpose(pic.cpu().numpy(), (1, 2, 0))

    if not isinstance(npimg, np.ndarray):
        raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +
                        'not {}'.format(type(npimg)))

    if npimg.shape[2] == 1:
        expected_mode = None
        npimg = npimg[:, :, 0]
        if npimg.dtype == np.uint8:
            expected_mode = 'L'
        elif npimg.dtype == np.int16:
            expected_mode = 'I;16'
        elif npimg.dtype == np.int32:
            expected_mode = 'I'
        elif npimg.dtype == np.float32:
            expected_mode = 'F'
        if mode is not None and mode != expected_mode:
            raise ValueError("Incorrect mode ({}) supplied for input type {}. Should be {}"
                             .format(mode, npimg.dtype, expected_mode))
        mode = expected_mode

    elif npimg.shape[2] == 2:
        permitted_2_channel_modes = ['LA']
        if mode is not None and mode not in permitted_2_channel_modes:
            raise ValueError("Only modes {} are supported for 2-channel inputs".format(permitted_2_channel_modes))

        if mode is None and npimg.dtype == np.uint8:
            mode = 'LA'

    elif npimg.shape[2] == 4:
        permitted_4_channel_modes = ['RGBA', 'CMYK', 'RGBX']
        if mode is not None and mode not in permitted_4_channel_modes:
            raise ValueError("Only modes {} are supported for 4-channel inputs".format(permitted_4_channel_modes))

        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGBA'
    else:
        permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']
        if mode is not None and mode not in permitted_3_channel_modes:
            raise ValueError("Only modes {} are supported for 3-channel inputs".format(permitted_3_channel_modes))
        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGB'

    if mode is None:
        raise TypeError('Input type {} is not supported'.format(npimg.dtype))

    return Image.fromarray(npimg, mode=mode)
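
# Round-trip sketch (illustrative): a float CHW tensor in [0, 1] is scaled to uint8
# and the mode is inferred from the channel count ("RGB" for 3, "L" for 1):
#
#   pil = to_pil_image(torch.rand(3, 32, 32))  # PIL.Image.Image with mode "RGB"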


def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool = False) -> Tensor:
    """Normalize a float tensor image with mean and standard deviation.
    This transform does not support PIL Image.

    .. note::
        This transform acts out of place by default, i.e., it does not mutate the input tensor.

    See :class:`~torchvision.transforms.Normalize` for more details.

    Args:
        tensor (Tensor): Float tensor image of size (C, H, W) or (B, C, H, W) to be normalized.
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace (bool, optional): Bool to make this operation inplace.

    Returns:
        Tensor: Normalized Tensor image.
    """
    if not isinstance(tensor, torch.Tensor):
        raise TypeError('Input tensor should be a torch tensor. Got {}.'.format(type(tensor)))

    if not tensor.is_floating_point():
        raise TypeError('Input tensor should be a float tensor. Got {}.'.format(tensor.dtype))

    if tensor.ndim < 3:
        raise ValueError('Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = '
                         '{}.'.format(tensor.size()))

    if not inplace:
        tensor = tensor.clone()

    dtype = tensor.dtype
    mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)
    std = torch.as_tensor(std, dtype=dtype, device=tensor.device)
    if (std == 0).any():
        raise ValueError('std evaluated to zero after conversion to {}, leading to division by zero.'.format(dtype))
    if mean.ndim == 1:
        mean = mean.view(-1, 1, 1)
    if std.ndim == 1:
        std = std.view(-1, 1, 1)
    tensor.sub_(mean).div_(std)
    return tensor
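
# Sketch (illustrative): per-channel standardization with the commonly used
# ImageNet statistics:
#
#   x = torch.rand(3, 224, 224)
#   y = normalize(x, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])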


def resize(img: Tensor, size: List[int], interpolation: InterpolationMode = InterpolationMode.BILINEAR,
           max_size: Optional[int] = None, antialias: Optional[bool] = None) -> Tensor:
    r"""Resize the input image to the given size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. warning::
        The output image might be different depending on its type: when downsampling, the interpolation of PIL images
        and tensors is slightly different, because PIL applies antialiasing. This may lead to significant differences
        in the performance of a network. Therefore, it is preferable to train and serve a model with the same input
        types. See also below the ``antialias`` parameter, which can help make the outputs of PIL
        images and tensors closer.

    Args:
        img (PIL Image or Tensor): Image to be resized.
        size (sequence or int): Desired output size. If size is a sequence like
            (h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number maintaining
            the aspect ratio, i.e., if height > width, then the image will be rescaled to
            :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`.

            .. note::
                In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`.
            Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
            ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        max_size (int, optional): The maximum allowed for the longer edge of
            the resized image: if the longer edge of the image is greater
            than ``max_size`` after being resized according to ``size``, then
            the image is resized again so that the longer edge is equal to
            ``max_size``. As a result, ``size`` might be overruled, i.e. the
            smaller edge may be shorter than ``size``. This is only supported
            if ``size`` is an int (or a sequence of length 1 in torchscript
            mode).
        antialias (bool, optional): antialias flag. If ``img`` is PIL Image, the flag is ignored and anti-alias
            is always used. If ``img`` is Tensor, the flag is False by default and can be set to True
            only for ``InterpolationMode.BILINEAR`` mode. This can help make the outputs of PIL
            images and tensors closer.

            .. warning::
                There is no autodiff support for ``antialias=True`` option with input ``img`` as Tensor.

    Returns:
        PIL Image or Tensor: Resized image.
    """
    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be a InterpolationMode")

    if not isinstance(img, torch.Tensor):
        if antialias is not None and not antialias:
            warnings.warn(
                "Anti-alias option is always applied for PIL Image input. Argument antialias is ignored."
            )
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.resize(img, size=size, interpolation=pil_interpolation, max_size=max_size)

    return F_t.resize(img, size=size, interpolation=interpolation.value, max_size=max_size, antialias=antialias)
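
# Sketch (illustrative): with a single-element size the smaller edge is matched
# and the aspect ratio is preserved:
#
#   out = resize(torch.rand(3, 300, 600), [256])  # -> shape (3, 256, 512)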


def scale(*args, **kwargs):
    warnings.warn("The use of the transforms.Scale transform is deprecated, " +
                  "please use transforms.Resize instead.")
    return resize(*args, **kwargs)


def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "constant") -> Tensor:
    r"""Pad the given image on all sides with the given "pad" value.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means at most 2 leading dimensions for mode reflect and symmetric,
    at most 3 leading dimensions for mode edge,
    and an arbitrary number of leading dimensions for mode constant

    Args:
        img (PIL Image or Tensor): Image to be padded.
        padding (int or sequence): Padding on each border. If a single int is provided this
            is used to pad all borders. If sequence of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a sequence of length 4 is provided
            this is the padding for the left, top, right and bottom borders respectively.

            .. note::
                In torchscript mode padding as single int is not supported, use a sequence of
                length 1: ``[padding, ]``.
        fill (number or str or tuple): Pixel fill value for constant fill. Default is 0.
            If a tuple of length 3, it is used to fill R, G, B channels respectively.
            This value is only used when the padding_mode is constant.
            Only number is supported for torch Tensor.
            Only int or str or tuple value is supported for PIL Image.
        padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
            Default is constant.

            - constant: pads with a constant value, this value is specified with fill

            - edge: pads with the last value at the edge of the image.
              If input a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2

            - reflect: pads with reflection of image without repeating the last value on the edge.
              For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
              will result in [3, 2, 1, 2, 3, 4, 3, 2]

            - symmetric: pads with reflection of image repeating the last value on the edge.
              For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
              will result in [2, 1, 1, 2, 3, 4, 4, 3]

    Returns:
        PIL Image or Tensor: Padded image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)

    return F_t.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)
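
# Sketch (illustrative): reflect-pad by 2 on left/right and 3 on top/bottom:
#
#   out = pad(torch.rand(3, 8, 8), [2, 3], padding_mode="reflect")  # -> shape (3, 14, 12)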


def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:
    """Crop the given image at specified location and output size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
    If image size is smaller than output size along any edge, image is padded with 0 and then cropped.

    Args:
        img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.

    Returns:
        PIL Image or Tensor: Cropped image.
    """

    if not isinstance(img, torch.Tensor):
        return F_pil.crop(img, top, left, height, width)

    return F_t.crop(img, top, left, height, width)
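
# Sketch (illustrative): for a tensor input and an in-bounds box,
# crop(img, top=10, left=20, height=100, width=50) is equivalent to
# img[..., 10:110, 20:70].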


def center_crop(img: Tensor, output_size: List[int]) -> Tensor:
    """Crops the given image at the center.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
    If image size is smaller than output size along any edge, image is padded with 0 and then center cropped.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        output_size (sequence or int): (height, width) of the crop box. If int or sequence with single int,
            it is used for both directions.

    Returns:
        PIL Image or Tensor: Cropped image.
    """
    if isinstance(output_size, numbers.Number):
        output_size = (int(output_size), int(output_size))
    elif isinstance(output_size, (tuple, list)) and len(output_size) == 1:
        output_size = (output_size[0], output_size[0])

    image_width, image_height = _get_image_size(img)
    crop_height, crop_width = output_size

    if crop_width > image_width or crop_height > image_height:
        padding_ltrb = [
            (crop_width - image_width) // 2 if crop_width > image_width else 0,
            (crop_height - image_height) // 2 if crop_height > image_height else 0,
            (crop_width - image_width + 1) // 2 if crop_width > image_width else 0,
            (crop_height - image_height + 1) // 2 if crop_height > image_height else 0,
        ]
        img = pad(img, padding_ltrb, fill=0)  # PIL uses fill value 0
        image_width, image_height = _get_image_size(img)
        if crop_width == image_width and crop_height == image_height:
            return img

    crop_top = int(round((image_height - crop_height) / 2.))
    crop_left = int(round((image_width - crop_width) / 2.))
    return crop(img, crop_top, crop_left, crop_height, crop_width)
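
# Sketch (illustrative): the requested size may exceed the input; the image is
# zero-padded first, so the call below still returns the asked-for shape:
#
#   out = center_crop(torch.rand(3, 20, 20), [24, 16])  # -> shape (3, 24, 16)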


def resized_crop(
        img: Tensor, top: int, left: int, height: int, width: int, size: List[int],
        interpolation: InterpolationMode = InterpolationMode.BILINEAR
) -> Tensor:
    """Crop the given image and resize it to desired size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.

    Args:
        img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.
        size (sequence or int): Desired output size. Same semantics as ``resize``.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`.
            Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
            ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.

    Returns:
        PIL Image or Tensor: Cropped image.
    """
    img = crop(img, top, left, height, width)
    img = resize(img, size, interpolation)
    return img


def hflip(img: Tensor) -> Tensor:
    """Horizontally flip the given image.

    Args:
        img (PIL Image or Tensor): Image to be flipped. If img
            is a Tensor, it is expected to be in [..., H, W] format,
            where ... means it can have an arbitrary number of leading
            dimensions.

    Returns:
        PIL Image or Tensor:  Horizontally flipped image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.hflip(img)

    return F_t.hflip(img)


def _get_perspective_coeffs(
        startpoints: List[List[int]], endpoints: List[List[int]]
) -> List[float]:
    """Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms.

    In a perspective transform, each pixel (x, y) in the original image is transformed as
     (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) )

    Args:
        startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
        endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.

    Returns:
        octuple (a, b, c, d, e, f, g, h) for transforming each pixel.
    """
    a_matrix = torch.zeros(2 * len(startpoints), 8, dtype=torch.float)

    for i, (p1, p2) in enumerate(zip(endpoints, startpoints)):
        a_matrix[2 * i, :] = torch.tensor([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
        a_matrix[2 * i + 1, :] = torch.tensor([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])

    b_matrix = torch.tensor(startpoints, dtype=torch.float).view(8)
    res = torch.linalg.lstsq(a_matrix, b_matrix, driver='gels').solution

    output: List[float] = res.tolist()
    return output


def perspective(
        img: Tensor,
        startpoints: List[List[int]],
        endpoints: List[List[int]],
        interpolation: InterpolationMode = InterpolationMode.BILINEAR,
        fill: Optional[List[float]] = None
) -> Tensor:
    """Perform perspective transform of the given image.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): Image to be transformed.
        startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
        endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.

            .. note::
                In torchscript mode single int/float value is not supported, please use a sequence
                of length 1: ``[value, ]``.

    Returns:
        PIL Image or Tensor: transformed Image.
    """

    coeffs = _get_perspective_coeffs(startpoints, endpoints)

    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be a InterpolationMode")

    if not isinstance(img, torch.Tensor):
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.perspective(img, coeffs, interpolation=pil_interpolation, fill=fill)

    return F_t.perspective(img, coeffs, interpolation=interpolation.value, fill=fill)
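
# Sketch (illustrative): squeeze the top edge of a 32x32 image inward; the corner
# lists are ordered [top-left, top-right, bottom-right, bottom-left]:
#
#   start = [[0, 0], [31, 0], [31, 31], [0, 31]]
#   end = [[4, 0], [27, 0], [31, 31], [0, 31]]
#   out = perspective(torch.rand(3, 32, 32), start, end)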


def vflip(img: Tensor) -> Tensor:
    """Vertically flip the given image.

    Args:
        img (PIL Image or Tensor): Image to be flipped. If img
            is a Tensor, it is expected to be in [..., H, W] format,
            where ... means it can have an arbitrary number of leading
            dimensions.

    Returns:
        PIL Image or Tensor:  Vertically flipped image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.vflip(img)

    return F_t.vflip(img)


def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
    """Crop the given image into four corners and the central crop.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).

    Returns:
       tuple: tuple (tl, tr, bl, br, center)
       Corresponding top left, top right, bottom left, bottom right and center crop.
    """
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and len(size) == 1:
        size = (size[0], size[0])

    if len(size) != 2:
        raise ValueError("Please provide only two dimensions (h, w) for size.")

    image_width, image_height = _get_image_size(img)
    crop_height, crop_width = size
    if crop_width > image_width or crop_height > image_height:
        msg = "Requested crop size {} is bigger than input size {}"
        raise ValueError(msg.format(size, (image_height, image_width)))

    tl = crop(img, 0, 0, crop_height, crop_width)
    tr = crop(img, 0, image_width - crop_width, crop_height, crop_width)
    bl = crop(img, image_height - crop_height, 0, crop_height, crop_width)
    br = crop(img, image_height - crop_height, image_width - crop_width, crop_height, crop_width)

    center = center_crop(img, [crop_height, crop_width])

    return tl, tr, bl, br, center


def ten_crop(img: Tensor, size: List[int], vertical_flip: bool = False) -> List[Tensor]:
    """Generate ten cropped images from the given image.
    Crop the given image into four corners and the central crop plus the
    flipped version of these (horizontal flipping is used by default).
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
        vertical_flip (bool): Use vertical flipping instead of horizontal

    Returns:
        tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
        Corresponding top left, top right, bottom left, bottom right and
        center crop and same for the flipped image.
    """
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and len(size) == 1:
        size = (size[0], size[0])

    if len(size) != 2:
        raise ValueError("Please provide only two dimensions (h, w) for size.")

    first_five = five_crop(img, size)

    if vertical_flip:
        img = vflip(img)
    else:
        img = hflip(img)

    second_five = five_crop(img, size)
    return first_five + second_five
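
# Sketch (illustrative): ten_crop returns the five_crop outputs plus their flipped
# counterparts:
#
#   crops = ten_crop(torch.rand(3, 64, 64), [32, 32])  # tuple of 10 tensors, each (3, 32, 32)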


def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor:
    """Adjust brightness of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        brightness_factor (float):  How much to adjust the brightness. Can be
            any non negative number. 0 gives a black image, 1 gives the
            original image while 2 increases the brightness by a factor of 2.

    Returns:
        PIL Image or Tensor: Brightness adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_brightness(img, brightness_factor)

    return F_t.adjust_brightness(img, brightness_factor)


def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor:
    """Adjust contrast of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.

    Returns:
        PIL Image or Tensor: Contrast adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_contrast(img, contrast_factor)

    return F_t.adjust_contrast(img, contrast_factor)


def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor:
    """Adjust color saturation of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        saturation_factor (float):  How much to adjust the saturation. 0 will
            give a black and white image, 1 will give the original image while
            2 will enhance the saturation by a factor of 2.

    Returns:
        PIL Image or Tensor: Saturation adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_saturation(img, saturation_factor)

    return F_t.adjust_saturation(img, saturation_factor)


def adjust_hue(img: Tensor, hue_factor: float) -> Tensor:
    """Adjust hue of an image.

    The image hue is adjusted by converting the image to HSV and
    cyclically shifting the intensities in the hue channel (H).
    The image is then converted back to original image mode.

    `hue_factor` is the amount of shift in H channel and must be in the
    interval `[-0.5, 0.5]`.

    See `Hue`_ for more details.

    .. _Hue: https://en.wikipedia.org/wiki/Hue

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is a PIL Image, modes "1", "L", "I", "F" and modes with transparency (alpha channel) are not supported.
        hue_factor (float):  How much to shift the hue channel. Should be in
            [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
            HSV space in positive and negative direction respectively.
            0 means no shift. Therefore, both -0.5 and 0.5 will give an image
            with complementary colors while 0 gives the original image.

    Returns:
        PIL Image or Tensor: Hue adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_hue(img, hue_factor)

    return F_t.adjust_hue(img, hue_factor)


def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:
    r"""Perform gamma correction on an image.

    Also known as Power Law Transform. Intensities in RGB mode are adjusted
    based on the following equation:

    .. math::
        I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}

    See `Gamma Correction`_ for more details.

    .. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, modes with transparency (alpha channel) are not supported.
        gamma (float): Non negative real number, same as :math:`\gamma` in the equation.
            gamma larger than 1 makes the shadows darker,
            while gamma smaller than 1 makes dark regions lighter.
        gain (float): The constant multiplier.
    Returns:
        PIL Image or Tensor: Gamma correction adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_gamma(img, gamma, gain)

    return F_t.adjust_gamma(img, gamma, gain)
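
# Worked example (illustrative): for a float image gamma correction computes
# gain * img ** gamma, so a mid-gray value of 0.5 with gamma=2.0 becomes 0.25:
#
#   out = adjust_gamma(torch.full((3, 4, 4), 0.5), gamma=2.0)  # all values == 0.25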


def _get_inverse_affine_matrix(
        center: List[float], angle: float, translate: List[float], scale: float, shear: List[float]
) -> List[float]:
    # Helper method to compute inverse matrix for affine transformation

    # As it is explained in PIL.Image.rotate
    # We need to compute the INVERSE of the affine transformation matrix: M = T * C * RSS * C^-1
    # where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
    #       C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
    #       RSS is rotation with scale and shear matrix
    #       RSS(a, s, (sx, sy)) =
    #       = R(a) * S(s) * SHy(sy) * SHx(sx)
    #       = [ s*cos(a - sy)/cos(sy), s*(-cos(a - sy)*tan(sx)/cos(sy) - sin(a)), 0 ]
    #         [ s*sin(a - sy)/cos(sy), s*(-sin(a - sy)*tan(sx)/cos(sy) + cos(a)), 0 ]
    #         [ 0                    , 0                                        , 1 ]
    #
    # where R is a rotation matrix, S is a scaling matrix, and SHx and SHy are the shears:
    # SHx(s) = [1, -tan(s)] and SHy(s) = [1      , 0]
    #          [0, 1      ]              [-tan(s), 1]
    #
    # Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1

    rot = math.radians(angle)
    sx, sy = [math.radians(s) for s in shear]

    cx, cy = center
    tx, ty = translate

    # RSS without scaling
    a = math.cos(rot - sy) / math.cos(sy)
    b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot)
    c = math.sin(rot - sy) / math.cos(sy)
    d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot)

    # Inverted rotation matrix with scale and shear
    # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
    matrix = [d, -b, 0.0, -c, a, 0.0]
    matrix = [x / scale for x in matrix]

    # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
    matrix[2] += matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty)
    matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty)

    # Apply center translation: C * RSS^-1 * C^-1 * T^-1
    matrix[2] += cx
    matrix[5] += cy

    return matrix
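
# Sanity check (illustrative): with no rotation, translation or shear and scale 1,
# the inverse matrix is the identity in row-major 2x3 form:
#
#   _get_inverse_affine_matrix([0.0, 0.0], 0.0, [0.0, 0.0], 1.0, [0.0, 0.0])
#   # -> [1.0, 0.0, 0.0, -0.0, 1.0, 0.0] (identity, up to a signed zero)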


def rotate(
        img: Tensor, angle: float, interpolation: InterpolationMode = InterpolationMode.NEAREST,
        expand: bool = False, center: Optional[List[int]] = None,
        fill: Optional[List[float]] = None, resample: Optional[int] = None
) -> Tensor:
    """Rotate the image by angle.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): image to be rotated.
        angle (number): rotation angle value in degrees, counter-clockwise.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        expand (bool, optional): Optional expansion flag.
            If true, expands the output image to make it large enough to hold the entire rotated image.
            If false or omitted, make the output image the same size as the input image.
            Note that the expand flag assumes rotation around the center and no translation.
        center (sequence, optional): Optional center of rotation. Origin is the upper left corner.
            Default is the center of the image.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.

            .. note::
                In torchscript mode single int/float value is not supported, please use a sequence
                of length 1: ``[value, ]``.

    Returns:
        PIL Image or Tensor: Rotated image.

    .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters

    """
    if resample is not None:
        warnings.warn(
            "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead"
        )
        interpolation = _interpolation_modes_from_int(resample)

    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if not isinstance(angle, (int, float)):
        raise TypeError("Argument angle should be int or float")

    if center is not None and not isinstance(center, (list, tuple)):
        raise TypeError("Argument center should be a sequence")

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be a InterpolationMode")

    if not isinstance(img, torch.Tensor):
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.rotate(img, angle=angle, interpolation=pil_interpolation, expand=expand, center=center, fill=fill)

    center_f = [0.0, 0.0]
    if center is not None:
        img_size = _get_image_size(img)
        # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
        center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, img_size)]

    # due to current incoherence of rotation angle direction between affine and rotate implementations
    # we need to set -angle.
    matrix = _get_inverse_affine_matrix(center_f, -angle, [0.0, 0.0], 1.0, [0.0, 0.0])
    return F_t.rotate(img, matrix=matrix, interpolation=interpolation.value, expand=expand, fill=fill)
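
# Sketch (illustrative): rotate 45 degrees counter-clockwise, expanding the canvas
# so the whole rotated image fits:
#
#   out = rotate(torch.rand(3, 32, 32), angle=45.0, expand=True)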


def affine(
        img: Tensor, angle: float, translate: List[int], scale: float, shear: List[float],
        interpolation: InterpolationMode = InterpolationMode.NEAREST, fill: Optional[List[float]] = None,
        resample: Optional[int] = None, fillcolor: Optional[List[float]] = None
) -> Tensor:
    """Apply affine transformation on the image keeping image center invariant.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): image to transform.
        angle (number): rotation angle in degrees between -180 and 180, clockwise direction.
        translate (sequence of integers): horizontal and vertical translations (post-rotation translation)
        scale (float): overall scale
        shear (float or sequence): shear angle value in degrees between -180 to 180, clockwise direction.
            If a sequence is specified, the first value corresponds to a shear parallel to the x axis, while
            the second value corresponds to a shear parallel to the y axis.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.

            .. note::
                In torchscript mode single int/float value is not supported, please use a sequence
                of length 1: ``[value, ]``.
        fillcolor (sequence, int, float): deprecated argument and will be removed since v0.10.0.
            Please use the ``fill`` parameter instead.
        resample (int, optional): deprecated argument and will be removed since v0.10.0.
            Please use the ``interpolation`` parameter instead.

    Returns:
        PIL Image or Tensor: Transformed image.
    """
    if resample is not None:
        warnings.warn(
            "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead"
        )
        interpolation = _interpolation_modes_from_int(resample)

    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if fillcolor is not None:
        warnings.warn(
            "Argument fillcolor is deprecated and will be removed since v0.10.0. Please, use fill instead"
        )
        fill = fillcolor

    if not isinstance(angle, (int, float)):
        raise TypeError("Argument angle should be int or float")

    if not isinstance(translate, (list, tuple)):
        raise TypeError("Argument translate should be a sequence")

    if len(translate) != 2:
        raise ValueError("Argument translate should be a sequence of length 2")

    if scale <= 0.0:
        raise ValueError("Argument scale should be positive")

    if not isinstance(shear, (numbers.Number, (list, tuple))):
        raise TypeError("Shear should be either a single value or a sequence of two values")

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be a InterpolationMode")

    if isinstance(angle, int):
        angle = float(angle)

    if isinstance(translate, tuple):
        translate = list(translate)

    if isinstance(shear, numbers.Number):
        shear = [shear, 0.0]

    if isinstance(shear, tuple):
        shear = list(shear)

    if len(shear) == 1:
        shear = [shear[0], shear[0]]

    if len(shear) != 2:
        raise ValueError("Shear should be a sequence containing two values. Got {}".format(shear))

    img_size = _get_image_size(img)
    if not isinstance(img, torch.Tensor):
        # center = (img_size[0] * 0.5 + 0.5, img_size[1] * 0.5 + 0.5)
        # it is visually better to estimate the center without 0.5 offset
        # otherwise image rotated by 90 degrees is shifted vs output image of torch.rot90 or F_t.affine
        center = [img_size[0] * 0.5, img_size[1] * 0.5]
        matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.affine(img, matrix=matrix, interpolation=pil_interpolation, fill=fill)

    translate_f = [1.0 * t for t in translate]
    matrix = _get_inverse_affine_matrix([0.0, 0.0], angle, translate_f, scale, shear)
    return F_t.affine(img, matrix=matrix, interpolation=interpolation.value, fill=fill)
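
# Sketch (illustrative): rotate by 10 degrees, shift 5 px right and zoom in
# slightly, all about the image center:
#
#   out = affine(torch.rand(3, 64, 64), angle=10.0, translate=[5, 0], scale=1.1, shear=[0.0])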


@torch.jit.unused
def to_grayscale(img, num_output_channels=1):
    """Convert PIL image of any mode (RGB, HSV, LAB, etc) to grayscale version of image.
    This transform does not support torch Tensor.

    Args:
        img (PIL Image): PIL Image to be converted to grayscale.
        num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default is 1.

    Returns:
        PIL Image: Grayscale version of the image.

        - if num_output_channels = 1 : returned image is single channel
        - if num_output_channels = 3 : returned image is 3 channel with r = g = b
    """
    if isinstance(img, Image.Image):
        return F_pil.to_grayscale(img, num_output_channels)

    raise TypeError("Input should be PIL Image")


def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor:
    """Convert RGB image to grayscale version of image.
    If the image is torch Tensor, it is expected
    to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions

    Note:
        Please, note that this method supports only RGB images as input. For inputs in other color spaces,
        please, consider using :meth:`~torchvision.transforms.functional.to_grayscale` with PIL Image.

    Args:
        img (PIL Image or Tensor): RGB Image to be converted to grayscale.
        num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default, 1.

    Returns:
        PIL Image or Tensor: Grayscale version of the image.

        - if num_output_channels = 1 : returned image is single channel
        - if num_output_channels = 3 : returned image is 3 channel with r = g = b
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.to_grayscale(img, num_output_channels)

    return F_t.rgb_to_grayscale(img, num_output_channels)


def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool = False) -> Tensor:
    """ Erase the input Tensor Image with given value.
    This transform does not support PIL Image.

    Args:
        img (Tensor Image): Tensor image of size (C, H, W) to be erased.
        i (int): Row of the upper-left corner of the erased region.
        j (int): Column of the upper-left corner of the erased region.
        h (int): Height of the erased region.
        w (int): Width of the erased region.
        v (Tensor): Erasing value.
        inplace (bool, optional): If True, modify the input tensor in place. Default is False.

    Returns:
        Tensor Image: Erased image.
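
    Example (illustrative sketch; zeroes out a 2x3 patch of a random image):

        >>> x = torch.rand(3, 8, 8)
        >>> y = erase(x, i=1, j=2, h=2, w=3, v=torch.tensor(0.))
        >>> y[:, 1:3, 2:5].abs().sum().item()
        0.0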
    """
    if not isinstance(img, torch.Tensor):
        raise TypeError('img should be Tensor Image. Got {}'.format(type(img)))

    if not inplace:
        img = img.clone()

    img[..., i:i + h, j:j + w] = v
    return img


def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Tensor:
    """Performs Gaussian blurring on the image by given kernel.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): Image to be blurred
        kernel_size (sequence of ints or int): Gaussian kernel size. Can be a sequence of integers
            like ``(kx, ky)`` or a single integer for square kernels.

            .. note::
                In torchscript mode kernel_size as single int is not supported, use a sequence of
                length 1: ``[ksize, ]``.
        sigma (sequence of floats or float, optional): Gaussian kernel standard deviation. Can be a
            sequence of floats like ``(sigma_x, sigma_y)`` or a single float to define the
            same sigma in both X/Y directions. If None, then it is computed using
            ``kernel_size`` as ``sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8``.
            Default is None.

            .. note::
                In torchscript mode sigma as single float is
                not supported, use a sequence of length 1: ``[sigma, ]``.

    Returns:
        PIL Image or Tensor: Gaussian Blurred version of the image.
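
    Example (illustrative sketch; blurs a random tensor with a 5x5 kernel):

        >>> x = torch.rand(3, 32, 32)
        >>> gaussian_blur(x, kernel_size=[5, 5], sigma=[1.0, 1.0]).shape
        torch.Size([3, 32, 32])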
    """
    if not isinstance(kernel_size, (int, list, tuple)):
        raise TypeError('kernel_size should be int or a sequence of integers. Got {}'.format(type(kernel_size)))
    if isinstance(kernel_size, int):
        kernel_size = [kernel_size, kernel_size]
    if len(kernel_size) != 2:
        raise ValueError('If kernel_size is a sequence its length should be 2. Got {}'.format(len(kernel_size)))
    for ksize in kernel_size:
        if ksize % 2 == 0 or ksize < 0:
            raise ValueError('kernel_size should consist of odd and positive integers. Got {}'.format(kernel_size))

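    # Default sigma below is the docstring formula, simplified:
    # 0.3 * ((ksize - 1) * 0.5 - 1) + 0.8 == ksize * 0.15 + 0.35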
    if sigma is None:
        sigma = [ksize * 0.15 + 0.35 for ksize in kernel_size]

    if sigma is not None and not isinstance(sigma, (int, float, list, tuple)):
        raise TypeError('sigma should be either float or sequence of floats. Got {}'.format(type(sigma)))
    if isinstance(sigma, (int, float)):
        sigma = [float(sigma), float(sigma)]
    if isinstance(sigma, (list, tuple)) and len(sigma) == 1:
        sigma = [sigma[0], sigma[0]]
    if len(sigma) != 2:
        raise ValueError('If sigma is a sequence, its length should be 2. Got {}'.format(len(sigma)))
    for s in sigma:
        if s <= 0.:
            raise ValueError('sigma should have positive values. Got {}'.format(sigma))

    t_img = img
    if not isinstance(img, torch.Tensor):
        if not F_pil._is_pil_image(img):
            raise TypeError('img should be PIL Image or Tensor. Got {}'.format(type(img)))

        t_img = to_tensor(img)

    output = F_t.gaussian_blur(t_img, kernel_size, sigma)

    if not isinstance(img, torch.Tensor):
        output = to_pil_image(output)
    return output


def invert(img: Tensor) -> Tensor:
    """Invert the colors of an RGB/grayscale image.

    Args:
        img (PIL Image or Tensor): Image to have its colors inverted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Returns:
        PIL Image or Tensor: Color inverted image.
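
    Example (illustrative sketch; for uint8 tensors inversion is ``255 - x``):

        >>> x = torch.randint(0, 256, (3, 4, 4), dtype=torch.uint8)
        >>> torch.equal(invert(x), 255 - x)
        True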
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.invert(img)

    return F_t.invert(img)


def posterize(img: Tensor, bits: int) -> Tensor:
    """Posterize an image by reducing the number of bits for each color channel.

    Args:
        img (PIL Image or Tensor): Image to have its colors posterized.
            If img is torch Tensor, it should be of type torch.uint8 and
            it is expected to be in [..., 1 or 3, H, W] format, where ... means
            it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".
        bits (int): The number of bits to keep for each channel (0-8).

    Returns:
        PIL Image or Tensor: Posterized image.
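
    Example (illustrative sketch; keeping 4 bits clears the 4 low-order bits):

        >>> x = torch.randint(0, 256, (3, 4, 4), dtype=torch.uint8)
        >>> torch.equal(posterize(x, 4), x & 0xF0)
        True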
    """
    if not (0 <= bits <= 8):
        raise ValueError('The number of bits should be between 0 and 8. Got {}'.format(bits))

    if not isinstance(img, torch.Tensor):
        return F_pil.posterize(img, bits)

    return F_t.posterize(img, bits)


def solarize(img: Tensor, threshold: float) -> Tensor:
    """Solarize an RGB/grayscale image by inverting all pixel values above a threshold.

    Args:
        img (PIL Image or Tensor): Image to be solarized.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".
        threshold (float): All pixels with values greater than or equal to this threshold are inverted.

    Returns:
        PIL Image or Tensor: Solarized image.
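
    Example (illustrative sketch; only values at or above the threshold flip):

        >>> x = torch.tensor([[[100, 200]]], dtype=torch.uint8)
        >>> solarize(x, 128)
        tensor([[[100,  55]]], dtype=torch.uint8)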
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.solarize(img, threshold)

    return F_t.solarize(img, threshold)


def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor:
    """Adjust the sharpness of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        sharpness_factor (float): How much to adjust the sharpness. Can be
            any non-negative number. 0 gives a blurred image, 1 gives the
            original image, while 2 increases the sharpness by a factor of 2.

    Returns:
        PIL Image or Tensor: Sharpness adjusted image.
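
    Example (illustrative sketch; a factor of 1.0 leaves the image unchanged):

        >>> x = torch.rand(3, 16, 16)
        >>> torch.equal(adjust_sharpness(x, 1.0), x)
        True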
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_sharpness(img, sharpness_factor)

    return F_t.adjust_sharpness(img, sharpness_factor)


def autocontrast(img: Tensor) -> Tensor:
    """Maximize contrast of an image by remapping its
    pixels per channel so that the lowest becomes black and the lightest
    becomes white.

    Args:
        img (PIL Image or Tensor): Image on which autocontrast is applied.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Returns:
        PIL Image or Tensor: An image that was autocontrasted.
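
    Example (illustrative sketch; an image already spanning [0, 255] is unchanged):

        >>> x = torch.tensor([[[0, 128], [192, 255]]], dtype=torch.uint8)
        >>> torch.equal(autocontrast(x), x)
        True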
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.autocontrast(img)

    return F_t.autocontrast(img)


def equalize(img: Tensor) -> Tensor:
    """Equalize the histogram of an image by applying
    a non-linear mapping to the input in order to create a uniform
    distribution of grayscale values in the output.

    Args:
        img (PIL Image or Tensor): Image on which equalize is applied.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            The tensor dtype must be ``torch.uint8`` and values are expected to be in ``[0, 255]``.
            If img is PIL Image, it is expected to be in mode "P", "L" or "RGB".

    Returns:
        PIL Image or Tensor: An image that was equalized.
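
    Example (illustrative sketch; output keeps the input's shape and dtype):

        >>> x = torch.randint(0, 256, (3, 16, 16), dtype=torch.uint8)
        >>> equalize(x).shape
        torch.Size([3, 16, 16])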
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.equalize(img)

    return F_t.equalize(img)