import math
import numbers
import warnings
from enum import Enum
from typing import List, Tuple, Any, Optional

import numpy as np
import torch
from PIL import Image
from torch import Tensor

try:
    import accimage
except ImportError:
    accimage = None

from . import functional_pil as F_pil
from . import functional_tensor as F_t


class InterpolationMode(Enum):
    """Interpolation modes
    Available interpolation methods are ``nearest``, ``bilinear``, ``bicubic``, ``box``, ``hamming``, and ``lanczos``.
    """

    NEAREST = "nearest"
    BILINEAR = "bilinear"
    BICUBIC = "bicubic"
    # For PIL compatibility
    BOX = "box"
    HAMMING = "hamming"
    LANCZOS = "lanczos"


# TODO: Once torchscript supports Enums with staticmethod
# this can be put into InterpolationMode as staticmethod
def _interpolation_modes_from_int(i: int) -> InterpolationMode:
    inverse_modes_mapping = {
        0: InterpolationMode.NEAREST,
        2: InterpolationMode.BILINEAR,
        3: InterpolationMode.BICUBIC,
        4: InterpolationMode.BOX,
        5: InterpolationMode.HAMMING,
        1: InterpolationMode.LANCZOS,
    }
    return inverse_modes_mapping[i]


pil_modes_mapping = {
    InterpolationMode.NEAREST: 0,
    InterpolationMode.BILINEAR: 2,
    InterpolationMode.BICUBIC: 3,
    InterpolationMode.BOX: 4,
    InterpolationMode.HAMMING: 5,
    InterpolationMode.LANCZOS: 1,
}
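
# Example (editor's illustrative note, not part of the original module): the function
# and table above translate between legacy PIL integer constants and the
# InterpolationMode enum, so a round trip looks like:
#
#   >>> _interpolation_modes_from_int(2)
#   <InterpolationMode.BILINEAR: 'bilinear'>
#   >>> pil_modes_mapping[InterpolationMode.BILINEAR]
#   2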

_is_pil_image = F_pil._is_pil_image


def get_image_size(img: Tensor) -> List[int]:
    """Returns the size of an image as [width, height].

    Args:
        img (PIL Image or Tensor): The image to be checked.

    Returns:
        List[int]: The image size.
    """
    if isinstance(img, torch.Tensor):
        return F_t.get_image_size(img)

    return F_pil.get_image_size(img)


def get_image_num_channels(img: Tensor) -> int:
    """Returns the number of channels of an image.

    Args:
        img (PIL Image or Tensor): The image to be checked.

    Returns:
        int: The number of channels.
    """
    if isinstance(img, torch.Tensor):
        return F_t.get_image_num_channels(img)

    return F_pil.get_image_num_channels(img)


@torch.jit.unused
def _is_numpy(img: Any) -> bool:
    return isinstance(img, np.ndarray)


@torch.jit.unused
def _is_numpy_image(img: Any) -> bool:
    return img.ndim in {2, 3}


def to_tensor(pic):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
    This function does not support torchscript.

    See :class:`~torchvision.transforms.ToTensor` for more details.

    Args:
        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
    """
    if not (F_pil._is_pil_image(pic) or _is_numpy(pic)):
        raise TypeError("pic should be PIL Image or ndarray. Got {}".format(type(pic)))

    if _is_numpy(pic) and not _is_numpy_image(pic):
        raise ValueError("pic should be 2/3 dimensional. Got {} dimensions.".format(pic.ndim))

    default_float_dtype = torch.get_default_dtype()

    if isinstance(pic, np.ndarray):
        # handle numpy array
        if pic.ndim == 2:
            pic = pic[:, :, None]

        img = torch.from_numpy(pic.transpose((2, 0, 1))).contiguous()
        # backward compatibility
        if isinstance(img, torch.ByteTensor):
            return img.to(dtype=default_float_dtype).div(255)
        else:
            return img

    if accimage is not None and isinstance(pic, accimage.Image):
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
        pic.copyto(nppic)
        return torch.from_numpy(nppic).to(dtype=default_float_dtype)

    # handle PIL Image
    mode_to_nptype = {"I": np.int32, "I;16": np.int16, "F": np.float32}
    img = torch.from_numpy(np.array(pic, mode_to_nptype.get(pic.mode, np.uint8), copy=True))

    if pic.mode == "1":
        img = 255 * img
    img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1)).contiguous()
    if isinstance(img, torch.ByteTensor):
        return img.to(dtype=default_float_dtype).div(255)
    else:
        return img
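
# Example (editor's illustrative sketch, not part of the original module): `to_tensor`
# moves an HWC uint8 image to CHW float in [0.0, 1.0]:
#
#   >>> pic = Image.new("RGB", (4, 3))      # hypothetical 4x3 RGB image, all zeros
#   >>> t = to_tensor(pic)
#   >>> t.shape, t.dtype, float(t.max())
#   (torch.Size([3, 3, 4]), torch.float32, 0.0)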


def pil_to_tensor(pic):
    """Convert a ``PIL Image`` to a tensor of the same type.
    This function does not support torchscript.

    See :class:`~torchvision.transforms.PILToTensor` for more details.

    .. note::

        A deep copy of the underlying array is performed.

    Args:
        pic (PIL Image): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
    """
    if not F_pil._is_pil_image(pic):
        raise TypeError("pic should be PIL Image. Got {}".format(type(pic)))

    if accimage is not None and isinstance(pic, accimage.Image):
        # accimage format is always uint8 internally, so always return uint8 here
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.uint8)
        pic.copyto(nppic)
        return torch.as_tensor(nppic)

    # handle PIL Image
    img = torch.as_tensor(np.array(pic, copy=True))
    img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1))
    return img
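
# Example (editor's illustrative sketch, not part of the original module): unlike
# `to_tensor`, `pil_to_tensor` keeps the original integer values and only changes
# the layout to CHW:
#
#   >>> pic = Image.new("L", (4, 3), color=255)   # hypothetical grayscale image
#   >>> t = pil_to_tensor(pic)
#   >>> t.shape, t.dtype, int(t.max())
#   (torch.Size([1, 3, 4]), torch.uint8, 255)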


def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor:
    """Convert a tensor image to the given ``dtype`` and scale the values accordingly
    This function does not support PIL Image.

    Args:
        image (torch.Tensor): Image to be converted
        dtype (torch.dtype): Desired data type of the output

    Returns:
        Tensor: Converted image

    .. note::

        When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
        If converted back and forth, this mismatch has no effect.

    Raises:
        RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
            well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
            overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
            of the integer ``dtype``.
    """
    if not isinstance(image, torch.Tensor):
        raise TypeError("Input img should be Tensor Image")

    return F_t.convert_image_dtype(image, dtype)
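
# Example (editor's illustrative sketch, not part of the original module): converting
# dtypes rescales values, so uint8 255 maps to float 1.0 and back:
#
#   >>> img = torch.tensor([[[0, 128, 255]]], dtype=torch.uint8)
#   >>> f = convert_image_dtype(img, torch.float32)   # values now in [0.0, 1.0]
#   >>> convert_image_dtype(f, torch.uint8)           # round trip restores the bytes
#   tensor([[[  0, 128, 255]]], dtype=torch.uint8)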


def to_pil_image(pic, mode=None):
    """Convert a tensor or an ndarray to PIL Image. This function does not support torchscript.

    See :class:`~torchvision.transforms.ToPILImage` for more details.

    Args:
        pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).

    .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes

    Returns:
        PIL Image: Image converted to PIL Image.
    """
    if not (isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
        raise TypeError("pic should be Tensor or ndarray. Got {}.".format(type(pic)))

    elif isinstance(pic, torch.Tensor):
        if pic.ndimension() not in {2, 3}:
            raise ValueError("pic should be 2/3 dimensional. Got {} dimensions.".format(pic.ndimension()))

        elif pic.ndimension() == 2:
            # if 2D image, add channel dimension (CHW)
            pic = pic.unsqueeze(0)

        # check number of channels
        if pic.shape[-3] > 4:
            raise ValueError("pic should not have > 4 channels. Got {} channels.".format(pic.shape[-3]))

    elif isinstance(pic, np.ndarray):
        if pic.ndim not in {2, 3}:
            raise ValueError("pic should be 2/3 dimensional. Got {} dimensions.".format(pic.ndim))

        elif pic.ndim == 2:
            # if 2D image, add channel dimension (HWC)
            pic = np.expand_dims(pic, 2)

        # check number of channels
        if pic.shape[-1] > 4:
            raise ValueError("pic should not have > 4 channels. Got {} channels.".format(pic.shape[-1]))

    npimg = pic
    if isinstance(pic, torch.Tensor):
        if pic.is_floating_point() and mode != "F":
            pic = pic.mul(255).byte()
        npimg = np.transpose(pic.cpu().numpy(), (1, 2, 0))

    if not isinstance(npimg, np.ndarray):
        raise TypeError("Input pic must be a torch.Tensor or NumPy ndarray, not {}".format(type(npimg)))

    if npimg.shape[2] == 1:
        expected_mode = None
        npimg = npimg[:, :, 0]
        if npimg.dtype == np.uint8:
            expected_mode = "L"
        elif npimg.dtype == np.int16:
            expected_mode = "I;16"
        elif npimg.dtype == np.int32:
            expected_mode = "I"
        elif npimg.dtype == np.float32:
            expected_mode = "F"
        if mode is not None and mode != expected_mode:
            raise ValueError(
                "Incorrect mode ({}) supplied for input type {}. Should be {}".format(mode, npimg.dtype, expected_mode)
            )
        mode = expected_mode

    elif npimg.shape[2] == 2:
        permitted_2_channel_modes = ["LA"]
        if mode is not None and mode not in permitted_2_channel_modes:
            raise ValueError("Only modes {} are supported for 2D inputs".format(permitted_2_channel_modes))

        if mode is None and npimg.dtype == np.uint8:
            mode = "LA"

    elif npimg.shape[2] == 4:
        permitted_4_channel_modes = ["RGBA", "CMYK", "RGBX"]
        if mode is not None and mode not in permitted_4_channel_modes:
            raise ValueError("Only modes {} are supported for 4D inputs".format(permitted_4_channel_modes))

        if mode is None and npimg.dtype == np.uint8:
            mode = "RGBA"
    else:
        permitted_3_channel_modes = ["RGB", "YCbCr", "HSV"]
        if mode is not None and mode not in permitted_3_channel_modes:
            raise ValueError("Only modes {} are supported for 3D inputs".format(permitted_3_channel_modes))
        if mode is None and npimg.dtype == np.uint8:
            mode = "RGB"

    if mode is None:
        raise TypeError("Input type {} is not supported".format(npimg.dtype))

    return Image.fromarray(npimg, mode=mode)
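
# Example (editor's illustrative sketch, not part of the original module): a float CHW
# tensor is scaled by 255 and converted to an 8-bit PIL image whose mode matches the
# number of channels:
#
#   >>> t = torch.rand(3, 32, 32)        # hypothetical random RGB image
#   >>> im = to_pil_image(t)
#   >>> im.mode, im.size                 # PIL size is (width, height)
#   ('RGB', (32, 32))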


def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool = False) -> Tensor:
    """Normalize a float tensor image with mean and standard deviation.
    This transform does not support PIL Image.

    .. note::
        This transform acts out of place by default, i.e., it does not mutate the input tensor.

    See :class:`~torchvision.transforms.Normalize` for more details.

    Args:
        tensor (Tensor): Float tensor image of size (C, H, W) or (B, C, H, W) to be normalized.
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace (bool, optional): Bool to make this operation inplace.

    Returns:
        Tensor: Normalized Tensor image.
    """
    if not isinstance(tensor, torch.Tensor):
        raise TypeError("Input tensor should be a torch tensor. Got {}.".format(type(tensor)))

    if not tensor.is_floating_point():
        raise TypeError("Input tensor should be a float tensor. Got {}.".format(tensor.dtype))

    if tensor.ndim < 3:
        raise ValueError(
            "Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = "
            "{}.".format(tensor.size())
        )

    if not inplace:
        tensor = tensor.clone()

    dtype = tensor.dtype
    mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)
    std = torch.as_tensor(std, dtype=dtype, device=tensor.device)
    if (std == 0).any():
        raise ValueError("std evaluated to zero after conversion to {}, leading to division by zero.".format(dtype))
    if mean.ndim == 1:
        mean = mean.view(-1, 1, 1)
    if std.ndim == 1:
        std = std.view(-1, 1, 1)
    tensor.sub_(mean).div_(std)
    return tensor
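
# Example (editor's illustrative sketch, not part of the original module): channel-wise
# standardization with the widely used ImageNet statistics:
#
#   >>> batch = torch.rand(8, 3, 224, 224)   # hypothetical batch of RGB images
#   >>> out = normalize(batch, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
#   >>> out.shape
#   torch.Size([8, 3, 224, 224])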


def resize(
    img: Tensor,
    size: List[int],
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    max_size: Optional[int] = None,
    antialias: Optional[bool] = None,
) -> Tensor:
    r"""Resize the input image to the given size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. warning::
        The output image might be different depending on its type: when downsampling, the interpolation of PIL images
        and tensors is slightly different, because PIL applies antialiasing. This may lead to significant differences
        in the performance of a network. Therefore, it is preferable to train and serve a model with the same input
        types. See also below the ``antialias`` parameter, which can help make the output of PIL images and tensors
        closer.

    Args:
        img (PIL Image or Tensor): Image to be resized.
        size (sequence or int): Desired output size. If size is a sequence like
            (h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number maintaining
            the aspect ratio. i.e., if height > width, then image will be rescaled to
            :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`.

            .. note::
                In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`.
            Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
            ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        max_size (int, optional): The maximum allowed for the longer edge of
            the resized image: if the longer edge of the image is greater
            than ``max_size`` after being resized according to ``size``, then
            the image is resized again so that the longer edge is equal to
            ``max_size``. As a result, ``size`` might be overruled, i.e. the
            smaller edge may be shorter than ``size``. This is only supported
            if ``size`` is an int (or a sequence of length 1 in torchscript
            mode).
        antialias (bool, optional): antialias flag. If ``img`` is PIL Image, the flag is ignored and anti-alias
            is always used. If ``img`` is Tensor, the flag is False by default and can be set to True for
            ``InterpolationMode.BILINEAR`` only mode. This can help make the output for PIL images and tensors
            closer.

            .. warning::
                There is no autodiff support for ``antialias=True`` option with input ``img`` as Tensor.

    Returns:
        PIL Image or Tensor: Resized image.
    """
    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be an InterpolationMode")

    if not isinstance(img, torch.Tensor):
        if antialias is not None and not antialias:
            warnings.warn("Anti-alias option is always applied for PIL Image input. Argument antialias is ignored.")
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.resize(img, size=size, interpolation=pil_interpolation, max_size=max_size)

    return F_t.resize(img, size=size, interpolation=interpolation.value, max_size=max_size, antialias=antialias)
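
# Example (editor's illustrative sketch, not part of the original module): with an
# int-like size the smaller edge is matched, and `max_size` caps the longer edge:
#
#   >>> img = torch.rand(3, 200, 400)                  # hypothetical wide image
#   >>> resize(img, [100]).shape                       # smaller edge -> 100
#   torch.Size([3, 100, 200])
#   >>> resize(img, [100], max_size=150).shape         # longer edge capped at 150
#   torch.Size([3, 75, 150])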


def scale(*args, **kwargs):
    warnings.warn("The use of the transforms.Scale transform is deprecated, please use transforms.Resize instead.")
    return resize(*args, **kwargs)


def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "constant") -> Tensor:
    r"""Pad the given image on all sides with the given "pad" value.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means at most 2 leading dimensions for mode reflect and symmetric,
    at most 3 leading dimensions for mode edge,
    and an arbitrary number of leading dimensions for mode constant

    Args:
        img (PIL Image or Tensor): Image to be padded.
        padding (int or sequence): Padding on each border. If a single int is provided this
            is used to pad all borders. If sequence of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a sequence of length 4 is provided
            this is the padding for the left, top, right and bottom borders respectively.

            .. note::
                In torchscript mode padding as single int is not supported, use a sequence of
                length 1: ``[padding, ]``.
        fill (number or str or tuple): Pixel fill value for constant fill. Default is 0.
            If a tuple of length 3, it is used to fill R, G, B channels respectively.
            This value is only used when the padding_mode is constant.
            Only number is supported for torch Tensor.
            Only int or str or tuple value is supported for PIL Image.
        padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
            Default is constant.

            - constant: pads with a constant value, this value is specified with fill

            - edge: pads with the last value at the edge of the image.
              If input a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2

            - reflect: pads with reflection of image without repeating the last value on the edge.
              For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
              will result in [3, 2, 1, 2, 3, 4, 3, 2]

            - symmetric: pads with reflection of image repeating the last value on the edge.
              For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
              will result in [2, 1, 1, 2, 3, 4, 4, 3]

    Returns:
        PIL Image or Tensor: Padded image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)

    return F_t.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)
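
# Example (editor's illustrative sketch, not part of the original module): a sequence
# of length 2 pads left/right and top/bottom, so width grows by 2*1 and height by 2*2:
#
#   >>> img = torch.rand(3, 5, 5)
#   >>> pad(img, [1, 2]).shape                 # constant 0-padding by default
#   torch.Size([3, 9, 7])
#   >>> pad(img, [1, 2], padding_mode="reflect").shape
#   torch.Size([3, 9, 7])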


def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:
    """Crop the given image at specified location and output size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
    If image size is smaller than output size along any edge, image is padded with 0 and then cropped.

    Args:
        img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.

    Returns:
        PIL Image or Tensor: Cropped image.
    """

    if not isinstance(img, torch.Tensor):
        return F_pil.crop(img, top, left, height, width)

    return F_t.crop(img, top, left, height, width)


def center_crop(img: Tensor, output_size: List[int]) -> Tensor:
    """Crops the given image at the center.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
    If image size is smaller than output size along any edge, image is padded with 0 and then center cropped.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        output_size (sequence or int): (height, width) of the crop box. If int or sequence with single int,
            it is used for both directions.

    Returns:
        PIL Image or Tensor: Cropped image.
    """
    if isinstance(output_size, numbers.Number):
        output_size = (int(output_size), int(output_size))
    elif isinstance(output_size, (tuple, list)) and len(output_size) == 1:
        output_size = (output_size[0], output_size[0])

    image_width, image_height = get_image_size(img)
    crop_height, crop_width = output_size

    if crop_width > image_width or crop_height > image_height:
        padding_ltrb = [
            (crop_width - image_width) // 2 if crop_width > image_width else 0,
            (crop_height - image_height) // 2 if crop_height > image_height else 0,
            (crop_width - image_width + 1) // 2 if crop_width > image_width else 0,
            (crop_height - image_height + 1) // 2 if crop_height > image_height else 0,
        ]
        img = pad(img, padding_ltrb, fill=0)  # PIL uses fill value 0
        image_width, image_height = get_image_size(img)
        if crop_width == image_width and crop_height == image_height:
            return img

    crop_top = int(round((image_height - crop_height) / 2.0))
    crop_left = int(round((image_width - crop_width) / 2.0))
    return crop(img, crop_top, crop_left, crop_height, crop_width)
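
# Example (editor's illustrative sketch, not part of the original module): when the
# requested crop is larger than the image, the image is first zero-padded, then cropped:
#
#   >>> img = torch.ones(3, 4, 4)
#   >>> out = center_crop(img, [6, 6])     # bigger than the 4x4 input
#   >>> out.shape, float(out[0, 0, 0])     # corners come from the zero padding
#   (torch.Size([3, 6, 6]), 0.0)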


def resized_crop(
    img: Tensor,
    top: int,
    left: int,
    height: int,
    width: int,
    size: List[int],
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
) -> Tensor:
    """Crop the given image and resize it to desired size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.

    Args:
        img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.
        size (sequence or int): Desired output size. Same semantics as ``resize``.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`.
            Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
            ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.

    Returns:
        PIL Image or Tensor: Cropped image.
    """
    img = crop(img, top, left, height, width)
    img = resize(img, size, interpolation)
    return img


def hflip(img: Tensor) -> Tensor:
    """Horizontally flip the given image.

    Args:
        img (PIL Image or Tensor): Image to be flipped. If img
            is a Tensor, it is expected to be in [..., H, W] format,
            where ... means it can have an arbitrary number of leading
            dimensions.

    Returns:
        PIL Image or Tensor: Horizontally flipped image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.hflip(img)

    return F_t.hflip(img)


def _get_perspective_coeffs(startpoints: List[List[int]], endpoints: List[List[int]]) -> List[float]:
    """Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms.

    In Perspective Transform each pixel (x, y) in the original image gets transformed as,
     (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) )

    Args:
        startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
        endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.

    Returns:
        octuple (a, b, c, d, e, f, g, h) for transforming each pixel.
    """
    a_matrix = torch.zeros(2 * len(startpoints), 8, dtype=torch.float)

    for i, (p1, p2) in enumerate(zip(endpoints, startpoints)):
        a_matrix[2 * i, :] = torch.tensor([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
        a_matrix[2 * i + 1, :] = torch.tensor([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])

    b_matrix = torch.tensor(startpoints, dtype=torch.float).view(8)
    res = torch.linalg.lstsq(a_matrix, b_matrix, driver="gels").solution

    output: List[float] = res.tolist()
    return output
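
# Example (editor's illustrative sketch, not part of the original module): when the
# start and end points coincide, the least-squares solve returns (close to) the
# identity transform (a, b, c, d, e, f, g, h) = (1, 0, 0, 0, 1, 0, 0, 0):
#
#   >>> pts = [[0, 0], [31, 0], [31, 31], [0, 31]]
#   >>> [round(c) for c in _get_perspective_coeffs(pts, pts)]
#   [1, 0, 0, 0, 1, 0, 0, 0]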


def perspective(
    img: Tensor,
    startpoints: List[List[int]],
    endpoints: List[List[int]],
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    fill: Optional[List[float]] = None,
) -> Tensor:
    """Perform perspective transform of the given image.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): Image to be transformed.
        startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
        endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.

            .. note::
                In torchscript mode single int/float value is not supported, please use a sequence
                of length 1: ``[value, ]``.

    Returns:
        PIL Image or Tensor: transformed Image.
    """

    coeffs = _get_perspective_coeffs(startpoints, endpoints)

    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be an InterpolationMode")

    if not isinstance(img, torch.Tensor):
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.perspective(img, coeffs, interpolation=pil_interpolation, fill=fill)

    return F_t.perspective(img, coeffs, interpolation=interpolation.value, fill=fill)


def vflip(img: Tensor) -> Tensor:
    """Vertically flip the given image.

    Args:
        img (PIL Image or Tensor): Image to be flipped. If img
            is a Tensor, it is expected to be in [..., H, W] format,
            where ... means it can have an arbitrary number of leading
            dimensions.

    Returns:
        PIL Image or Tensor: Vertically flipped image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.vflip(img)

    return F_t.vflip(img)


def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
    """Crop the given image into four corners and the central crop.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).

    Returns:
       tuple: tuple (tl, tr, bl, br, center)
       Corresponding top left, top right, bottom left, bottom right and center crop.
    """
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and len(size) == 1:
        size = (size[0], size[0])

    if len(size) != 2:
        raise ValueError("Please provide only two dimensions (h, w) for size.")

    image_width, image_height = get_image_size(img)
    crop_height, crop_width = size
    if crop_width > image_width or crop_height > image_height:
        msg = "Requested crop size {} is bigger than input size {}"
        raise ValueError(msg.format(size, (image_height, image_width)))

    tl = crop(img, 0, 0, crop_height, crop_width)
    tr = crop(img, 0, image_width - crop_width, crop_height, crop_width)
    bl = crop(img, image_height - crop_height, 0, crop_height, crop_width)
    br = crop(img, image_height - crop_height, image_width - crop_width, crop_height, crop_width)

    center = center_crop(img, [crop_height, crop_width])

    return tl, tr, bl, br, center


def ten_crop(img: Tensor, size: List[int], vertical_flip: bool = False) -> List[Tensor]:
    """Generate ten cropped images from the given image.
    Crop the given image into four corners and the central crop plus the
    flipped version of these (horizontal flipping is used by default).
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
        vertical_flip (bool): Use vertical flipping instead of horizontal

    Returns:
        tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
        Corresponding top left, top right, bottom left, bottom right and
        center crop and same for the flipped image.
    """
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and len(size) == 1:
        size = (size[0], size[0])

    if len(size) != 2:
        raise ValueError("Please provide only two dimensions (h, w) for size.")

    first_five = five_crop(img, size)

    if vertical_flip:
        img = vflip(img)
    else:
        img = hflip(img)

    second_five = five_crop(img, size)
    return first_five + second_five
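
# Example (editor's illustrative sketch, not part of the original module): the ten
# crops can be stacked into one batch for test-time augmentation:
#
#   >>> img = torch.rand(3, 64, 64)
#   >>> crops = ten_crop(img, [32, 32])
#   >>> batch = torch.stack(crops)      # predictions can then be averaged over dim 0
#   >>> batch.shape
#   torch.Size([10, 3, 32, 32])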


def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor:
    """Adjust brightness of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        brightness_factor (float):  How much to adjust the brightness. Can be
            any non negative number. 0 gives a black image, 1 gives the
            original image while 2 increases the brightness by a factor of 2.

    Returns:
        PIL Image or Tensor: Brightness adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_brightness(img, brightness_factor)

    return F_t.adjust_brightness(img, brightness_factor)


def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor:
    """Adjust contrast of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.

    Returns:
        PIL Image or Tensor: Contrast adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_contrast(img, contrast_factor)

    return F_t.adjust_contrast(img, contrast_factor)


def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor:
    """Adjust color saturation of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        saturation_factor (float):  How much to adjust the saturation. 0 will
            give a black and white image, 1 will give the original image while
            2 will enhance the saturation by a factor of 2.

    Returns:
        PIL Image or Tensor: Saturation adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_saturation(img, saturation_factor)

    return F_t.adjust_saturation(img, saturation_factor)


def adjust_hue(img: Tensor, hue_factor: float) -> Tensor:
    """Adjust hue of an image.

    The image hue is adjusted by converting the image to HSV and
    cyclically shifting the intensities in the hue channel (H).
    The image is then converted back to original image mode.

    `hue_factor` is the amount of shift in H channel and must be in the
    interval `[-0.5, 0.5]`.

    See `Hue`_ for more details.

    .. _Hue: https://en.wikipedia.org/wiki/Hue

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image mode "1", "I", "F" and modes with transparency (alpha channel) are not supported.
        hue_factor (float):  How much to shift the hue channel. Should be in
            [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
            HSV space in positive and negative direction respectively.
            0 means no shift. Therefore, both -0.5 and 0.5 will give an image
            with complementary colors while 0 gives the original image.

    Returns:
        PIL Image or Tensor: Hue adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_hue(img, hue_factor)

    return F_t.adjust_hue(img, hue_factor)


def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:
    r"""Perform gamma correction on an image.

    Also known as Power Law Transform. Intensities in RGB mode are adjusted
    based on the following equation:

    .. math::
        I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}

    See `Gamma Correction`_ for more details.

    .. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction

    Args:
        img (PIL Image or Tensor): PIL Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, modes with transparency (alpha channel) are not supported.
        gamma (float): Non negative real number, same as :math:`\gamma` in the equation.
            gamma larger than 1 makes the shadows darker,
            while gamma smaller than 1 makes dark regions lighter.
        gain (float): The constant multiplier.
    Returns:
        PIL Image or Tensor: Gamma correction adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_gamma(img, gamma, gain)

    return F_t.adjust_gamma(img, gamma, gain)
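
# Worked example (editor's illustrative note, not part of the original module): with
# gain=1 and gamma=2.2, a mid-gray input of 128 maps to 255 * (128 / 255) ** 2.2 ~= 56,
# i.e. the image gets darker, as the docstring describes for gamma > 1:
#
#   >>> img = torch.full((1, 1, 1), 128, dtype=torch.uint8)
#   >>> adjust_gamma(img, gamma=2.2)
#   tensor([[[56]]], dtype=torch.uint8)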


def _get_inverse_affine_matrix(
    center: List[float], angle: float, translate: List[float], scale: float, shear: List[float]
) -> List[float]:
    # Helper method to compute inverse matrix for affine transformation

    # As it is explained in PIL.Image.rotate
    # We need compute INVERSE of affine transformation matrix: M = T * C * RSS * C^-1
    # where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
    #       C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
    #       RSS is rotation with scale and shear matrix
    #       RSS(a, s, (sx, sy)) =
    #       = R(a) * S(s) * SHy(sy) * SHx(sx)
    #       = [ s*cos(a - sy)/cos(sy), s*(-cos(a - sy)*tan(sx)/cos(sy) - sin(a)), 0 ]
    #         [ s*sin(a - sy)/cos(sy), s*(-sin(a - sy)*tan(sx)/cos(sy) + cos(a)), 0 ]
    #         [ 0                    , 0                                      , 1 ]
    #
    # where R is a rotation matrix, S is a scaling matrix, and SHx and SHy are the shears:
    # SHx(s) = [1, -tan(s)] and SHy(s) = [1      , 0]
    #          [0, 1      ]              [-tan(s), 1]
    #
    # Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1

    rot = math.radians(angle)
    sx, sy = [math.radians(s) for s in shear]

    cx, cy = center
    tx, ty = translate

    # RSS without scaling
    a = math.cos(rot - sy) / math.cos(sy)
    b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot)
    c = math.sin(rot - sy) / math.cos(sy)
    d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot)

    # Inverted rotation matrix with scale and shear
    # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
    matrix = [d, -b, 0.0, -c, a, 0.0]
    matrix = [x / scale for x in matrix]

    # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
    matrix[2] += matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty)
    matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty)

    # Apply center translation: C * RSS^-1 * C^-1 * T^-1
    matrix[2] += cx
    matrix[5] += cy

    return matrix
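
# Example (editor's illustrative note, not part of the original module): for angle=0,
# translate=(0, 0), scale=1 and shear=(0, 0) the returned inverse matrix is the
# 2x3 identity [1, 0, 0, 0, 1, 0] (up to signed zeros):
#
#   >>> m = _get_inverse_affine_matrix([0.0, 0.0], 0.0, [0.0, 0.0], 1.0, [0.0, 0.0])
#   >>> [round(v) for v in m]
#   [1, 0, 0, 0, 1, 0]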


def rotate(
    img: Tensor,
    angle: float,
    interpolation: InterpolationMode = InterpolationMode.NEAREST,
    expand: bool = False,
    center: Optional[List[int]] = None,
    fill: Optional[List[float]] = None,
    resample: Optional[int] = None,
) -> Tensor:
    """Rotate the image by angle.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): image to be rotated.
        angle (number): rotation angle value in degrees, counter-clockwise.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        expand (bool, optional): Optional expansion flag.
            If true, expands the output image to make it large enough to hold the entire rotated image.
            If false or omitted, make the output image the same size as the input image.
            Note that the expand flag assumes rotation around the center and no translation.
        center (sequence, optional): Optional center of rotation. Origin is the upper left corner.
            Default is the center of the image.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.

            .. note::
                In torchscript mode single int/float value is not supported, please use a sequence
                of length 1: ``[value, ]``.

    Returns:
        PIL Image or Tensor: Rotated image.

    .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters

    """
    if resample is not None:
        warnings.warn(
            "Argument resample is deprecated and will be removed in v0.10.0. Please, use interpolation instead"
        )
        interpolation = _interpolation_modes_from_int(resample)

    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if not isinstance(angle, (int, float)):
        raise TypeError("Argument angle should be int or float")

    if center is not None and not isinstance(center, (list, tuple)):
        raise TypeError("Argument center should be a sequence")

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be an InterpolationMode")

    if not isinstance(img, torch.Tensor):
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.rotate(img, angle=angle, interpolation=pil_interpolation, expand=expand, center=center, fill=fill)

    center_f = [0.0, 0.0]
    if center is not None:
        img_size = get_image_size(img)
        # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
        center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, img_size)]

    # due to current incoherence of rotation angle direction between affine and rotate implementations
    # we need to set -angle.
    matrix = _get_inverse_affine_matrix(center_f, -angle, [0.0, 0.0], 1.0, [0.0, 0.0])
    return F_t.rotate(img, matrix=matrix, interpolation=interpolation.value, expand=expand, fill=fill)
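
# Example (editor's illustrative sketch, not part of the original module): with the
# default expand=False a rotated image keeps its size (corners are cut off and
# filled); with expand=True the canvas grows to hold the entire rotated image:
#
#   >>> img = torch.rand(3, 100, 100)
#   >>> rotate(img, 45.0).shape
#   torch.Size([3, 100, 100])
#   >>> rotate(img, 45.0, expand=True).shape[-1] > 100   # ~100 * sqrt(2)
#   True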


def affine(
    img: Tensor,
    angle: float,
    translate: List[int],
    scale: float,
    shear: List[float],
    interpolation: InterpolationMode = InterpolationMode.NEAREST,
    fill: Optional[List[float]] = None,
    resample: Optional[int] = None,
    fillcolor: Optional[List[float]] = None,
) -> Tensor:
    """Apply affine transformation on the image keeping image center invariant.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): image to transform.
        angle (number): rotation angle in degrees between -180 and 180, clockwise direction.
        translate (sequence of integers): horizontal and vertical translations (post-rotation translation)
        scale (float): overall scale
        shear (float or sequence): shear angle value in degrees between -180 to 180, clockwise direction.
            If a sequence is specified, the first value corresponds to a shear parallel to the x axis, while
            the second value corresponds to a shear parallel to the y axis.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.

            .. note::
                In torchscript mode single int/float value is not supported, please use a sequence
                of length 1: ``[value, ]``.
        fillcolor (sequence, int, float): deprecated argument and will be removed in v0.10.0.
            Please use the ``fill`` parameter instead.
        resample (int, optional): deprecated argument and will be removed in v0.10.0.
            Please use the ``interpolation`` parameter instead.

    Returns:
        PIL Image or Tensor: Transformed image.
    """
    if resample is not None:
        warnings.warn(
            "Argument resample is deprecated and will be removed in v0.10.0. Please, use interpolation instead"
        )
        interpolation = _interpolation_modes_from_int(resample)

    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if fillcolor is not None:
        warnings.warn("Argument fillcolor is deprecated and will be removed in v0.10.0. Please, use fill instead")
        fill = fillcolor

    if not isinstance(angle, (int, float)):
        raise TypeError("Argument angle should be int or float")

    if not isinstance(translate, (list, tuple)):
        raise TypeError("Argument translate should be a sequence")

    if len(translate) != 2:
        raise ValueError("Argument translate should be a sequence of length 2")

    if scale <= 0.0:
        raise ValueError("Argument scale should be positive")

    if not isinstance(shear, (numbers.Number, list, tuple)):
        raise TypeError("Shear should be either a single value or a sequence of two values")

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be an InterpolationMode")

    if isinstance(angle, int):
        angle = float(angle)

    if isinstance(translate, tuple):
        translate = list(translate)

    if isinstance(shear, numbers.Number):
        shear = [shear, 0.0]

    if isinstance(shear, tuple):
        shear = list(shear)

    if len(shear) == 1:
        shear = [shear[0], shear[0]]

    if len(shear) != 2:
        raise ValueError("Shear should be a sequence containing two values. Got {}".format(shear))

    img_size = get_image_size(img)
    if not isinstance(img, torch.Tensor):
        # center = (img_size[0] * 0.5 + 0.5, img_size[1] * 0.5 + 0.5)
        # it is visually better to estimate the center without 0.5 offset
        # otherwise image rotated by 90 degrees is shifted vs output image of torch.rot90 or F_t.affine
        center = [img_size[0] * 0.5, img_size[1] * 0.5]
        matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.affine(img, matrix=matrix, interpolation=pil_interpolation, fill=fill)

    translate_f = [1.0 * t for t in translate]
    matrix = _get_inverse_affine_matrix([0.0, 0.0], angle, translate_f, scale, shear)
    return F_t.affine(img, matrix=matrix, interpolation=interpolation.value, fill=fill)


@torch.jit.unused
def to_grayscale(img, num_output_channels=1):
    """Convert PIL image of any mode (RGB, HSV, LAB, etc) to grayscale version of image.
    This transform does not support torch Tensor.

    Args:
        img (PIL Image): PIL Image to be converted to grayscale.
        num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default is 1.

    Returns:
        PIL Image: Grayscale version of the image.

        - if num_output_channels = 1 : returned image is single channel
        - if num_output_channels = 3 : returned image is 3 channel with r = g = b
    """
    if isinstance(img, Image.Image):
        return F_pil.to_grayscale(img, num_output_channels)

    raise TypeError("Input should be PIL Image")


def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor:
    """Convert RGB image to grayscale version of image.
1164
1165
    If the image is torch Tensor, it is expected
    to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177

    Note:
        Please, note that this method supports only RGB images as input. For inputs in other color spaces,
        please, consider using meth:`~torchvision.transforms.functional.to_grayscale` with PIL Image.

    Args:
        img (PIL Image or Tensor): RGB Image to be converted to grayscale.
        num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default, 1.

    Returns:
        PIL Image or Tensor: Grayscale version of the image.

        - if num_output_channels = 1 : returned image is single channel
        - if num_output_channels = 3 : returned image is 3 channel with r = g = b
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.to_grayscale(img, num_output_channels)

    return F_t.rgb_to_grayscale(img, num_output_channels)
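
# Usage sketch (illustrative only): a [3, H, W] tensor collapses to one channel,
# or to three identical channels when ``num_output_channels=3``.
# >>> t = torch.rand(3, 16, 16)
# >>> rgb_to_grayscale(t).shape     # torch.Size([1, 16, 16])
# >>> rgb_to_grayscale(t, 3).shape  # torch.Size([3, 16, 16])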


def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool = False) -> Tensor:
    """Erase a region of the input Tensor Image with the given value.
    This transform does not support PIL Image.

    Args:
        img (Tensor Image): Tensor image of size (C, H, W) to be erased.
        i (int): Row coordinate (i in (i, j)) of the upper left corner.
        j (int): Column coordinate (j in (i, j)) of the upper left corner.
        h (int): Height of the erased region.
        w (int): Width of the erased region.
        v (Tensor): Erasing value.
        inplace (bool, optional): Whether to perform the operation in place. Default is False.

    Returns:
        Tensor Image: Erased image.
    """
    if not isinstance(img, torch.Tensor):
        raise TypeError("img should be Tensor Image. Got {}".format(type(img)))

    if not inplace:
        img = img.clone()

    img[..., i : i + h, j : j + w] = v
    return img
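
# Usage sketch (illustrative only): zero out a 10x20 patch whose upper left
# corner sits at row i=5, column j=8; the input shape is an arbitrary assumption.
# >>> t = torch.rand(3, 32, 32)
# >>> erased = erase(t, i=5, j=8, h=10, w=20, v=torch.tensor(0.0))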


def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Tensor:
    """Performs Gaussian blurring on the image with the given kernel.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): Image to be blurred
        kernel_size (sequence of ints or int): Gaussian kernel size. Can be a sequence of integers
            like ``(kx, ky)`` or a single integer for square kernels.

            .. note::
                In torchscript mode kernel_size as single int is not supported, use a sequence of
                length 1: ``[ksize, ]``.
        sigma (sequence of floats or float, optional): Gaussian kernel standard deviation. Can be a
            sequence of floats like ``(sigma_x, sigma_y)`` or a single float to define the
            same sigma in both X/Y directions. If None, then it is computed using
            ``kernel_size`` as ``sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8``.
            Default is None.

            .. note::
                In torchscript mode sigma as single float is
                not supported, use a sequence of length 1: ``[sigma, ]``.

    Returns:
        PIL Image or Tensor: Gaussian Blurred version of the image.
    """
    if not isinstance(kernel_size, (int, list, tuple)):
        raise TypeError("kernel_size should be int or a sequence of integers. Got {}".format(type(kernel_size)))
    if isinstance(kernel_size, int):
        kernel_size = [kernel_size, kernel_size]
    if len(kernel_size) != 2:
        raise ValueError("If kernel_size is a sequence, its length should be 2. Got {}".format(len(kernel_size)))
    for ksize in kernel_size:
        if ksize % 2 == 0 or ksize < 0:
            raise ValueError("kernel_size should consist of odd, positive integers. Got {}".format(kernel_size))

    if sigma is None:
        sigma = [ksize * 0.15 + 0.35 for ksize in kernel_size]

    if sigma is not None and not isinstance(sigma, (int, float, list, tuple)):
        raise TypeError("sigma should be either float or sequence of floats. Got {}".format(type(sigma)))
    if isinstance(sigma, (int, float)):
        sigma = [float(sigma), float(sigma)]
    if isinstance(sigma, (list, tuple)) and len(sigma) == 1:
        sigma = [sigma[0], sigma[0]]
    if len(sigma) != 2:
        raise ValueError("If sigma is a sequence, its length should be 2. Got {}".format(len(sigma)))
    for s in sigma:
        if s <= 0.0:
            raise ValueError("sigma should have positive values. Got {}".format(sigma))

    t_img = img
    if not isinstance(img, torch.Tensor):
        if not F_pil._is_pil_image(img):
            raise TypeError("img should be PIL Image or Tensor. Got {}".format(type(img)))

        t_img = to_tensor(img)

    output = F_t.gaussian_blur(t_img, kernel_size, sigma)

    if not isinstance(img, torch.Tensor):
        output = to_pil_image(output)
    return output
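
# Usage sketch (illustrative only): with ``sigma=None`` the default above gives
# sigma = 0.15 * ksize + 0.35, i.e. 1.1 for a 5x5 kernel; shapes are assumptions.
# >>> t = torch.rand(3, 32, 32)
# >>> out = gaussian_blur(t, kernel_size=[5, 5])                  # sigma ~= 1.1
# >>> out = gaussian_blur(t, kernel_size=[5, 5], sigma=[2.0, 2.0])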


def invert(img: Tensor) -> Tensor:
    """Invert the colors of an RGB/grayscale image.

    Args:
        img (PIL Image or Tensor): Image to have its colors inverted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Returns:
        PIL Image or Tensor: Color inverted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.invert(img)

    return F_t.invert(img)
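
# Usage sketch (illustrative only): for a float tensor in [0, 1] inversion is
# 1 - img; for uint8 it is 255 - img. The tiny input is an arbitrary assumption.
# >>> t = torch.tensor([[[0.25]]])
# >>> invert(t)  # tensor([[[0.7500]]])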


def posterize(img: Tensor, bits: int) -> Tensor:
    """Posterize an image by reducing the number of bits for each color channel.

    Args:
        img (PIL Image or Tensor): Image to have its colors posterized.
            If img is torch Tensor, it should be of type torch.uint8 and
            it is expected to be in [..., 1 or 3, H, W] format, where ... means
            it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".
        bits (int): The number of bits to keep for each channel (0-8).
    Returns:
        PIL Image or Tensor: Posterized image.
    """
    if not (0 <= bits <= 8):
        raise ValueError("The number of bits should be between 0 and 8. Got {}".format(bits))

    if not isinstance(img, torch.Tensor):
        return F_pil.posterize(img, bits)

    return F_t.posterize(img, bits)
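
# Usage sketch (illustrative only): keeping 2 bits snaps uint8 values onto
# multiples of 64 by masking off the six low-order bits.
# >>> t = torch.tensor([[[200]]], dtype=torch.uint8)
# >>> posterize(t, bits=2)  # tensor([[[192]]], dtype=torch.uint8)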


def solarize(img: Tensor, threshold: float) -> Tensor:
    """Solarize an RGB/grayscale image by inverting all pixel values above a threshold.

    Args:
        img (PIL Image or Tensor): Image to be solarized.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".
        threshold (float): All pixels equal to or above this value are inverted.
    Returns:
        PIL Image or Tensor: Solarized image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.solarize(img, threshold)

    return F_t.solarize(img, threshold)
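
# Usage sketch (illustrative only): pixels at or above the threshold are
# inverted, pixels below it pass through unchanged.
# >>> t = torch.tensor([[[10, 200]]], dtype=torch.uint8)
# >>> solarize(t, threshold=128)  # tensor([[[ 10,  55]]], dtype=torch.uint8)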


def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor:
    """Adjust the sharpness of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        sharpness_factor (float): How much to adjust the sharpness. Can be
            any non-negative number. 0 gives a blurred image, 1 gives the
            original image, while 2 increases the sharpness by a factor of 2.

    Returns:
        PIL Image or Tensor: Sharpness adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_sharpness(img, sharpness_factor)

    return F_t.adjust_sharpness(img, sharpness_factor)
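
# Usage sketch (illustrative only): factor 0 blurs, 1 should reproduce the
# input, 2 doubles the sharpness; the input shape is an arbitrary assumption.
# >>> t = torch.rand(3, 32, 32)
# >>> same = adjust_sharpness(t, 1.0)  # close to t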


def autocontrast(img: Tensor) -> Tensor:
    """Maximize contrast of an image by remapping its
    pixels per channel so that the lowest becomes black and the lightest
    becomes white.

    Args:
        img (PIL Image or Tensor): Image on which autocontrast is applied.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Returns:
        PIL Image or Tensor: An image that was autocontrasted.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.autocontrast(img)

    return F_t.autocontrast(img)
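
# Usage sketch (illustrative only): autocontrast stretches each channel so the
# darkest value maps to 0.0 and the brightest to 1.0 for float inputs.
# >>> t = torch.tensor([[[0.2, 0.4], [0.6, 0.8]]])
# >>> autocontrast(t)  # values rescaled to span [0.0, 1.0]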


def equalize(img: Tensor) -> Tensor:
    """Equalize the histogram of an image by applying
    a non-linear mapping to the input in order to create a uniform
    distribution of grayscale values in the output.

    Args:
        img (PIL Image or Tensor): Image on which equalize is applied.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            The tensor dtype must be ``torch.uint8`` and values are expected to be in ``[0, 255]``.
            If img is PIL Image, it is expected to be in mode "P", "L" or "RGB".

    Returns:
        PIL Image or Tensor: An image that was equalized.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.equalize(img)

    return F_t.equalize(img)
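
# Usage sketch (illustrative only): tensor inputs must be ``torch.uint8`` in
# [0, 255], per the docstring above; the input shape is an arbitrary assumption.
# >>> t = (torch.rand(3, 32, 32) * 255).to(torch.uint8)
# >>> eq = equalize(t)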