import math
import numbers
import warnings
from enum import Enum
from typing import List, Tuple, Any, Optional

import numpy as np
import torch
from PIL import Image
from torch import Tensor

try:
    import accimage
except ImportError:
    accimage = None

from ..utils import _log_api_usage_once
from . import functional_pil as F_pil
from . import functional_tensor as F_t


class InterpolationMode(Enum):
    """Interpolation modes
    Available interpolation methods are ``nearest``, ``bilinear``, ``bicubic``, ``box``, ``hamming``, and ``lanczos``.
    """

    NEAREST = "nearest"
    BILINEAR = "bilinear"
    BICUBIC = "bicubic"
    # For PIL compatibility
    BOX = "box"
    HAMMING = "hamming"
    LANCZOS = "lanczos"


# TODO: Once torchscript supports Enums with staticmethod
# this can be put into InterpolationMode as staticmethod
def _interpolation_modes_from_int(i: int) -> InterpolationMode:
    inverse_modes_mapping = {
        0: InterpolationMode.NEAREST,
        2: InterpolationMode.BILINEAR,
        3: InterpolationMode.BICUBIC,
        4: InterpolationMode.BOX,
        5: InterpolationMode.HAMMING,
        1: InterpolationMode.LANCZOS,
    }
    return inverse_modes_mapping[i]


pil_modes_mapping = {
    InterpolationMode.NEAREST: 0,
    InterpolationMode.BILINEAR: 2,
    InterpolationMode.BICUBIC: 3,
    InterpolationMode.BOX: 4,
    InterpolationMode.HAMMING: 5,
    InterpolationMode.LANCZOS: 1,
}

_is_pil_image = F_pil._is_pil_image


def get_dimensions(img: Tensor) -> List[int]:
    """Returns the dimensions of an image as [channels, height, width].

    Args:
        img (PIL Image or Tensor): The image to be checked.

    Returns:
        List[int]: The image dimensions.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(get_dimensions)
    if isinstance(img, torch.Tensor):
        return F_t.get_dimensions(img)

    return F_pil.get_dimensions(img)


def get_image_size(img: Tensor) -> List[int]:
    """Returns the size of an image as [width, height].

    Args:
        img (PIL Image or Tensor): The image to be checked.

    Returns:
        List[int]: The image size.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(get_image_size)
    if isinstance(img, torch.Tensor):
        return F_t.get_image_size(img)

    return F_pil.get_image_size(img)


def get_image_num_channels(img: Tensor) -> int:
    """Returns the number of channels of an image.

    Args:
        img (PIL Image or Tensor): The image to be checked.

    Returns:
        int: The number of channels.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(get_image_num_channels)
    if isinstance(img, torch.Tensor):
        return F_t.get_image_num_channels(img)

    return F_pil.get_image_num_channels(img)


@torch.jit.unused
def _is_numpy(img: Any) -> bool:
    return isinstance(img, np.ndarray)


@torch.jit.unused
def _is_numpy_image(img: Any) -> bool:
    return img.ndim in {2, 3}


def to_tensor(pic):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
    This function does not support torchscript.

    See :class:`~torchvision.transforms.ToTensor` for more details.

    Args:
        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(to_tensor)
    if not (F_pil._is_pil_image(pic) or _is_numpy(pic)):
        raise TypeError(f"pic should be PIL Image or ndarray. Got {type(pic)}")

    if _is_numpy(pic) and not _is_numpy_image(pic):
        raise ValueError(f"pic should be 2/3 dimensional. Got {pic.ndim} dimensions.")

    default_float_dtype = torch.get_default_dtype()

    if isinstance(pic, np.ndarray):
        # handle numpy array
        if pic.ndim == 2:
            pic = pic[:, :, None]

        img = torch.from_numpy(pic.transpose((2, 0, 1))).contiguous()
        # backward compatibility
        if isinstance(img, torch.ByteTensor):
            return img.to(dtype=default_float_dtype).div(255)
        else:
            return img

    if accimage is not None and isinstance(pic, accimage.Image):
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
        pic.copyto(nppic)
        return torch.from_numpy(nppic).to(dtype=default_float_dtype)

    # handle PIL Image
    mode_to_nptype = {"I": np.int32, "I;16": np.int16, "F": np.float32}
    img = torch.from_numpy(np.array(pic, mode_to_nptype.get(pic.mode, np.uint8), copy=True))

    if pic.mode == "1":
        img = 255 * img
    img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1)).contiguous()
    if isinstance(img, torch.ByteTensor):
        return img.to(dtype=default_float_dtype).div(255)
    else:
        return img
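
# Usage sketch (illustrative, not part of the module): ``to_tensor`` maps an
# HWC uint8 PIL Image to a CHW float tensor with values scaled into [0, 1].
#
#     pil_img = Image.new("RGB", (4, 4), color=(255, 0, 0))
#     t = to_tensor(pil_img)
#     assert t.shape == (3, 4, 4) and float(t.max()) <= 1.0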


def pil_to_tensor(pic):
    """Convert a ``PIL Image`` to a tensor of the same type.
    This function does not support torchscript.

    See :class:`~torchvision.transforms.PILToTensor` for more details.

    .. note::

        A deep copy of the underlying array is performed.

    Args:
        pic (PIL Image): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(pil_to_tensor)
    if not F_pil._is_pil_image(pic):
        raise TypeError(f"pic should be PIL Image. Got {type(pic)}")

    if accimage is not None and isinstance(pic, accimage.Image):
        # accimage format is always uint8 internally, so always return uint8 here
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.uint8)
        pic.copyto(nppic)
        return torch.as_tensor(nppic)

    # handle PIL Image
    img = torch.as_tensor(np.array(pic, copy=True))
    img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1))
    return img
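
# Usage sketch (illustrative): unlike ``to_tensor``, ``pil_to_tensor`` keeps
# the original integer dtype and does not rescale values.
#
#     pil_img = Image.new("L", (4, 4), color=128)
#     t = pil_to_tensor(pil_img)
#     assert t.dtype == torch.uint8 and t.shape == (1, 4, 4) and int(t[0, 0, 0]) == 128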


def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor:
    """Convert a tensor image to the given ``dtype`` and scale the values accordingly
    This function does not support PIL Image.

    Args:
        image (torch.Tensor): Image to be converted
        dtype (torch.dtype): Desired data type of the output

    Returns:
        Tensor: Converted image

    .. note::

        When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
        If converted back and forth, this mismatch has no effect.

    Raises:
        RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
            well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
            overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
            of the integer ``dtype``.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(convert_image_dtype)
    if not isinstance(image, torch.Tensor):
        raise TypeError("Input img should be Tensor Image")

    return F_t.convert_image_dtype(image, dtype)
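
# Usage sketch (illustrative): converting uint8 -> float32 rescales [0, 255]
# into [0, 1]; converting such a float image back to uint8 should recover the
# original values (an assumption based on the scaling described above).
#
#     img_u8 = torch.randint(0, 256, (3, 2, 2), dtype=torch.uint8)
#     img_f = convert_image_dtype(img_u8, torch.float32)  # values in [0, 1]
#     img_back = convert_image_dtype(img_f, torch.uint8)  # ~= img_u8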


def to_pil_image(pic, mode=None):
    """Convert a tensor or an ndarray to PIL Image. This function does not support torchscript.

    See :class:`~torchvision.transforms.ToPILImage` for more details.

    Args:
        pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).

    .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes

    Returns:
        PIL Image: Image converted to PIL Image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(to_pil_image)
    if not (isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
        raise TypeError(f"pic should be Tensor or ndarray. Got {type(pic)}.")

    elif isinstance(pic, torch.Tensor):
        if pic.ndimension() not in {2, 3}:
            raise ValueError(f"pic should be 2/3 dimensional. Got {pic.ndimension()} dimensions.")

        elif pic.ndimension() == 2:
            # if 2D image, add channel dimension (CHW)
            pic = pic.unsqueeze(0)

        # check number of channels
        if pic.shape[-3] > 4:
            raise ValueError(f"pic should not have > 4 channels. Got {pic.shape[-3]} channels.")

    elif isinstance(pic, np.ndarray):
        if pic.ndim not in {2, 3}:
            raise ValueError(f"pic should be 2/3 dimensional. Got {pic.ndim} dimensions.")

        elif pic.ndim == 2:
            # if 2D image, add channel dimension (HWC)
            pic = np.expand_dims(pic, 2)

        # check number of channels
        if pic.shape[-1] > 4:
            raise ValueError(f"pic should not have > 4 channels. Got {pic.shape[-1]} channels.")

    npimg = pic
    if isinstance(pic, torch.Tensor):
        if pic.is_floating_point() and mode != "F":
            pic = pic.mul(255).byte()
        npimg = np.transpose(pic.cpu().numpy(), (1, 2, 0))

    if not isinstance(npimg, np.ndarray):
        raise TypeError("Input pic must be a torch.Tensor or NumPy ndarray, not {type(npimg)}")

    if npimg.shape[2] == 1:
        expected_mode = None
        npimg = npimg[:, :, 0]
        if npimg.dtype == np.uint8:
            expected_mode = "L"
        elif npimg.dtype == np.int16:
            expected_mode = "I;16"
        elif npimg.dtype == np.int32:
            expected_mode = "I"
        elif npimg.dtype == np.float32:
            expected_mode = "F"
        if mode is not None and mode != expected_mode:
            raise ValueError(f"Incorrect mode ({mode}) supplied for input type {np.dtype}. Should be {expected_mode}")
        mode = expected_mode

    elif npimg.shape[2] == 2:
        permitted_2_channel_modes = ["LA"]
        if mode is not None and mode not in permitted_2_channel_modes:
            raise ValueError(f"Only modes {permitted_2_channel_modes} are supported for 2D inputs")

        if mode is None and npimg.dtype == np.uint8:
            mode = "LA"

    elif npimg.shape[2] == 4:
        permitted_4_channel_modes = ["RGBA", "CMYK", "RGBX"]
        if mode is not None and mode not in permitted_4_channel_modes:
            raise ValueError(f"Only modes {permitted_4_channel_modes} are supported for 4D inputs")

        if mode is None and npimg.dtype == np.uint8:
            mode = "RGBA"
    else:
        permitted_3_channel_modes = ["RGB", "YCbCr", "HSV"]
        if mode is not None and mode not in permitted_3_channel_modes:
            raise ValueError(f"Only modes {permitted_3_channel_modes} are supported for 3D inputs")
        if mode is None and npimg.dtype == np.uint8:
            mode = "RGB"

    if mode is None:
        raise TypeError(f"Input type {npimg.dtype} is not supported")

    return Image.fromarray(npimg, mode=mode)
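
# Usage sketch (illustrative): a CHW float tensor in [0, 1] with three
# channels is converted to an 8-bit image whose mode is inferred as "RGB".
#
#     t = torch.rand(3, 4, 4)
#     pil_img = to_pil_image(t)
#     assert pil_img.mode == "RGB" and pil_img.size == (4, 4)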


def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool = False) -> Tensor:
    """Normalize a float tensor image with mean and standard deviation.
    This transform does not support PIL Image.

    .. note::
        This transform acts out of place by default, i.e., it does not mutate the input tensor.

    See :class:`~torchvision.transforms.Normalize` for more details.

    Args:
        tensor (Tensor): Float tensor image of size (C, H, W) or (B, C, H, W) to be normalized.
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace (bool, optional): Bool to make this operation in-place.

    Returns:
        Tensor: Normalized Tensor image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(normalize)
    if not isinstance(tensor, torch.Tensor):
        raise TypeError(f"img should be Tensor Image. Got {type(tensor)}")

    return F_t.normalize(tensor, mean=mean, std=std, inplace=inplace)
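
# Usage sketch (illustrative): each channel is mapped to
# ``(x - mean[c]) / std[c]``, so a constant image equal to the mean becomes 0.
#
#     t = torch.full((3, 2, 2), 0.5)
#     out = normalize(t, mean=[0.5, 0.5, 0.5], std=[0.2, 0.2, 0.2])
#     assert torch.allclose(out, torch.zeros_like(out))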


def resize(
    img: Tensor,
    size: List[int],
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    max_size: Optional[int] = None,
    antialias: Optional[bool] = None,
) -> Tensor:
    r"""Resize the input image to the given size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. warning::
        The output image might be different depending on its type: when downsampling, the interpolation of PIL images
        and tensors is slightly different, because PIL applies antialiasing. This may lead to significant differences
        in the performance of a network. Therefore, it is preferable to train and serve a model with the same input
        types. See also below the ``antialias`` parameter, which can help make the output of PIL images and tensors
        closer.

    Args:
        img (PIL Image or Tensor): Image to be resized.
        size (sequence or int): Desired output size. If size is a sequence like
            (h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number maintaining
            the aspect ratio, i.e., if height > width, then the image will be rescaled to
            :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`.

            .. note::
                In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`.
            Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
            ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        max_size (int, optional): The maximum allowed for the longer edge of
            the resized image: if the longer edge of the image is greater
            than ``max_size`` after being resized according to ``size``, then
            the image is resized again so that the longer edge is equal to
            ``max_size``. As a result, ``size`` might be overruled, i.e. the
            smaller edge may be shorter than ``size``. This is only supported
            if ``size`` is an int (or a sequence of length 1 in torchscript
            mode).
        antialias (bool, optional): antialias flag. If ``img`` is PIL Image, the flag is ignored and anti-alias
            is always used. If ``img`` is Tensor, the flag is False by default and can be set to True for
            ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` modes.
            This can help make the output for PIL images and tensors closer.

    Returns:
        PIL Image or Tensor: Resized image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(resize)
    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be a InterpolationMode")

    if not isinstance(img, torch.Tensor):
        if antialias is not None and not antialias:
            warnings.warn("Anti-alias option is always applied for PIL Image input. Argument antialias is ignored.")
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.resize(img, size=size, interpolation=pil_interpolation, max_size=max_size)

    return F_t.resize(img, size=size, interpolation=interpolation.value, max_size=max_size, antialias=antialias)
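
# Usage sketch (illustrative): with a single int the smaller edge is matched
# and the aspect ratio is kept; ``max_size`` caps the longer edge.
#
#     t = torch.rand(3, 100, 200)
#     assert resize(t, [50]).shape == (3, 50, 100)
#     assert resize(t, [50], max_size=80).shape == (3, 40, 80)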


def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "constant") -> Tensor:
    r"""Pad the given image on all sides with the given "pad" value.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means at most 2 leading dimensions for mode reflect and symmetric,
    at most 3 leading dimensions for mode edge,
    and an arbitrary number of leading dimensions for mode constant

    Args:
        img (PIL Image or Tensor): Image to be padded.
        padding (int or sequence): Padding on each border. If a single int is provided this
            is used to pad all borders. If sequence of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a sequence of length 4 is provided
            this is the padding for the left, top, right and bottom borders respectively.

            .. note::
                In torchscript mode padding as single int is not supported, use a sequence of
                length 1: ``[padding, ]``.
        fill (number or str or tuple): Pixel fill value for constant fill. Default is 0.
            If a tuple of length 3, it is used to fill R, G, B channels respectively.
            This value is only used when the padding_mode is constant.
            Only number is supported for torch Tensor.
            Only int or str or tuple value is supported for PIL Image.
        padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
            Default is constant.

            - constant: pads with a constant value; this value is specified with ``fill``

            - edge: pads with the last value at the edge of the image.
              If the input is a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2

            - reflect: pads with reflection of image without repeating the last value on the edge.
              For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
              will result in [3, 2, 1, 2, 3, 4, 3, 2]

            - symmetric: pads with reflection of image repeating the last value on the edge.
              For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
              will result in [2, 1, 1, 2, 3, 4, 4, 3]

    Returns:
        PIL Image or Tensor: Padded image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(pad)
    if not isinstance(img, torch.Tensor):
        return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)

    return F_t.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)
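
# Usage sketch (illustrative): a length-2 padding is interpreted as
# left/right then top/bottom, so [1, 2] adds 1 column on each side and
# 2 rows on top and bottom.
#
#     t = torch.zeros(3, 4, 4)
#     assert pad(t, [1, 2]).shape == (3, 8, 6)
#     out = pad(t, [1, 1], padding_mode="reflect")  # mirrored border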


def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:
    """Crop the given image at specified location and output size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
    If image size is smaller than output size along any edge, image is padded with 0 and then cropped.

    Args:
        img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.

    Returns:
        PIL Image or Tensor: Cropped image.
    """

    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(crop)
    if not isinstance(img, torch.Tensor):
        return F_pil.crop(img, top, left, height, width)

    return F_t.crop(img, top, left, height, width)


def center_crop(img: Tensor, output_size: List[int]) -> Tensor:
    """Crops the given image at the center.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
    If image size is smaller than output size along any edge, image is padded with 0 and then center cropped.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        output_size (sequence or int): (height, width) of the crop box. If int or sequence with single int,
            it is used for both directions.

    Returns:
        PIL Image or Tensor: Cropped image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(center_crop)
    if isinstance(output_size, numbers.Number):
        output_size = (int(output_size), int(output_size))
    elif isinstance(output_size, (tuple, list)) and len(output_size) == 1:
        output_size = (output_size[0], output_size[0])

    _, image_height, image_width = get_dimensions(img)
    crop_height, crop_width = output_size

    if crop_width > image_width or crop_height > image_height:
        padding_ltrb = [
            (crop_width - image_width) // 2 if crop_width > image_width else 0,
            (crop_height - image_height) // 2 if crop_height > image_height else 0,
            (crop_width - image_width + 1) // 2 if crop_width > image_width else 0,
            (crop_height - image_height + 1) // 2 if crop_height > image_height else 0,
        ]
        img = pad(img, padding_ltrb, fill=0)  # PIL uses fill value 0
        _, image_height, image_width = get_dimensions(img)
        if crop_width == image_width and crop_height == image_height:
            return img

    crop_top = int(round((image_height - crop_height) / 2.0))
    crop_left = int(round((image_width - crop_width) / 2.0))
    return crop(img, crop_top, crop_left, crop_height, crop_width)
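
# Usage sketch (illustrative): when the requested crop is larger than the
# image along an edge, the image is first zero-padded, then center-cropped.
#
#     t = torch.rand(3, 10, 10)
#     assert center_crop(t, [4]).shape == (3, 4, 4)
#     assert center_crop(t, [12, 4]).shape == (3, 12, 4)  # padded in height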


def resized_crop(
    img: Tensor,
    top: int,
    left: int,
    height: int,
    width: int,
    size: List[int],
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
) -> Tensor:
    """Crop the given image and resize it to desired size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.

    Args:
        img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.
        size (sequence or int): Desired output size. Same semantics as ``resize``.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`.
            Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
            ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.

    Returns:
        PIL Image or Tensor: Cropped image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(resized_crop)
    img = crop(img, top, left, height, width)
    img = resize(img, size, interpolation)
    return img


def hflip(img: Tensor) -> Tensor:
    """Horizontally flip the given image.

    Args:
        img (PIL Image or Tensor): Image to be flipped. If img
            is a Tensor, it is expected to be in [..., H, W] format,
            where ... means it can have an arbitrary number of leading
            dimensions.

    Returns:
        PIL Image or Tensor:  Horizontally flipped image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(hflip)
    if not isinstance(img, torch.Tensor):
        return F_pil.hflip(img)

    return F_t.hflip(img)


def _get_perspective_coeffs(startpoints: List[List[int]], endpoints: List[List[int]]) -> List[float]:
    """Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms.

    In Perspective Transform each pixel (x, y) in the original image gets transformed as,
     (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) )

    Args:
        startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
        endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.

    Returns:
        octuple (a, b, c, d, e, f, g, h) for transforming each pixel.
    """
    a_matrix = torch.zeros(2 * len(startpoints), 8, dtype=torch.float)

    for i, (p1, p2) in enumerate(zip(endpoints, startpoints)):
        a_matrix[2 * i, :] = torch.tensor([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
        a_matrix[2 * i + 1, :] = torch.tensor([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])

    b_matrix = torch.tensor(startpoints, dtype=torch.float).view(8)
    res = torch.linalg.lstsq(a_matrix, b_matrix, driver="gels").solution

    output: List[float] = res.tolist()
    return output
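
# Sanity-check sketch (illustrative): mapping the four corners onto
# themselves should recover (approximately) the identity transform,
# i.e. coefficients close to [1, 0, 0, 0, 1, 0, 0, 0].
#
#     corners = [[0, 0], [31, 0], [31, 31], [0, 31]]
#     coeffs = _get_perspective_coeffs(corners, corners)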


def perspective(
    img: Tensor,
    startpoints: List[List[int]],
    endpoints: List[List[int]],
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    fill: Optional[List[float]] = None,
) -> Tensor:
    """Perform perspective transform of the given image.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): Image to be transformed.
        startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
        endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.

            .. note::
                In torchscript mode single int/float value is not supported, please use a sequence
                of length 1: ``[value, ]``.

    Returns:
        PIL Image or Tensor: transformed Image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(perspective)

    coeffs = _get_perspective_coeffs(startpoints, endpoints)

    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be a InterpolationMode")

    if not isinstance(img, torch.Tensor):
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.perspective(img, coeffs, interpolation=pil_interpolation, fill=fill)

    return F_t.perspective(img, coeffs, interpolation=interpolation.value, fill=fill)


def vflip(img: Tensor) -> Tensor:
    """Vertically flip the given image.

    Args:
        img (PIL Image or Tensor): Image to be flipped. If img
            is a Tensor, it is expected to be in [..., H, W] format,
            where ... means it can have an arbitrary number of leading
            dimensions.

    Returns:
        PIL Image or Tensor:  Vertically flipped image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(vflip)
    if not isinstance(img, torch.Tensor):
        return F_pil.vflip(img)

    return F_t.vflip(img)


def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
    """Crop the given image into four corners and the central crop.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).

    Returns:
       tuple: tuple (tl, tr, bl, br, center)
       Corresponding top left, top right, bottom left, bottom right and center crop.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(five_crop)
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and len(size) == 1:
        size = (size[0], size[0])

    if len(size) != 2:
        raise ValueError("Please provide only two dimensions (h, w) for size.")

    _, image_height, image_width = get_dimensions(img)
    crop_height, crop_width = size
    if crop_width > image_width or crop_height > image_height:
        msg = "Requested crop size {} is bigger than input size {}"
        raise ValueError(msg.format(size, (image_height, image_width)))

    tl = crop(img, 0, 0, crop_height, crop_width)
    tr = crop(img, 0, image_width - crop_width, crop_height, crop_width)
    bl = crop(img, image_height - crop_height, 0, crop_height, crop_width)
    br = crop(img, image_height - crop_height, image_width - crop_width, crop_height, crop_width)

    center = center_crop(img, [crop_height, crop_width])

    return tl, tr, bl, br, center
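
# Usage sketch (illustrative): ``five_crop`` returns one crop per corner plus
# the center crop, all with the requested size.
#
#     t = torch.rand(3, 100, 100)
#     tl, tr, bl, br, center = five_crop(t, [32, 32])
#     assert all(c.shape == (3, 32, 32) for c in (tl, tr, bl, br, center))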


def ten_crop(img: Tensor, size: List[int], vertical_flip: bool = False) -> List[Tensor]:
    """Generate ten cropped images from the given image.
    Crop the given image into four corners and the central crop plus the
    flipped version of these (horizontal flipping is used by default).
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
        vertical_flip (bool): Use vertical flipping instead of horizontal

    Returns:
        tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
        Corresponding top left, top right, bottom left, bottom right and
        center crop and same for the flipped image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(ten_crop)
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and len(size) == 1:
        size = (size[0], size[0])

    if len(size) != 2:
        raise ValueError("Please provide only two dimensions (h, w) for size.")

    first_five = five_crop(img, size)

    if vertical_flip:
        img = vflip(img)
    else:
        img = hflip(img)

    second_five = five_crop(img, size)
    return first_five + second_five
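
# Usage sketch (illustrative): ``ten_crop`` is ``five_crop`` of the image
# plus ``five_crop`` of its flipped version, ten crops in total.
#
#     t = torch.rand(3, 100, 100)
#     crops = ten_crop(t, [32, 32])
#     assert len(crops) == 10 and crops[0].shape == (3, 32, 32)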


def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor:
    """Adjust brightness of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        brightness_factor (float):  How much to adjust the brightness. Can be
            any non negative number. 0 gives a black image, 1 gives the
            original image while 2 increases the brightness by a factor of 2.

    Returns:
        PIL Image or Tensor: Brightness adjusted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_brightness)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_brightness(img, brightness_factor)

    return F_t.adjust_brightness(img, brightness_factor)


def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor:
    """Adjust contrast of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.

    Returns:
        PIL Image or Tensor: Contrast adjusted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_contrast)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_contrast(img, contrast_factor)

    return F_t.adjust_contrast(img, contrast_factor)


def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor:
    """Adjust color saturation of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        saturation_factor (float):  How much to adjust the saturation. 0 will
            give a black and white image, 1 will give the original image while
            2 will enhance the saturation by a factor of 2.

    Returns:
        PIL Image or Tensor: Saturation adjusted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_saturation)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_saturation(img, saturation_factor)

    return F_t.adjust_saturation(img, saturation_factor)


def adjust_hue(img: Tensor, hue_factor: float) -> Tensor:
    """Adjust hue of an image.

    The image hue is adjusted by converting the image to HSV and
    cyclically shifting the intensities in the hue channel (H).
    The image is then converted back to original image mode.

    `hue_factor` is the amount of shift in H channel and must be in the
    interval `[-0.5, 0.5]`.

    See `Hue`_ for more details.

    .. _Hue: https://en.wikipedia.org/wiki/Hue

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image mode "1", "I", "F" and modes with transparency (alpha channel) are not supported.
        hue_factor (float):  How much to shift the hue channel. Should be in
            [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
            HSV space in positive and negative direction respectively.
            0 means no shift. Therefore, both -0.5 and 0.5 will give an image
            with complementary colors while 0 gives the original image.

    Returns:
        PIL Image or Tensor: Hue adjusted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_hue)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_hue(img, hue_factor)

    return F_t.adjust_hue(img, hue_factor)


def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:
    r"""Perform gamma correction on an image.

    Also known as Power Law Transform. Intensities in RGB mode are adjusted
    based on the following equation:

    .. math::
        I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}

    See `Gamma Correction`_ for more details.

    .. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction

    Args:
        img (PIL Image or Tensor): PIL Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, modes with transparency (alpha channel) are not supported.
        gamma (float): Non negative real number, same as :math:`\gamma` in the equation.
            gamma larger than 1 makes the shadows darker,
            while gamma smaller than 1 makes dark regions lighter.
        gain (float): The constant multiplier.
    Returns:
        PIL Image or Tensor: Gamma correction adjusted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_gamma)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_gamma(img, gamma, gain)

    return F_t.adjust_gamma(img, gamma, gain)
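
# Worked example (illustrative): with ``gamma=2.2`` and ``gain=1`` a mid-gray
# pixel of 128 maps to roughly 255 * (128 / 255) ** 2.2 ~= 56, i.e. the image
# gets darker, as the docstring describes.
#
#     img = torch.full((3, 2, 2), 128, dtype=torch.uint8)
#     out = adjust_gamma(img, gamma=2.2)  # values around 56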


def _get_inverse_affine_matrix(
    center: List[float], angle: float, translate: List[float], scale: float, shear: List[float], inverted: bool = True
) -> List[float]:
    # Helper method to compute inverse matrix for affine transformation

    # Pillow requires inverse affine transformation matrix:
    # Affine matrix is : M = T * C * RotateScaleShear * C^-1
    #
    # where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
    #       C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
    #       RotateScaleShear is rotation with scale and shear matrix
    #
    #       RotateScaleShear(a, s, (sx, sy)) =
    #       = R(a) * S(s) * SHy(sy) * SHx(sx)
    #       = [ s*cos(a - sy)/cos(sy), s*(-cos(a - sy)*tan(sx)/cos(sy) - sin(a)), 0 ]
    #         [ s*sin(a - sy)/cos(sy), s*(-sin(a - sy)*tan(sx)/cos(sy) + cos(a)), 0 ]
    #         [ 0                    , 0                                      , 1 ]
    # where R is a rotation matrix, S is a scaling matrix, and SHx and SHy are the shears:
    # SHx(s) = [1, -tan(s)] and SHy(s) = [1      , 0]
    #          [0, 1      ]              [-tan(s), 1]
    #
    # Thus, the inverse is M^-1 = C * RotateScaleShear^-1 * C^-1 * T^-1

    rot = math.radians(angle)
    sx = math.radians(shear[0])
    sy = math.radians(shear[1])

    cx, cy = center
    tx, ty = translate

    # RSS without scaling
    a = math.cos(rot - sy) / math.cos(sy)
    b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot)
    c = math.sin(rot - sy) / math.cos(sy)
    d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot)

    if inverted:
        # Inverted rotation matrix with scale and shear
        # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
        matrix = [d, -b, 0.0, -c, a, 0.0]
        matrix = [x / scale for x in matrix]
        # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
        matrix[2] += matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty)
        matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty)
        # Apply center translation: C * RSS^-1 * C^-1 * T^-1
        matrix[2] += cx
        matrix[5] += cy
    else:
        matrix = [a, b, 0.0, c, d, 0.0]
        matrix = [x * scale for x in matrix]
        # Apply inverse of center translation: RSS * C^-1
        matrix[2] += matrix[0] * (-cx) + matrix[1] * (-cy)
        matrix[5] += matrix[3] * (-cx) + matrix[4] * (-cy)
        # Apply translation and center : T * C * RSS * C^-1
        matrix[2] += cx + tx
        matrix[5] += cy + ty

    return matrix


def rotate(
    img: Tensor,
    angle: float,
    interpolation: InterpolationMode = InterpolationMode.NEAREST,
    expand: bool = False,
    center: Optional[List[int]] = None,
    fill: Optional[List[float]] = None,
    resample: Optional[int] = None,
) -> Tensor:
    """Rotate the image by angle.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): image to be rotated.
        angle (number): rotation angle value in degrees, counter-clockwise.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        expand (bool, optional): Optional expansion flag.
            If true, expands the output image to make it large enough to hold the entire rotated image.
            If false or omitted, make the output image the same size as the input image.
            Note that the expand flag assumes rotation around the center and no translation.
        center (sequence, optional): Optional center of rotation. Origin is the upper left corner.
            Default is the center of the image.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.

            .. note::
                In torchscript mode single int/float value is not supported, please use a sequence
                of length 1: ``[value, ]``.
        resample (int, optional):
            .. warning::
                This parameter was deprecated in ``0.12`` and will be removed in ``0.14``. Please use ``interpolation``
                instead.

    Returns:
        PIL Image or Tensor: Rotated image.

    .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters

    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(rotate)
    if resample is not None:
        warnings.warn(
            "The parameter 'resample' is deprecated since 0.12 and will be removed 0.14. "
            "Please use 'interpolation' instead."
        )
        interpolation = _interpolation_modes_from_int(resample)

    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if not isinstance(angle, (int, float)):
        raise TypeError("Argument angle should be int or float")

    if center is not None and not isinstance(center, (list, tuple)):
        raise TypeError("Argument center should be a sequence")

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be a InterpolationMode")

    if not isinstance(img, torch.Tensor):
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.rotate(img, angle=angle, interpolation=pil_interpolation, expand=expand, center=center, fill=fill)

    center_f = [0.0, 0.0]
    if center is not None:
        _, height, width = get_dimensions(img)
        # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
        center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, [width, height])]

    # due to current incoherence of rotation angle direction between affine and rotate implementations
    # we need to set -angle.
    matrix = _get_inverse_affine_matrix(center_f, -angle, [0.0, 0.0], 1.0, [0.0, 0.0])
    return F_t.rotate(img, matrix=matrix, interpolation=interpolation.value, expand=expand, fill=fill)
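
# Usage sketch (illustrative): by default the output keeps the input size;
# with ``expand=True`` it grows to contain the whole rotated image (roughly
# sqrt(2) times larger for a 45 degree rotation of a square).
#
#     t = torch.rand(3, 100, 100)
#     assert rotate(t, 45.0).shape == (3, 100, 100)
#     assert rotate(t, 45.0, expand=True).shape[-1] > 100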


def affine(
    img: Tensor,
    angle: float,
    translate: List[int],
    scale: float,
    shear: List[float],
    interpolation: InterpolationMode = InterpolationMode.NEAREST,
    fill: Optional[List[float]] = None,
    resample: Optional[int] = None,
    fillcolor: Optional[List[float]] = None,
    center: Optional[List[int]] = None,
) -> Tensor:
    """Apply affine transformation on the image keeping image center invariant.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): image to transform.
        angle (number): rotation angle in degrees between -180 and 180, clockwise direction.
        translate (sequence of integers): horizontal and vertical translations (post-rotation translation)
        scale (float): overall scale
        shear (float or sequence): shear angle value in degrees between -180 to 180, clockwise direction.
            If a sequence is specified, the first value corresponds to a shear parallel to the x axis, while
            the second value corresponds to a shear parallel to the y axis.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.

            .. note::
                In torchscript mode single int/float value is not supported, please use a sequence
                of length 1: ``[value, ]``.
        fillcolor (sequence or number, optional):
            .. warning::
                This parameter was deprecated in ``0.12`` and will be removed in ``0.14``. Please use ``fill`` instead.
        resample (int, optional):
            .. warning::
                This parameter was deprecated in ``0.12`` and will be removed in ``0.14``. Please use ``interpolation``
                instead.
        center (sequence, optional): Optional center of rotation. Origin is the upper left corner.
            Default is the center of the image.

    Returns:
        PIL Image or Tensor: Transformed image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(affine)
    if resample is not None:
        warnings.warn(
            "The parameter 'resample' is deprecated since 0.12 and will be removed in 0.14. "
            "Please use 'interpolation' instead."
        )
        interpolation = _interpolation_modes_from_int(resample)

    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if fillcolor is not None:
        warnings.warn(
            "The parameter 'fillcolor' is deprecated since 0.12 and will be removed in 0.14. "
            "Please use 'fill' instead."
        )
        fill = fillcolor

    if not isinstance(angle, (int, float)):
        raise TypeError("Argument angle should be int or float")

    if not isinstance(translate, (list, tuple)):
        raise TypeError("Argument translate should be a sequence")

    if len(translate) != 2:
        raise ValueError("Argument translate should be a sequence of length 2")

    if scale <= 0.0:
        raise ValueError("Argument scale should be positive")

    if not isinstance(shear, (numbers.Number, (list, tuple))):
        raise TypeError("Shear should be either a single value or a sequence of two values")

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be a InterpolationMode")
1166

    if isinstance(angle, int):
        angle = float(angle)

    if isinstance(translate, tuple):
        translate = list(translate)

    if isinstance(shear, numbers.Number):
        shear = [shear, 0.0]

    if isinstance(shear, tuple):
        shear = list(shear)

    if len(shear) == 1:
        shear = [shear[0], shear[0]]

    if len(shear) != 2:
        raise ValueError(f"Shear should be a sequence containing two values. Got {shear}")

    if center is not None and not isinstance(center, (list, tuple)):
        raise TypeError("Argument center should be a sequence")

    _, height, width = get_dimensions(img)
    if not isinstance(img, torch.Tensor):
        # center = (width * 0.5 + 0.5, height * 0.5 + 0.5)
        # it is visually better to estimate the center without 0.5 offset
        # otherwise image rotated by 90 degrees is shifted vs output image of torch.rot90 or F_t.affine
        if center is None:
            center = [width * 0.5, height * 0.5]
        matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.affine(img, matrix=matrix, interpolation=pil_interpolation, fill=fill)

    center_f = [0.0, 0.0]
    if center is not None:
        # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
        center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, [width, height])]

    translate_f = [1.0 * t for t in translate]
    matrix = _get_inverse_affine_matrix(center_f, angle, translate_f, scale, shear)
    return F_t.affine(img, matrix=matrix, interpolation=interpolation.value, fill=fill)
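
# Usage sketch (illustrative only; shapes and parameter values below are
# arbitrary, not part of the API): rotate a CHW tensor by 30 degrees around
# its center with bilinear resampling, shifting it 10 px right and 5 px down.
#
#     img = torch.rand(3, 64, 64)
#     out = affine(
#         img, angle=30.0, translate=[10, 5], scale=1.0, shear=[0.0, 0.0],
#         interpolation=InterpolationMode.BILINEAR,
#     )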


@torch.jit.unused
def to_grayscale(img, num_output_channels=1):
    """Convert PIL image of any mode (RGB, HSV, LAB, etc.) to grayscale version of image.
    This transform does not support torch Tensor.

    Args:
        img (PIL Image): PIL Image to be converted to grayscale.
        num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default is 1.

    Returns:
        PIL Image: Grayscale version of the image.

        - if num_output_channels = 1 : returned image is single channel
        - if num_output_channels = 3 : returned image is 3 channel with r = g = b
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(to_grayscale)
    if isinstance(img, Image.Image):
        return F_pil.to_grayscale(img, num_output_channels)

    raise TypeError("Input should be PIL Image")
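
# Usage sketch (illustrative; "photo.png" is a hypothetical file): collapse a
# PIL image to one channel, or to three identical channels with
# num_output_channels=3.
#
#     pil_img = Image.open("photo.png")
#     gray = to_grayscale(pil_img, num_output_channels=1)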


def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor:
    """Convert RGB image to grayscale version of image.
    If the image is torch Tensor, it is expected
    to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions.

    Note:
        Please note that this method supports only RGB images as input. For inputs in other color spaces,
        please consider using :meth:`~torchvision.transforms.functional.to_grayscale` with PIL Image.

    Args:
        img (PIL Image or Tensor): RGB Image to be converted to grayscale.
        num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default is 1.

    Returns:
        PIL Image or Tensor: Grayscale version of the image.

        - if num_output_channels = 1 : returned image is single channel
        - if num_output_channels = 3 : returned image is 3 channel with r = g = b
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(rgb_to_grayscale)
    if not isinstance(img, torch.Tensor):
        return F_pil.to_grayscale(img, num_output_channels)

    return F_t.rgb_to_grayscale(img, num_output_channels)
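
# Usage sketch (illustrative shapes): a [..., 3, H, W] tensor becomes
# [..., 1, H, W], or [..., 3, H, W] with r = g = b when num_output_channels=3.
#
#     rgb = torch.rand(2, 3, 32, 32)  # hypothetical batch of RGB images
#     gray = rgb_to_grayscale(rgb)                          # shape (2, 1, 32, 32)
#     gray3 = rgb_to_grayscale(rgb, num_output_channels=3)  # shape (2, 3, 32, 32)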


def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool = False) -> Tensor:
    """Erase the input Tensor Image with given value.
    This transform does not support PIL Image.

    Args:
        img (Tensor Image): Tensor image of size (C, H, W) to be erased
        i (int): i in (i,j) i.e. coordinates of the upper left corner.
        j (int): j in (i,j) i.e. coordinates of the upper left corner.
        h (int): Height of the erased region.
        w (int): Width of the erased region.
        v: Erasing value.
        inplace (bool, optional): For in-place operations. By default, it is set to False.

    Returns:
        Tensor Image: Erased image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(erase)
    if not isinstance(img, torch.Tensor):
        raise TypeError(f"img should be Tensor Image. Got {type(img)}")

    return F_t.erase(img, i, j, h, w, v, inplace=inplace)
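
# Usage sketch (illustrative values): zero out an 8x8 patch whose upper left
# corner is at row i=10, column j=20; v broadcasts over the erased region.
#
#     img = torch.rand(3, 64, 64)
#     out = erase(img, i=10, j=20, h=8, w=8, v=torch.tensor(0.0))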


def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Tensor:
    """Performs Gaussian blurring on the image by the given kernel.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): Image to be blurred
        kernel_size (sequence of ints or int): Gaussian kernel size. Can be a sequence of integers
            like ``(kx, ky)`` or a single integer for square kernels.

            .. note::
                In torchscript mode kernel_size as single int is not supported, use a sequence of
                length 1: ``[ksize, ]``.
        sigma (sequence of floats or float, optional): Gaussian kernel standard deviation. Can be a
            sequence of floats like ``(sigma_x, sigma_y)`` or a single float to define the
            same sigma in both X/Y directions. If None, then it is computed using
            ``kernel_size`` as ``sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8``.
            Default is None.

            .. note::
                In torchscript mode sigma as single float is
                not supported, use a sequence of length 1: ``[sigma, ]``.

    Returns:
        PIL Image or Tensor: Gaussian Blurred version of the image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(gaussian_blur)
    if not isinstance(kernel_size, (int, list, tuple)):
        raise TypeError(f"kernel_size should be int or a sequence of integers. Got {type(kernel_size)}")
    if isinstance(kernel_size, int):
        kernel_size = [kernel_size, kernel_size]
    if len(kernel_size) != 2:
        raise ValueError(f"If kernel_size is a sequence its length should be 2. Got {len(kernel_size)}")
    for ksize in kernel_size:
        if ksize % 2 == 0 or ksize < 0:
            raise ValueError(f"kernel_size should have odd and positive integers. Got {kernel_size}")

    if sigma is None:
        sigma = [ksize * 0.15 + 0.35 for ksize in kernel_size]
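        # Equivalent to the docstring default 0.3 * ((ksize - 1) * 0.5 - 1) + 0.8,
        # which expands to 0.15 * ksize + 0.35.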

    if sigma is not None and not isinstance(sigma, (int, float, list, tuple)):
        raise TypeError(f"sigma should be either float or sequence of floats. Got {type(sigma)}")
    if isinstance(sigma, (int, float)):
        sigma = [float(sigma), float(sigma)]
    if isinstance(sigma, (list, tuple)) and len(sigma) == 1:
        sigma = [sigma[0], sigma[0]]
    if len(sigma) != 2:
        raise ValueError(f"If sigma is a sequence, its length should be 2. Got {len(sigma)}")
    for s in sigma:
        if s <= 0.0:
            raise ValueError(f"sigma should have positive values. Got {sigma}")

    t_img = img
    if not isinstance(img, torch.Tensor):
        if not F_pil._is_pil_image(img):
            raise TypeError(f"img should be PIL Image or Tensor. Got {type(img)}")

        t_img = pil_to_tensor(img)

    output = F_t.gaussian_blur(t_img, kernel_size, sigma)

    if not isinstance(img, torch.Tensor):
        output = to_pil_image(output, mode=img.mode)
    return output
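
# Usage sketch (illustrative kernel/sigma values): blur with a 5x5 Gaussian
# kernel; passing sigma=None would derive sigma from the kernel size as above.
#
#     img = torch.rand(3, 64, 64)
#     blurred = gaussian_blur(img, kernel_size=[5, 5], sigma=[1.5, 1.5])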


def invert(img: Tensor) -> Tensor:
    """Invert the colors of an RGB/grayscale image.

    Args:
        img (PIL Image or Tensor): Image to have its colors inverted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Returns:
        PIL Image or Tensor: Color inverted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(invert)
    if not isinstance(img, torch.Tensor):
        return F_pil.invert(img)

    return F_t.invert(img)
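
# Usage sketch (illustrative): for a float tensor in [0.0, 1.0] this computes
# 1.0 - img; for uint8 input it computes 255 - img.
#
#     img = torch.rand(3, 32, 32)
#     out = invert(img)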


def posterize(img: Tensor, bits: int) -> Tensor:
    """Posterize an image by reducing the number of bits for each color channel.

    Args:
        img (PIL Image or Tensor): Image to have its colors posterized.
            If img is torch Tensor, it should be of type torch.uint8 and
            it is expected to be in [..., 1 or 3, H, W] format, where ... means
            it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".
        bits (int): The number of bits to keep for each channel (0-8).

    Returns:
        PIL Image or Tensor: Posterized image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(posterize)
    if not (0 <= bits <= 8):
        raise ValueError(f"The number of bits should be between 0 and 8. Got {bits}")

    if not isinstance(img, torch.Tensor):
        return F_pil.posterize(img, bits)

    return F_t.posterize(img, bits)
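
# Usage sketch (illustrative): keep only the 3 most significant bits of each
# channel; tensor input must be torch.uint8.
#
#     img = torch.randint(0, 256, (3, 32, 32), dtype=torch.uint8)
#     out = posterize(img, bits=3)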


def solarize(img: Tensor, threshold: float) -> Tensor:
    """Solarize an RGB/grayscale image by inverting all pixel values above a threshold.

    Args:
        img (PIL Image or Tensor): Image to be solarized.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".
        threshold (float): All pixels equal to or above this value are inverted.

    Returns:
        PIL Image or Tensor: Solarized image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(solarize)
    if not isinstance(img, torch.Tensor):
        return F_pil.solarize(img, threshold)

    return F_t.solarize(img, threshold)
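
# Usage sketch (illustrative threshold): invert every uint8 pixel >= 128;
# for float images in [0, 1] a threshold such as 0.5 would be used instead.
#
#     img = torch.randint(0, 256, (3, 32, 32), dtype=torch.uint8)
#     out = solarize(img, threshold=128.0)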


def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor:
    """Adjust the sharpness of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        sharpness_factor (float): How much to adjust the sharpness. Can be
            any non-negative number. 0 gives a blurred image, 1 gives the
            original image while 2 increases the sharpness by a factor of 2.

    Returns:
        PIL Image or Tensor: Sharpness adjusted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_sharpness)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_sharpness(img, sharpness_factor)

    return F_t.adjust_sharpness(img, sharpness_factor)
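
# Usage sketch (illustrative factor): sharpness_factor=2.0 doubles sharpness,
# 0.0 yields a blurred image, and 1.0 returns the input unchanged.
#
#     img = torch.rand(3, 32, 32)
#     out = adjust_sharpness(img, sharpness_factor=2.0)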


def autocontrast(img: Tensor) -> Tensor:
    """Maximize contrast of an image by remapping its
    pixels per channel so that the lowest becomes black and the lightest
    becomes white.

    Args:
        img (PIL Image or Tensor): Image on which autocontrast is applied.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Returns:
        PIL Image or Tensor: An image that was autocontrasted.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(autocontrast)
    if not isinstance(img, torch.Tensor):
        return F_pil.autocontrast(img)

    return F_t.autocontrast(img)
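
# Usage sketch (illustrative): stretch each channel of a low-contrast image so
# its darkest pixel maps to black and its lightest to white.
#
#     img = torch.rand(3, 32, 32) * 0.3 + 0.4   # values squeezed into [0.4, 0.7]
#     out = autocontrast(img)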


def equalize(img: Tensor) -> Tensor:
    """Equalize the histogram of an image by applying
    a non-linear mapping to the input in order to create a uniform
    distribution of grayscale values in the output.

    Args:
        img (PIL Image or Tensor): Image on which equalize is applied.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            The tensor dtype must be ``torch.uint8`` and values are expected to be in ``[0, 255]``.
            If img is PIL Image, it is expected to be in mode "P", "L" or "RGB".

    Returns:
        PIL Image or Tensor: An image that was equalized.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(equalize)
    if not isinstance(img, torch.Tensor):
        return F_pil.equalize(img)

    return F_t.equalize(img)
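
# Usage sketch (illustrative): histogram-equalize a uint8 tensor image; note
# that equalize requires torch.uint8 tensor input, unlike most ops above.
#
#     img = torch.randint(0, 256, (3, 32, 32), dtype=torch.uint8)
#     out = equalize(img)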