import math
import numbers
import warnings
from enum import Enum
from typing import Any, List, Optional, Tuple, Union

import numpy as np
import torch
from PIL import Image
from torch import Tensor

try:
    import accimage
except ImportError:
    accimage = None

from ..utils import _log_api_usage_once
from . import functional_pil as F_pil, functional_tensor as F_t

class InterpolationMode(Enum):
    """Interpolation modes
    Available interpolation methods are ``nearest``, ``nearest-exact``, ``bilinear``, ``bicubic``, ``box``, ``hamming``,
    and ``lanczos``.
    """

    NEAREST = "nearest"
    NEAREST_EXACT = "nearest-exact"
    BILINEAR = "bilinear"
    BICUBIC = "bicubic"
    # For PIL compatibility
    BOX = "box"
    HAMMING = "hamming"
    LANCZOS = "lanczos"


# TODO: Once torchscript supports Enums with staticmethod
# this can be put into InterpolationMode as staticmethod
def _interpolation_modes_from_int(i: int) -> InterpolationMode:
    inverse_modes_mapping = {
        0: InterpolationMode.NEAREST,
        2: InterpolationMode.BILINEAR,
        3: InterpolationMode.BICUBIC,
        4: InterpolationMode.BOX,
        5: InterpolationMode.HAMMING,
        1: InterpolationMode.LANCZOS,
    }
    return inverse_modes_mapping[i]


pil_modes_mapping = {
    InterpolationMode.NEAREST: 0,
    InterpolationMode.BILINEAR: 2,
    InterpolationMode.BICUBIC: 3,
    InterpolationMode.NEAREST_EXACT: 0,
    InterpolationMode.BOX: 4,
    InterpolationMode.HAMMING: 5,
    InterpolationMode.LANCZOS: 1,
}
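
# Illustrative sketch, not part of torchvision's API: the two mappings above
# are mutually consistent for the PIL resample codes, so an int -> mode -> int
# round trip is lossless. `_demo_pil_mode_round_trip` is a hypothetical name.
def _demo_pil_mode_round_trip() -> None:
    for pil_code in (0, 1, 2, 3, 4, 5):
        mode = _interpolation_modes_from_int(pil_code)
        assert pil_modes_mapping[mode] == pil_code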

_is_pil_image = F_pil._is_pil_image


def get_dimensions(img: Tensor) -> List[int]:
    """Returns the dimensions of an image as [channels, height, width].

    Args:
        img (PIL Image or Tensor): The image to be checked.

    Returns:
        List[int]: The image dimensions.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(get_dimensions)
    if isinstance(img, torch.Tensor):
        return F_t.get_dimensions(img)

    return F_pil.get_dimensions(img)


def get_image_size(img: Tensor) -> List[int]:
    """Returns the size of an image as [width, height].

    Args:
        img (PIL Image or Tensor): The image to be checked.

    Returns:
        List[int]: The image size.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(get_image_size)
    if isinstance(img, torch.Tensor):
        return F_t.get_image_size(img)

    return F_pil.get_image_size(img)

def get_image_num_channels(img: Tensor) -> int:
    """Returns the number of channels of an image.

    Args:
        img (PIL Image or Tensor): The image to be checked.

    Returns:
        int: The number of channels.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(get_image_num_channels)
    if isinstance(img, torch.Tensor):
        return F_t.get_image_num_channels(img)

    return F_pil.get_image_num_channels(img)


@torch.jit.unused
def _is_numpy(img: Any) -> bool:
    return isinstance(img, np.ndarray)


@torch.jit.unused
def _is_numpy_image(img: Any) -> bool:
    return img.ndim in {2, 3}


def to_tensor(pic) -> Tensor:
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
    This function does not support torchscript.

    See :class:`~torchvision.transforms.ToTensor` for more details.

    Args:
        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(to_tensor)
    if not (F_pil._is_pil_image(pic) or _is_numpy(pic)):
        raise TypeError(f"pic should be PIL Image or ndarray. Got {type(pic)}")

    if _is_numpy(pic) and not _is_numpy_image(pic):
        raise ValueError(f"pic should be 2/3 dimensional. Got {pic.ndim} dimensions.")

    default_float_dtype = torch.get_default_dtype()

    if isinstance(pic, np.ndarray):
        # handle numpy array
        if pic.ndim == 2:
            pic = pic[:, :, None]

        img = torch.from_numpy(pic.transpose((2, 0, 1))).contiguous()
        # backward compatibility
        if isinstance(img, torch.ByteTensor):
            return img.to(dtype=default_float_dtype).div(255)
        else:
            return img

    if accimage is not None and isinstance(pic, accimage.Image):
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
        pic.copyto(nppic)
        return torch.from_numpy(nppic).to(dtype=default_float_dtype)

    # handle PIL Image
    mode_to_nptype = {"I": np.int32, "I;16": np.int16, "F": np.float32}
    img = torch.from_numpy(np.array(pic, mode_to_nptype.get(pic.mode, np.uint8), copy=True))

    if pic.mode == "1":
        img = 255 * img
    img = img.view(pic.size[1], pic.size[0], F_pil.get_image_num_channels(pic))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1)).contiguous()
    if isinstance(img, torch.ByteTensor):
        return img.to(dtype=default_float_dtype).div(255)
    else:
        return img


def pil_to_tensor(pic: Any) -> Tensor:
    """Convert a ``PIL Image`` to a tensor of the same type.
    This function does not support torchscript.

    See :class:`~torchvision.transforms.PILToTensor` for more details.

    .. note::

        A deep copy of the underlying array is performed.

    Args:
        pic (PIL Image): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(pil_to_tensor)
    if not F_pil._is_pil_image(pic):
        raise TypeError(f"pic should be PIL Image. Got {type(pic)}")

    if accimage is not None and isinstance(pic, accimage.Image):
        # accimage format is always uint8 internally, so always return uint8 here
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.uint8)
        pic.copyto(nppic)
        return torch.as_tensor(nppic)

    # handle PIL Image
    img = torch.as_tensor(np.array(pic, copy=True))
    img = img.view(pic.size[1], pic.size[0], F_pil.get_image_num_channels(pic))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1))
    return img


def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor:
    """Convert a tensor image to the given ``dtype`` and scale the values accordingly.
    This function does not support PIL Image.

    Args:
        image (torch.Tensor): Image to be converted
        dtype (torch.dtype): Desired data type of the output

    Returns:
        Tensor: Converted image

    .. note::

        When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
        If converted back and forth, this mismatch has no effect.

    Raises:
        RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
            well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
            overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
            of the integer ``dtype``.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(convert_image_dtype)
    if not isinstance(image, torch.Tensor):
        raise TypeError("Input img should be Tensor Image")

    return F_t.convert_image_dtype(image, dtype)
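
# Illustrative sketch of the scaling behaviour: uint8 -> float maps [0, 255]
# onto [0.0, 1.0], and converting back recovers the original values (the exact
# round trip below is an assumption checked only for these sample values).
def _demo_convert_image_dtype() -> None:
    img_u8 = torch.tensor([[[0, 128, 255]]], dtype=torch.uint8)
    img_f = convert_image_dtype(img_u8, torch.float32)  # approximately [0.0, 0.502, 1.0]
    img_back = convert_image_dtype(img_f, torch.uint8)
    assert torch.equal(img_back, img_u8)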


def to_pil_image(pic, mode=None):
    """Convert a tensor or an ndarray to PIL Image. This function does not support torchscript.

    See :class:`~torchvision.transforms.ToPILImage` for more details.

    Args:
        pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).

    .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes

    Returns:
        PIL Image: Image converted to PIL Image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(to_pil_image)

    if not (isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
        raise TypeError(f"pic should be Tensor or ndarray. Got {type(pic)}.")

    elif isinstance(pic, torch.Tensor):
        if pic.ndimension() not in {2, 3}:
            raise ValueError(f"pic should be 2/3 dimensional. Got {pic.ndimension()} dimensions.")

        elif pic.ndimension() == 2:
            # if 2D image, add channel dimension (CHW)
            pic = pic.unsqueeze(0)

        # check number of channels
        if pic.shape[-3] > 4:
            raise ValueError(f"pic should not have > 4 channels. Got {pic.shape[-3]} channels.")

    elif isinstance(pic, np.ndarray):
        if pic.ndim not in {2, 3}:
            raise ValueError(f"pic should be 2/3 dimensional. Got {pic.ndim} dimensions.")

        elif pic.ndim == 2:
            # if 2D image, add channel dimension (HWC)
            pic = np.expand_dims(pic, 2)

        # check number of channels
        if pic.shape[-1] > 4:
            raise ValueError(f"pic should not have > 4 channels. Got {pic.shape[-1]} channels.")

    npimg = pic
    if isinstance(pic, torch.Tensor):
        if pic.is_floating_point() and mode != "F":
            pic = pic.mul(255).byte()
        npimg = np.transpose(pic.cpu().numpy(), (1, 2, 0))

    if not isinstance(npimg, np.ndarray):
        raise TypeError(f"Input pic must be a torch.Tensor or NumPy ndarray, not {type(npimg)}")

    if npimg.shape[2] == 1:
        expected_mode = None
        npimg = npimg[:, :, 0]
        if npimg.dtype == np.uint8:
            expected_mode = "L"
        elif npimg.dtype == np.int16:
            expected_mode = "I;16"
        elif npimg.dtype == np.int32:
            expected_mode = "I"
        elif npimg.dtype == np.float32:
            expected_mode = "F"
        if mode is not None and mode != expected_mode:
            raise ValueError(f"Incorrect mode ({mode}) supplied for input type {npimg.dtype}. Should be {expected_mode}")
        mode = expected_mode

    elif npimg.shape[2] == 2:
        permitted_2_channel_modes = ["LA"]
        if mode is not None and mode not in permitted_2_channel_modes:
            raise ValueError(f"Only modes {permitted_2_channel_modes} are supported for 2D inputs")

        if mode is None and npimg.dtype == np.uint8:
            mode = "LA"

    elif npimg.shape[2] == 4:
        permitted_4_channel_modes = ["RGBA", "CMYK", "RGBX"]
        if mode is not None and mode not in permitted_4_channel_modes:
            raise ValueError(f"Only modes {permitted_4_channel_modes} are supported for 4D inputs")

        if mode is None and npimg.dtype == np.uint8:
            mode = "RGBA"
    else:
        permitted_3_channel_modes = ["RGB", "YCbCr", "HSV"]
        if mode is not None and mode not in permitted_3_channel_modes:
            raise ValueError(f"Only modes {permitted_3_channel_modes} are supported for 3D inputs")
        if mode is None and npimg.dtype == np.uint8:
            mode = "RGB"

    if mode is None:
        raise TypeError(f"Input type {npimg.dtype} is not supported")

    return Image.fromarray(npimg, mode=mode)
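
# Illustrative sketch: a float CHW tensor in [0, 1] becomes an 8-bit "RGB"
# PIL image (values multiplied by 255 and truncated); pil_to_tensor() then
# brings it back as a uint8 CHW tensor. `_demo_pil_round_trip` is hypothetical.
def _demo_pil_round_trip() -> None:
    t = torch.rand(3, 8, 8)
    pil_img = to_pil_image(t)      # inferred mode "RGB" for 3-channel uint8 data
    back = pil_to_tensor(pil_img)  # uint8 tensor of shape (3, 8, 8)
    assert back.dtype == torch.uint8 and tuple(back.shape) == (3, 8, 8)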


def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool = False) -> Tensor:
    """Normalize a float tensor image with mean and standard deviation.
    This transform does not support PIL Image.

    .. note::
        This transform acts out of place by default, i.e., it does not mutate the input tensor.

    See :class:`~torchvision.transforms.Normalize` for more details.

    Args:
        tensor (Tensor): Float tensor image of size (C, H, W) or (B, C, H, W) to be normalized.
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace (bool, optional): Bool to make this operation inplace.

    Returns:
        Tensor: Normalized Tensor image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(normalize)
    if not isinstance(tensor, torch.Tensor):
        raise TypeError(f"img should be Tensor Image. Got {type(tensor)}")

    return F_t.normalize(tensor, mean=mean, std=std, inplace=inplace)
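
# Illustrative sketch: channel-wise standardization with the commonly used
# ImageNet statistics (conventional values, not required by this function).
def _demo_normalize() -> None:
    batch = torch.rand(2, 3, 224, 224)  # float (B, C, H, W) input
    out = normalize(batch, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    assert out.shape == batch.shape  # out of place: `batch` is left untouched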


def _compute_resized_output_size(
    image_size: Tuple[int, int], size: List[int], max_size: Optional[int] = None
) -> List[int]:
    if len(size) == 1:  # specified size only for the smallest edge
        h, w = image_size
        short, long = (w, h) if w <= h else (h, w)
        requested_new_short = size if isinstance(size, int) else size[0]

        new_short, new_long = requested_new_short, int(requested_new_short * long / short)

        if max_size is not None:
            if max_size <= requested_new_short:
                raise ValueError(
                    f"max_size = {max_size} must be strictly greater than the requested "
                    f"size for the smaller edge size = {size}"
                )
            if new_long > max_size:
                new_short, new_long = int(max_size * new_short / new_long), max_size

        new_w, new_h = (new_short, new_long) if w <= h else (new_long, new_short)
    else:  # specified both h and w
        new_w, new_h = size[1], size[0]
    return [new_h, new_w]
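
# Illustrative walk-through, with hypothetical numbers: for a 400x600 (H, W)
# image, size=[200] matches the shorter edge and keeps the aspect ratio;
# adding max_size=250 caps the longer edge and overrules the requested size.
def _demo_compute_resized_output_size() -> None:
    assert _compute_resized_output_size((400, 600), [200]) == [200, 300]
    assert _compute_resized_output_size((400, 600), [200], max_size=250) == [166, 250]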


def resize(
    img: Tensor,
    size: List[int],
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    max_size: Optional[int] = None,
    antialias: Optional[bool] = None,
) -> Tensor:
    r"""Resize the input image to the given size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. warning::
        The output image might be different depending on its type: when downsampling, the interpolation of PIL images
        and tensors is slightly different, because PIL applies antialiasing. This may lead to significant differences
        in the performance of a network. Therefore, it is preferable to train and serve a model with the same input
        types. See also below the ``antialias`` parameter, which can help make the output of PIL images and tensors
        closer.

    Args:
        img (PIL Image or Tensor): Image to be resized.
        size (sequence or int): Desired output size. If size is a sequence like
            (h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number maintaining
            the aspect ratio. i.e, if height > width, then image will be rescaled to
            :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`.

            .. note::
                In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`.
            Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
            ``InterpolationMode.NEAREST_EXACT``, ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are
            supported.
        max_size (int, optional): The maximum allowed for the longer edge of
            the resized image: if the longer edge of the image is greater
            than ``max_size`` after being resized according to ``size``, then
            the image is resized again so that the longer edge is equal to
            ``max_size``. As a result, ``size`` might be overruled, i.e. the
            smaller edge may be shorter than ``size``. This is only supported
            if ``size`` is an int (or a sequence of length 1 in torchscript
            mode).
        antialias (bool, optional): antialias flag. If ``img`` is PIL Image, the flag is ignored and anti-alias
            is always used. If ``img`` is Tensor, the flag is False by default and can be set to True for
            ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` modes.
            This can help make the output of PIL images and tensors closer.

    Returns:
        PIL Image or Tensor: Resized image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(resize)

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be an InterpolationMode")

    if isinstance(size, (list, tuple)):
        if len(size) not in [1, 2]:
            raise ValueError(
                f"Size must be an int or a 1 or 2 element tuple/list, not a {len(size)} element tuple/list"
            )
        if max_size is not None and len(size) != 1:
            raise ValueError(
                "max_size should only be passed if size specifies the length of the smaller edge, "
                "i.e. size should be an int or a sequence of length 1 in torchscript mode."
            )

    _, image_height, image_width = get_dimensions(img)
    if isinstance(size, int):
        size = [size]
    output_size = _compute_resized_output_size((image_height, image_width), size, max_size)

    if [image_height, image_width] == output_size:
        return img

    if not isinstance(img, torch.Tensor):
        if antialias is not None and not antialias:
            warnings.warn("Anti-alias option is always applied for PIL Image input. Argument antialias is ignored.")
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.resize(img, size=output_size, interpolation=pil_interpolation)

    return F_t.resize(img, size=output_size, interpolation=interpolation.value, antialias=antialias)
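
# Illustrative sketch: an explicit (h, w) resize versus a short-edge resize
# capped by max_size (see the helper above); antialias=True only affects
# tensor inputs. `_demo_resize` is a hypothetical helper name.
def _demo_resize() -> None:
    img = torch.rand(3, 400, 600)
    assert tuple(resize(img, [100, 150]).shape) == (3, 100, 150)
    out = resize(img, [200], max_size=250, antialias=True)
    assert tuple(out.shape) == (3, 166, 250)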


def pad(img: Tensor, padding: List[int], fill: Union[int, float] = 0, padding_mode: str = "constant") -> Tensor:
    r"""Pad the given image on all sides with the given "pad" value.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means at most 2 leading dimensions for mode reflect and symmetric,
    at most 3 leading dimensions for mode edge,
    and an arbitrary number of leading dimensions for mode constant

    Args:
        img (PIL Image or Tensor): Image to be padded.
        padding (int or sequence): Padding on each border. If a single int is provided this
            is used to pad all borders. If sequence of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a sequence of length 4 is provided
            this is the padding for the left, top, right and bottom borders respectively.

            .. note::
                In torchscript mode padding as single int is not supported, use a sequence of
                length 1: ``[padding, ]``.
        fill (number or tuple): Pixel fill value for constant fill. Default is 0.
            If a tuple of length 3, it is used to fill R, G, B channels respectively.
            This value is only used when the padding_mode is constant.
            Only number is supported for torch Tensor.
            Only int or tuple value is supported for PIL Image.
        padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
            Default is constant.

            - constant: pads with a constant value, this value is specified with fill

            - edge: pads with the last value at the edge of the image.
              If the input is a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2

            - reflect: pads with reflection of image without repeating the last value on the edge.
              For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
              will result in [3, 2, 1, 2, 3, 4, 3, 2]

            - symmetric: pads with reflection of image repeating the last value on the edge.
              For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
              will result in [2, 1, 1, 2, 3, 4, 4, 3]

    Returns:
        PIL Image or Tensor: Padded image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(pad)
    if not isinstance(img, torch.Tensor):
        return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)

    return F_t.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)
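
# Illustrative sketch of the reflect/symmetric examples from the docstring,
# applied to a tiny float tensor row (a plain 2D [H, W] input is assumed to
# be acceptable here). `_demo_pad_modes` is a hypothetical helper name.
def _demo_pad_modes() -> None:
    row = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
    # reflect does not repeat the edge value:
    assert pad(row, [2, 0], padding_mode="reflect").tolist() == [[3, 2, 1, 2, 3, 4, 3, 2]]
    # symmetric repeats the edge value:
    assert pad(row, [2, 0], padding_mode="symmetric").tolist() == [[2, 1, 1, 2, 3, 4, 4, 3]]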


def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:
    """Crop the given image at specified location and output size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
    If image size is smaller than output size along any edge, image is padded with 0 and then cropped.

    Args:
        img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.

    Returns:
        PIL Image or Tensor: Cropped image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(crop)

    if not isinstance(img, torch.Tensor):
        return F_pil.crop(img, top, left, height, width)

    return F_t.crop(img, top, left, height, width)


def center_crop(img: Tensor, output_size: List[int]) -> Tensor:
    """Crops the given image at the center.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
    If image size is smaller than output size along any edge, image is padded with 0 and then center cropped.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        output_size (sequence or int): (height, width) of the crop box. If int or sequence with single int,
            it is used for both directions.

    Returns:
        PIL Image or Tensor: Cropped image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(center_crop)
    if isinstance(output_size, numbers.Number):
        output_size = (int(output_size), int(output_size))
    elif isinstance(output_size, (tuple, list)) and len(output_size) == 1:
        output_size = (output_size[0], output_size[0])

    _, image_height, image_width = get_dimensions(img)
    crop_height, crop_width = output_size

    if crop_width > image_width or crop_height > image_height:
        padding_ltrb = [
            (crop_width - image_width) // 2 if crop_width > image_width else 0,
            (crop_height - image_height) // 2 if crop_height > image_height else 0,
            (crop_width - image_width + 1) // 2 if crop_width > image_width else 0,
            (crop_height - image_height + 1) // 2 if crop_height > image_height else 0,
        ]
        img = pad(img, padding_ltrb, fill=0)  # PIL uses fill value 0
        _, image_height, image_width = get_dimensions(img)
        if crop_width == image_width and crop_height == image_height:
            return img

    crop_top = int(round((image_height - crop_height) / 2.0))
    crop_left = int(round((image_width - crop_width) / 2.0))
    return crop(img, crop_top, crop_left, crop_height, crop_width)


def resized_crop(
    img: Tensor,
    top: int,
    left: int,
    height: int,
    width: int,
    size: List[int],
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    antialias: Optional[bool] = None,
) -> Tensor:
    """Crop the given image and resize it to desired size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.

    Args:
        img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.
        size (sequence or int): Desired output size. Same semantics as ``resize``.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`.
            Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
            ``InterpolationMode.NEAREST_EXACT``, ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are
            supported.
        antialias (bool, optional): antialias flag. If ``img`` is PIL Image, the flag is ignored and anti-alias
            is always used. If ``img`` is Tensor, the flag is False by default and can be set to True for
            ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` modes.
            This can help make the output of PIL images and tensors closer.

    Returns:
        PIL Image or Tensor: Cropped image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(resized_crop)
    img = crop(img, top, left, height, width)
    img = resize(img, size, interpolation, antialias=antialias)
    return img


def hflip(img: Tensor) -> Tensor:
    """Horizontally flip the given image.

    Args:
        img (PIL Image or Tensor): Image to be flipped. If img
            is a Tensor, it is expected to be in [..., H, W] format,
            where ... means it can have an arbitrary number of leading
            dimensions.

    Returns:
        PIL Image or Tensor: Horizontally flipped image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(hflip)
    if not isinstance(img, torch.Tensor):
        return F_pil.hflip(img)

    return F_t.hflip(img)


def _get_perspective_coeffs(startpoints: List[List[int]], endpoints: List[List[int]]) -> List[float]:
    """Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms.

    In Perspective Transform each pixel (x, y) in the original image gets transformed as,
     (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) )

    Args:
        startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
        endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.

    Returns:
        octuple (a, b, c, d, e, f, g, h) for transforming each pixel.
    """
    a_matrix = torch.zeros(2 * len(startpoints), 8, dtype=torch.float)

    for i, (p1, p2) in enumerate(zip(endpoints, startpoints)):
        a_matrix[2 * i, :] = torch.tensor([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
        a_matrix[2 * i + 1, :] = torch.tensor([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])

    b_matrix = torch.tensor(startpoints, dtype=torch.float).view(8)
    res = torch.linalg.lstsq(a_matrix, b_matrix, driver="gels").solution

    output: List[float] = res.tolist()
    return output
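
# Illustrative check: when startpoints == endpoints the solved coefficients
# reduce to the identity transform (a = e = 1, all other terms 0).
# `_demo_perspective_identity` is a hypothetical helper name.
def _demo_perspective_identity() -> None:
    corners = [[0, 0], [9, 0], [9, 9], [0, 9]]
    coeffs = _get_perspective_coeffs(corners, corners)
    expected = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]
    assert all(abs(c - e) < 1e-4 for c, e in zip(coeffs, expected))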


def perspective(
    img: Tensor,
    startpoints: List[List[int]],
    endpoints: List[List[int]],
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    fill: Optional[List[float]] = None,
) -> Tensor:
    """Perform perspective transform of the given image.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): Image to be transformed.
        startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
        endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.

            .. note::
                In torchscript mode single int/float value is not supported, please use a sequence
                of length 1: ``[value, ]``.

    Returns:
        PIL Image or Tensor: transformed Image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(perspective)

    coeffs = _get_perspective_coeffs(startpoints, endpoints)

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be an InterpolationMode")

    if not isinstance(img, torch.Tensor):
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.perspective(img, coeffs, interpolation=pil_interpolation, fill=fill)

    return F_t.perspective(img, coeffs, interpolation=interpolation.value, fill=fill)


def vflip(img: Tensor) -> Tensor:
    """Vertically flip the given image.

    Args:
        img (PIL Image or Tensor): Image to be flipped. If img
            is a Tensor, it is expected to be in [..., H, W] format,
            where ... means it can have an arbitrary number of leading
            dimensions.

    Returns:
        PIL Image or Tensor: Vertically flipped image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(vflip)
    if not isinstance(img, torch.Tensor):
        return F_pil.vflip(img)

    return F_t.vflip(img)


def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
    """Crop the given image into four corners and the central crop.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).

    Returns:
       tuple: tuple (tl, tr, bl, br, center)
       Corresponding top left, top right, bottom left, bottom right and center crop.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(five_crop)
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and len(size) == 1:
        size = (size[0], size[0])

    if len(size) != 2:
        raise ValueError("Please provide only two dimensions (h, w) for size.")

    _, image_height, image_width = get_dimensions(img)
    crop_height, crop_width = size
    if crop_width > image_width or crop_height > image_height:
        msg = "Requested crop size {} is bigger than input size {}"
        raise ValueError(msg.format(size, (image_height, image_width)))

    tl = crop(img, 0, 0, crop_height, crop_width)
    tr = crop(img, 0, image_width - crop_width, crop_height, crop_width)
    bl = crop(img, image_height - crop_height, 0, crop_height, crop_width)
    br = crop(img, image_height - crop_height, image_width - crop_width, crop_height, crop_width)

    center = center_crop(img, [crop_height, crop_width])

    return tl, tr, bl, br, center


def ten_crop(img: Tensor, size: List[int], vertical_flip: bool = False) -> List[Tensor]:
    """Generate ten cropped images from the given image.
    Crop the given image into four corners and the central crop plus the
    flipped version of these (horizontal flipping is used by default).
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
        vertical_flip (bool): Use vertical flipping instead of horizontal

    Returns:
        tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
        Corresponding top left, top right, bottom left, bottom right and
        center crop and same for the flipped image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(ten_crop)
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and len(size) == 1:
        size = (size[0], size[0])

    if len(size) != 2:
        raise ValueError("Please provide only two dimensions (h, w) for size.")

    first_five = five_crop(img, size)

    if vertical_flip:
        img = vflip(img)
    else:
        img = hflip(img)

    second_five = five_crop(img, size)
    return first_five + second_five
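
# Illustrative sketch: the ten crops are commonly stacked into a single
# (10, C, H, W) batch so a model can score all of them in one forward pass.
# `_demo_ten_crop` is a hypothetical helper name.
def _demo_ten_crop() -> None:
    img = torch.rand(3, 64, 64)
    crops = ten_crop(img, [32, 32])
    batch = torch.stack(crops)
    assert tuple(batch.shape) == (10, 3, 32, 32)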


def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor:
    """Adjust brightness of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        brightness_factor (float): How much to adjust the brightness. Can be
            any non-negative number. 0 gives a black image, 1 gives the
            original image while 2 increases the brightness by a factor of 2.

    Returns:
        PIL Image or Tensor: Brightness adjusted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_brightness)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_brightness(img, brightness_factor)

    return F_t.adjust_brightness(img, brightness_factor)


def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor:
    """Adjust contrast of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non-negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.

    Returns:
        PIL Image or Tensor: Contrast adjusted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_contrast)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_contrast(img, contrast_factor)

    return F_t.adjust_contrast(img, contrast_factor)


def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor:
    """Adjust color saturation of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        saturation_factor (float): How much to adjust the saturation. 0 will
            give a black and white image, 1 will give the original image while
            2 will enhance the saturation by a factor of 2.

    Returns:
        PIL Image or Tensor: Saturation adjusted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_saturation)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_saturation(img, saturation_factor)

    return F_t.adjust_saturation(img, saturation_factor)


def adjust_hue(img: Tensor, hue_factor: float) -> Tensor:
    """Adjust hue of an image.

    The image hue is adjusted by converting the image to HSV and
    cyclically shifting the intensities in the hue channel (H).
    The image is then converted back to original image mode.

    `hue_factor` is the amount of shift in H channel and must be in the
    interval `[-0.5, 0.5]`.

    See `Hue`_ for more details.

    .. _Hue: https://en.wikipedia.org/wiki/Hue

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image mode "1", "I", "F" and modes with transparency (alpha channel) are not supported.
            Note: the pixel values of the input image have to be non-negative for conversion to HSV space;
            thus it does not work if you normalize your image to an interval with negative values,
            or use an interpolation that generates negative values before using this function.
        hue_factor (float): How much to shift the hue channel. Should be in
            [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
            HSV space in positive and negative direction respectively.
            0 means no shift. Therefore, both -0.5 and 0.5 will give an image
            with complementary colors while 0 gives the original image.

    Returns:
        PIL Image or Tensor: Hue adjusted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_hue)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_hue(img, hue_factor)

    return F_t.adjust_hue(img, hue_factor)


def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:
    r"""Perform gamma correction on an image.

    Also known as Power Law Transform. Intensities in RGB mode are adjusted
    based on the following equation:

    .. math::
        I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}

    See `Gamma Correction`_ for more details.

    .. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction

    Args:
        img (PIL Image or Tensor): PIL Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, modes with transparency (alpha channel) are not supported.
        gamma (float): Non negative real number, same as :math:`\gamma` in the equation.
            gamma larger than 1 makes the shadows darker,
            while gamma smaller than 1 makes dark regions lighter.
        gain (float): The constant multiplier.

    Returns:
        PIL Image or Tensor: Gamma correction adjusted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_gamma)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_gamma(img, gamma, gain)

    return F_t.adjust_gamma(img, gamma, gain)
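
# Worked example of the equation above, as a sketch: with gain = 1 and
# gamma = 2, intensity 128 maps to 255 * (128 / 255) ** 2, i.e. roughly 64
# (the exact integer may differ by one depending on rounding).
def _demo_adjust_gamma() -> None:
    img = torch.full((1, 1, 1), 128, dtype=torch.uint8)
    out = adjust_gamma(img, gamma=2.0)
    assert int(out.item()) in (64, 65)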


def _get_inverse_affine_matrix(
    center: List[float], angle: float, translate: List[float], scale: float, shear: List[float], inverted: bool = True
) -> List[float]:
    # Helper method to compute inverse matrix for affine transformation

    # Pillow requires inverse affine transformation matrix:
    # Affine matrix is : M = T * C * RotateScaleShear * C^-1
    #
    # where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
    #       C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
    #       RotateScaleShear is rotation with scale and shear matrix
    #
    #       RotateScaleShear(a, s, (sx, sy)) =
    #       = R(a) * S(s) * SHy(sy) * SHx(sx)
    #       = [ s*cos(a - sy)/cos(sy), s*(-cos(a - sy)*tan(sx)/cos(sy) - sin(a)), 0 ]
    #         [ s*sin(a - sy)/cos(sy), s*(-sin(a - sy)*tan(sx)/cos(sy) + cos(a)), 0 ]
    #         [ 0                    , 0                                        , 1 ]
    # where R is a rotation matrix, S is a scaling matrix, and SHx and SHy are the shears:
    # SHx(s) = [1, -tan(s)] and SHy(s) = [1      , 0]
    #          [0, 1      ]              [-tan(s), 1]
    #
    # Thus, the inverse is M^-1 = C * RotateScaleShear^-1 * C^-1 * T^-1

    rot = math.radians(angle)
    sx = math.radians(shear[0])
    sy = math.radians(shear[1])

    cx, cy = center
    tx, ty = translate

    # RSS without scaling
    a = math.cos(rot - sy) / math.cos(sy)
    b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot)
    c = math.sin(rot - sy) / math.cos(sy)
    d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot)

    if inverted:
        # Inverted rotation matrix with scale and shear
        # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
        matrix = [d, -b, 0.0, -c, a, 0.0]
        matrix = [x / scale for x in matrix]
        # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
        matrix[2] += matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty)
        matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty)
        # Apply center translation: C * RSS^-1 * C^-1 * T^-1
        matrix[2] += cx
        matrix[5] += cy
    else:
        matrix = [a, b, 0.0, c, d, 0.0]
        matrix = [x * scale for x in matrix]
        # Apply inverse of center translation: RSS * C^-1
        matrix[2] += matrix[0] * (-cx) + matrix[1] * (-cy)
        matrix[5] += matrix[3] * (-cx) + matrix[4] * (-cy)
        # Apply translation and center : T * C * RSS * C^-1
        matrix[2] += cx + tx
        matrix[5] += cy + ty

    return matrix
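
# Illustrative check: with zero angle, translation and shear and unit scale,
# the helper returns the 2x3 identity [1, 0, 0, 0, 1, 0] for any center.
# `_demo_identity_affine_matrix` is a hypothetical helper name.
def _demo_identity_affine_matrix() -> None:
    matrix = _get_inverse_affine_matrix([10.0, 20.0], 0.0, [0.0, 0.0], 1.0, [0.0, 0.0])
    expected = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0]
    assert all(abs(m - e) < 1e-9 for m, e in zip(matrix, expected))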


def rotate(
    img: Tensor,
    angle: float,
    interpolation: InterpolationMode = InterpolationMode.NEAREST,
    expand: bool = False,
    center: Optional[List[int]] = None,
    fill: Optional[List[float]] = None,
) -> Tensor:
    """Rotate the image by angle.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): image to be rotated.
        angle (number): rotation angle value in degrees, counter-clockwise.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
        expand (bool, optional): Optional expansion flag.
            If true, expands the output image to make it large enough to hold the entire rotated image.
            If false or omitted, make the output image the same size as the input image.
            Note that the expand flag assumes rotation around the center and no translation.
        center (sequence, optional): Optional center of rotation. Origin is the upper left corner.
            Default is the center of the image.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.

            .. note::
                In torchscript mode single int/float value is not supported, please use a sequence
                of length 1: ``[value, ]``.

    Returns:
        PIL Image or Tensor: Rotated image.

    .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters

    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(rotate)

    if not isinstance(angle, (int, float)):
        raise TypeError("Argument angle should be int or float")

    if center is not None and not isinstance(center, (list, tuple)):
        raise TypeError("Argument center should be a sequence")

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be an InterpolationMode")

    if not isinstance(img, torch.Tensor):
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.rotate(img, angle=angle, interpolation=pil_interpolation, expand=expand, center=center, fill=fill)

    center_f = [0.0, 0.0]
    if center is not None:
        _, height, width = get_dimensions(img)
        # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
        center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, [width, height])]

    # due to current incoherence of rotation angle direction between affine and rotate implementations
    # we need to set -angle.
    matrix = _get_inverse_affine_matrix(center_f, -angle, [0.0, 0.0], 1.0, [0.0, 0.0])
    return F_t.rotate(img, matrix=matrix, interpolation=interpolation.value, expand=expand, fill=fill)


vfdev's avatar
vfdev committed
1095
def affine(
1096
1097
1098
1099
1100
1101
1102
    img: Tensor,
    angle: float,
    translate: List[int],
    scale: float,
    shear: List[float],
    interpolation: InterpolationMode = InterpolationMode.NEAREST,
    fill: Optional[List[float]] = None,
1103
    center: Optional[List[int]] = None,
vfdev's avatar
vfdev committed
1104
1105
) -> Tensor:
    """Apply affine transformation on the image keeping image center invariant.
1106
    If the image is torch Tensor, it is expected
vfdev's avatar
vfdev committed
1107
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
1108
1109

    Args:
vfdev's avatar
vfdev committed
1110
        img (PIL Image or Tensor): image to transform.
1111
1112
        angle (number): rotation angle in degrees between -180 and 180, clockwise direction.
        translate (sequence of integers): horizontal and vertical translations (post-rotation translation)
1113
        scale (float): overall scale
1114
        shear (float or sequence): shear angle value in degrees between -180 to 180, clockwise direction.
1115
1116
            If a sequence is specified, the first value corresponds to a shear parallel to the x-axis, while
            the second value corresponds to a shear parallel to the y-axis.
1117
1118
1119
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
1120
1121
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.
1122
1123
1124
1125

            .. note::
                In torchscript mode single int/float value is not supported, please use a sequence
                of length 1: ``[value, ]``.
1126
1127
        center (sequence, optional): Optional center of rotation. Origin is the upper left corner.
            Default is the center of the image.
vfdev's avatar
vfdev committed
1128
1129
1130

    Returns:
        PIL Image or Tensor: Transformed image.
1131
    """
1132
1133
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(affine)
1134

vfdev's avatar
vfdev committed
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
    if not isinstance(angle, (int, float)):
        raise TypeError("Argument angle should be int or float")

    if not isinstance(translate, (list, tuple)):
        raise TypeError("Argument translate should be a sequence")

    if len(translate) != 2:
        raise ValueError("Argument translate should be a sequence of length 2")

    if scale <= 0.0:
        raise ValueError("Argument scale should be positive")

    if not isinstance(shear, (numbers.Number, (list, tuple))):
        raise TypeError("Shear should be either a single value or a sequence of two values")

1150
1151
    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be a InterpolationMode")
1152

vfdev's avatar
vfdev committed
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
    if isinstance(angle, int):
        angle = float(angle)

    if isinstance(translate, tuple):
        translate = list(translate)

    if isinstance(shear, numbers.Number):
        shear = [shear, 0.0]

    if isinstance(shear, tuple):
        shear = list(shear)

    if len(shear) == 1:
        shear = [shear[0], shear[0]]

    if len(shear) != 2:
1169
        raise ValueError(f"Shear should be a sequence containing two values. Got {shear}")

    if center is not None and not isinstance(center, (list, tuple)):
        raise TypeError("Argument center should be a sequence")

    _, height, width = get_dimensions(img)
    if not isinstance(img, torch.Tensor):
        # center = (width * 0.5 + 0.5, height * 0.5 + 0.5)
        # it is visually better to estimate the center without 0.5 offset
        # otherwise image rotated by 90 degrees is shifted vs output image of torch.rot90 or F_t.affine
        if center is None:
            center = [width * 0.5, height * 0.5]
        matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.affine(img, matrix=matrix, interpolation=pil_interpolation, fill=fill)

    center_f = [0.0, 0.0]
    if center is not None:
        # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
        center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, [width, height])]

    translate_f = [1.0 * t for t in translate]
    matrix = _get_inverse_affine_matrix(center_f, angle, translate_f, scale, shear)
    return F_t.affine(img, matrix=matrix, interpolation=interpolation.value, fill=fill)
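
# Usage sketch for affine() (illustrative only; shapes and parameter values
# below are assumptions, not canonical examples from this library):
#
#   >>> import torch
#   >>> x = torch.randint(0, 256, (3, 64, 64), dtype=torch.uint8)
#   >>> y = affine(x, angle=30.0, translate=[5, 10], scale=1.0, shear=[0.0, 0.0],
#   ...            interpolation=InterpolationMode.BILINEAR)
#   >>> y.shape  # spatial size is preserved; content is rotated and shifted
#   torch.Size([3, 64, 64])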


# to_grayscale() is a stand-alone functional that is never called from the
# transform classes; it appears to be kept only for backward compatibility
# and can be deprecated as we migrate to V2.
@torch.jit.unused
def to_grayscale(img, num_output_channels=1):
    """Convert PIL image of any mode (RGB, HSV, LAB, etc) to grayscale version of image.
    This transform does not support torch Tensor.

    Args:
        img (PIL Image): PIL Image to be converted to grayscale.
        num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default is 1.

    Returns:
        PIL Image: Grayscale version of the image.

        - if num_output_channels = 1 : returned image is single channel
        - if num_output_channels = 3 : returned image is 3 channel with r = g = b
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(to_grayscale)
    if isinstance(img, Image.Image):
        return F_pil.to_grayscale(img, num_output_channels)

    raise TypeError("Input should be PIL Image")


def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor:
    """Convert RGB image to grayscale version of image.
    If the image is torch Tensor, it is expected
    to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions.

    Note:
        This method supports only RGB images as input. For inputs in other color spaces,
        consider using :meth:`~torchvision.transforms.functional.to_grayscale` with PIL Image.

    Args:
        img (PIL Image or Tensor): RGB Image to be converted to grayscale.
        num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default is 1.

    Returns:
        PIL Image or Tensor: Grayscale version of the image.

        - if num_output_channels = 1 : returned image is single channel
        - if num_output_channels = 3 : returned image is 3 channel with r = g = b
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(rgb_to_grayscale)
    if not isinstance(img, torch.Tensor):
        return F_pil.to_grayscale(img, num_output_channels)

    return F_t.rgb_to_grayscale(img, num_output_channels)
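
# Usage sketch for rgb_to_grayscale() (illustrative; shapes and values are
# assumptions): with num_output_channels=3 the three output channels are
# identical (r = g = b).
#
#   >>> import torch
#   >>> x = torch.randint(0, 256, (3, 32, 32), dtype=torch.uint8)
#   >>> g = rgb_to_grayscale(x, num_output_channels=3)
#   >>> bool((g[0] == g[1]).all() and (g[1] == g[2]).all())
#   True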


def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool = False) -> Tensor:
    """Erase the input Tensor Image with given value.
    This transform does not support PIL Image.

    Args:
        img (Tensor Image): Tensor image of size (C, H, W) to be erased
        i (int): i in (i,j) i.e. coordinates of the upper left corner.
        j (int): j in (i,j) i.e. coordinates of the upper left corner.
        h (int): Height of the erased region.
        w (int): Width of the erased region.
        v: Erasing value.
        inplace (bool, optional): If True, erase the region in place rather than on a copy. Default is False.

    Returns:
        Tensor Image: Erased image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(erase)
    if not isinstance(img, torch.Tensor):
        raise TypeError(f"img should be Tensor Image. Got {type(img)}")

    return F_t.erase(img, i, j, h, w, v, inplace=inplace)
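
# Usage sketch for erase() (illustrative; shapes and values are assumptions):
# zero out an 8x8 region whose top-left corner is (i, j) = (4, 4).
#
#   >>> import torch
#   >>> x = torch.ones(3, 32, 32)
#   >>> v = torch.zeros(3, 8, 8)  # value tensor matching the erased region
#   >>> out = erase(x, i=4, j=4, h=8, w=8, v=v)
#   >>> float(out[:, 4:12, 4:12].sum())
#   0.0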


def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Tensor:
    """Performs Gaussian blurring on the image by given kernel.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): Image to be blurred
        kernel_size (sequence of ints or int): Gaussian kernel size. Can be a sequence of integers
            like ``(kx, ky)`` or a single integer for square kernels.

            .. note::
                In torchscript mode kernel_size as single int is not supported, use a sequence of
                length 1: ``[ksize, ]``.
        sigma (sequence of floats or float, optional): Gaussian kernel standard deviation. Can be a
            sequence of floats like ``(sigma_x, sigma_y)`` or a single float to define the
            same sigma in both X/Y directions. If None, then it is computed using
            ``kernel_size`` as ``sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8``.
            Default is None.

            .. note::
                In torchscript mode sigma as single float is
                not supported, use a sequence of length 1: ``[sigma, ]``.

    Returns:
        PIL Image or Tensor: Gaussian Blurred version of the image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(gaussian_blur)
    if not isinstance(kernel_size, (int, list, tuple)):
        raise TypeError(f"kernel_size should be int or a sequence of integers. Got {type(kernel_size)}")
    if isinstance(kernel_size, int):
        kernel_size = [kernel_size, kernel_size]
    if len(kernel_size) != 2:
        raise ValueError(f"If kernel_size is a sequence its length should be 2. Got {len(kernel_size)}")
    for ksize in kernel_size:
        if ksize % 2 == 0 or ksize < 0:
            raise ValueError(f"kernel_size should have odd and positive integers. Got {kernel_size}")

    if sigma is None:
        sigma = [ksize * 0.15 + 0.35 for ksize in kernel_size]

    if sigma is not None and not isinstance(sigma, (int, float, list, tuple)):
        raise TypeError(f"sigma should be either float or sequence of floats. Got {type(sigma)}")
    if isinstance(sigma, (int, float)):
        sigma = [float(sigma), float(sigma)]
    if isinstance(sigma, (list, tuple)) and len(sigma) == 1:
        sigma = [sigma[0], sigma[0]]
    if len(sigma) != 2:
        raise ValueError(f"If sigma is a sequence, its length should be 2. Got {len(sigma)}")
    for s in sigma:
        if s <= 0.0:
            raise ValueError(f"sigma should have positive values. Got {sigma}")

    t_img = img
    if not isinstance(img, torch.Tensor):
        if not F_pil._is_pil_image(img):
            raise TypeError(f"img should be PIL Image or Tensor. Got {type(img)}")

        t_img = pil_to_tensor(img)

    output = F_t.gaussian_blur(t_img, kernel_size, sigma)

    if not isinstance(img, torch.Tensor):
        output = to_pil_image(output, mode=img.mode)
    return output
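
# Usage sketch for gaussian_blur() (illustrative; shapes and sigma values are
# assumptions): blur a batched float image with a 5x5 kernel; the output
# shape matches the input.
#
#   >>> import torch
#   >>> x = torch.rand(1, 3, 64, 64)
#   >>> gaussian_blur(x, kernel_size=[5, 5], sigma=[1.5, 1.5]).shape
#   torch.Size([1, 3, 64, 64])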


def invert(img: Tensor) -> Tensor:
    """Invert the colors of an RGB/grayscale image.

    Args:
        img (PIL Image or Tensor): Image to have its colors inverted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Returns:
        PIL Image or Tensor: Color inverted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(invert)
    if not isinstance(img, torch.Tensor):
        return F_pil.invert(img)

    return F_t.invert(img)
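
# Usage sketch for invert() (illustrative; values are assumptions): for a
# uint8 input each pixel becomes 255 - pixel.
#
#   >>> import torch
#   >>> x = torch.tensor([[[0, 255]]], dtype=torch.uint8)  # 1 x 1 x 2 image
#   >>> invert(x).tolist()
#   [[[255, 0]]]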


def posterize(img: Tensor, bits: int) -> Tensor:
    """Posterize an image by reducing the number of bits for each color channel.

    Args:
        img (PIL Image or Tensor): Image to have its colors posterized.
            If img is torch Tensor, it should be of type torch.uint8, and
            it is expected to be in [..., 1 or 3, H, W] format, where ... means
            it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".
        bits (int): The number of bits to keep for each channel (0-8).

    Returns:
        PIL Image or Tensor: Posterized image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(posterize)
    if not (0 <= bits <= 8):
        raise ValueError(f"The number if bits should be between 0 and 8. Got {bits}")

    if not isinstance(img, torch.Tensor):
        return F_pil.posterize(img, bits)

    return F_t.posterize(img, bits)
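
# Usage sketch for posterize() (illustrative; values are assumptions):
# keeping 2 bits masks 200 (0b11001000) down to 192 (0b11000000).
#
#   >>> import torch
#   >>> x = torch.tensor([[[200]]], dtype=torch.uint8)
#   >>> posterize(x, bits=2).tolist()
#   [[[192]]]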


def solarize(img: Tensor, threshold: float) -> Tensor:
    """Solarize an RGB/grayscale image by inverting all pixel values above a threshold.

    Args:
        img (PIL Image or Tensor): Image to have its colors inverted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".
        threshold (float): All pixels equal to or above this value are inverted.

    Returns:
        PIL Image or Tensor: Solarized image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(solarize)
    if not isinstance(img, torch.Tensor):
        return F_pil.solarize(img, threshold)

    return F_t.solarize(img, threshold)
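
# Usage sketch for solarize() (illustrative; values are assumptions): pixels
# at or above the threshold are inverted, the rest are left unchanged.
#
#   >>> import torch
#   >>> x = torch.tensor([[[10, 200]]], dtype=torch.uint8)
#   >>> solarize(x, threshold=128).tolist()  # 200 -> 255 - 200 = 55
#   [[[10, 55]]]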


def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor:
    """Adjust the sharpness of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        sharpness_factor (float): How much to adjust the sharpness. Can be
            any non-negative number. 0 gives a blurred image, 1 gives the
            original image while 2 increases the sharpness by a factor of 2.

    Returns:
        PIL Image or Tensor: Sharpness adjusted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_sharpness)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_sharpness(img, sharpness_factor)

    return F_t.adjust_sharpness(img, sharpness_factor)
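
# Usage sketch for adjust_sharpness() (illustrative; shapes and values are
# assumptions): a factor of 1.0 returns the input unchanged.
#
#   >>> import torch
#   >>> x = torch.rand(3, 32, 32)
#   >>> bool(torch.allclose(adjust_sharpness(x, 1.0), x))
#   True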


def autocontrast(img: Tensor) -> Tensor:
    """Maximize contrast of an image by remapping its
    pixels per channel so that the lowest becomes black and the lightest
    becomes white.

    Args:
        img (PIL Image or Tensor): Image on which autocontrast is applied.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Returns:
        PIL Image or Tensor: An image that was autocontrasted.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(autocontrast)
    if not isinstance(img, torch.Tensor):
        return F_pil.autocontrast(img)

    return F_t.autocontrast(img)
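
# Usage sketch for autocontrast() (illustrative; values are assumptions): a
# float image spanning [0.25, 0.75] is stretched to the full [0, 1] range.
#
#   >>> import torch
#   >>> x = torch.tensor([[[0.25, 0.75]]])  # 1 x 1 x 2 image
#   >>> autocontrast(x).tolist()
#   [[[0.0, 1.0]]]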


def equalize(img: Tensor) -> Tensor:
    """Equalize the histogram of an image by applying
    a non-linear mapping to the input in order to create a uniform
    distribution of grayscale values in the output.

    Args:
        img (PIL Image or Tensor): Image on which equalize is applied.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            The tensor dtype must be ``torch.uint8`` and values are expected to be in ``[0, 255]``.
            If img is PIL Image, it is expected to be in mode "P", "L" or "RGB".

    Returns:
        PIL Image or Tensor: An image that was equalized.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(equalize)
    if not isinstance(img, torch.Tensor):
        return F_pil.equalize(img)

    return F_t.equalize(img)
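
# Usage sketch for equalize() (illustrative; shapes and values are
# assumptions): tensor input must be uint8; shape and dtype are preserved.
#
#   >>> import torch
#   >>> x = torch.randint(0, 256, (3, 32, 32), dtype=torch.uint8)
#   >>> out = equalize(x)
#   >>> out.shape == x.shape and out.dtype == torch.uint8
#   True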


def elastic_transform(
    img: Tensor,
    displacement: Tensor,
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    fill: Optional[List[float]] = None,
) -> Tensor:
    """Transform a tensor image with elastic transformations.
    Given alpha and sigma, it will generate displacement
    vectors for all pixels based on random offsets. Alpha controls the strength
    and sigma controls the smoothness of the displacements.
    The displacements are added to an identity grid and the resulting grid is
    used to grid_sample from the image.

    Applications:
        Randomly transforms the morphology of objects in images and produces a
        see-through-water-like effect.

    Args:
        img (PIL Image or Tensor): Image on which elastic_transform is applied.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "P", "L" or "RGB".
        displacement (Tensor): The displacement field. Expected shape is [1, H, W, 2].
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`.
            Default is ``InterpolationMode.BILINEAR``.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        fill (number or str or tuple): Pixel fill value for constant fill. Default is 0.
            If a tuple of length 3, it is used to fill R, G, B channels respectively.
            This value is only used when the padding_mode is constant.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(elastic_transform)
    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if not isinstance(displacement, torch.Tensor):
        raise TypeError("Argument displacement should be a Tensor")

    t_img = img
    if not isinstance(img, torch.Tensor):
        if not F_pil._is_pil_image(img):
            raise TypeError(f"img should be PIL Image or Tensor. Got {type(img)}")
        t_img = pil_to_tensor(img)

    shape = t_img.shape
    shape = (1,) + shape[-2:] + (2,)
    if shape != displacement.shape:
        raise ValueError(f"Argument displacement shape should be {shape}, but given {displacement.shape}")

    # TODO: if image shape is [N1, N2, ..., C, H, W] and
    # displacement is [1, H, W, 2] we need to reshape the input image
    # so that grid_sampler takes the internal code path for 4D inputs

    output = F_t.elastic_transform(
        t_img,
        displacement,
        interpolation=interpolation.value,
        fill=fill,
    )

    if not isinstance(img, torch.Tensor):
        output = to_pil_image(output, mode=img.mode)
    return output
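
# Usage sketch for elastic_transform() (illustrative; shapes and tolerance
# are assumptions): a zero displacement field is an identity warp, so the
# output matches the input up to interpolation error.
#
#   >>> import torch
#   >>> x = torch.rand(3, 32, 32)
#   >>> disp = torch.zeros(1, 32, 32, 2)  # [1, H, W, 2] displacement field
#   >>> out = elastic_transform(x, disp)
#   >>> bool(torch.allclose(out, x, atol=1e-5))
#   True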