"vscode:/vscode.git/clone" did not exist on "feda9b11b3a7a1ad9aa22d9149e23081945eed26"
functional.py 65.9 KB
Newer Older
import math
import numbers
import warnings
from enum import Enum
from typing import Any, List, Optional, Tuple, Union

import numpy as np
import torch
from PIL import Image
from torch import Tensor

try:
    import accimage
except ImportError:
    accimage = None

from ..utils import _log_api_usage_once
from . import _functional_pil as F_pil, _functional_tensor as F_t


class InterpolationMode(Enum):
    """Interpolation modes
    Available interpolation methods are ``nearest``, ``nearest-exact``, ``bilinear``, ``bicubic``, ``box``, ``hamming``,
    and ``lanczos``.
    """

    NEAREST = "nearest"
    NEAREST_EXACT = "nearest-exact"
    BILINEAR = "bilinear"
    BICUBIC = "bicubic"
    # For PIL compatibility
    BOX = "box"
    HAMMING = "hamming"
    LANCZOS = "lanczos"


# TODO: Once torchscript supports Enums with staticmethod
# this can be put into InterpolationMode as staticmethod
def _interpolation_modes_from_int(i: int) -> InterpolationMode:
    inverse_modes_mapping = {
        0: InterpolationMode.NEAREST,
        2: InterpolationMode.BILINEAR,
        3: InterpolationMode.BICUBIC,
        4: InterpolationMode.BOX,
        5: InterpolationMode.HAMMING,
        1: InterpolationMode.LANCZOS,
    }
    return inverse_modes_mapping[i]


pil_modes_mapping = {
    InterpolationMode.NEAREST: 0,
    InterpolationMode.BILINEAR: 2,
    InterpolationMode.BICUBIC: 3,
    InterpolationMode.NEAREST_EXACT: 0,
    InterpolationMode.BOX: 4,
    InterpolationMode.HAMMING: 5,
    InterpolationMode.LANCZOS: 1,
}
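# Illustrative sketch (not part of the public API): the integers above are the
# legacy Pillow filter codes, so for the original PIL filters the two mappings
# round-trip (``InterpolationMode.NEAREST_EXACT`` is the exception, since PIL has
# no separate code for it):
#
#   >>> _interpolation_modes_from_int(pil_modes_mapping[InterpolationMode.BICUBIC])
#   <InterpolationMode.BICUBIC: 'bicubic'>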

_is_pil_image = F_pil._is_pil_image


def get_dimensions(img: Tensor) -> List[int]:
    """Returns the dimensions of an image as [channels, height, width].

    Args:
        img (PIL Image or Tensor): The image to be checked.

    Returns:
        List[int]: The image dimensions.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(get_dimensions)
    if isinstance(img, torch.Tensor):
        return F_t.get_dimensions(img)

    return F_pil.get_dimensions(img)
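# Usage sketch (illustrative only): works for both backends and always returns
# [channels, height, width].
#
#   >>> get_dimensions(torch.zeros(3, 32, 64))       # [3, 32, 64]
#   >>> get_dimensions(Image.new("RGB", (64, 32)))   # [3, 32, 64] (PIL size is (width, height))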


def get_image_size(img: Tensor) -> List[int]:
    """Returns the size of an image as [width, height].

    Args:
        img (PIL Image or Tensor): The image to be checked.

    Returns:
        List[int]: The image size.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(get_image_size)
    if isinstance(img, torch.Tensor):
        return F_t.get_image_size(img)

    return F_pil.get_image_size(img)


def get_image_num_channels(img: Tensor) -> int:
    """Returns the number of channels of an image.

    Args:
        img (PIL Image or Tensor): The image to be checked.

    Returns:
        int: The number of channels.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(get_image_num_channels)
    if isinstance(img, torch.Tensor):
        return F_t.get_image_num_channels(img)

    return F_pil.get_image_num_channels(img)


@torch.jit.unused
def _is_numpy(img: Any) -> bool:
    return isinstance(img, np.ndarray)


@torch.jit.unused
def _is_numpy_image(img: Any) -> bool:
    return img.ndim in {2, 3}


def to_tensor(pic) -> Tensor:
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
    This function does not support torchscript.

    See :class:`~torchvision.transforms.ToTensor` for more details.

    Args:
        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(to_tensor)
    if not (F_pil._is_pil_image(pic) or _is_numpy(pic)):
        raise TypeError(f"pic should be PIL Image or ndarray. Got {type(pic)}")

    if _is_numpy(pic) and not _is_numpy_image(pic):
        raise ValueError(f"pic should be 2/3 dimensional. Got {pic.ndim} dimensions.")

    default_float_dtype = torch.get_default_dtype()

    if isinstance(pic, np.ndarray):
        # handle numpy array
        if pic.ndim == 2:
            pic = pic[:, :, None]

        img = torch.from_numpy(pic.transpose((2, 0, 1))).contiguous()
        # backward compatibility
        if isinstance(img, torch.ByteTensor):
            return img.to(dtype=default_float_dtype).div(255)
        else:
            return img

    if accimage is not None and isinstance(pic, accimage.Image):
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
        pic.copyto(nppic)
        return torch.from_numpy(nppic).to(dtype=default_float_dtype)

    # handle PIL Image
    mode_to_nptype = {"I": np.int32, "I;16": np.int16, "F": np.float32}
    img = torch.from_numpy(np.array(pic, mode_to_nptype.get(pic.mode, np.uint8), copy=True))

    if pic.mode == "1":
        img = 255 * img
    img = img.view(pic.size[1], pic.size[0], F_pil.get_image_num_channels(pic))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1)).contiguous()
    if isinstance(img, torch.ByteTensor):
        return img.to(dtype=default_float_dtype).div(255)
    else:
        return img
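# Example (illustrative): a uint8 HWC array becomes a float CHW tensor in [0.0, 1.0].
#
#   >>> arr = np.zeros((32, 64, 3), dtype=np.uint8)
#   >>> t = to_tensor(arr)
#   >>> t.shape, t.dtype      # (torch.Size([3, 32, 64]), torch.float32)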


def pil_to_tensor(pic: Any) -> Tensor:
    """Convert a ``PIL Image`` to a tensor of the same type.
    This function does not support torchscript.

    See :class:`~torchvision.transforms.PILToTensor` for more details.

    .. note::

        A deep copy of the underlying array is performed.

    Args:
        pic (PIL Image): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(pil_to_tensor)
    if not F_pil._is_pil_image(pic):
        raise TypeError(f"pic should be PIL Image. Got {type(pic)}")

    if accimage is not None and isinstance(pic, accimage.Image):
        # accimage format is always uint8 internally, so always return uint8 here
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.uint8)
        pic.copyto(nppic)
        return torch.as_tensor(nppic)

    # handle PIL Image
    img = torch.as_tensor(np.array(pic, copy=True))
    img = img.view(pic.size[1], pic.size[0], F_pil.get_image_num_channels(pic))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1))
    return img
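# Unlike ``to_tensor``, ``pil_to_tensor`` keeps the original integer dtype and does
# not rescale values (illustrative sketch):
#
#   >>> img = Image.new("L", (64, 32))
#   >>> t = pil_to_tensor(img)
#   >>> t.shape, t.dtype      # (torch.Size([1, 32, 64]), torch.uint8)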


def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor:
    """Convert a tensor image to the given ``dtype`` and scale the values accordingly.
    This function does not support PIL Image.

    Args:
        image (torch.Tensor): Image to be converted
        dtype (torch.dtype): Desired data type of the output

    Returns:
        Tensor: Converted image

    .. note::

        When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
        If converted back and forth, this mismatch has no effect.

    Raises:
        RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
            well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
            overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
            of the integer ``dtype``.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(convert_image_dtype)
    if not isinstance(image, torch.Tensor):
        raise TypeError("Input img should be Tensor Image")

    return F_t.convert_image_dtype(image, dtype)
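# Example (illustrative): converting uint8 -> float32 maps [0, 255] onto [0.0, 1.0],
# and converting back restores the original values.
#
#   >>> x = torch.tensor([0, 128, 255], dtype=torch.uint8).reshape(1, 1, 3)
#   >>> f = convert_image_dtype(x, torch.float32)   # values ~ [0.0000, 0.5020, 1.0000]
#   >>> convert_image_dtype(f, torch.uint8)         # back to [0, 128, 255]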


def to_pil_image(pic, mode=None):
    """Convert a tensor or an ndarray to PIL Image. This function does not support torchscript.

    See :class:`~torchvision.transforms.ToPILImage` for more details.

    Args:
        pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).

    .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes

    Returns:
        PIL Image: Image converted to PIL Image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(to_pil_image)

    if not (isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
        raise TypeError(f"pic should be Tensor or ndarray. Got {type(pic)}.")

    elif isinstance(pic, torch.Tensor):
        if pic.ndimension() not in {2, 3}:
            raise ValueError(f"pic should be 2/3 dimensional. Got {pic.ndimension()} dimensions.")

        elif pic.ndimension() == 2:
            # if 2D image, add channel dimension (CHW)
            pic = pic.unsqueeze(0)

        # check number of channels
        if pic.shape[-3] > 4:
            raise ValueError(f"pic should not have > 4 channels. Got {pic.shape[-3]} channels.")

    elif isinstance(pic, np.ndarray):
        if pic.ndim not in {2, 3}:
            raise ValueError(f"pic should be 2/3 dimensional. Got {pic.ndim} dimensions.")

        elif pic.ndim == 2:
            # if 2D image, add channel dimension (HWC)
            pic = np.expand_dims(pic, 2)

        # check number of channels
        if pic.shape[-1] > 4:
            raise ValueError(f"pic should not have > 4 channels. Got {pic.shape[-1]} channels.")

    npimg = pic
    if isinstance(pic, torch.Tensor):
        if pic.is_floating_point() and mode != "F":
            pic = pic.mul(255).byte()
        npimg = np.transpose(pic.cpu().numpy(), (1, 2, 0))

    if not isinstance(npimg, np.ndarray):
        raise TypeError(f"Input pic must be a torch.Tensor or NumPy ndarray, not {type(npimg)}")

    if npimg.shape[2] == 1:
        expected_mode = None
        npimg = npimg[:, :, 0]
        if npimg.dtype == np.uint8:
            expected_mode = "L"
        elif npimg.dtype == np.int16:
            expected_mode = "I;16"
        elif npimg.dtype == np.int32:
            expected_mode = "I"
        elif npimg.dtype == np.float32:
            expected_mode = "F"
        if mode is not None and mode != expected_mode:
            raise ValueError(f"Incorrect mode ({mode}) supplied for input type {npimg.dtype}. Should be {expected_mode}")
        mode = expected_mode

    elif npimg.shape[2] == 2:
        permitted_2_channel_modes = ["LA"]
        if mode is not None and mode not in permitted_2_channel_modes:
            raise ValueError(f"Only modes {permitted_2_channel_modes} are supported for 2D inputs")

        if mode is None and npimg.dtype == np.uint8:
            mode = "LA"

    elif npimg.shape[2] == 4:
        permitted_4_channel_modes = ["RGBA", "CMYK", "RGBX"]
        if mode is not None and mode not in permitted_4_channel_modes:
            raise ValueError(f"Only modes {permitted_4_channel_modes} are supported for 4D inputs")

        if mode is None and npimg.dtype == np.uint8:
            mode = "RGBA"
    else:
        permitted_3_channel_modes = ["RGB", "YCbCr", "HSV"]
        if mode is not None and mode not in permitted_3_channel_modes:
            raise ValueError(f"Only modes {permitted_3_channel_modes} are supported for 3D inputs")
        if mode is None and npimg.dtype == np.uint8:
            mode = "RGB"

    if mode is None:
        raise TypeError(f"Input type {npimg.dtype} is not supported")

    return Image.fromarray(npimg, mode=mode)
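# Round-trip sketch (illustrative): a float CHW tensor in [0.0, 1.0] is scaled back
# to uint8 before being handed to PIL.
#
#   >>> t = torch.rand(3, 32, 64)
#   >>> to_pil_image(t).size    # (64, 32) -- PIL reports (width, height)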


def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool = False) -> Tensor:
    """Normalize a float tensor image with mean and standard deviation.
    This transform does not support PIL Image.

    .. note::
        This transform acts out of place by default, i.e., it does not mutate the input tensor.

    See :class:`~torchvision.transforms.Normalize` for more details.

    Args:
        tensor (Tensor): Float tensor image of size (C, H, W) or (B, C, H, W) to be normalized.
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace(bool,optional): Bool to make this operation inplace.

    Returns:
        Tensor: Normalized Tensor image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(normalize)
    if not isinstance(tensor, torch.Tensor):
        raise TypeError(f"img should be Tensor Image. Got {type(tensor)}")

    return F_t.normalize(tensor, mean=mean, std=std, inplace=inplace)
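# Worked example (illustrative): with mean 0.5 and std 0.5 per channel, values in
# [0.0, 1.0] are mapped to [-1.0, 1.0] via (x - mean) / std.
#
#   >>> t = torch.full((3, 2, 2), 0.75)
#   >>> normalize(t, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])   # all values 0.5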


def _compute_resized_output_size(
    image_size: Tuple[int, int], size: List[int], max_size: Optional[int] = None
) -> List[int]:
    if len(size) == 1:  # specified size only for the smallest edge
        h, w = image_size
        short, long = (w, h) if w <= h else (h, w)
        requested_new_short = size if isinstance(size, int) else size[0]

        new_short, new_long = requested_new_short, int(requested_new_short * long / short)

        if max_size is not None:
            if max_size <= requested_new_short:
                raise ValueError(
                    f"max_size = {max_size} must be strictly greater than the requested "
                    f"size for the smaller edge size = {size}"
                )
            if new_long > max_size:
                new_short, new_long = int(max_size * new_short / new_long), max_size

        new_w, new_h = (new_short, new_long) if w <= h else (new_long, new_short)
    else:  # specified both h and w
        new_w, new_h = size[1], size[0]
    return [new_h, new_w]
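# Worked example (illustrative): for a 480x640 (HxW) image, size=[256] maps the
# shorter edge (height) to 256 and keeps the aspect ratio; max_size then caps the
# longer edge.
#
#   >>> _compute_resized_output_size((480, 640), [256])                  # [256, 341]
#   >>> _compute_resized_output_size((480, 640), [256], max_size=300)    # [225, 300]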


def resize(
    img: Tensor,
    size: List[int],
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    max_size: Optional[int] = None,
    antialias: Optional[bool] = True,
) -> Tensor:
    r"""Resize the input image to the given size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): Image to be resized.
        size (sequence or int): Desired output size. If size is a sequence like
            (h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number maintaining
            the aspect ratio, i.e., if height > width, then image will be rescaled to
            :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`.

            .. note::
                In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`.
            Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
            ``InterpolationMode.NEAREST_EXACT``, ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are
            supported.
            The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
        max_size (int, optional): The maximum allowed for the longer edge of
            the resized image. If the longer edge of the image is greater
            than ``max_size`` after being resized according to ``size``,
            ``size`` will be overruled so that the longer edge is equal to
            ``max_size``.
            As a result, the smaller edge may be shorter than ``size``. This
            is only supported if ``size`` is an int (or a sequence of length
            1 in torchscript mode).
        antialias (bool, optional): Whether to apply antialiasing.
            It only affects **tensors** with bilinear or bicubic modes and it is
            ignored otherwise: on PIL images, antialiasing is always applied on
            bilinear or bicubic modes; on other modes (for PIL images and
            tensors), antialiasing makes no sense and this parameter is ignored.
            Possible values are:

            - ``True`` (default): will apply antialiasing for bilinear or bicubic modes.
              Other modes aren't affected. This is probably what you want to use.
            - ``False``: will not apply antialiasing for tensors on any mode. PIL
              images are still antialiased on bilinear or bicubic modes, because
              PIL doesn't support disabling it.
            - ``None``: equivalent to ``False`` for tensors and ``True`` for
              PIL images. This value exists for legacy reasons and you probably
              don't want to use it unless you really know what you are doing.

            The default value changed from ``None`` to ``True`` in
            v0.17, for the PIL and Tensor backends to be consistent.

    Returns:
        PIL Image or Tensor: Resized image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(resize)

    if isinstance(interpolation, int):
        interpolation = _interpolation_modes_from_int(interpolation)
    elif not isinstance(interpolation, InterpolationMode):
        raise TypeError(
            "Argument interpolation should be a InterpolationMode or a corresponding Pillow integer constant"
        )

    if isinstance(size, (list, tuple)):
        if len(size) not in [1, 2]:
            raise ValueError(
                f"Size must be an int or a 1 or 2 element tuple/list, not a {len(size)} element tuple/list"
            )
        if max_size is not None and len(size) != 1:
            raise ValueError(
                "max_size should only be passed if size specifies the length of the smaller edge, "
                "i.e. size should be an int or a sequence of length 1 in torchscript mode."
            )

    _, image_height, image_width = get_dimensions(img)
    if isinstance(size, int):
        size = [size]
    output_size = _compute_resized_output_size((image_height, image_width), size, max_size)

    if [image_height, image_width] == output_size:
        return img

    if not isinstance(img, torch.Tensor):
        if antialias is False:
            warnings.warn("Anti-alias option is always applied for PIL Image input. Argument antialias is ignored.")
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.resize(img, size=output_size, interpolation=pil_interpolation)

    return F_t.resize(img, size=output_size, interpolation=interpolation.value, antialias=antialias)
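# Usage sketch (illustrative): mirrors the helper above for real inputs.
#
#   >>> img = torch.rand(3, 480, 640)
#   >>> resize(img, [256]).shape                  # torch.Size([3, 256, 341])
#   >>> resize(img, [256], max_size=300).shape    # torch.Size([3, 225, 300])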


def pad(img: Tensor, padding: List[int], fill: Union[int, float] = 0, padding_mode: str = "constant") -> Tensor:
    r"""Pad the given image on all sides with the given "pad" value.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means at most 2 leading dimensions for mode reflect and symmetric,
    at most 3 leading dimensions for mode edge,
    and an arbitrary number of leading dimensions for mode constant

    Args:
        img (PIL Image or Tensor): Image to be padded.
        padding (int or sequence): Padding on each border. If a single int is provided this
            is used to pad all borders. If sequence of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a sequence of length 4 is provided
            this is the padding for the left, top, right and bottom borders respectively.

            .. note::
                In torchscript mode padding as single int is not supported, use a sequence of
                length 1: ``[padding, ]``.
        fill (number or tuple): Pixel fill value for constant fill. Default is 0.
            If a tuple of length 3, it is used to fill R, G, B channels respectively.
            This value is only used when the padding_mode is constant.
            Only number is supported for torch Tensor.
            Only int or tuple value is supported for PIL Image.
        padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
            Default is constant.

            - constant: pads with a constant value, this value is specified with fill

            - edge: pads with the last value at the edge of the image.
              If the input is a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2

            - reflect: pads with reflection of image without repeating the last value on the edge.
              For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
              will result in [3, 2, 1, 2, 3, 4, 3, 2]

            - symmetric: pads with reflection of image repeating the last value on the edge.
              For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
              will result in [2, 1, 1, 2, 3, 4, 4, 3]

    Returns:
        PIL Image or Tensor: Padded image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(pad)
    if not isinstance(img, torch.Tensor):
        return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)

    return F_t.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)
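# Worked example (illustrative): ``padding=[1, 2]`` pads 1 pixel left/right and
# 2 pixels top/bottom, so a 3x32x64 tensor becomes 3x36x66.
#
#   >>> pad(torch.zeros(3, 32, 64), [1, 2]).shape   # torch.Size([3, 36, 66])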


def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:
    """Crop the given image at specified location and output size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
    If image size is smaller than output size along any edge, image is padded with 0 and then cropped.

    Args:
        img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.

    Returns:
        PIL Image or Tensor: Cropped image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(crop)

    if not isinstance(img, torch.Tensor):
        return F_pil.crop(img, top, left, height, width)

    return F_t.crop(img, top, left, height, width)


def center_crop(img: Tensor, output_size: List[int]) -> Tensor:
    """Crops the given image at the center.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
    If image size is smaller than output size along any edge, image is padded with 0 and then center cropped.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        output_size (sequence or int): (height, width) of the crop box. If int or sequence with single int,
            it is used for both directions.

    Returns:
        PIL Image or Tensor: Cropped image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(center_crop)
    if isinstance(output_size, numbers.Number):
        output_size = (int(output_size), int(output_size))
    elif isinstance(output_size, (tuple, list)) and len(output_size) == 1:
        output_size = (output_size[0], output_size[0])

    _, image_height, image_width = get_dimensions(img)
    crop_height, crop_width = output_size

    if crop_width > image_width or crop_height > image_height:
        padding_ltrb = [
            (crop_width - image_width) // 2 if crop_width > image_width else 0,
            (crop_height - image_height) // 2 if crop_height > image_height else 0,
            (crop_width - image_width + 1) // 2 if crop_width > image_width else 0,
            (crop_height - image_height + 1) // 2 if crop_height > image_height else 0,
        ]
        img = pad(img, padding_ltrb, fill=0)  # PIL uses fill value 0
        _, image_height, image_width = get_dimensions(img)
        if crop_width == image_width and crop_height == image_height:
            return img

    crop_top = int(round((image_height - crop_height) / 2.0))
    crop_left = int(round((image_width - crop_width) / 2.0))
    return crop(img, crop_top, crop_left, crop_height, crop_width)
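# Usage sketch (illustrative): a center crop larger than the input pads with zeros
# first, so the requested output size is always honored.
#
#   >>> center_crop(torch.rand(3, 32, 64), [24]).shape   # torch.Size([3, 24, 24])
#   >>> center_crop(torch.rand(3, 16, 16), [32]).shape   # torch.Size([3, 32, 32])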


def resized_crop(
    img: Tensor,
    top: int,
    left: int,
    height: int,
    width: int,
    size: List[int],
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    antialias: Optional[bool] = True,
) -> Tensor:
    """Crop the given image and resize it to desired size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.

    Args:
        img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.
        size (sequence or int): Desired output size. Same semantics as ``resize``.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`.
            Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
            ``InterpolationMode.NEAREST_EXACT``, ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are
            supported.
            The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
        antialias (bool, optional): Whether to apply antialiasing.
            It only affects **tensors** with bilinear or bicubic modes and it is
            ignored otherwise: on PIL images, antialiasing is always applied on
            bilinear or bicubic modes; on other modes (for PIL images and
            tensors), antialiasing makes no sense and this parameter is ignored.
            Possible values are:

            - ``True`` (default): will apply antialiasing for bilinear or bicubic modes.
              Other modes aren't affected. This is probably what you want to use.
            - ``False``: will not apply antialiasing for tensors on any mode. PIL
              images are still antialiased on bilinear or bicubic modes, because
              PIL doesn't support disabling it.
            - ``None``: equivalent to ``False`` for tensors and ``True`` for
              PIL images. This value exists for legacy reasons and you probably
              don't want to use it unless you really know what you are doing.

            The default value changed from ``None`` to ``True`` in
            v0.17, for the PIL and Tensor backends to be consistent.
    Returns:
        PIL Image or Tensor: Cropped image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(resized_crop)
    img = crop(img, top, left, height, width)
    img = resize(img, size, interpolation, antialias=antialias)
    return img


def hflip(img: Tensor) -> Tensor:
    """Horizontally flip the given image.

    Args:
        img (PIL Image or Tensor): Image to be flipped. If img
            is a Tensor, it is expected to be in [..., H, W] format,
            where ... means it can have an arbitrary number of leading
            dimensions.

    Returns:
        PIL Image or Tensor: Horizontally flipped image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(hflip)
    if not isinstance(img, torch.Tensor):
        return F_pil.hflip(img)

    return F_t.hflip(img)


def _get_perspective_coeffs(startpoints: List[List[int]], endpoints: List[List[int]]) -> List[float]:
    """Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms.

    In Perspective Transform each pixel (x, y) in the original image gets transformed as,
     (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) )

    Args:
        startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
        endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.

    Returns:
        octuple (a, b, c, d, e, f, g, h) for transforming each pixel.
    """
    a_matrix = torch.zeros(2 * len(startpoints), 8, dtype=torch.float)

    for i, (p1, p2) in enumerate(zip(endpoints, startpoints)):
        a_matrix[2 * i, :] = torch.tensor([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
        a_matrix[2 * i + 1, :] = torch.tensor([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])

    b_matrix = torch.tensor(startpoints, dtype=torch.float).view(8)
    res = torch.linalg.lstsq(a_matrix, b_matrix, driver="gels").solution

    output: List[float] = res.tolist()
    return output


def perspective(
    img: Tensor,
    startpoints: List[List[int]],
    endpoints: List[List[int]],
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    fill: Optional[List[float]] = None,
) -> Tensor:
    """Perform perspective transform of the given image.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): Image to be transformed.
        startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
        endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.

            .. note::
                In torchscript mode single int/float value is not supported, please use a sequence
                of length 1: ``[value, ]``.

    Returns:
        PIL Image or Tensor: transformed Image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(perspective)

    coeffs = _get_perspective_coeffs(startpoints, endpoints)

    if isinstance(interpolation, int):
        interpolation = _interpolation_modes_from_int(interpolation)
    elif not isinstance(interpolation, InterpolationMode):
        raise TypeError(
            "Argument interpolation should be a InterpolationMode or a corresponding Pillow integer constant"
        )

    if not isinstance(img, torch.Tensor):
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.perspective(img, coeffs, interpolation=pil_interpolation, fill=fill)

    return F_t.perspective(img, coeffs, interpolation=interpolation.value, fill=fill)
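# Usage sketch (illustrative): map the four corners of a 32x32 image to a slightly
# "tilted" quadrilateral; the output keeps the input shape.
#
#   >>> start = [[0, 0], [31, 0], [31, 31], [0, 31]]
#   >>> end = [[4, 0], [27, 4], [31, 27], [0, 31]]
#   >>> perspective(torch.rand(1, 3, 32, 32), start, end).shape   # torch.Size([1, 3, 32, 32])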


def vflip(img: Tensor) -> Tensor:
    """Vertically flip the given image.

    Args:
        img (PIL Image or Tensor): Image to be flipped. If img
            is a Tensor, it is expected to be in [..., H, W] format,
            where ... means it can have an arbitrary number of leading
            dimensions.

    Returns:
        PIL Image or Tensor: Vertically flipped image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(vflip)
    if not isinstance(img, torch.Tensor):
        return F_pil.vflip(img)

    return F_t.vflip(img)


def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
    """Crop the given image into four corners and the central crop.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).

    Returns:
       tuple: tuple (tl, tr, bl, br, center)
       Corresponding top left, top right, bottom left, bottom right and center crop.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(five_crop)
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and len(size) == 1:
        size = (size[0], size[0])

    if len(size) != 2:
        raise ValueError("Please provide only two dimensions (h, w) for size.")

    _, image_height, image_width = get_dimensions(img)
    crop_height, crop_width = size
    if crop_width > image_width or crop_height > image_height:
        msg = "Requested crop size {} is bigger than input size {}"
        raise ValueError(msg.format(size, (image_height, image_width)))

    tl = crop(img, 0, 0, crop_height, crop_width)
    tr = crop(img, 0, image_width - crop_width, crop_height, crop_width)
    bl = crop(img, image_height - crop_height, 0, crop_height, crop_width)
    br = crop(img, image_height - crop_height, image_width - crop_width, crop_height, crop_width)

    center = center_crop(img, [crop_height, crop_width])

    return tl, tr, bl, br, center


def ten_crop(
    img: Tensor, size: List[int], vertical_flip: bool = False
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
    """Generate ten cropped images from the given image.
    Crop the given image into four corners and the central crop plus the
    flipped version of these (horizontal flipping is used by default).
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
        vertical_flip (bool): Use vertical flipping instead of horizontal

    Returns:
        tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
        Corresponding top left, top right, bottom left, bottom right and
        center crop and same for the flipped image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(ten_crop)
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and len(size) == 1:
        size = (size[0], size[0])

    if len(size) != 2:
        raise ValueError("Please provide only two dimensions (h, w) for size.")

    first_five = five_crop(img, size)

    if vertical_flip:
        img = vflip(img)
    else:
        img = hflip(img)

    second_five = five_crop(img, size)
    return first_five + second_five
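# Usage sketch (illustrative): ``five_crop`` returns 5 tensors, ``ten_crop`` adds
# their flipped counterparts.
#
#   >>> len(five_crop(torch.rand(3, 64, 64), [32, 32]))   # 5
#   >>> len(ten_crop(torch.rand(3, 64, 64), [32, 32]))    # 10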


def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor:
    """Adjust brightness of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        brightness_factor (float): How much to adjust the brightness. Can be
            any non-negative number. 0 gives a black image, 1 gives the
            original image while 2 increases the brightness by a factor of 2.

    Returns:
        PIL Image or Tensor: Brightness adjusted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_brightness)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_brightness(img, brightness_factor)

    return F_t.adjust_brightness(img, brightness_factor)


def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor:
    """Adjust contrast of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non-negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.

    Returns:
        PIL Image or Tensor: Contrast adjusted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_contrast)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_contrast(img, contrast_factor)

    return F_t.adjust_contrast(img, contrast_factor)


def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor:
    """Adjust color saturation of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        saturation_factor (float): How much to adjust the saturation. 0 will
            give a black and white image, 1 will give the original image while
            2 will enhance the saturation by a factor of 2.

    Returns:
        PIL Image or Tensor: Saturation adjusted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_saturation)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_saturation(img, saturation_factor)

    return F_t.adjust_saturation(img, saturation_factor)


def adjust_hue(img: Tensor, hue_factor: float) -> Tensor:
    """Adjust hue of an image.

    The image hue is adjusted by converting the image to HSV and
    cyclically shifting the intensities in the hue channel (H).
    The image is then converted back to original image mode.

    `hue_factor` is the amount of shift in H channel and must be in the
    interval `[-0.5, 0.5]`.

    See `Hue`_ for more details.

    .. _Hue: https://en.wikipedia.org/wiki/Hue

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, modes "1", "I", "F" and modes with transparency (alpha channel) are not supported.
            Note: the pixel values of the input image have to be non-negative for conversion to HSV space;
            thus it does not work if you normalize your image to an interval with negative values,
            or use an interpolation that generates negative values before using this function.
        hue_factor (float): How much to shift the hue channel. Should be in
            [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
            HSV space in positive and negative direction respectively.
            0 means no shift. Therefore, both -0.5 and 0.5 will give an image
            with complementary colors while 0 gives the original image.

    Returns:
        PIL Image or Tensor: Hue adjusted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_hue)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_hue(img, hue_factor)

    return F_t.adjust_hue(img, hue_factor)


def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:
    r"""Perform gamma correction on an image.

    Also known as Power Law Transform. Intensities in RGB mode are adjusted
    based on the following equation:

    .. math::
        I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}

    See `Gamma Correction`_ for more details.

    .. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, modes with transparency (alpha channel) are not supported.
        gamma (float): Non negative real number, same as :math:`\gamma` in the equation.
            gamma larger than 1 makes the shadows darker,
            while gamma smaller than 1 makes dark regions lighter.
        gain (float): The constant multiplier.
    Returns:
        PIL Image or Tensor: Gamma correction adjusted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_gamma)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_gamma(img, gamma, gain)

    return F_t.adjust_gamma(img, gamma, gain)
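# Worked example (illustrative): gamma > 1 darkens, gamma < 1 brightens. For a float
# pixel at 0.5 with gamma=2.0 and gain=1, the output is 0.5 ** 2.0 = 0.25.
#
#   >>> adjust_gamma(torch.full((3, 2, 2), 0.5), gamma=2.0)   # all values 0.25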


def _get_inverse_affine_matrix(
    center: List[float], angle: float, translate: List[float], scale: float, shear: List[float], inverted: bool = True
) -> List[float]:
    # Helper method to compute inverse matrix for affine transformation

    # Pillow requires inverse affine transformation matrix:
    # Affine matrix is : M = T * C * RotateScaleShear * C^-1
    #
    # where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
    #       C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
    #       RotateScaleShear is rotation with scale and shear matrix
    #
    #       RotateScaleShear(a, s, (sx, sy)) =
    #       = R(a) * S(s) * SHy(sy) * SHx(sx)
    #       = [ s*cos(a - sy)/cos(sy), s*(-cos(a - sy)*tan(sx)/cos(sy) - sin(a)), 0 ]
    #         [ s*sin(a - sy)/cos(sy), s*(-sin(a - sy)*tan(sx)/cos(sy) + cos(a)), 0 ]
    #         [ 0                    , 0                                        , 1 ]
    # where R is a rotation matrix, S is a scaling matrix, and SHx and SHy are the shears:
    # SHx(s) = [1, -tan(s)] and SHy(s) = [1      , 0]
    #          [0, 1      ]              [-tan(s), 1]
    #
    # Thus, the inverse is M^-1 = C * RotateScaleShear^-1 * C^-1 * T^-1

    rot = math.radians(angle)
    sx = math.radians(shear[0])
    sy = math.radians(shear[1])

    cx, cy = center
    tx, ty = translate

    # RSS without scaling
    a = math.cos(rot - sy) / math.cos(sy)
    b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot)
    c = math.sin(rot - sy) / math.cos(sy)
    d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot)

    if inverted:
        # Inverted rotation matrix with scale and shear
        # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
        matrix = [d, -b, 0.0, -c, a, 0.0]
        matrix = [x / scale for x in matrix]
        # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
        matrix[2] += matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty)
        matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty)
        # Apply center translation: C * RSS^-1 * C^-1 * T^-1
        matrix[2] += cx
        matrix[5] += cy
    else:
        matrix = [a, b, 0.0, c, d, 0.0]
        matrix = [x * scale for x in matrix]
        # Apply inverse of center translation: RSS * C^-1
        matrix[2] += matrix[0] * (-cx) + matrix[1] * (-cy)
        matrix[5] += matrix[3] * (-cx) + matrix[4] * (-cy)
        # Apply translation and center : T * C * RSS * C^-1
        matrix[2] += cx + tx
        matrix[5] += cy + ty

    return matrix
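# Sanity check (illustrative): with zero angle, translation and shear, and unit
# scale, the helper returns the identity affine matrix (up to signed zeros).
#
#   >>> _get_inverse_affine_matrix([0.0, 0.0], 0.0, [0.0, 0.0], 1.0, [0.0, 0.0])
#   # -> [1.0, 0.0, 0.0, -0.0, 1.0, 0.0]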


def rotate(
    img: Tensor,
    angle: float,
    interpolation: InterpolationMode = InterpolationMode.NEAREST,
    expand: bool = False,
    center: Optional[List[int]] = None,
    fill: Optional[List[float]] = None,
) -> Tensor:
    """Rotate the image by angle.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): image to be rotated.
        angle (number): rotation angle value in degrees, counter-clockwise.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
        expand (bool, optional): Optional expansion flag.
            If true, expands the output image to make it large enough to hold the entire rotated image.
            If false or omitted, make the output image the same size as the input image.
            Note that the expand flag assumes rotation around the center and no translation.
        center (sequence, optional): Optional center of rotation. Origin is the upper left corner.
            Default is the center of the image.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.

            .. note::
                In torchscript mode single int/float value is not supported, please use a sequence
                of length 1: ``[value, ]``.

    Returns:
        PIL Image or Tensor: Rotated image.

    .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters

    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(rotate)

    if isinstance(interpolation, int):
        interpolation = _interpolation_modes_from_int(interpolation)
    elif not isinstance(interpolation, InterpolationMode):
        raise TypeError(
            "Argument interpolation should be a InterpolationMode or a corresponding Pillow integer constant"
        )

    if not isinstance(angle, (int, float)):
        raise TypeError("Argument angle should be int or float")

    if center is not None and not isinstance(center, (list, tuple)):
        raise TypeError("Argument center should be a sequence")

    if not isinstance(img, torch.Tensor):
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.rotate(img, angle=angle, interpolation=pil_interpolation, expand=expand, center=center, fill=fill)

    center_f = [0.0, 0.0]
    if center is not None:
        _, height, width = get_dimensions(img)
        # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
        center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, [width, height])]

    # due to current incoherence of rotation angle direction between affine and rotate implementations
    # we need to set -angle.
    matrix = _get_inverse_affine_matrix(center_f, -angle, [0.0, 0.0], 1.0, [0.0, 0.0])
    return F_t.rotate(img, matrix=matrix, interpolation=interpolation.value, expand=expand, fill=fill)
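# Usage sketch (illustrative): ``expand=True`` grows the canvas so the rotated
# content is not clipped; for a 45-degree rotation of a 32x32 image the output is
# roughly 32*sqrt(2) ~ 46 pixels per side (the exact value is backend-dependent).
#
#   >>> rotate(torch.rand(3, 32, 32), 45.0).shape                 # torch.Size([3, 32, 32])
#   >>> rotate(torch.rand(3, 32, 32), 45.0, expand=True).shape    # about torch.Size([3, 46, 46])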


vfdev's avatar
vfdev committed
1134
def affine(
1135
1136
1137
1138
1139
1140
1141
    img: Tensor,
    angle: float,
    translate: List[int],
    scale: float,
    shear: List[float],
    interpolation: InterpolationMode = InterpolationMode.NEAREST,
    fill: Optional[List[float]] = None,
1142
    center: Optional[List[int]] = None,
vfdev's avatar
vfdev committed
1143
1144
) -> Tensor:
    """Apply affine transformation on the image keeping image center invariant.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): Image to transform.
        angle (number): rotation angle in degrees between -180 and 180, clockwise direction.
        translate (sequence of integers): horizontal and vertical translations (post-rotation translation)
        scale (float): overall scale
        shear (float or sequence): shear angle value in degrees between -180 and 180, clockwise direction.
            If a sequence is specified, the first value corresponds to a shear parallel to the x-axis, while
            the second value corresponds to a shear parallel to the y-axis.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands.

            .. note::
                In torchscript mode single int/float value is not supported, please use a sequence
                of length 1: ``[value, ]``.
        center (sequence, optional): Optional center of rotation. Origin is the upper left corner.
            Default is the center of the image.

    Returns:
        PIL Image or Tensor: Transformed image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(affine)

    if isinstance(interpolation, int):
        interpolation = _interpolation_modes_from_int(interpolation)
    elif not isinstance(interpolation, InterpolationMode):
        raise TypeError(
            "Argument interpolation should be an InterpolationMode or a corresponding Pillow integer constant"
        )

    if not isinstance(angle, (int, float)):
        raise TypeError("Argument angle should be int or float")

    if not isinstance(translate, (list, tuple)):
        raise TypeError("Argument translate should be a sequence")

    if len(translate) != 2:
        raise ValueError("Argument translate should be a sequence of length 2")

    if scale <= 0.0:
        raise ValueError("Argument scale should be positive")

    if not isinstance(shear, (numbers.Number, (list, tuple))):
        raise TypeError("Shear should be either a single value or a sequence of two values")

    if isinstance(angle, int):
        angle = float(angle)

    if isinstance(translate, tuple):
        translate = list(translate)

    if isinstance(shear, numbers.Number):
        shear = [shear, 0.0]

    if isinstance(shear, tuple):
        shear = list(shear)

    if len(shear) == 1:
        shear = [shear[0], shear[0]]

    if len(shear) != 2:
        raise ValueError(f"Shear should be a sequence containing two values. Got {shear}")

    if center is not None and not isinstance(center, (list, tuple)):
        raise TypeError("Argument center should be a sequence")

    _, height, width = get_dimensions(img)
    if not isinstance(img, torch.Tensor):
        # center = (width * 0.5 + 0.5, height * 0.5 + 0.5)
        # it is visually better to estimate the center without 0.5 offset
        # otherwise image rotated by 90 degrees is shifted vs output image of torch.rot90 or F_t.affine
        if center is None:
            center = [width * 0.5, height * 0.5]
        matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.affine(img, matrix=matrix, interpolation=pil_interpolation, fill=fill)

    center_f = [0.0, 0.0]
    if center is not None:
        # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
        center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, [width, height])]

    translate_f = [1.0 * t for t in translate]
    matrix = _get_inverse_affine_matrix(center_f, angle, translate_f, scale, shear)
    return F_t.affine(img, matrix=matrix, interpolation=interpolation.value, fill=fill)
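
# Example (an illustrative sketch; shapes and parameter values are assumptions):
#
#     img = torch.randint(0, 256, (3, 64, 64), dtype=torch.uint8)
#     out = affine(img, angle=15.0, translate=[5, -3], scale=1.2, shear=[10.0, 0.0])
#     # rotate by 15 degrees, translate by (5, -3) pixels, zoom in by 20%, and
#     # shear 10 degrees parallel to the x-axis, all about the image center.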


# Looks like to_grayscale() is a stand-alone functional that is never called
# from the transform classes. Perhaps it's still here for BC? I can't be
# bothered to dig.
@torch.jit.unused
def to_grayscale(img, num_output_channels=1):
    """Convert a PIL image of any mode (RGB, HSV, LAB, etc.) to a grayscale version of the image.
    This transform does not support torch Tensor.

    Args:
        img (PIL Image): PIL Image to be converted to grayscale.
        num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default is 1.

    Returns:
        PIL Image: Grayscale version of the image.

        - if num_output_channels = 1 : returned image is single channel
        - if num_output_channels = 3 : returned image is 3 channel with r = g = b
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(to_grayscale)
    if isinstance(img, Image.Image):
        return F_pil.to_grayscale(img, num_output_channels)

    raise TypeError("Input should be PIL Image")
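
# Example (an illustrative sketch; the input image is an assumption):
#
#     pil_img = Image.new("RGB", (64, 64), color=(255, 0, 0))
#     gray = to_grayscale(pil_img, num_output_channels=3)
#     # `gray` is an RGB image whose three channels are identical.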


def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor:
    """Convert RGB image to grayscale version of image.
1268
1269
    If the image is torch Tensor, it is expected
    to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions
1270
1271
1272

    Note:
        Please, note that this method supports only RGB images as input. For inputs in other color spaces,
1273
        please, consider using :meth:`~torchvision.transforms.functional.to_grayscale` with PIL Image.
1274
1275
1276
1277
1278
1279
1280
1281

    Args:
        img (PIL Image or Tensor): RGB Image to be converted to grayscale.
        num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default, 1.

    Returns:
        PIL Image or Tensor: Grayscale version of the image.

1282
1283
        - if num_output_channels = 1 : returned image is single channel
        - if num_output_channels = 3 : returned image is 3 channel with r = g = b
1284
    """
1285
1286
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(rgb_to_grayscale)
1287
1288
1289
1290
    if not isinstance(img, torch.Tensor):
        return F_pil.to_grayscale(img, num_output_channels)

    return F_t.rgb_to_grayscale(img, num_output_channels)
1291
1292
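
# Example (an illustrative sketch; the tensor shape is an assumption):
#
#     img = torch.rand(3, 64, 64)
#     gray = rgb_to_grayscale(img)  # shape [1, 64, 64]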


def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool = False) -> Tensor:
    """Erase the input Tensor Image with the given value.
    This transform does not support PIL Image.

    Args:
        img (Tensor Image): Tensor image of size (C, H, W) to be erased.
        i (int): i in (i, j), i.e. the row coordinate of the upper left corner.
        j (int): j in (i, j), i.e. the column coordinate of the upper left corner.
        h (int): Height of the erased region.
        w (int): Width of the erased region.
        v: Erasing value.
        inplace (bool, optional): If True, perform the operation in place. Default is False.

    Returns:
        Tensor Image: Erased image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(erase)
    if not isinstance(img, torch.Tensor):
        raise TypeError(f"img should be Tensor Image. Got {type(img)}")

    return F_t.erase(img, i, j, h, w, v, inplace=inplace)
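
# Example (an illustrative sketch; coordinates and fill value are assumptions):
#
#     img = torch.rand(3, 64, 64)
#     # Zero out an 8x8 patch whose upper left corner is at row 10, column 20.
#     out = erase(img, i=10, j=20, h=8, w=8, v=torch.zeros(3, 8, 8))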


def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Tensor:
    """Performs Gaussian blurring on the image with the given kernel.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means at most one leading dimension.

    Args:
        img (PIL Image or Tensor): Image to be blurred.
        kernel_size (sequence of ints or int): Gaussian kernel size. Can be a sequence of integers
            like ``(kx, ky)`` or a single integer for square kernels.

            .. note::
                In torchscript mode kernel_size as single int is not supported, use a sequence of
                length 1: ``[ksize, ]``.
        sigma (sequence of floats or float, optional): Gaussian kernel standard deviation. Can be a
            sequence of floats like ``(sigma_x, sigma_y)`` or a single float to define the
            same sigma in both X/Y directions. If None, then it is computed using
            ``kernel_size`` as ``sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8``.
            Default is None.

            .. note::
                In torchscript mode sigma as single float is
                not supported, use a sequence of length 1: ``[sigma, ]``.

    Returns:
        PIL Image or Tensor: Gaussian blurred version of the image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(gaussian_blur)
    if not isinstance(kernel_size, (int, list, tuple)):
        raise TypeError(f"kernel_size should be int or a sequence of integers. Got {type(kernel_size)}")
    if isinstance(kernel_size, int):
        kernel_size = [kernel_size, kernel_size]
    if len(kernel_size) != 2:
        raise ValueError(f"If kernel_size is a sequence its length should be 2. Got {len(kernel_size)}")
    for ksize in kernel_size:
        if ksize % 2 == 0 or ksize < 0:
            raise ValueError(f"kernel_size should consist of odd and positive integers. Got {kernel_size}")

    if sigma is None:
        sigma = [ksize * 0.15 + 0.35 for ksize in kernel_size]

    if sigma is not None and not isinstance(sigma, (int, float, list, tuple)):
        raise TypeError(f"sigma should be either float or sequence of floats. Got {type(sigma)}")
    if isinstance(sigma, (int, float)):
        sigma = [float(sigma), float(sigma)]
    if isinstance(sigma, (list, tuple)) and len(sigma) == 1:
        sigma = [sigma[0], sigma[0]]
    if len(sigma) != 2:
        raise ValueError(f"If sigma is a sequence, its length should be 2. Got {len(sigma)}")
    for s in sigma:
        if s <= 0.0:
            raise ValueError(f"sigma should have positive values. Got {sigma}")

    t_img = img
    if not isinstance(img, torch.Tensor):
        if not F_pil._is_pil_image(img):
            raise TypeError(f"img should be PIL Image or Tensor. Got {type(img)}")

        t_img = pil_to_tensor(img)

    output = F_t.gaussian_blur(t_img, kernel_size, sigma)

    if not isinstance(img, torch.Tensor):
        output = to_pil_image(output, mode=img.mode)
    return output
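
# Example (an illustrative sketch; kernel size and sigma are assumptions):
#
#     img = torch.rand(3, 64, 64)
#     out = gaussian_blur(img, kernel_size=[5, 5], sigma=[1.5, 1.5])
#     # Omitting sigma derives it from kernel_size via the formula in the docstring:
#     out_default = gaussian_blur(img, kernel_size=[5, 5])  # sigma = 5 * 0.15 + 0.35 = 1.1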


def invert(img: Tensor) -> Tensor:
    """Invert the colors of an RGB/grayscale image.

    Args:
        img (PIL Image or Tensor): Image to have its colors inverted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Returns:
        PIL Image or Tensor: Color inverted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(invert)
    if not isinstance(img, torch.Tensor):
        return F_pil.invert(img)

    return F_t.invert(img)
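
# Example (an illustrative sketch; the tensor shape is an assumption):
#
#     img = torch.rand(3, 64, 64)  # float images are inverted against 1.0
#     out = invert(img)            # equivalent to 1.0 - img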


def posterize(img: Tensor, bits: int) -> Tensor:
    """Posterize an image by reducing the number of bits for each color channel.

    Args:
        img (PIL Image or Tensor): Image to have its colors posterized.
            If img is torch Tensor, it should be of type torch.uint8, and
            it is expected to be in [..., 1 or 3, H, W] format, where ... means
            it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".
        bits (int): The number of bits to keep for each channel (0-8).

    Returns:
        PIL Image or Tensor: Posterized image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(posterize)
    if not (0 <= bits <= 8):
        raise ValueError(f"The number of bits should be between 0 and 8. Got {bits}")

    if not isinstance(img, torch.Tensor):
        return F_pil.posterize(img, bits)

    return F_t.posterize(img, bits)
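
# Example (an illustrative sketch; the tensor shape and bit count are assumptions):
#
#     img = torch.randint(0, 256, (3, 64, 64), dtype=torch.uint8)
#     out = posterize(img, bits=3)  # keep only the 3 most significant bits per channel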


def solarize(img: Tensor, threshold: float) -> Tensor:
    """Solarize an RGB/grayscale image by inverting all pixel values above a threshold.

    Args:
        img (PIL Image or Tensor): Image to have its colors inverted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".
        threshold (float): All pixels equal to or above this value are inverted.

    Returns:
        PIL Image or Tensor: Solarized image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(solarize)
    if not isinstance(img, torch.Tensor):
        return F_pil.solarize(img, threshold)

    return F_t.solarize(img, threshold)
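
# Example (an illustrative sketch; the threshold is an assumption):
#
#     img = torch.randint(0, 256, (3, 64, 64), dtype=torch.uint8)
#     out = solarize(img, threshold=128)  # pixels >= 128 are replaced by 255 - pixel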


def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor:
    """Adjust the sharpness of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        sharpness_factor (float): How much to adjust the sharpness. Can be
            any non-negative number. 0 gives a blurred image, 1 gives the
            original image, while 2 increases the sharpness by a factor of 2.

    Returns:
        PIL Image or Tensor: Sharpness adjusted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_sharpness)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_sharpness(img, sharpness_factor)

    return F_t.adjust_sharpness(img, sharpness_factor)
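
# Example (an illustrative sketch; the factor values are assumptions):
#
#     img = torch.rand(3, 64, 64)
#     blurred = adjust_sharpness(img, sharpness_factor=0.0)
#     sharpened = adjust_sharpness(img, sharpness_factor=2.0)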


def autocontrast(img: Tensor) -> Tensor:
    """Maximize contrast of an image by remapping its
    pixels per channel so that the lowest becomes black and the lightest
    becomes white.

    Args:
        img (PIL Image or Tensor): Image on which autocontrast is applied.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Returns:
        PIL Image or Tensor: An image that was autocontrasted.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(autocontrast)
    if not isinstance(img, torch.Tensor):
        return F_pil.autocontrast(img)

    return F_t.autocontrast(img)
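
# Example (an illustrative sketch; the value range is an assumption):
#
#     # A low-contrast image with values in [64, 191] ...
#     img = torch.randint(64, 192, (3, 64, 64), dtype=torch.uint8)
#     out = autocontrast(img)  # ... is stretched to the full [0, 255] range.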


def equalize(img: Tensor) -> Tensor:
    """Equalize the histogram of an image by applying
    a non-linear mapping to the input in order to create a uniform
    distribution of grayscale values in the output.

    Args:
        img (PIL Image or Tensor): Image on which equalize is applied.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            The tensor dtype must be ``torch.uint8`` and values are expected to be in ``[0, 255]``.
            If img is PIL Image, it is expected to be in mode "P", "L" or "RGB".

    Returns:
        PIL Image or Tensor: An image that was equalized.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(equalize)
    if not isinstance(img, torch.Tensor):
        return F_pil.equalize(img)

    return F_t.equalize(img)
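
# Example (an illustrative sketch; note the uint8 requirement for tensor inputs):
#
#     img = torch.randint(0, 256, (3, 64, 64), dtype=torch.uint8)
#     out = equalize(img)  # per-channel histogram equalization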


def elastic_transform(
    img: Tensor,
    displacement: Tensor,
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    fill: Optional[List[float]] = None,
) -> Tensor:
    """Transform a tensor image with elastic transformations.
    Given alpha and sigma, it will generate displacement
    vectors for all pixels based on random offsets. Alpha controls the strength
    and sigma controls the smoothness of the displacements.
    The displacements are added to an identity grid and the resulting grid is
    used to grid_sample from the image.

    Applications:
        Randomly transforms the morphology of objects in images and produces a
        see-through-water-like effect.

    Args:
        img (PIL Image or Tensor): Image on which elastic_transform is applied.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "P", "L" or "RGB".
        displacement (Tensor): The displacement field. Expected shape is [1, H, W, 2].
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`.
            Default is ``InterpolationMode.BILINEAR``.
            The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
        fill (number or str or tuple): Pixel fill value for the area outside the
            transformed image. Default is 0.
            If a tuple of length 3, it is used to fill R, G, B channels respectively.

    Returns:
        PIL Image or Tensor: Transformed image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(elastic_transform)
    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if not isinstance(displacement, torch.Tensor):
        raise TypeError("Argument displacement should be a Tensor")

    t_img = img
    if not isinstance(img, torch.Tensor):
        if not F_pil._is_pil_image(img):
            raise TypeError(f"img should be PIL Image or Tensor. Got {type(img)}")
        t_img = pil_to_tensor(img)

    shape = t_img.shape
    shape = (1,) + shape[-2:] + (2,)
    if shape != displacement.shape:
        raise ValueError(f"Argument displacement shape should be {shape}, but given {displacement.shape}")

    # TODO: if image shape is [N1, N2, ..., C, H, W] and
    # displacement is [1, H, W, 2] we need to reshape the input image
    # so that grid_sampler takes the internal code path for 4D input

    output = F_t.elastic_transform(
        t_img,
        displacement,
        interpolation=interpolation.value,
        fill=fill,
    )

    if not isinstance(img, torch.Tensor):
        output = to_pil_image(output, mode=img.mode)
    return output
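
# Example (an illustrative sketch; the displacement field below is an assumption,
# since the ElasticTransform transform class normally generates it from alpha/sigma):
#
#     img = torch.rand(3, 64, 64)
#     displacement = torch.randn(1, 64, 64, 2) * 0.05  # small random offsets
#     out = elastic_transform(img, displacement)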