import math
import numbers
import sys
import warnings
from enum import Enum
from typing import Any, List, Optional, Tuple, Union

import numpy as np
import torch
from PIL import Image
from PIL.Image import Image as PILImage
from torch import Tensor

try:
    import accimage
except ImportError:
    accimage = None

from ..utils import _log_api_usage_once
from . import _functional_pil as F_pil, _functional_tensor as F_t


class InterpolationMode(Enum):
    """Interpolation modes
    Available interpolation methods are ``nearest``, ``nearest-exact``, ``bilinear``, ``bicubic``, ``box``, ``hamming``,
    and ``lanczos``.
    """

    NEAREST = "nearest"
    NEAREST_EXACT = "nearest-exact"
    BILINEAR = "bilinear"
    BICUBIC = "bicubic"
    # For PIL compatibility
    BOX = "box"
    HAMMING = "hamming"
    LANCZOS = "lanczos"


# TODO: Once torchscript supports Enums with staticmethod
# this can be put into InterpolationMode as staticmethod
def _interpolation_modes_from_int(i: int) -> InterpolationMode:
    inverse_modes_mapping = {
        0: InterpolationMode.NEAREST,
        2: InterpolationMode.BILINEAR,
        3: InterpolationMode.BICUBIC,
        4: InterpolationMode.BOX,
        5: InterpolationMode.HAMMING,
        1: InterpolationMode.LANCZOS,
    }
    return inverse_modes_mapping[i]


pil_modes_mapping = {
    InterpolationMode.NEAREST: 0,
    InterpolationMode.BILINEAR: 2,
    InterpolationMode.BICUBIC: 3,
    InterpolationMode.NEAREST_EXACT: 0,
    InterpolationMode.BOX: 4,
    InterpolationMode.HAMMING: 5,
    InterpolationMode.LANCZOS: 1,
}
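
# For example, pil_modes_mapping[_interpolation_modes_from_int(2)] == 2: the table
# inverts _interpolation_modes_from_int for the interpolation modes Pillow supports.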

_is_pil_image = F_pil._is_pil_image


def get_dimensions(img: Tensor) -> List[int]:
    """Returns the dimensions of an image as [channels, height, width].

    Args:
        img (PIL Image or Tensor): The image to be checked.

    Returns:
        List[int]: The image dimensions.
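
    Example (illustrative sketch, using a random CHW tensor as the image):
        >>> get_dimensions(torch.rand(3, 32, 64))
        [3, 32, 64]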
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(get_dimensions)
    if isinstance(img, torch.Tensor):
        return F_t.get_dimensions(img)

    return F_pil.get_dimensions(img)


def get_image_size(img: Tensor) -> List[int]:
    """Returns the size of an image as [width, height].

    Args:
        img (PIL Image or Tensor): The image to be checked.

    Returns:
        List[int]: The image size.
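
    Example (illustrative sketch; note the [width, height] ordering):
        >>> get_image_size(torch.rand(3, 32, 64))
        [64, 32]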
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(get_image_size)
    if isinstance(img, torch.Tensor):
        return F_t.get_image_size(img)

    return F_pil.get_image_size(img)


def get_image_num_channels(img: Tensor) -> int:
    """Returns the number of channels of an image.

    Args:
        img (PIL Image or Tensor): The image to be checked.

    Returns:
        int: The number of channels.
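
    Example (illustrative sketch):
        >>> get_image_num_channels(torch.rand(3, 32, 64))
        3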
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(get_image_num_channels)
    if isinstance(img, torch.Tensor):
        return F_t.get_image_num_channels(img)

    return F_pil.get_image_num_channels(img)


@torch.jit.unused
def _is_numpy(img: Any) -> bool:
    return isinstance(img, np.ndarray)


@torch.jit.unused
def _is_numpy_image(img: Any) -> bool:
    return img.ndim in {2, 3}


def to_tensor(pic: Union[PILImage, np.ndarray]) -> Tensor:
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
    This function does not support torchscript.

    See :class:`~torchvision.transforms.ToTensor` for more details.

    Args:
        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
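
    Example (illustrative sketch; a uint8 HWC array becomes a float CHW tensor in [0, 1]):
        >>> arr = np.zeros((32, 64, 3), dtype=np.uint8)
        >>> t = to_tensor(arr)
        >>> t.shape, t.dtype
        (torch.Size([3, 32, 64]), torch.float32)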
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(to_tensor)
    if not (F_pil._is_pil_image(pic) or _is_numpy(pic)):
        raise TypeError(f"pic should be PIL Image or ndarray. Got {type(pic)}")

    if _is_numpy(pic) and not _is_numpy_image(pic):
        raise ValueError(f"pic should be 2/3 dimensional. Got {pic.ndim} dimensions.")

    default_float_dtype = torch.get_default_dtype()

    if isinstance(pic, np.ndarray):
        # handle numpy array
        if pic.ndim == 2:
            pic = pic[:, :, None]

        img = torch.from_numpy(pic.transpose((2, 0, 1))).contiguous()
        # backward compatibility
        if isinstance(img, torch.ByteTensor):
            return img.to(dtype=default_float_dtype).div(255)
        else:
            return img

    if accimage is not None and isinstance(pic, accimage.Image):
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
        pic.copyto(nppic)
        return torch.from_numpy(nppic).to(dtype=default_float_dtype)

    # handle PIL Image
    mode_to_nptype = {"I": np.int32, "I;16" if sys.byteorder == "little" else "I;16B": np.int16, "F": np.float32}
    img = torch.from_numpy(np.array(pic, mode_to_nptype.get(pic.mode, np.uint8), copy=True))

    if pic.mode == "1":
        img = 255 * img
    img = img.view(pic.size[1], pic.size[0], F_pil.get_image_num_channels(pic))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1)).contiguous()
    if isinstance(img, torch.ByteTensor):
        return img.to(dtype=default_float_dtype).div(255)
    else:
        return img


def pil_to_tensor(pic: Any) -> Tensor:
    """Convert a ``PIL Image`` to a tensor of the same type.
    This function does not support torchscript.

    See :class:`~torchvision.transforms.PILToTensor` for more details.

    .. note::

        A deep copy of the underlying array is performed.

    Args:
        pic (PIL Image): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
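
    Example (illustrative sketch; unlike ``to_tensor``, values are not rescaled):
        >>> pic = Image.new("RGB", (64, 32))
        >>> pil_to_tensor(pic).shape
        torch.Size([3, 32, 64])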
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(pil_to_tensor)
    if not F_pil._is_pil_image(pic):
        raise TypeError(f"pic should be PIL Image. Got {type(pic)}")

    if accimage is not None and isinstance(pic, accimage.Image):
        # accimage format is always uint8 internally, so always return uint8 here
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.uint8)
        pic.copyto(nppic)
        return torch.as_tensor(nppic)

    # handle PIL Image
    img = torch.as_tensor(np.array(pic, copy=True))
    img = img.view(pic.size[1], pic.size[0], F_pil.get_image_num_channels(pic))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1))
    return img


def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor:
    """Convert a tensor image to the given ``dtype`` and scale the values accordingly.
    This function does not support PIL Image.

    Args:
        image (torch.Tensor): Image to be converted
        dtype (torch.dtype): Desired data type of the output

    Returns:
        Tensor: Converted image

    .. note::

        When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
        If converted back and forth, this mismatch has no effect.

    Raises:
        RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
            well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
            overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
            of the integer ``dtype``.
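
    Example (illustrative sketch; uint8 values are rescaled into [0, 1]):
        >>> img = torch.randint(0, 256, (3, 4, 4), dtype=torch.uint8)
        >>> bool(convert_image_dtype(img, torch.float32).max() <= 1.0)
        True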
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(convert_image_dtype)
    if not isinstance(image, torch.Tensor):
        raise TypeError("Input img should be Tensor Image")

    return F_t.convert_image_dtype(image, dtype)


def to_pil_image(pic, mode=None):
    """Convert a tensor or an ndarray to PIL Image. This function does not support torchscript.

    See :class:`~torchvision.transforms.ToPILImage` for more details.

    Args:
        pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).

    .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes

    Returns:
        PIL Image: Image converted to PIL Image.
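
    Example (illustrative sketch; a CHW uint8 tensor becomes an RGB image):
        >>> img = torch.randint(0, 256, (3, 32, 64), dtype=torch.uint8)
        >>> to_pil_image(img).size
        (64, 32)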
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(to_pil_image)

    if isinstance(pic, torch.Tensor):
        if pic.ndim == 3:
            pic = pic.permute((1, 2, 0))
        pic = pic.numpy(force=True)
    elif not isinstance(pic, np.ndarray):
        raise TypeError(f"pic should be Tensor or ndarray. Got {type(pic)}.")

    if pic.ndim == 2:
        # if 2D image, add channel dimension (HWC)
        pic = np.expand_dims(pic, 2)
    if pic.ndim != 3:
        raise ValueError(f"pic should be 2/3 dimensional. Got {pic.ndim} dimensions.")

    if pic.shape[-1] > 4:
        raise ValueError(f"pic should not have > 4 channels. Got {pic.shape[-1]} channels.")

    npimg = pic

    if np.issubdtype(npimg.dtype, np.floating) and mode != "F":
        npimg = (npimg * 255).astype(np.uint8)

    if npimg.shape[2] == 1:
        expected_mode = None
        npimg = npimg[:, :, 0]
        if npimg.dtype == np.uint8:
            expected_mode = "L"
        elif npimg.dtype == np.int16:
            expected_mode = "I;16" if sys.byteorder == "little" else "I;16B"
        elif npimg.dtype == np.int32:
            expected_mode = "I"
        elif npimg.dtype == np.float32:
            expected_mode = "F"
        if mode is not None and mode != expected_mode:
            raise ValueError(f"Incorrect mode ({mode}) supplied for input type {npimg.dtype}. Should be {expected_mode}")
        mode = expected_mode

    elif npimg.shape[2] == 2:
        permitted_2_channel_modes = ["LA"]
        if mode is not None and mode not in permitted_2_channel_modes:
            raise ValueError(f"Only modes {permitted_2_channel_modes} are supported for 2-channel inputs")

        if mode is None and npimg.dtype == np.uint8:
            mode = "LA"

    elif npimg.shape[2] == 4:
        permitted_4_channel_modes = ["RGBA", "CMYK", "RGBX"]
        if mode is not None and mode not in permitted_4_channel_modes:
            raise ValueError(f"Only modes {permitted_4_channel_modes} are supported for 4-channel inputs")

        if mode is None and npimg.dtype == np.uint8:
            mode = "RGBA"
    else:
        permitted_3_channel_modes = ["RGB", "YCbCr", "HSV"]
        if mode is not None and mode not in permitted_3_channel_modes:
            raise ValueError(f"Only modes {permitted_3_channel_modes} are supported for 3-channel inputs")
        if mode is None and npimg.dtype == np.uint8:
            mode = "RGB"

    if mode is None:
        raise TypeError(f"Input type {npimg.dtype} is not supported")

    return Image.fromarray(npimg, mode=mode)


def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool = False) -> Tensor:
    """Normalize a float tensor image with mean and standard deviation.
    This transform does not support PIL Image.

    .. note::
        This transform acts out of place by default, i.e., it does not mutate the input tensor.

    See :class:`~torchvision.transforms.Normalize` for more details.

    Args:
        tensor (Tensor): Float tensor image of size (C, H, W) or (B, C, H, W) to be normalized.
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace (bool, optional): Bool to make this operation inplace.

    Returns:
        Tensor: Normalized Tensor image.
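
    Example (illustrative sketch; with mean=std=0.5, a constant image of ones maps to ones):
        >>> normalize(torch.ones(1, 2, 2), mean=[0.5], std=[0.5]).mean()
        tensor(1.)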
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(normalize)
    if not isinstance(tensor, torch.Tensor):
        raise TypeError(f"img should be Tensor Image. Got {type(tensor)}")

    return F_t.normalize(tensor, mean=mean, std=std, inplace=inplace)


def _compute_resized_output_size(
    image_size: Tuple[int, int], size: List[int], max_size: Optional[int] = None
) -> List[int]:
    if len(size) == 1:  # specified size only for the smallest edge
        h, w = image_size
        short, long = (w, h) if w <= h else (h, w)
        requested_new_short = size if isinstance(size, int) else size[0]

        new_short, new_long = requested_new_short, int(requested_new_short * long / short)

        if max_size is not None:
            if max_size <= requested_new_short:
                raise ValueError(
                    f"max_size = {max_size} must be strictly greater than the requested "
                    f"size for the smaller edge size = {size}"
                )
            if new_long > max_size:
                new_short, new_long = int(max_size * new_short / new_long), max_size

        new_w, new_h = (new_short, new_long) if w <= h else (new_long, new_short)
    else:  # specified both h and w
        new_w, new_h = size[1], size[0]
    return [new_h, new_w]


def resize(
    img: Tensor,
    size: List[int],
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    max_size: Optional[int] = None,
    antialias: Optional[bool] = True,
) -> Tensor:
    r"""Resize the input image to the given size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Args:
        img (PIL Image or Tensor): Image to be resized.
        size (sequence or int): Desired output size. If size is a sequence like
            (h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number maintaining
            the aspect ratio. i.e, if height > width, then image will be rescaled to
            :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`.

            .. note::
                In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`.
            Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
            ``InterpolationMode.NEAREST_EXACT``, ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are
            supported.
            The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
        max_size (int, optional): The maximum allowed for the longer edge of
            the resized image. If the longer edge of the image is greater
            than ``max_size`` after being resized according to ``size``,
            ``size`` will be overruled so that the longer edge is equal to
            ``max_size``.
            As a result, the smaller edge may be shorter than ``size``. This
            is only supported if ``size`` is an int (or a sequence of length
            1 in torchscript mode).
        antialias (bool, optional): Whether to apply antialiasing.
            It only affects **tensors** with bilinear or bicubic modes and it is
            ignored otherwise: on PIL images, antialiasing is always applied on
            bilinear or bicubic modes; on other modes (for PIL images and
            tensors), antialiasing makes no sense and this parameter is ignored.
            Possible values are:

            - ``True`` (default): will apply antialiasing for bilinear or bicubic modes.
              Other modes aren't affected. This is probably what you want to use.
            - ``False``: will not apply antialiasing for tensors on any mode. PIL
              images are still antialiased on bilinear or bicubic modes, because
              PIL doesn't support disabling antialiasing.
            - ``None``: equivalent to ``False`` for tensors and ``True`` for
              PIL images. This value exists for legacy reasons and you probably
              don't want to use it unless you really know what you are doing.

            The default value changed from ``None`` to ``True`` in
            v0.17, for the PIL and Tensor backends to be consistent.

    Returns:
        PIL Image or Tensor: Resized image.
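
    Example (illustrative sketch; a single-element size keeps the aspect ratio):
        >>> img = torch.rand(3, 256, 512)
        >>> resize(img, [128]).shape
        torch.Size([3, 128, 256])
        >>> resize(img, [64, 64]).shape
        torch.Size([3, 64, 64])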
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(resize)

    if isinstance(interpolation, int):
        interpolation = _interpolation_modes_from_int(interpolation)
    elif not isinstance(interpolation, InterpolationMode):
        raise TypeError(
            "Argument interpolation should be a InterpolationMode or a corresponding Pillow integer constant"
        )

    if isinstance(size, (list, tuple)):
        if len(size) not in [1, 2]:
            raise ValueError(
                f"Size must be an int or a 1 or 2 element tuple/list, not a {len(size)} element tuple/list"
            )
        if max_size is not None and len(size) != 1:
            raise ValueError(
                "max_size should only be passed if size specifies the length of the smaller edge, "
                "i.e. size should be an int or a sequence of length 1 in torchscript mode."
            )

    _, image_height, image_width = get_dimensions(img)
    if isinstance(size, int):
        size = [size]
    output_size = _compute_resized_output_size((image_height, image_width), size, max_size)

    if [image_height, image_width] == output_size:
        return img

    if not isinstance(img, torch.Tensor):
        if antialias is False:
            warnings.warn("Anti-alias option is always applied for PIL Image input. Argument antialias is ignored.")
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.resize(img, size=output_size, interpolation=pil_interpolation)

    return F_t.resize(img, size=output_size, interpolation=interpolation.value, antialias=antialias)


def pad(img: Tensor, padding: List[int], fill: Union[int, float] = 0, padding_mode: str = "constant") -> Tensor:
    r"""Pad the given image on all sides with the given "pad" value.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means at most 2 leading dimensions for mode reflect and symmetric,
    at most 3 leading dimensions for mode edge,
    and an arbitrary number of leading dimensions for mode constant

    Args:
        img (PIL Image or Tensor): Image to be padded.
        padding (int or sequence): Padding on each border. If a single int is provided this
            is used to pad all borders. If sequence of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a sequence of length 4 is provided
            this is the padding for the left, top, right and bottom borders respectively.

            .. note::
                In torchscript mode padding as single int is not supported, use a sequence of
                length 1: ``[padding, ]``.
        fill (number or tuple): Pixel fill value for constant fill. Default is 0.
            If a tuple of length 3, it is used to fill R, G, B channels respectively.
            This value is only used when the padding_mode is constant.
            Only number is supported for torch Tensor.
            Only int or tuple value is supported for PIL Image.
        padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
            Default is constant.

            - constant: pads with a constant value, this value is specified with fill

            - edge: pads with the last value at the edge of the image.
              If input a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2

            - reflect: pads with reflection of image without repeating the last value on the edge.
              For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
              will result in [3, 2, 1, 2, 3, 4, 3, 2]

            - symmetric: pads with reflection of image repeating the last value on the edge.
              For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
              will result in [2, 1, 1, 2, 3, 4, 4, 3]

    Returns:
        PIL Image or Tensor: Padded image.
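
    Example (illustrative sketch; a length-1 padding sequence pads every border):
        >>> pad(torch.rand(3, 32, 32), [2]).shape
        torch.Size([3, 36, 36])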
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(pad)
    if not isinstance(img, torch.Tensor):
        return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)

    return F_t.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)


def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:
    """Crop the given image at specified location and output size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
    If image size is smaller than output size along any edge, image is padded with 0 and then cropped.

    Args:
        img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.

    Returns:
        PIL Image or Tensor: Cropped image.
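
    Example (illustrative sketch):
        >>> crop(torch.rand(3, 32, 32), top=4, left=8, height=16, width=10).shape
        torch.Size([3, 16, 10])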
    """

    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(crop)
    if not isinstance(img, torch.Tensor):
        return F_pil.crop(img, top, left, height, width)

    return F_t.crop(img, top, left, height, width)


def center_crop(img: Tensor, output_size: List[int]) -> Tensor:
    """Crops the given image at the center.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
    If image size is smaller than output size along any edge, image is padded with 0 and then center cropped.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        output_size (sequence or int): (height, width) of the crop box. If int or sequence with single int,
            it is used for both directions.

    Returns:
        PIL Image or Tensor: Cropped image.
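
    Example (illustrative sketch; a single int-like size gives a square crop):
        >>> center_crop(torch.rand(3, 32, 32), [16]).shape
        torch.Size([3, 16, 16])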
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(center_crop)
    if isinstance(output_size, numbers.Number):
        output_size = (int(output_size), int(output_size))
    elif isinstance(output_size, (tuple, list)) and len(output_size) == 1:
        output_size = (output_size[0], output_size[0])

    _, image_height, image_width = get_dimensions(img)
    crop_height, crop_width = output_size

    if crop_width > image_width or crop_height > image_height:
        padding_ltrb = [
            (crop_width - image_width) // 2 if crop_width > image_width else 0,
            (crop_height - image_height) // 2 if crop_height > image_height else 0,
            (crop_width - image_width + 1) // 2 if crop_width > image_width else 0,
            (crop_height - image_height + 1) // 2 if crop_height > image_height else 0,
        ]
        img = pad(img, padding_ltrb, fill=0)  # PIL uses fill value 0
        _, image_height, image_width = get_dimensions(img)
        if crop_width == image_width and crop_height == image_height:
            return img

    crop_top = int(round((image_height - crop_height) / 2.0))
    crop_left = int(round((image_width - crop_width) / 2.0))
    return crop(img, crop_top, crop_left, crop_height, crop_width)


def resized_crop(
    img: Tensor,
    top: int,
    left: int,
    height: int,
    width: int,
    size: List[int],
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    antialias: Optional[bool] = True,
) -> Tensor:
    """Crop the given image and resize it to desired size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.

    Args:
        img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.
        size (sequence or int): Desired output size. Same semantics as ``resize``.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`.
            Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
            ``InterpolationMode.NEAREST_EXACT``, ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are
            supported.
            The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
        antialias (bool, optional): Whether to apply antialiasing.
            It only affects **tensors** with bilinear or bicubic modes and it is
            ignored otherwise: on PIL images, antialiasing is always applied on
            bilinear or bicubic modes; on other modes (for PIL images and
            tensors), antialiasing makes no sense and this parameter is ignored.
            Possible values are:

            - ``True`` (default): will apply antialiasing for bilinear or bicubic modes.
              Other modes aren't affected. This is probably what you want to use.
            - ``False``: will not apply antialiasing for tensors on any mode. PIL
              images are still antialiased on bilinear or bicubic modes, because
              PIL doesn't support disabling antialiasing.
            - ``None``: equivalent to ``False`` for tensors and ``True`` for
              PIL images. This value exists for legacy reasons and you probably
              don't want to use it unless you really know what you are doing.

            The default value changed from ``None`` to ``True`` in
            v0.17, for the PIL and Tensor backends to be consistent.
    Returns:
        PIL Image or Tensor: Cropped image.
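
    Example (illustrative sketch; crop a 32x32 region, then resize it to 16x16):
        >>> resized_crop(torch.rand(3, 64, 64), 0, 0, 32, 32, size=[16]).shape
        torch.Size([3, 16, 16])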
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(resized_crop)
    img = crop(img, top, left, height, width)
    img = resize(img, size, interpolation, antialias=antialias)
    return img


def hflip(img: Tensor) -> Tensor:
    """Horizontally flip the given image.

    Args:
        img (PIL Image or Tensor): Image to be flipped. If img
            is a Tensor, it is expected to be in [..., H, W] format,
            where ... means it can have an arbitrary number of leading
            dimensions.

    Returns:
        PIL Image or Tensor:  Horizontally flipped image.
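
    Example (illustrative sketch; the last (width) dimension is reversed):
        >>> hflip(torch.tensor([[[1.0, 2.0], [3.0, 4.0]]]))
        tensor([[[2., 1.],
                 [4., 3.]]])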
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(hflip)
    if not isinstance(img, torch.Tensor):
        return F_pil.hflip(img)

    return F_t.hflip(img)


def _get_perspective_coeffs(startpoints: List[List[int]], endpoints: List[List[int]]) -> List[float]:
    """Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms.

    In Perspective Transform each pixel (x, y) in the original image gets transformed as,
     (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) )

    Args:
        startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
        endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.

    Returns:
        octuple (a, b, c, d, e, f, g, h) for transforming each pixel.
    """
    if len(startpoints) != 4 or len(endpoints) != 4:
        raise ValueError(
            f"Please provide exactly four corners, got {len(startpoints)} startpoints and {len(endpoints)} endpoints."
        )
    a_matrix = torch.zeros(2 * len(startpoints), 8, dtype=torch.float64)

    for i, (p1, p2) in enumerate(zip(endpoints, startpoints)):
        a_matrix[2 * i, :] = torch.tensor([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
        a_matrix[2 * i + 1, :] = torch.tensor([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])

    b_matrix = torch.tensor(startpoints, dtype=torch.float64).view(8)
    # do least squares in double precision to prevent numerical issues
    res = torch.linalg.lstsq(a_matrix, b_matrix, driver="gels").solution.to(torch.float32)

    output: List[float] = res.tolist()
    return output


def perspective(
    img: Tensor,
    startpoints: List[List[int]],
    endpoints: List[List[int]],
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    fill: Optional[List[float]] = None,
) -> Tensor:
    """Perform perspective transform of the given image.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): Image to be transformed.
        startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
        endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.

            .. note::
                In torchscript mode single int/float value is not supported, please use a sequence
                of length 1: ``[value, ]``.

    Returns:
        PIL Image or Tensor: transformed Image.
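
    Example (illustrative sketch; warp between two sets of corner points):
        >>> img = torch.rand(3, 32, 32)
        >>> startpoints = [[0, 0], [31, 0], [31, 31], [0, 31]]
        >>> endpoints = [[2, 1], [30, 0], [31, 30], [0, 29]]
        >>> perspective(img, startpoints, endpoints).shape
        torch.Size([3, 32, 32])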
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(perspective)

    coeffs = _get_perspective_coeffs(startpoints, endpoints)

    if isinstance(interpolation, int):
        interpolation = _interpolation_modes_from_int(interpolation)
    elif not isinstance(interpolation, InterpolationMode):
        raise TypeError(
            "Argument interpolation should be a InterpolationMode or a corresponding Pillow integer constant"
        )

    if not isinstance(img, torch.Tensor):
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.perspective(img, coeffs, interpolation=pil_interpolation, fill=fill)

    return F_t.perspective(img, coeffs, interpolation=interpolation.value, fill=fill)


def vflip(img: Tensor) -> Tensor:
    """Vertically flip the given image.

    Args:
        img (PIL Image or Tensor): Image to be flipped. If img
            is a Tensor, it is expected to be in [..., H, W] format,
            where ... means it can have an arbitrary number of leading
            dimensions.

    Returns:
        PIL Image or Tensor:  Vertically flipped image.
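
    Example (illustrative sketch; the height dimension is reversed):
        >>> vflip(torch.tensor([[[1.0, 2.0], [3.0, 4.0]]]))
        tensor([[[3., 4.],
                 [1., 2.]]])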
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(vflip)
    if not isinstance(img, torch.Tensor):
        return F_pil.vflip(img)

    return F_t.vflip(img)


def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
    """Crop the given image into four corners and the central crop.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).

    Returns:
       tuple: tuple (tl, tr, bl, br, center)
       Corresponding top left, top right, bottom left, bottom right and center crop.
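
    Example (illustrative sketch):
        >>> crops = five_crop(torch.rand(3, 64, 64), [32])
        >>> len(crops), crops[0].shape
        (5, torch.Size([3, 32, 32]))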
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(five_crop)
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and len(size) == 1:
        size = (size[0], size[0])

    if len(size) != 2:
        raise ValueError("Please provide only two dimensions (h, w) for size.")

    _, image_height, image_width = get_dimensions(img)
    crop_height, crop_width = size
    if crop_width > image_width or crop_height > image_height:
        msg = "Requested crop size {} is bigger than input size {}"
        raise ValueError(msg.format(size, (image_height, image_width)))

    tl = crop(img, 0, 0, crop_height, crop_width)
    tr = crop(img, 0, image_width - crop_width, crop_height, crop_width)
    bl = crop(img, image_height - crop_height, 0, crop_height, crop_width)
    br = crop(img, image_height - crop_height, image_width - crop_width, crop_height, crop_width)

    center = center_crop(img, [crop_height, crop_width])

    return tl, tr, bl, br, center


def ten_crop(
    img: Tensor, size: List[int], vertical_flip: bool = False
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
    """Generate ten cropped images from the given image.
    Crop the given image into four corners and the central crop plus the
    flipped version of these (horizontal flipping is used by default).
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
        vertical_flip (bool): Use vertical flipping instead of horizontal

    Returns:
        tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
        Corresponding top left, top right, bottom left, bottom right and
        center crop and same for the flipped image.
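
    Example (illustrative sketch):
        >>> crops = ten_crop(torch.rand(3, 64, 64), [32])
        >>> len(crops)
        10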
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(ten_crop)
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and len(size) == 1:
        size = (size[0], size[0])

    if len(size) != 2:
        raise ValueError("Please provide only two dimensions (h, w) for size.")

    first_five = five_crop(img, size)

    if vertical_flip:
        img = vflip(img)
    else:
        img = hflip(img)

    second_five = five_crop(img, size)
    return first_five + second_five


def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor:
    """Adjust brightness of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        brightness_factor (float):  How much to adjust the brightness. Can be
            any non-negative number. 0 gives a black image, 1 gives the
            original image while 2 increases the brightness by a factor of 2.

    Returns:
        PIL Image or Tensor: Brightness adjusted image.
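
    Example (illustrative sketch; for floats, a factor of 2 doubles values, clamped to [0, 1]):
        >>> adjust_brightness(torch.full((3, 2, 2), 0.4), 2.0).mean()
        tensor(0.8000)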
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_brightness)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_brightness(img, brightness_factor)

    return F_t.adjust_brightness(img, brightness_factor)


def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor:
    """Adjust contrast of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non-negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.

    Returns:
        PIL Image or Tensor: Contrast adjusted image.
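
    Example (illustrative sketch; a factor of 0 would yield a solid gray image):
        >>> adjust_contrast(torch.rand(3, 8, 8), 2.0).shape
        torch.Size([3, 8, 8])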
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_contrast)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_contrast(img, contrast_factor)

    return F_t.adjust_contrast(img, contrast_factor)


def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor:
    """Adjust color saturation of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        saturation_factor (float):  How much to adjust the saturation. 0 will
            give a black and white image, 1 will give the original image while
            2 will enhance the saturation by a factor of 2.

    Returns:
        PIL Image or Tensor: Saturation adjusted image.
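
    Example (illustrative sketch; a factor of 0 yields a grayscale image):
        >>> adjust_saturation(torch.rand(3, 8, 8), 0.0).shape
        torch.Size([3, 8, 8])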
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_saturation)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_saturation(img, saturation_factor)

    return F_t.adjust_saturation(img, saturation_factor)


def adjust_hue(img: Tensor, hue_factor: float) -> Tensor:
    """Adjust hue of an image.

    The image hue is adjusted by converting the image to HSV and
    cyclically shifting the intensities in the hue channel (H).
    The image is then converted back to original image mode.

    `hue_factor` is the amount of shift in H channel and must be in the
    interval `[-0.5, 0.5]`.

    See `Hue`_ for more details.

    .. _Hue: https://en.wikipedia.org/wiki/Hue

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image mode "1", "I", "F" and modes with transparency (alpha channel) are not supported.
            Note: the pixel values of the input image have to be non-negative for conversion to HSV space;
            thus it does not work if you normalize your image to an interval with negative values,
            or use an interpolation that generates negative values before using this function.
        hue_factor (float):  How much to shift the hue channel. Should be in
            [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
            HSV space in positive and negative direction respectively.
            0 means no shift. Therefore, both -0.5 and 0.5 will give an image
            with complementary colors while 0 gives the original image.

    Returns:
        PIL Image or Tensor: Hue adjusted image.
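
    Example (illustrative sketch; a shift of 0.5 gives complementary colors):
        >>> adjust_hue(torch.rand(3, 8, 8), 0.5).shape
        torch.Size([3, 8, 8])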
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_hue)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_hue(img, hue_factor)

    return F_t.adjust_hue(img, hue_factor)


def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:
    r"""Perform gamma correction on an image.

    Also known as Power Law Transform. Intensities in RGB mode are adjusted
    based on the following equation:

    .. math::
        I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}

    See `Gamma Correction`_ for more details.

    .. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction

    Args:
        img (PIL Image or Tensor): PIL Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, modes with transparency (alpha channel) are not supported.
        gamma (float): Non negative real number, same as :math:`\gamma` in the equation.
            gamma larger than 1 makes the shadows darker,
            while gamma smaller than 1 makes dark regions lighter.
        gain (float): The constant multiplier.
    Returns:
        PIL Image or Tensor: Gamma correction adjusted image.
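
    Example (illustrative sketch; for float images, gamma 0.5 maps 0.25 to 0.5):
        >>> adjust_gamma(torch.full((1, 2, 2), 0.25), gamma=0.5).mean()
        tensor(0.5000)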
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_gamma)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_gamma(img, gamma, gain)

    return F_t.adjust_gamma(img, gamma, gain)


def _get_inverse_affine_matrix(
    center: List[float], angle: float, translate: List[float], scale: float, shear: List[float], inverted: bool = True
) -> List[float]:
    # Helper method to compute inverse matrix for affine transformation

    # Pillow requires inverse affine transformation matrix:
    # Affine matrix is : M = T * C * RotateScaleShear * C^-1
    #
    # where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
    #       C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
    #       RotateScaleShear is rotation with scale and shear matrix
    #
    #       RotateScaleShear(a, s, (sx, sy)) =
    #       = R(a) * S(s) * SHy(sy) * SHx(sx)
    #       = [ s*cos(a - sy)/cos(sy), s*(-cos(a - sy)*tan(sx)/cos(sy) - sin(a)), 0 ]
    #         [ s*sin(a - sy)/cos(sy), s*(-sin(a - sy)*tan(sx)/cos(sy) + cos(a)), 0 ]
    #         [ 0                    , 0                                      , 1 ]
    # where R is a rotation matrix, S is a scaling matrix, and SHx and SHy are the shears:
    # SHx(s) = [1, -tan(s)] and SHy(s) = [1      , 0]
    #          [0, 1      ]              [-tan(s), 1]
    #
    # Thus, the inverse is M^-1 = C * RotateScaleShear^-1 * C^-1 * T^-1
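    #
    # Worked identity check (illustrative): with angle=0, translate=(0, 0), scale=1,
    # shear=(0, 0) and center=(0, 0), we get a = d = 1 and b = c = 0 below, so both
    # branches return the identity matrix [1, 0, 0, 0, 1, 0].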

    rot = math.radians(angle)
    sx = math.radians(shear[0])
    sy = math.radians(shear[1])

    cx, cy = center
    tx, ty = translate

    # RSS without scaling
    a = math.cos(rot - sy) / math.cos(sy)
    b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot)
    c = math.sin(rot - sy) / math.cos(sy)
    d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot)

    if inverted:
        # Inverted rotation matrix with scale and shear
        # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
        matrix = [d, -b, 0.0, -c, a, 0.0]
        matrix = [x / scale for x in matrix]
        # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
        matrix[2] += matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty)
        matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty)
        # Apply center translation: C * RSS^-1 * C^-1 * T^-1
        matrix[2] += cx
        matrix[5] += cy
    else:
        matrix = [a, b, 0.0, c, d, 0.0]
        matrix = [x * scale for x in matrix]
        # Apply inverse of center translation: RSS * C^-1
        matrix[2] += matrix[0] * (-cx) + matrix[1] * (-cy)
        matrix[5] += matrix[3] * (-cx) + matrix[4] * (-cy)
        # Apply translation and center : T * C * RSS * C^-1
        matrix[2] += cx + tx
        matrix[5] += cy + ty

    return matrix


def rotate(
    img: Tensor,
    angle: float,
    interpolation: InterpolationMode = InterpolationMode.NEAREST,
    expand: bool = False,
    center: Optional[List[int]] = None,
    fill: Optional[List[float]] = None,
) -> Tensor:
    """Rotate the image by angle.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): image to be rotated.
        angle (number): rotation angle value in degrees, counter-clockwise.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
        expand (bool, optional): Optional expansion flag.
            If true, expands the output image to make it large enough to hold the entire rotated image.
            If false or omitted, make the output image the same size as the input image.
            Note that the expand flag assumes rotation around the center and no translation.
        center (sequence, optional): Optional center of rotation. Origin is the upper left corner.
            Default is the center of the image.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.

            .. note::
                In torchscript mode single int/float value is not supported, please use a sequence
                of length 1: ``[value, ]``.
    Returns:
        PIL Image or Tensor: Rotated image.
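
    Example (illustrative sketch; without ``expand`` the spatial size is preserved):
        >>> rotate(torch.rand(3, 32, 32), angle=45.0).shape
        torch.Size([3, 32, 32])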

    .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters

    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(rotate)

    if isinstance(interpolation, int):
        interpolation = _interpolation_modes_from_int(interpolation)
    elif not isinstance(interpolation, InterpolationMode):
        raise TypeError(
            "Argument interpolation should be a InterpolationMode or a corresponding Pillow integer constant"
        )

    if not isinstance(angle, (int, float)):
        raise TypeError("Argument angle should be int or float")

    if center is not None and not isinstance(center, (list, tuple)):
        raise TypeError("Argument center should be a sequence")

    if not isinstance(img, torch.Tensor):
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.rotate(img, angle=angle, interpolation=pil_interpolation, expand=expand, center=center, fill=fill)

    center_f = [0.0, 0.0]
    if center is not None:
        _, height, width = get_dimensions(img)
        # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
        center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, [width, height])]

    # due to current incoherence of rotation angle direction between affine and rotate implementations
    # we need to set -angle.
    matrix = _get_inverse_affine_matrix(center_f, -angle, [0.0, 0.0], 1.0, [0.0, 0.0])
    return F_t.rotate(img, matrix=matrix, interpolation=interpolation.value, expand=expand, fill=fill)


def affine(
    img: Tensor,
    angle: float,
    translate: List[int],
    scale: float,
    shear: List[float],
    interpolation: InterpolationMode = InterpolationMode.NEAREST,
    fill: Optional[List[float]] = None,
    center: Optional[List[int]] = None,
) -> Tensor:
    """Apply affine transformation on the image keeping image center invariant.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): image to transform.
        angle (number): rotation angle in degrees between -180 and 180, clockwise direction.
        translate (sequence of integers): horizontal and vertical translations (post-rotation translation)
        scale (float): overall scale
        shear (float or sequence): shear angle value in degrees between -180 to 180, clockwise direction.
            If a sequence is specified, the first value corresponds to a shear parallel to the x-axis, while
            the second value corresponds to a shear parallel to the y-axis.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.

            .. note::
                In torchscript mode single int/float value is not supported, please use a sequence
                of length 1: ``[value, ]``.
        center (sequence, optional): Optional center of rotation. Origin is the upper left corner.
            Default is the center of the image.

    Returns:
        PIL Image or Tensor: Transformed image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(affine)

    if isinstance(interpolation, int):
        interpolation = _interpolation_modes_from_int(interpolation)
    elif not isinstance(interpolation, InterpolationMode):
        raise TypeError(
            "Argument interpolation should be a InterpolationMode or a corresponding Pillow integer constant"
        )

    if not isinstance(angle, (int, float)):
        raise TypeError("Argument angle should be int or float")

    if not isinstance(translate, (list, tuple)):
        raise TypeError("Argument translate should be a sequence")

    if len(translate) != 2:
        raise ValueError("Argument translate should be a sequence of length 2")

    if scale <= 0.0:
        raise ValueError("Argument scale should be positive")

    if not isinstance(shear, (numbers.Number, (list, tuple))):
        raise TypeError("Shear should be either a single value or a sequence of two values")

    if isinstance(angle, int):
        angle = float(angle)

    if isinstance(translate, tuple):
        translate = list(translate)

    if isinstance(shear, numbers.Number):
        shear = [shear, 0.0]

    if isinstance(shear, tuple):
        shear = list(shear)

    if len(shear) == 1:
        shear = [shear[0], shear[0]]

    if len(shear) != 2:
        raise ValueError(f"Shear should be a sequence containing two values. Got {shear}")

    if center is not None and not isinstance(center, (list, tuple)):
        raise TypeError("Argument center should be a sequence")

    _, height, width = get_dimensions(img)
    if not isinstance(img, torch.Tensor):
        # center = (width * 0.5 + 0.5, height * 0.5 + 0.5)
        # it is visually better to estimate the center without 0.5 offset
        # otherwise image rotated by 90 degrees is shifted vs output image of torch.rot90 or F_t.affine
        if center is None:
            center = [width * 0.5, height * 0.5]
        matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.affine(img, matrix=matrix, interpolation=pil_interpolation, fill=fill)

    center_f = [0.0, 0.0]
    if center is not None:
        # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
        # (height and width were already computed by get_dimensions above.)
        center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, [width, height])]

    translate_f = [1.0 * t for t in translate]
    matrix = _get_inverse_affine_matrix(center_f, angle, translate_f, scale, shear)
    return F_t.affine(img, matrix=matrix, interpolation=interpolation.value, fill=fill)
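
# A minimal usage sketch (editorial example; the parameter values are arbitrary
# assumptions chosen only to illustrate the call signature):
#
#     img = torch.rand(3, 64, 64)
#     out = affine(img, angle=15.0, translate=[5, -3], scale=1.2, shear=[10.0, 0.0])
#     # Rotates 15 degrees, shifts by (5, -3) pixels, zooms by 1.2x, and shears
#     # 10 degrees along the x-axis, keeping the image center invariant.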


# to_grayscale() is a stand-alone functional that is never called from the
# transform classes; it appears to be kept for backward compatibility.
@torch.jit.unused
def to_grayscale(img, num_output_channels=1):
    """Convert PIL image of any mode (RGB, HSV, LAB, etc) to grayscale version of image.
    This transform does not support torch Tensor.

    Args:
        img (PIL Image): PIL Image to be converted to grayscale.
        num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default is 1.

    Returns:
        PIL Image: Grayscale version of the image.

        - if num_output_channels = 1 : returned image is single channel
        - if num_output_channels = 3 : returned image is 3 channel with r = g = b
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(to_grayscale)
    if isinstance(img, Image.Image):
        return F_pil.to_grayscale(img, num_output_channels)

    raise TypeError("Input should be PIL Image")
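
# A minimal usage sketch (editorial example; the input image is an assumption):
#
#     pil_img = Image.new("RGB", (32, 32), color=(255, 0, 0))
#     gray = to_grayscale(pil_img, num_output_channels=3)
#     # gray is a 3-channel PIL image whose channels are identical (r = g = b).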


def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor:
    """Convert RGB image to grayscale version of image.
    If the image is torch Tensor, it is expected
    to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions

    Note:
        Note that this method supports only RGB images as input. For inputs in other color spaces,
        consider using :meth:`~torchvision.transforms.functional.to_grayscale` with PIL Image.

    Args:
        img (PIL Image or Tensor): RGB Image to be converted to grayscale.
        num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default, 1.

    Returns:
        PIL Image or Tensor: Grayscale version of the image.

        - if num_output_channels = 1 : returned image is single channel
        - if num_output_channels = 3 : returned image is 3 channel with r = g = b
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(rgb_to_grayscale)
    if not isinstance(img, torch.Tensor):
        return F_pil.to_grayscale(img, num_output_channels)

    return F_t.rgb_to_grayscale(img, num_output_channels)
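
# A minimal usage sketch (editorial example; shapes are assumptions). Because of
# the [..., 3, H, W] convention, the tensor path also accepts batched input:
#
#     batch = torch.rand(8, 3, 64, 64)
#     gray = rgb_to_grayscale(batch)      # shape [8, 1, 64, 64]
#     gray3 = rgb_to_grayscale(batch, 3)  # shape [8, 3, 64, 64], r = g = b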


def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool = False) -> Tensor:
    """Erase the input Tensor Image with given value.
    This transform does not support PIL Image.

    Args:
        img (Tensor Image): Tensor image of size (C, H, W) to be erased
        i (int): i in (i,j) i.e. coordinates of the upper left corner.
        j (int): j in (i,j) i.e. coordinates of the upper left corner.
        h (int): Height of the erased region.
        w (int): Width of the erased region.
        v: Erasing value.
        inplace (bool, optional): Whether to apply the erase in place. Default is False.

    Returns:
        Tensor Image: Erased image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(erase)
    if not isinstance(img, torch.Tensor):
        raise TypeError(f"img should be Tensor Image. Got {type(img)}")

    return F_t.erase(img, i, j, h, w, v, inplace=inplace)
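
# A minimal usage sketch (editorial example; coordinates and fill value are
# assumptions). Erases an 8x8 patch whose top-left corner is at (i=4, j=10):
#
#     img = torch.rand(3, 32, 32)
#     out = erase(img, i=4, j=10, h=8, w=8, v=torch.tensor(0.0))
#     # v broadcasts over the erased region; pass inplace=True to avoid the copy.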


def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Tensor:
    """Performs Gaussian blurring on the image by given kernel

    The convolution will be using reflection padding corresponding to the kernel size, to maintain the input shape.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means at most one leading dimension.

    Args:
        img (PIL Image or Tensor): Image to be blurred
        kernel_size (sequence of ints or int): Gaussian kernel size. Can be a sequence of integers
            like ``(kx, ky)`` or a single integer for square kernels.

            .. note::
                In torchscript mode kernel_size as single int is not supported, use a sequence of
                length 1: ``[ksize, ]``.
        sigma (sequence of floats or float, optional): Gaussian kernel standard deviation. Can be a
            sequence of floats like ``(sigma_x, sigma_y)`` or a single float to define the
            same sigma in both X/Y directions. If None, then it is computed using
            ``kernel_size`` as ``sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8``.
            Default, None.

            .. note::
                In torchscript mode sigma as single float is
                not supported, use a sequence of length 1: ``[sigma, ]``.

    Returns:
        PIL Image or Tensor: Gaussian Blurred version of the image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(gaussian_blur)
    if not isinstance(kernel_size, (int, list, tuple)):
        raise TypeError(f"kernel_size should be int or a sequence of integers. Got {type(kernel_size)}")
    if isinstance(kernel_size, int):
        kernel_size = [kernel_size, kernel_size]
    if len(kernel_size) != 2:
        raise ValueError(f"If kernel_size is a sequence its length should be 2. Got {len(kernel_size)}")
    for ksize in kernel_size:
        if ksize % 2 == 0 or ksize < 0:
            raise ValueError(f"kernel_size should have odd and positive integers. Got {kernel_size}")

    if sigma is None:
        sigma = [ksize * 0.15 + 0.35 for ksize in kernel_size]

    if sigma is not None and not isinstance(sigma, (int, float, list, tuple)):
        raise TypeError(f"sigma should be either float or sequence of floats. Got {type(sigma)}")
    if isinstance(sigma, (int, float)):
        sigma = [float(sigma), float(sigma)]
    if isinstance(sigma, (list, tuple)) and len(sigma) == 1:
        sigma = [sigma[0], sigma[0]]
    if len(sigma) != 2:
        raise ValueError(f"If sigma is a sequence, its length should be 2. Got {len(sigma)}")
    for s in sigma:
        if s <= 0.0:
            raise ValueError(f"sigma should have positive values. Got {sigma}")

    t_img = img
    if not isinstance(img, torch.Tensor):
        if not F_pil._is_pil_image(img):
            raise TypeError(f"img should be PIL Image or Tensor. Got {type(img)}")

        t_img = pil_to_tensor(img)

    output = F_t.gaussian_blur(t_img, kernel_size, sigma)

    if not isinstance(img, torch.Tensor):
        output = to_pil_image(output, mode=img.mode)
    return output
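
# A minimal usage sketch (editorial example; kernel size and sigma are
# assumptions; torchscript mode requires sequences for both, as noted above):
#
#     img = torch.rand(3, 64, 64)
#     out = gaussian_blur(img, kernel_size=[5, 5], sigma=[1.5, 1.5])
#     # Passing sigma=None derives it from kernel_size as documented above.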


def invert(img: Tensor) -> Tensor:
    """Invert the colors of an RGB/grayscale image.

    Args:
        img (PIL Image or Tensor): Image to have its colors inverted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Returns:
        PIL Image or Tensor: Color inverted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(invert)
    if not isinstance(img, torch.Tensor):
        return F_pil.invert(img)

    return F_t.invert(img)
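
# A minimal usage sketch (editorial example; the input is an assumption):
#
#     img = torch.randint(0, 256, (3, 32, 32), dtype=torch.uint8)
#     out = invert(img)  # every uint8 pixel p becomes 255 - p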


def posterize(img: Tensor, bits: int) -> Tensor:
    """Posterize an image by reducing the number of bits for each color channel.

    Args:
        img (PIL Image or Tensor): Image to have its colors posterized.
            If img is torch Tensor, it should be of type torch.uint8, and
            it is expected to be in [..., 1 or 3, H, W] format, where ... means
            it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".
        bits (int): The number of bits to keep for each channel (0-8).
    Returns:
        PIL Image or Tensor: Posterized image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(posterize)
    if not (0 <= bits <= 8):
        raise ValueError(f"The number of bits should be between 0 and 8. Got {bits}")

    if not isinstance(img, torch.Tensor):
        return F_pil.posterize(img, bits)

    return F_t.posterize(img, bits)
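
# A minimal usage sketch (editorial example; bits=2 is an arbitrary choice; the
# tensor path requires torch.uint8 input):
#
#     img = torch.randint(0, 256, (3, 32, 32), dtype=torch.uint8)
#     out = posterize(img, bits=2)  # keeps only the 2 most significant bits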


def solarize(img: Tensor, threshold: float) -> Tensor:
    """Solarize an RGB/grayscale image by inverting all pixel values above a threshold.

    Args:
        img (PIL Image or Tensor): Image to have its colors inverted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".
        threshold (float): All pixels equal or above this value are inverted.
    Returns:
        PIL Image or Tensor: Solarized image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(solarize)
    if not isinstance(img, torch.Tensor):
        return F_pil.solarize(img, threshold)

    return F_t.solarize(img, threshold)
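
# A minimal usage sketch (editorial example; the threshold is an assumption):
#
#     img = torch.randint(0, 256, (3, 32, 32), dtype=torch.uint8)
#     out = solarize(img, threshold=128)  # pixels >= 128 are inverted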


def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor:
    """Adjust the sharpness of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        sharpness_factor (float):  How much to adjust the sharpness. Can be
            any non-negative number. 0 gives a blurred image, 1 gives the
            original image while 2 increases the sharpness by a factor of 2.

    Returns:
        PIL Image or Tensor: Sharpness adjusted image.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(adjust_sharpness)
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_sharpness(img, sharpness_factor)

    return F_t.adjust_sharpness(img, sharpness_factor)
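
# A minimal usage sketch (editorial example; the factors are assumptions):
#
#     img = torch.rand(3, 64, 64)
#     blurred = adjust_sharpness(img, 0.0)     # fully blurred
#     sharpened = adjust_sharpness(img, 2.0)   # sharpness increased 2x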


def autocontrast(img: Tensor) -> Tensor:
    """Maximize contrast of an image by remapping its
    pixels per channel so that the lowest becomes black and the lightest
    becomes white.

    Args:
        img (PIL Image or Tensor): Image on which autocontrast is applied.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Returns:
        PIL Image or Tensor: An image that was autocontrasted.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(autocontrast)
    if not isinstance(img, torch.Tensor):
        return F_pil.autocontrast(img)

    return F_t.autocontrast(img)
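
# A minimal usage sketch (editorial example; the narrow input range is an
# assumption chosen to make the effect visible):
#
#     img = torch.randint(64, 192, (3, 32, 32), dtype=torch.uint8)
#     out = autocontrast(img)
#     # Each channel is remapped so its darkest pixel becomes 0 and its
#     # lightest becomes 255.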


def equalize(img: Tensor) -> Tensor:
    """Equalize the histogram of an image by applying
    a non-linear mapping to the input in order to create a uniform
    distribution of grayscale values in the output.

    Args:
        img (PIL Image or Tensor): Image on which equalize is applied.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            The tensor dtype must be ``torch.uint8`` and values are expected to be in ``[0, 255]``.
            If img is PIL Image, it is expected to be in mode "P", "L" or "RGB".

    Returns:
        PIL Image or Tensor: An image that was equalized.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(equalize)
    if not isinstance(img, torch.Tensor):
        return F_pil.equalize(img)

    return F_t.equalize(img)
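
# A minimal usage sketch (editorial example; the tensor path requires
# torch.uint8 values in [0, 255]):
#
#     img = torch.randint(0, 256, (3, 32, 32), dtype=torch.uint8)
#     out = equalize(img)  # flattens each channel's intensity histogram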


def elastic_transform(
    img: Tensor,
    displacement: Tensor,
    interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    fill: Optional[List[float]] = None,
) -> Tensor:
    """Transform a tensor image with elastic transformations.
    Given alpha and sigma, it will generate displacement
    vectors for all pixels based on random offsets. Alpha controls the strength
    and sigma controls the smoothness of the displacements.
    The displacements are added to an identity grid and the resulting grid is
    used to grid_sample from the image.

    Applications:
        Randomly transforms the morphology of objects in images and produces a
        see-through-water-like effect.

    Args:
        img (PIL Image or Tensor): Image on which elastic_transform is applied.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "P", "L" or "RGB".
        displacement (Tensor): The displacement field. Expected shape is [1, H, W, 2].
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`.
            Default is ``InterpolationMode.BILINEAR``.
            The corresponding Pillow integer constants, e.g. ``PIL.Image.BILINEAR`` are accepted as well.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.
    """
    if not torch.jit.is_scripting() and not torch.jit.is_tracing():
        _log_api_usage_once(elastic_transform)
    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if not isinstance(displacement, torch.Tensor):
        raise TypeError("Argument displacement should be a Tensor")

    t_img = img
    if not isinstance(img, torch.Tensor):
        if not F_pil._is_pil_image(img):
            raise TypeError(f"img should be PIL Image or Tensor. Got {type(img)}")
        t_img = pil_to_tensor(img)

    shape = t_img.shape
    shape = (1,) + shape[-2:] + (2,)
    if shape != displacement.shape:
        raise ValueError(f"Argument displacement shape should be {shape}, but given {displacement.shape}")

    # TODO: if image shape is [N1, N2, ..., C, H, W] and
    # displacement is [1, H, W, 2] we need to reshape the input image
    # so that grid_sample takes the internal code path for 4D input

    output = F_t.elastic_transform(
        t_img,
        displacement,
        interpolation=interpolation.value,
        fill=fill,
    )

    if not isinstance(img, torch.Tensor):
        output = to_pil_image(output, mode=img.mode)
    return output
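
# A minimal usage sketch (editorial example; the all-zero displacement field is
# an assumption and corresponds to the identity mapping):
#
#     img = torch.rand(3, 32, 32)
#     displacement = torch.zeros(1, 32, 32, 2)
#     out = elastic_transform(img, displacement)  # approximately equal to img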