import math
import numbers
import warnings
from enum import Enum

import numpy as np
from PIL import Image

import torch
from torch import Tensor
from typing import List, Tuple, Any, Optional

try:
    import accimage
except ImportError:
    accimage = None

from . import functional_pil as F_pil
from . import functional_tensor as F_t


class InterpolationMode(Enum):
    """Interpolation modes
    """
    NEAREST = "nearest"
    BILINEAR = "bilinear"
    BICUBIC = "bicubic"
    # For PIL compatibility
    BOX = "box"
    HAMMING = "hamming"
    LANCZOS = "lanczos"


# TODO: Once torchscript supports Enums with staticmethod
# this can be put into InterpolationMode as staticmethod
def _interpolation_modes_from_int(i: int) -> InterpolationMode:
    inverse_modes_mapping = {
        0: InterpolationMode.NEAREST,
        2: InterpolationMode.BILINEAR,
        3: InterpolationMode.BICUBIC,
        4: InterpolationMode.BOX,
        5: InterpolationMode.HAMMING,
        1: InterpolationMode.LANCZOS,
    }
    return inverse_modes_mapping[i]


pil_modes_mapping = {
    InterpolationMode.NEAREST: 0,
    InterpolationMode.BILINEAR: 2,
    InterpolationMode.BICUBIC: 3,
    InterpolationMode.BOX: 4,
    InterpolationMode.HAMMING: 5,
    InterpolationMode.LANCZOS: 1,
}

_is_pil_image = F_pil._is_pil_image
_parse_fill = F_pil._parse_fill


def _get_image_size(img: Tensor) -> List[int]:
    """Returns image size as [w, h]
    """
    if isinstance(img, torch.Tensor):
        return F_t._get_image_size(img)

    return F_pil._get_image_size(img)


def _get_image_num_channels(img: Tensor) -> int:
    """Returns number of image channels
    """
    if isinstance(img, torch.Tensor):
        return F_t._get_image_num_channels(img)

    return F_pil._get_image_num_channels(img)


@torch.jit.unused
def _is_numpy(img: Any) -> bool:
    return isinstance(img, np.ndarray)


@torch.jit.unused
def _is_numpy_image(img: Any) -> bool:
    return img.ndim in {2, 3}


def to_tensor(pic):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
    This function does not support torchscript.

    See :class:`~torchvision.transforms.ToTensor` for more details.

    Args:
        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
    """
    if not (F_pil._is_pil_image(pic) or _is_numpy(pic)):
        raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))

    if _is_numpy(pic) and not _is_numpy_image(pic):
        raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))

    default_float_dtype = torch.get_default_dtype()

    if isinstance(pic, np.ndarray):
        # handle numpy array
        if pic.ndim == 2:
            pic = pic[:, :, None]

        img = torch.from_numpy(pic.transpose((2, 0, 1))).contiguous()
        # backward compatibility
        if isinstance(img, torch.ByteTensor):
            return img.to(dtype=default_float_dtype).div(255)
        else:
            return img

    if accimage is not None and isinstance(pic, accimage.Image):
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
        pic.copyto(nppic)
        return torch.from_numpy(nppic).to(dtype=default_float_dtype)

    # handle PIL Image
    if pic.mode == 'I':
        img = torch.from_numpy(np.array(pic, np.int32, copy=False))
    elif pic.mode == 'I;16':
        img = torch.from_numpy(np.array(pic, np.int16, copy=False))
    elif pic.mode == 'F':
        img = torch.from_numpy(np.array(pic, np.float32, copy=False))
    elif pic.mode == '1':
        img = 255 * torch.from_numpy(np.array(pic, np.uint8, copy=False))
    else:
        img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))

    img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1)).contiguous()
    if isinstance(img, torch.ByteTensor):
        return img.to(dtype=default_float_dtype).div(255)
    else:
        return img


def pil_to_tensor(pic):
    """Convert a ``PIL Image`` to a tensor of the same type.
    This function does not support torchscript.

    See :class:`~torchvision.transforms.PILToTensor` for more details.

    Args:
        pic (PIL Image): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
    """
    if not F_pil._is_pil_image(pic):
        raise TypeError('pic should be PIL Image. Got {}'.format(type(pic)))

    if accimage is not None and isinstance(pic, accimage.Image):
        # accimage format is always uint8 internally, so always return uint8 here
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.uint8)
        pic.copyto(nppic)
        return torch.as_tensor(nppic)

    # handle PIL Image
    img = torch.as_tensor(np.asarray(pic))
    img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1))
    return img
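
# Usage sketch (illustrative comment): unlike ``to_tensor``, ``pil_to_tensor``
# keeps the original integer dtype and does not rescale to [0, 1]:
#
#   pil_img = Image.new('RGB', (4, 4))
#   t = pil_to_tensor(pil_img)  # shape (3, 4, 4), dtype torch.uint8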


def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor:
    """Convert a tensor image to the given ``dtype`` and scale the values accordingly
    This function does not support PIL Image.

    Args:
        image (torch.Tensor): Image to be converted
        dtype (torch.dtype): Desired data type of the output

    Returns:
        Tensor: Converted image

    .. note::

        When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
        If converted back and forth, this mismatch has no effect.

    Raises:
        RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
            well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
            overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
            of the integer ``dtype``.
    """
    if not isinstance(image, torch.Tensor):
        raise TypeError('Input img should be Tensor Image')

    return F_t.convert_image_dtype(image, dtype)
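
# Usage sketch (illustrative comment): converting uint8 -> float32 rescales
# values from [0, 255] to [0.0, 1.0], and the round trip restores them:
#
#   t = torch.tensor([[[0, 128, 255]]], dtype=torch.uint8)
#   f = convert_image_dtype(t, torch.float32)   # ~ tensor([[[0.0000, 0.5020, 1.0000]]])
#   b = convert_image_dtype(f, torch.uint8)     # back to tensor([[[0, 128, 255]]])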


def to_pil_image(pic, mode=None):
    """Convert a tensor or an ndarray to PIL Image. This function does not support torchscript.

    See :class:`~torchvision.transforms.ToPILImage` for more details.

    Args:
        pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).

    .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes

    Returns:
        PIL Image: Image converted to PIL Image.
    """
    if not (isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
        raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))

    elif isinstance(pic, torch.Tensor):
        if pic.ndimension() not in {2, 3}:
            raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndimension()))

        elif pic.ndimension() == 2:
            # if 2D image, add channel dimension (CHW)
            pic = pic.unsqueeze(0)

        # check number of channels
        if pic.shape[-3] > 4:
            raise ValueError('pic should not have > 4 channels. Got {} channels.'.format(pic.shape[-3]))

    elif isinstance(pic, np.ndarray):
        if pic.ndim not in {2, 3}:
            raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))

        elif pic.ndim == 2:
            # if 2D image, add channel dimension (HWC)
            pic = np.expand_dims(pic, 2)

        # check number of channels
        if pic.shape[-1] > 4:
            raise ValueError('pic should not have > 4 channels. Got {} channels.'.format(pic.shape[-1]))

    npimg = pic
    if isinstance(pic, torch.Tensor):
        if pic.is_floating_point() and mode != 'F':
            pic = pic.mul(255).byte()
        npimg = np.transpose(pic.cpu().numpy(), (1, 2, 0))

    if not isinstance(npimg, np.ndarray):
        raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +
                        'not {}'.format(type(npimg)))

    if npimg.shape[2] == 1:
        expected_mode = None
        npimg = npimg[:, :, 0]
        if npimg.dtype == np.uint8:
            expected_mode = 'L'
        elif npimg.dtype == np.int16:
            expected_mode = 'I;16'
        elif npimg.dtype == np.int32:
            expected_mode = 'I'
        elif npimg.dtype == np.float32:
            expected_mode = 'F'
        if mode is not None and mode != expected_mode:
            raise ValueError("Incorrect mode ({}) supplied for input type {}. Should be {}"
                             .format(mode, npimg.dtype, expected_mode))
        mode = expected_mode

    elif npimg.shape[2] == 2:
        permitted_2_channel_modes = ['LA']
        if mode is not None and mode not in permitted_2_channel_modes:
            raise ValueError("Only modes {} are supported for 2-channel inputs".format(permitted_2_channel_modes))

        if mode is None and npimg.dtype == np.uint8:
            mode = 'LA'

    elif npimg.shape[2] == 4:
        permitted_4_channel_modes = ['RGBA', 'CMYK', 'RGBX']
        if mode is not None and mode not in permitted_4_channel_modes:
            raise ValueError("Only modes {} are supported for 4-channel inputs".format(permitted_4_channel_modes))

        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGBA'
    else:
        permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']
        if mode is not None and mode not in permitted_3_channel_modes:
            raise ValueError("Only modes {} are supported for 3-channel inputs".format(permitted_3_channel_modes))
        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGB'

    if mode is None:
        raise TypeError('Input type {} is not supported'.format(npimg.dtype))

    return Image.fromarray(npimg, mode=mode)
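
# Usage sketch (illustrative comment): a float CHW tensor in [0, 1] round-trips
# through PIL and back:
#
#   t = torch.rand(3, 8, 8)
#   pil_img = to_pil_image(t)   # mode 'RGB', size (8, 8)
#   t2 = to_tensor(pil_img)     # CHW float tensor again (quantized to 8 bits)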


def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool = False) -> Tensor:
    """Normalize a float tensor image with mean and standard deviation.
    This transform does not support PIL Image.

    .. note::
        This transform acts out of place by default, i.e., it does not mutate the input tensor.

    See :class:`~torchvision.transforms.Normalize` for more details.

    Args:
        tensor (Tensor): Float tensor image of size (C, H, W) or (B, C, H, W) to be normalized.
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace(bool,optional): Bool to make this operation inplace.

    Returns:
        Tensor: Normalized Tensor image.
    """
    if not isinstance(tensor, torch.Tensor):
        raise TypeError('Input tensor should be a torch tensor. Got {}.'.format(type(tensor)))

    if not tensor.is_floating_point():
        raise TypeError('Input tensor should be a float tensor. Got {}.'.format(tensor.dtype))

    if tensor.ndim < 3:
        raise ValueError('Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = '
                         '{}.'.format(tensor.size()))

    if not inplace:
        tensor = tensor.clone()

    dtype = tensor.dtype
    mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)
    std = torch.as_tensor(std, dtype=dtype, device=tensor.device)
    if (std == 0).any():
        raise ValueError('std evaluated to zero after conversion to {}, leading to division by zero.'.format(dtype))
    if mean.ndim == 1:
        mean = mean.view(-1, 1, 1)
    if std.ndim == 1:
        std = std.view(-1, 1, 1)
    tensor.sub_(mean).div_(std)
    return tensor
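
# Usage sketch (illustrative comment): per-channel standardization with the
# widely used ImageNet statistics (the statistics are an assumption for the
# example, not part of this file):
#
#   img = torch.rand(3, 224, 224)
#   out = normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])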


def resize(img: Tensor, size: List[int], interpolation: InterpolationMode = InterpolationMode.BILINEAR,
           max_size: Optional[int] = None) -> Tensor:
    r"""Resize the input image to the given size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. warning::
        The output image might be different depending on its type: when downsampling, the interpolation of PIL images
        and tensors is slightly different, because PIL applies antialiasing. This may lead to significant differences
        in the performance of a network. Therefore, it is preferable to train and serve a model with the same input
        types.

    Args:
        img (PIL Image or Tensor): Image to be resized.
        size (sequence or int): Desired output size. If size is a sequence like
            (h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number maintaining
            the aspect ratio. i.e, if height > width, then image will be rescaled to
            :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`.
            In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`.
            Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
            ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        max_size (int, optional): The maximum allowed for the longer edge of
            the resized image: if the longer edge of the image is greater
            than ``max_size`` after being resized according to ``size``, then
            the image is resized again so that the longer edge is equal to
            ``max_size``. As a result, ``size`` might be overruled, i.e. the
            smaller edge may be shorter than ``size``. This is only supported
            if ``size`` is an int (or a sequence of length 1 in torchscript
            mode).

    Returns:
        PIL Image or Tensor: Resized image.
    """
    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be a InterpolationMode")

    if not isinstance(img, torch.Tensor):
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.resize(img, size=size, interpolation=pil_interpolation, max_size=max_size)

    return F_t.resize(img, size=size, interpolation=interpolation.value, max_size=max_size)
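
# Usage sketch (illustrative comment): with an int-like size the smaller edge
# is matched and the aspect ratio kept; ``max_size`` caps the longer edge:
#
#   img = torch.rand(3, 200, 400)
#   out = resize(img, [100])                 # -> (3, 100, 200)
#   out = resize(img, [100], max_size=150)   # longer edge capped -> (3, 75, 150)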


def scale(*args, **kwargs):
    warnings.warn("The use of the transforms.Scale transform is deprecated, " +
                  "please use transforms.Resize instead.")
    return resize(*args, **kwargs)


def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "constant") -> Tensor:
    r"""Pad the given image on all sides with the given "pad" value.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means at most 2 leading dimensions for mode reflect and symmetric,
    at most 3 leading dimensions for mode edge,
    and an arbitrary number of leading dimensions for mode constant

    Args:
        img (PIL Image or Tensor): Image to be padded.
        padding (int or sequence): Padding on each border. If a single int is provided this
            is used to pad all borders. If sequence of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a sequence of length 4 is provided
            this is the padding for the left, top, right and bottom borders respectively.
            In torchscript mode padding as single int is not supported, use a sequence of length 1: ``[padding, ]``.
        fill (number or str or tuple): Pixel fill value for constant fill. Default is 0.
            If a tuple of length 3, it is used to fill R, G, B channels respectively.
            This value is only used when the padding_mode is constant.
            Only number is supported for torch Tensor.
            Only int or str or tuple value is supported for PIL Image.
        padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.

            - constant: pads with a constant value, this value is specified with fill

            - edge: pads with the last value on the edge of the image,
                    if input a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2

            - reflect: pads with reflection of image (without repeating the last value on the edge)

                       padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
                       will result in [3, 2, 1, 2, 3, 4, 3, 2]

            - symmetric: pads with reflection of image (repeating the last value on the edge)

                         padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
                         will result in [2, 1, 1, 2, 3, 4, 4, 3]

    Returns:
        PIL Image or Tensor: Padded image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)

    return F_t.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)
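
# Usage sketch (illustrative comment): ``padding=[1, 2]`` pads 1 pixel on
# left/right and 2 on top/bottom, growing a (3, 4, 4) image to (3, 8, 6):
#
#   img = torch.rand(3, 4, 4)
#   out = pad(img, [1, 2], fill=0, padding_mode="constant")  # -> (3, 8, 6)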


def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:
    """Crop the given image at specified location and output size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Args:
        img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.

    Returns:
        PIL Image or Tensor: Cropped image.
    """

    if not isinstance(img, torch.Tensor):
        return F_pil.crop(img, top, left, height, width)

    return F_t.crop(img, top, left, height, width)


def center_crop(img: Tensor, output_size: List[int]) -> Tensor:
    """Crops the given image at the center.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
    If image size is smaller than output size along any edge, image is padded with 0 and then center cropped.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        output_size (sequence or int): (height, width) of the crop box. If int or sequence with single int,
            it is used for both directions.

    Returns:
        PIL Image or Tensor: Cropped image.
    """
    if isinstance(output_size, numbers.Number):
        output_size = (int(output_size), int(output_size))
    elif isinstance(output_size, (tuple, list)) and len(output_size) == 1:
        output_size = (output_size[0], output_size[0])

    image_width, image_height = _get_image_size(img)
    crop_height, crop_width = output_size

    if crop_width > image_width or crop_height > image_height:
        padding_ltrb = [
            (crop_width - image_width) // 2 if crop_width > image_width else 0,
            (crop_height - image_height) // 2 if crop_height > image_height else 0,
            (crop_width - image_width + 1) // 2 if crop_width > image_width else 0,
            (crop_height - image_height + 1) // 2 if crop_height > image_height else 0,
        ]
        img = pad(img, padding_ltrb, fill=0)  # PIL uses fill value 0
        image_width, image_height = _get_image_size(img)
        if crop_width == image_width and crop_height == image_height:
            return img

    crop_top = int(round((image_height - crop_height) / 2.))
    crop_left = int(round((image_width - crop_width) / 2.))
    return crop(img, crop_top, crop_left, crop_height, crop_width)
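
# Usage sketch (illustrative comment): an output size larger than the input is
# handled by zero-padding before cropping:
#
#   img = torch.rand(3, 5, 5)
#   out = center_crop(img, [3, 3])   # -> (3, 3, 3), central window
#   out = center_crop(img, [7, 7])   # -> (3, 7, 7), zero-padded borders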


def resized_crop(
        img: Tensor, top: int, left: int, height: int, width: int, size: List[int],
        interpolation: InterpolationMode = InterpolationMode.BILINEAR
) -> Tensor:
    """Crop the given image and resize it to desired size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.

    Args:
        img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.
        size (sequence or int): Desired output size. Same semantics as ``resize``.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`.
            Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``,
            ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.

    Returns:
        PIL Image or Tensor: Cropped image.
    """
    img = crop(img, top, left, height, width)
    img = resize(img, size, interpolation)
    return img


def hflip(img: Tensor) -> Tensor:
    """Horizontally flip the given image.

    Args:
        img (PIL Image or Tensor): Image to be flipped. If img
            is a Tensor, it is expected to be in [..., H, W] format,
            where ... means it can have an arbitrary number of leading
            dimensions.

    Returns:
        PIL Image or Tensor: Horizontally flipped image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.hflip(img)

    return F_t.hflip(img)


def _get_perspective_coeffs(
        startpoints: List[List[int]], endpoints: List[List[int]]
) -> List[float]:
    """Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms.

    In Perspective Transform each pixel (x, y) in the original image gets transformed as,
     (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) )

    Args:
        startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
        endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.

    Returns:
        octuple (a, b, c, d, e, f, g, h) for transforming each pixel.
    """
    a_matrix = torch.zeros(2 * len(startpoints), 8, dtype=torch.float)

    for i, (p1, p2) in enumerate(zip(endpoints, startpoints)):
        a_matrix[2 * i, :] = torch.tensor([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
        a_matrix[2 * i + 1, :] = torch.tensor([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])

    b_matrix = torch.tensor(startpoints, dtype=torch.float).view(8)
    res = torch.lstsq(b_matrix, a_matrix)[0]

    output: List[float] = res.squeeze(1).tolist()
    return output
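
# Sanity-check sketch (illustrative comment): mapping the four corners onto
# themselves must yield the identity transform (a, e ~ 1, the rest ~ 0):
#
#   pts = [[0, 0], [9, 0], [9, 9], [0, 9]]
#   coeffs = _get_perspective_coeffs(pts, pts)   # ~ [1, 0, 0, 0, 1, 0, 0, 0]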


def perspective(
        img: Tensor,
        startpoints: List[List[int]],
        endpoints: List[List[int]],
        interpolation: InterpolationMode = InterpolationMode.BILINEAR,
        fill: Optional[List[float]] = None
) -> Tensor:
    """Perform perspective transform of the given image.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): Image to be transformed.
        startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
        endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.
            In torchscript mode single int/float value is not supported, please use a sequence
            of length 1: ``[value, ]``.
            If input is PIL Image, this option is only available for ``Pillow>=5.0.0``.

    Returns:
        PIL Image or Tensor: transformed Image.
    """

    coeffs = _get_perspective_coeffs(startpoints, endpoints)

    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be a InterpolationMode")

    if not isinstance(img, torch.Tensor):
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.perspective(img, coeffs, interpolation=pil_interpolation, fill=fill)

    return F_t.perspective(img, coeffs, interpolation=interpolation.value, fill=fill)


def vflip(img: Tensor) -> Tensor:
    """Vertically flip the given image.

    Args:
        img (PIL Image or Tensor): Image to be flipped. If img
            is a Tensor, it is expected to be in [..., H, W] format,
            where ... means it can have an arbitrary number of leading
            dimensions.

    Returns:
        PIL Image or Tensor: Vertically flipped image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.vflip(img)

    return F_t.vflip(img)


def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
    """Crop the given image into four corners and the central crop.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).

    Returns:
       tuple: tuple (tl, tr, bl, br, center)
                Corresponding top left, top right, bottom left, bottom right and center crop.
    """
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and len(size) == 1:
        size = (size[0], size[0])

    if len(size) != 2:
        raise ValueError("Please provide only two dimensions (h, w) for size.")

    image_width, image_height = _get_image_size(img)
    crop_height, crop_width = size
    if crop_width > image_width or crop_height > image_height:
        msg = "Requested crop size {} is bigger than input size {}"
        raise ValueError(msg.format(size, (image_height, image_width)))

    tl = crop(img, 0, 0, crop_height, crop_width)
    tr = crop(img, 0, image_width - crop_width, crop_height, crop_width)
    bl = crop(img, image_height - crop_height, 0, crop_height, crop_width)
    br = crop(img, image_height - crop_height, image_width - crop_width, crop_height, crop_width)

    center = center_crop(img, [crop_height, crop_width])

    return tl, tr, bl, br, center


def ten_crop(img: Tensor, size: List[int], vertical_flip: bool = False) -> List[Tensor]:
    """Generate ten cropped images from the given image.
    Crop the given image into four corners and the central crop plus the
    flipped version of these (horizontal flipping is used by default).
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
        vertical_flip (bool): Use vertical flipping instead of horizontal

    Returns:
        tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
            Corresponding top left, top right, bottom left, bottom right and
            center crop and same for the flipped image.
    """
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and len(size) == 1:
        size = (size[0], size[0])

    if len(size) != 2:
        raise ValueError("Please provide only two dimensions (h, w) for size.")

    first_five = five_crop(img, size)

    if vertical_flip:
        img = vflip(img)
    else:
        img = hflip(img)

    second_five = five_crop(img, size)
    return first_five + second_five
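
# Usage sketch (illustrative comment): five_crop/ten_crop return crop tuples,
# which are typically stacked for batched evaluation:
#
#   img = torch.rand(3, 100, 100)
#   crops = ten_crop(img, [32, 32])   # 10 tensors of shape (3, 32, 32)
#   batch = torch.stack(crops)        # -> (10, 3, 32, 32)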


def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor:
    """Adjust brightness of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
        If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
        where ... means it can have an arbitrary number of leading dimensions.
        brightness_factor (float):  How much to adjust the brightness. Can be
            any non negative number. 0 gives a black image, 1 gives the
            original image while 2 increases the brightness by a factor of 2.

    Returns:
        PIL Image or Tensor: Brightness adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_brightness(img, brightness_factor)

    return F_t.adjust_brightness(img, brightness_factor)


def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor:
    """Adjust contrast of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
        If img is torch Tensor, it is expected to be in [..., 3, H, W] format,
        where ... means it can have an arbitrary number of leading dimensions.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.

    Returns:
        PIL Image or Tensor: Contrast adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_contrast(img, contrast_factor)

    return F_t.adjust_contrast(img, contrast_factor)


def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor:
    """Adjust color saturation of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
        If img is torch Tensor, it is expected to be in [..., 3, H, W] format,
        where ... means it can have an arbitrary number of leading dimensions.
        saturation_factor (float):  How much to adjust the saturation. 0 will
            give a black and white image, 1 will give the original image while
            2 will enhance the saturation by a factor of 2.

    Returns:
        PIL Image or Tensor: Saturation adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_saturation(img, saturation_factor)

    return F_t.adjust_saturation(img, saturation_factor)


def adjust_hue(img: Tensor, hue_factor: float) -> Tensor:
    """Adjust hue of an image.

    The image hue is adjusted by converting the image to HSV and
    cyclically shifting the intensities in the hue channel (H).
    The image is then converted back to original image mode.

    `hue_factor` is the amount of shift in H channel and must be in the
    interval `[-0.5, 0.5]`.

    See `Hue`_ for more details.

    .. _Hue: https://en.wikipedia.org/wiki/Hue

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
        If img is torch Tensor, it is expected to be in [..., 3, H, W] format,
        where ... means it can have an arbitrary number of leading dimensions.
        If img is PIL Image mode "1", "L", "I", "F" and modes with transparency (alpha channel) are not supported.
        hue_factor (float):  How much to shift the hue channel. Should be in
            [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
            HSV space in positive and negative direction respectively.
            0 means no shift. Therefore, both -0.5 and 0.5 will give an image
            with complementary colors while 0 gives the original image.

    Returns:
        PIL Image or Tensor: Hue adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_hue(img, hue_factor)

    return F_t.adjust_hue(img, hue_factor)


def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:
    r"""Perform gamma correction on an image.

    Also known as Power Law Transform. Intensities in RGB mode are adjusted
    based on the following equation:

    .. math::
        I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}

    See `Gamma Correction`_ for more details.

    .. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction

    Args:
        img (PIL Image or Tensor): PIL Image to be adjusted.
        If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
        where ... means it can have an arbitrary number of leading dimensions.
        If img is PIL Image, modes with transparency (alpha channel) are not supported.
        gamma (float): Non negative real number, same as :math:`\gamma` in the equation.
            gamma larger than 1 make the shadows darker,
            while gamma smaller than 1 make dark regions lighter.
        gain (float): The constant multiplier.

    Returns:
        PIL Image or Tensor: Gamma correction adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_gamma(img, gamma, gain)

    return F_t.adjust_gamma(img, gamma, gain)
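
# Usage sketch (illustrative comment): gamma > 1 darkens mid-tones, gamma < 1
# lightens them; gain rescales the result:
#
#   img = torch.rand(3, 16, 16)
#   darker = adjust_gamma(img, gamma=2.0)
#   lighter = adjust_gamma(img, gamma=0.5)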


def _get_inverse_affine_matrix(
        center: List[float], angle: float, translate: List[float], scale: float, shear: List[float]
) -> List[float]:
    # Helper method to compute inverse matrix for affine transformation

    # As it is explained in PIL.Image.rotate
    # We need to compute INVERSE of affine transformation matrix: M = T * C * RSS * C^-1
    # where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
    #       C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
    #       RSS is rotation with scale and shear matrix
    #       RSS(a, s, (sx, sy)) =
    #       = R(a) * S(s) * SHy(sy) * SHx(sx)
    #       = [ s*cos(a - sy)/cos(sy), s*(-cos(a - sy)*tan(sx)/cos(sy) - sin(a)), 0 ]
    #         [ s*sin(a - sy)/cos(sy), s*(-sin(a - sy)*tan(sx)/cos(sy) + cos(a)), 0 ]
    #         [ 0                    , 0                                        , 1 ]
    #
    # where R is a rotation matrix, S is a scaling matrix, and SHx and SHy are the shears:
    # SHx(s) = [1, -tan(s)] and SHy(s) = [1      , 0]
    #          [0, 1      ]              [-tan(s), 1]
    #
    # Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1

    rot = math.radians(angle)
    sx, sy = [math.radians(s) for s in shear]

    cx, cy = center
    tx, ty = translate

    # RSS without scaling
    a = math.cos(rot - sy) / math.cos(sy)
    b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot)
    c = math.sin(rot - sy) / math.cos(sy)
    d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot)

    # Inverted rotation matrix with scale and shear
    # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
    matrix = [d, -b, 0.0, -c, a, 0.0]
    matrix = [x / scale for x in matrix]

    # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
    matrix[2] += matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty)
    matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty)

    # Apply center translation: C * RSS^-1 * C^-1 * T^-1
    matrix[2] += cx
    matrix[5] += cy

    return matrix
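
# Sanity-check sketch (illustrative comment): identity parameters must yield
# the identity matrix in row-major [a, b, tx, c, d, ty] form:
#
#   m = _get_inverse_affine_matrix([0.0, 0.0], 0.0, [0.0, 0.0], 1.0, [0.0, 0.0])
#   # -> [1.0, 0.0, 0.0, 0.0, 1.0, 0.0]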


def rotate(
        img: Tensor, angle: float, interpolation: InterpolationMode = InterpolationMode.NEAREST,
        expand: bool = False, center: Optional[List[int]] = None,
        fill: Optional[List[float]] = None, resample: Optional[int] = None
) -> Tensor:
    """Rotate the image by angle.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): image to be rotated.
        angle (number): rotation angle value in degrees, counter-clockwise.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        expand (bool, optional): Optional expansion flag.
            If true, expands the output image to make it large enough to hold the entire rotated image.
            If false or omitted, make the output image the same size as the input image.
            Note that the expand flag assumes rotation around the center and no translation.
        center (sequence, optional): Optional center of rotation. Origin is the upper left corner.
            Default is the center of the image.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.
            In torchscript mode single int/float value is not supported, please use a sequence
            of length 1: ``[value, ]``.
            If input is PIL Image, this option is only available for ``Pillow>=5.2.0``.

    Returns:
        PIL Image or Tensor: Rotated image.

    .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters

    """
    if resample is not None:
        warnings.warn(
            "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead"
        )
        interpolation = _interpolation_modes_from_int(resample)

    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if not isinstance(angle, (int, float)):
        raise TypeError("Argument angle should be int or float")

    if center is not None and not isinstance(center, (list, tuple)):
        raise TypeError("Argument center should be a sequence")

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be a InterpolationMode")

    if not isinstance(img, torch.Tensor):
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.rotate(img, angle=angle, interpolation=pil_interpolation, expand=expand, center=center, fill=fill)

    center_f = [0.0, 0.0]
    if center is not None:
        img_size = _get_image_size(img)
        # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
        center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, img_size)]

    # due to current incoherence of rotation angle direction between affine and rotate implementations
    # we need to set -angle.
    matrix = _get_inverse_affine_matrix(center_f, -angle, [0.0, 0.0], 1.0, [0.0, 0.0])
    return F_t.rotate(img, matrix=matrix, interpolation=interpolation.value, expand=expand, fill=fill)


def affine(
        img: Tensor, angle: float, translate: List[int], scale: float, shear: List[float],
        interpolation: InterpolationMode = InterpolationMode.NEAREST, fill: Optional[List[float]] = None,
        resample: Optional[int] = None, fillcolor: Optional[List[float]] = None
) -> Tensor:
    """Apply affine transformation on the image keeping image center invariant.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): image to transform.
        angle (number): rotation angle in degrees between -180 and 180, clockwise direction.
        translate (sequence of integers): horizontal and vertical translations (post-rotation translation)
        scale (float): overall scale
        shear (float or sequence): shear angle value in degrees between -180 to 180, clockwise direction.
            If a sequence is specified, the first value corresponds to a shear parallel to the x axis, while
            the second value corresponds to a shear parallel to the y axis.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.
            In torchscript mode single int/float value is not supported, please use a sequence
            of length 1: ``[value, ]``.
            If input is PIL Image, this option is only available for ``Pillow>=5.0.0``.
        fillcolor (sequence, int, float): deprecated argument and will be removed since v0.10.0.
            Please use the ``fill`` parameter instead.
        resample (int, optional): deprecated argument and will be removed since v0.10.0.
            Please use the ``interpolation`` parameter instead.

    Returns:
        PIL Image or Tensor: Transformed image.
    """
    if resample is not None:
        warnings.warn(
            "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead"
        )
        interpolation = _interpolation_modes_from_int(resample)

    # Backward compatibility with integer value
    if isinstance(interpolation, int):
        warnings.warn(
            "Argument interpolation should be of type InterpolationMode instead of int. "
            "Please, use InterpolationMode enum."
        )
        interpolation = _interpolation_modes_from_int(interpolation)

    if fillcolor is not None:
        warnings.warn(
            "Argument fillcolor is deprecated and will be removed since v0.10.0. Please, use fill instead"
        )
        fill = fillcolor

    if not isinstance(angle, (int, float)):
        raise TypeError("Argument angle should be int or float")

    if not isinstance(translate, (list, tuple)):
        raise TypeError("Argument translate should be a sequence")

    if len(translate) != 2:
        raise ValueError("Argument translate should be a sequence of length 2")

    if scale <= 0.0:
        raise ValueError("Argument scale should be positive")

    if not isinstance(shear, (numbers.Number, list, tuple)):
        raise TypeError("Shear should be either a single value or a sequence of two values")

    if not isinstance(interpolation, InterpolationMode):
        raise TypeError("Argument interpolation should be a InterpolationMode")

    if isinstance(angle, int):
        angle = float(angle)

    if isinstance(translate, tuple):
        translate = list(translate)

    if isinstance(shear, numbers.Number):
        shear = [shear, 0.0]

    if isinstance(shear, tuple):
        shear = list(shear)

    if len(shear) == 1:
        shear = [shear[0], shear[0]]

    if len(shear) != 2:
        raise ValueError("Shear should be a sequence containing two values. Got {}".format(shear))

    img_size = _get_image_size(img)
    if not isinstance(img, torch.Tensor):
        # center = (img_size[0] * 0.5 + 0.5, img_size[1] * 0.5 + 0.5)
        # it is visually better to estimate the center without 0.5 offset
        # otherwise image rotated by 90 degrees is shifted vs output image of torch.rot90 or F_t.affine
        center = [img_size[0] * 0.5, img_size[1] * 0.5]
        matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)
        pil_interpolation = pil_modes_mapping[interpolation]
        return F_pil.affine(img, matrix=matrix, interpolation=pil_interpolation, fill=fill)

    translate_f = [1.0 * t for t in translate]
    matrix = _get_inverse_affine_matrix([0.0, 0.0], angle, translate_f, scale, shear)
    return F_t.affine(img, matrix=matrix, interpolation=interpolation.value, fill=fill)


@torch.jit.unused
def to_grayscale(img, num_output_channels=1):
    """Convert PIL image of any mode (RGB, HSV, LAB, etc) to grayscale version of image.
    This transform does not support torch Tensor.

    Args:
        img (PIL Image): PIL Image to be converted to grayscale.
        num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default is 1.

    Returns:
        PIL Image: Grayscale version of the image.
            if num_output_channels = 1 : returned image is single channel

            if num_output_channels = 3 : returned image is 3 channel with r = g = b
    """
    if isinstance(img, Image.Image):
        return F_pil.to_grayscale(img, num_output_channels)

    raise TypeError("Input should be PIL Image")


def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor:
    """Convert RGB image to grayscale version of image.
    If the image is torch Tensor, it is expected
    to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions

    Note:
        Please, note that this method supports only RGB images as input. For inputs in other color spaces,
        please, consider using :meth:`~torchvision.transforms.functional.to_grayscale` with PIL Image.

    Args:
        img (PIL Image or Tensor): RGB Image to be converted to grayscale.
        num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default, 1.

    Returns:
        PIL Image or Tensor: Grayscale version of the image.
            if num_output_channels = 1 : returned image is single channel

            if num_output_channels = 3 : returned image is 3 channel with r = g = b
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.to_grayscale(img, num_output_channels)

    return F_t.rgb_to_grayscale(img, num_output_channels)


def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool = False) -> Tensor:
1143
    """ Erase the input Tensor Image with given value.
1144
    This transform does not support PIL Image.
1145
1146
1147
1148
1149
1150
1151
1152

    Args:
        img (Tensor Image): Tensor image of size (C, H, W) to be erased
        i (int): i in (i,j) i.e coordinates of the upper left corner.
        j (int): j in (i,j) i.e coordinates of the upper left corner.
        h (int): Height of the erased region.
        w (int): Width of the erased region.
        v: Erasing value.
Zhun Zhong's avatar
Zhun Zhong committed
1153
        inplace(bool, optional): For in-place operations. By default is set False.
1154
1155
1156
1157
1158
1159
1160

    Returns:
        Tensor Image: Erased image.
    """
    if not isinstance(img, torch.Tensor):
        raise TypeError('img should be Tensor Image. Got {}'.format(type(img)))

    if not inplace:
        img = img.clone()

    img[..., i:i + h, j:j + w] = v
    return img
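
# Illustrative usage (a sketch, not part of the module API): zero out a 4x4
# patch whose upper left corner sits at row 2, column 2; with the default
# inplace=False the input tensor is left untouched.
#
#   >>> img = torch.ones(3, 8, 8)
#   >>> out = erase(img, i=2, j=2, h=4, w=4, v=torch.tensor(0.0))
#   >>> float(out[:, 2:6, 2:6].sum()), float(img.sum())
#   (0.0, 192.0)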


def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Tensor:
    """Performs Gaussian blurring on the image by given kernel.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): Image to be blurred
        kernel_size (sequence of ints or int): Gaussian kernel size. Can be a sequence of integers
            like ``(kx, ky)`` or a single integer for square kernels.
            In torchscript mode kernel_size as single int is not supported, use a sequence of length 1: ``[ksize, ]``.
        sigma (sequence of floats or float, optional): Gaussian kernel standard deviation. Can be a
            sequence of floats like ``(sigma_x, sigma_y)`` or a single float to define the
            same sigma in both X/Y directions. If None, then it is computed using
            ``kernel_size`` as ``sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8``.
            Default, None. In torchscript mode sigma as single float is
            not supported, use a sequence of length 1: ``[sigma, ]``.

    Returns:
        PIL Image or Tensor: Gaussian Blurred version of the image.
    """
    if not isinstance(kernel_size, (int, list, tuple)):
        raise TypeError('kernel_size should be int or a sequence of integers. Got {}'.format(type(kernel_size)))
    if isinstance(kernel_size, int):
        kernel_size = [kernel_size, kernel_size]
    if len(kernel_size) != 2:
        raise ValueError('If kernel_size is a sequence, its length should be 2. Got {}'.format(len(kernel_size)))
    for ksize in kernel_size:
        if ksize % 2 == 0 or ksize < 0:
            raise ValueError('kernel_size should have odd and positive integers. Got {}'.format(kernel_size))

    if sigma is None:
        sigma = [ksize * 0.15 + 0.35 for ksize in kernel_size]

    if sigma is not None and not isinstance(sigma, (int, float, list, tuple)):
        raise TypeError('sigma should be either float or sequence of floats. Got {}'.format(type(sigma)))
    if isinstance(sigma, (int, float)):
        sigma = [float(sigma), float(sigma)]
    if isinstance(sigma, (list, tuple)) and len(sigma) == 1:
        sigma = [sigma[0], sigma[0]]
    if len(sigma) != 2:
        raise ValueError('If sigma is a sequence, its length should be 2. Got {}'.format(len(sigma)))
    for s in sigma:
        if s <= 0.:
            raise ValueError('sigma should have positive values. Got {}'.format(sigma))

    t_img = img
    if not isinstance(img, torch.Tensor):
        if not F_pil._is_pil_image(img):
            raise TypeError('img should be PIL Image or Tensor. Got {}'.format(type(img)))

        t_img = to_tensor(img)

    output = F_t.gaussian_blur(t_img, kernel_size, sigma)

    if not isinstance(img, torch.Tensor):
        output = to_pil_image(output)
    return output
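
# Illustrative usage (a sketch, not part of the module API): kernel sizes must
# be odd and positive; when sigma is None it is derived from the kernel size
# with the formula documented above.
#
#   >>> img = torch.rand(3, 16, 16)
#   >>> gaussian_blur(img, kernel_size=[5, 5], sigma=[1.0, 1.0]).shape
#   torch.Size([3, 16, 16])
#   >>> gaussian_blur(img, kernel_size=3).shape  # square kernel, sigma inferred
#   torch.Size([3, 16, 16])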


def invert(img: Tensor) -> Tensor:
    """Invert the colors of an RGB/grayscale image.

    Args:
        img (PIL Image or Tensor): Image to have its colors inverted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Returns:
        PIL Image or Tensor: Color inverted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.invert(img)

    return F_t.invert(img)
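
# Illustrative usage (a sketch, not part of the module API): for uint8 tensors
# each pixel value v maps to 255 - v (float images in [0, 1] map to 1 - v).
#
#   >>> img = torch.randint(0, 256, (3, 4, 4), dtype=torch.uint8)
#   >>> bool((invert(img) == 255 - img).all())
#   True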


def posterize(img: Tensor, bits: int) -> Tensor:
    """Posterize an image by reducing the number of bits for each color channel.

    Args:
        img (PIL Image or Tensor): Image to have its colors posterized.
            If img is torch Tensor, it should be of type torch.uint8 and
            it is expected to be in [..., 1 or 3, H, W] format, where ... means
            it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".
        bits (int): The number of bits to keep for each channel (0-8).
    Returns:
        PIL Image or Tensor: Posterized image.
    """
    if not (0 <= bits <= 8):
        raise ValueError('The number of bits should be between 0 and 8. Got {}'.format(bits))

    if not isinstance(img, torch.Tensor):
        return F_pil.posterize(img, bits)

    return F_t.posterize(img, bits)
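
# Illustrative usage (a sketch, not part of the module API): bits=1 keeps only
# the most significant bit, quantizing a uint8 channel to {0, 128}, while
# bits=8 keeps everything and is a no-op.
#
#   >>> img = torch.randint(0, 256, (3, 4, 4), dtype=torch.uint8)
#   >>> bool((posterize(img, 8) == img).all())
#   True
#   >>> set(posterize(img, 1).unique().tolist()) <= {0, 128}
#   True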


def solarize(img: Tensor, threshold: float) -> Tensor:
    """Solarize an RGB/grayscale image by inverting all pixel values above a threshold.

    Args:
        img (PIL Image or Tensor): Image to be solarized.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".
        threshold (float): All pixels equal to or above this value are inverted.
    Returns:
        PIL Image or Tensor: Solarized image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.solarize(img, threshold)

    return F_t.solarize(img, threshold)
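
# Illustrative usage (a sketch, not part of the module API): pixels at or
# above the threshold are inverted, the rest pass through unchanged.
#
#   >>> img = torch.randint(0, 256, (3, 4, 4), dtype=torch.uint8)
#   >>> out = solarize(img, threshold=128)
#   >>> bool((out[img >= 128] == 255 - img[img >= 128]).all())
#   True
#   >>> bool((out[img < 128] == img[img < 128]).all())
#   True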


def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor:
    """Adjust the sharpness of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
        sharpness_factor (float): How much to adjust the sharpness. Can be
            any non-negative number. 0 gives a blurred image, 1 gives the
            original image while 2 increases the sharpness by a factor of 2.

    Returns:
        PIL Image or Tensor: Sharpness adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_sharpness(img, sharpness_factor)

    return F_t.adjust_sharpness(img, sharpness_factor)
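
# Illustrative usage (a sketch, not part of the module API): a factor of 1.0
# leaves the image essentially unchanged, 0.0 blurs it, and larger values
# sharpen it.
#
#   >>> img = torch.rand(3, 8, 8)
#   >>> torch.allclose(adjust_sharpness(img, 1.0), img)
#   True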


def autocontrast(img: Tensor) -> Tensor:
    """Maximize contrast of an image by remapping its
    pixels per channel so that the lowest becomes black and the lightest
    becomes white.

    Args:
        img (PIL Image or Tensor): Image on which autocontrast is applied.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Returns:
        PIL Image or Tensor: An image that was autocontrasted.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.autocontrast(img)

    return F_t.autocontrast(img)
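
# Illustrative usage (a sketch, not part of the module API): after stretching,
# each channel's minimum maps to the bottom of the value range, so a float
# image squeezed into [0.25, 0.75] comes back with a minimum of 0.
#
#   >>> img = torch.rand(3, 8, 8) * 0.5 + 0.25
#   >>> float(autocontrast(img).min())
#   0.0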


def equalize(img: Tensor) -> Tensor:
    """Equalize the histogram of an image by applying
    a non-linear mapping to the input in order to create a uniform
    distribution of grayscale values in the output.

    Args:
        img (PIL Image or Tensor): Image on which equalize is applied.
            If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format,
            where ... means it can have an arbitrary number of leading dimensions.
            If img is PIL Image, it is expected to be in mode "P", "L" or "RGB".

    Returns:
        PIL Image or Tensor: An image that was equalized.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.equalize(img)

    return F_t.equalize(img)
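
# Illustrative usage (a sketch, not part of the module API): the tensor path
# expects a uint8 image and flattens its intensity histogram; shape and dtype
# are preserved.
#
#   >>> img = torch.randint(0, 256, (3, 16, 16), dtype=torch.uint8)
#   >>> out = equalize(img)
#   >>> out.dtype, out.shape
#   (torch.uint8, torch.Size([3, 16, 16]))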