import math
import numbers
import warnings
from typing import Any, Optional

import numpy as np
from PIL import Image

import torch
from torch import Tensor
from torch.jit.annotations import List, Tuple

try:
    import accimage
except ImportError:
    accimage = None

from . import functional_pil as F_pil
from . import functional_tensor as F_t


_is_pil_image = F_pil._is_pil_image
_parse_fill = F_pil._parse_fill


def _get_image_size(img: Tensor) -> List[int]:
    """Returns image sizea as (w, h)
    """
    if isinstance(img, torch.Tensor):
        return F_t._get_image_size(img)

    return F_pil._get_image_size(img)


def _get_image_num_channels(img: Tensor) -> int:
    if isinstance(img, torch.Tensor):
        return F_t._get_image_num_channels(img)

    return F_pil._get_image_num_channels(img)


@torch.jit.unused
def _is_numpy(img: Any) -> bool:
    return isinstance(img, np.ndarray)


@torch.jit.unused
def _is_numpy_image(img: Any) -> bool:
    return img.ndim in {2, 3}


def to_tensor(pic):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.

    See ``ToTensor`` for more details.

    Args:
        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
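
    Example (a minimal sketch; the input array is an assumption):
        >>> import numpy as np
        >>> arr = np.zeros((4, 4, 3), dtype=np.uint8)  # HWC uint8 image
        >>> t = to_tensor(arr)
        >>> t.shape, t.dtype  # converted to CHW and scaled to [0, 1]
        (torch.Size([3, 4, 4]), torch.float32)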
    """
    if not(F_pil._is_pil_image(pic) or _is_numpy(pic)):
        raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))

    if _is_numpy(pic) and not _is_numpy_image(pic):
        raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))

    if isinstance(pic, np.ndarray):
        # handle numpy array
        if pic.ndim == 2:
            pic = pic[:, :, None]

        img = torch.from_numpy(pic.transpose((2, 0, 1))).contiguous()
        # backward compatibility
        if isinstance(img, torch.ByteTensor):
            return img.float().div(255)
        else:
            return img

    if accimage is not None and isinstance(pic, accimage.Image):
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
        pic.copyto(nppic)
        return torch.from_numpy(nppic)

    # handle PIL Image
    if pic.mode == 'I':
        img = torch.from_numpy(np.array(pic, np.int32, copy=False))
    elif pic.mode == 'I;16':
        img = torch.from_numpy(np.array(pic, np.int16, copy=False))
    elif pic.mode == 'F':
        img = torch.from_numpy(np.array(pic, np.float32, copy=False))
    elif pic.mode == '1':
        img = 255 * torch.from_numpy(np.array(pic, np.uint8, copy=False))
    else:
        img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))

    img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1)).contiguous()
    if isinstance(img, torch.ByteTensor):
        return img.float().div(255)
    else:
        return img


def pil_to_tensor(pic):
    """Convert a ``PIL Image`` to a tensor of the same type.

    See :class:`~torchvision.transforms.PILToTensor` for more details.

    Args:
        pic (PIL Image): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
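
    Example (a minimal sketch; the input image is an assumption):
        >>> from PIL import Image
        >>> im = Image.new('RGB', (4, 4))
        >>> pil_to_tensor(im).shape  # CHW, dtype (uint8) is preserved
        torch.Size([3, 4, 4])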
    """
    if not F_pil._is_pil_image(pic):
        raise TypeError('pic should be PIL Image. Got {}'.format(type(pic)))

    if accimage is not None and isinstance(pic, accimage.Image):
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
        pic.copyto(nppic)
        return torch.as_tensor(nppic)

    # handle PIL Image
    img = torch.as_tensor(np.asarray(pic))
    img = img.view(pic.size[1], pic.size[0], len(pic.getbands()))
    # put it from HWC to CHW format
    img = img.permute((2, 0, 1))
    return img


def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor:
    """Convert a tensor image to the given ``dtype`` and scale the values accordingly

    Args:
        image (torch.Tensor): Image to be converted
        dtype (torch.dtype): Desired data type of the output

    Returns:
        Tensor: Converted image

    .. note::

        When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
        If converted back and forth, this mismatch has no effect.

    Raises:
        RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64`, as
            well as when trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
            overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
            of the integer ``dtype``.
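
    Example (a minimal sketch):
        >>> t = torch.zeros(3, 4, 4, dtype=torch.uint8)
        >>> convert_image_dtype(t, torch.float32).dtype
        torch.float32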
    """
    if not isinstance(image, torch.Tensor):
        raise TypeError('Input img should be Tensor Image')

    return F_t.convert_image_dtype(image, dtype)


def to_pil_image(pic, mode=None):
    """Convert a tensor or an ndarray to PIL Image.

    See :class:`~torchvision.transforms.ToPILImage` for more details.

    Args:
        pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).

    .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes

    Returns:
        PIL Image: Image converted to PIL Image.
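
    Example (a minimal sketch; the input tensor is an assumption):
        >>> t = torch.rand(3, 4, 4)  # CHW float tensor in [0, 1]
        >>> to_pil_image(t).mode
        'RGB'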
    """
    if not(isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
        raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))

    elif isinstance(pic, torch.Tensor):
        if pic.ndimension() not in {2, 3}:
            raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndimension()))

        elif pic.ndimension() == 2:
            # if 2D image, add channel dimension (CHW)
            pic = pic.unsqueeze(0)

    elif isinstance(pic, np.ndarray):
        if pic.ndim not in {2, 3}:
            raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))

        elif pic.ndim == 2:
            # if 2D image, add channel dimension (HWC)
            pic = np.expand_dims(pic, 2)

    npimg = pic
    if isinstance(pic, torch.Tensor):
        if pic.is_floating_point() and mode != 'F':
            pic = pic.mul(255).byte()
        npimg = np.transpose(pic.cpu().numpy(), (1, 2, 0))

    if not isinstance(npimg, np.ndarray):
        raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +
                        'not {}'.format(type(npimg)))

    if npimg.shape[2] == 1:
        expected_mode = None
        npimg = npimg[:, :, 0]
        if npimg.dtype == np.uint8:
            expected_mode = 'L'
        elif npimg.dtype == np.int16:
            expected_mode = 'I;16'
        elif npimg.dtype == np.int32:
            expected_mode = 'I'
        elif npimg.dtype == np.float32:
            expected_mode = 'F'
        if mode is not None and mode != expected_mode:
            raise ValueError("Incorrect mode ({}) supplied for input type {}. Should be {}"
                             .format(mode, npimg.dtype, expected_mode))
        mode = expected_mode

    elif npimg.shape[2] == 2:
        permitted_2_channel_modes = ['LA']
        if mode is not None and mode not in permitted_2_channel_modes:
            raise ValueError("Only modes {} are supported for 2-channel inputs".format(permitted_2_channel_modes))

        if mode is None and npimg.dtype == np.uint8:
            mode = 'LA'

    elif npimg.shape[2] == 4:
        permitted_4_channel_modes = ['RGBA', 'CMYK', 'RGBX']
        if mode is not None and mode not in permitted_4_channel_modes:
            raise ValueError("Only modes {} are supported for 4-channel inputs".format(permitted_4_channel_modes))

        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGBA'
    else:
        permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']
        if mode is not None and mode not in permitted_3_channel_modes:
            raise ValueError("Only modes {} are supported for 3-channel inputs".format(permitted_3_channel_modes))
        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGB'

    if mode is None:
        raise TypeError('Input type {} is not supported'.format(npimg.dtype))

    return Image.fromarray(npimg, mode=mode)


def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool = False) -> Tensor:
    """Normalize a tensor image with mean and standard deviation.

    .. note::
        This transform acts out of place by default, i.e., it does not mutate the input tensor.

    See :class:`~torchvision.transforms.Normalize` for more details.

    Args:
        tensor (Tensor): Tensor image of size (C, H, W) or (B, C, H, W) to be normalized.
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace (bool, optional): Bool to make this operation in-place.

    Returns:
        Tensor: Normalized Tensor image.
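
    Example (a minimal sketch; the statistics are placeholders, not dataset values):
        >>> t = torch.rand(3, 8, 8)
        >>> normalize(t, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]).shape
        torch.Size([3, 8, 8])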
    """
    if not isinstance(tensor, torch.Tensor):
        raise TypeError('Input tensor should be a torch tensor. Got {}.'.format(type(tensor)))

    if tensor.ndim < 3:
        raise ValueError('Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = '
                         '{}.'.format(tensor.size()))

    if not inplace:
        tensor = tensor.clone()

    dtype = tensor.dtype
    mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)
    std = torch.as_tensor(std, dtype=dtype, device=tensor.device)
    if (std == 0).any():
        raise ValueError('std evaluated to zero after conversion to {}, leading to division by zero.'.format(dtype))
    if mean.ndim == 1:
        mean = mean.view(-1, 1, 1)
    if std.ndim == 1:
        std = std.view(-1, 1, 1)
    tensor.sub_(mean).div_(std)
    return tensor


def resize(img: Tensor, size: List[int], interpolation: int = Image.BILINEAR) -> Tensor:
    r"""Resize the input image to the given size.
    The image can be a PIL Image or a torch Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Args:
        img (PIL Image or Tensor): Image to be resized.
        size (sequence or int): Desired output size. If size is a sequence like
            (h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number maintaining
            the aspect ratio, i.e., if height > width, then the image will be rescaled to
            :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`.
            In torchscript mode size as single int is not supported, use a tuple or
            list of length 1: ``[size, ]``.
        interpolation (int, optional): Desired interpolation enum defined by `filters`_.
            Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR``
            and ``PIL.Image.BICUBIC`` are supported.

    Returns:
        PIL Image or Tensor: Resized image.
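
    Example (a minimal sketch; the input shape is an assumption):
        >>> t = torch.rand(3, 32, 64)
        >>> resize(t, [16, 32]).shape  # size is (h, w)
        torch.Size([3, 16, 32])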
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.resize(img, size=size, interpolation=interpolation)

    return F_t.resize(img, size=size, interpolation=interpolation)


def scale(*args, **kwargs):
    warnings.warn("The use of the transforms.Scale transform is deprecated, " +
                  "please use transforms.Resize instead.")
    return resize(*args, **kwargs)


def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "constant") -> Tensor:
    r"""Pad the given image on all sides with the given "pad" value.
    The image can be a PIL Image or a torch Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Args:
        img (PIL Image or Tensor): Image to be padded.
        padding (int or tuple or list): Padding on each border. If a single int is provided this
            is used to pad all borders. If tuple of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a tuple of length 4 is provided
            this is the padding for the left, top, right and bottom borders respectively.
            In torchscript mode padding as single int is not supported, use a tuple or
            list of length 1: ``[padding, ]``.
        fill (int or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
            length 3, it is used to fill R, G, B channels respectively.
            This value is only used when the padding_mode is constant. Only int value is supported for Tensors.
        padding_mode: Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
            Mode symmetric is not yet supported for Tensor inputs.

            - constant: pads with a constant value, this value is specified with fill

            - edge: pads with the last value on the edge of the image

            - reflect: pads with reflection of image (without repeating the last value on the edge)

                       padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
                       will result in [3, 2, 1, 2, 3, 4, 3, 2]

            - symmetric: pads with reflection of image (repeating the last value on the edge)

                         padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
                         will result in [2, 1, 1, 2, 3, 4, 4, 3]

    Returns:
        PIL Image or Tensor: Padded image.
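
    Example (a minimal sketch):
        >>> t = torch.zeros(3, 4, 4)
        >>> pad(t, [1, 2]).shape  # 1 px left/right, 2 px top/bottom
        torch.Size([3, 8, 6])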
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)

    return F_t.pad(img, padding=padding, fill=fill, padding_mode=padding_mode)


def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:
    """Crop the given image at specified location and output size.
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading
    dimensions

    Args:
        img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.

    Returns:
        PIL Image or Tensor: Cropped image.
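
    Example (a minimal sketch):
        >>> t = torch.rand(3, 32, 32)
        >>> crop(t, top=4, left=6, height=10, width=12).shape
        torch.Size([3, 10, 12])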
    """

    if not isinstance(img, torch.Tensor):
        return F_pil.crop(img, top, left, height, width)

    return F_t.crop(img, top, left, height, width)


def center_crop(img: Tensor, output_size: List[int]) -> Tensor:
    """Crops the given image at the center.
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        output_size (sequence or int): (height, width) of the crop box. If int or sequence with single int
            it is used for both directions.

    Returns:
        PIL Image or Tensor: Cropped image.
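
    Example (a minimal sketch):
        >>> t = torch.rand(3, 32, 32)
        >>> center_crop(t, [16, 16]).shape
        torch.Size([3, 16, 16])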
    """
    if isinstance(output_size, numbers.Number):
        output_size = (int(output_size), int(output_size))
    elif isinstance(output_size, (tuple, list)) and len(output_size) == 1:
        output_size = (output_size[0], output_size[0])

    image_width, image_height = _get_image_size(img)
    crop_height, crop_width = output_size

    # crop_top = int(round((image_height - crop_height) / 2.))
    # Result can be different between python func and scripted func
    # Temporary workaround:
    crop_top = int((image_height - crop_height + 1) * 0.5)
    # crop_left = int(round((image_width - crop_width) / 2.))
    # Result can be different between python func and scripted func
    # Temporary workaround:
    crop_left = int((image_width - crop_width + 1) * 0.5)
    return crop(img, crop_top, crop_left, crop_height, crop_width)


def resized_crop(
        img: Tensor, top: int, left: int, height: int, width: int, size: List[int], interpolation: int = Image.BILINEAR
) -> Tensor:
    """Crop the given image and resize it to desired size.
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.

    Args:
        img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.
        size (sequence or int): Desired output size. Same semantics as ``resize``.
        interpolation (int, optional): Desired interpolation enum defined by `filters`_.
            Default is ``PIL.Image.BILINEAR``. If input is Tensor, only ``PIL.Image.NEAREST``, ``PIL.Image.BILINEAR``
            and ``PIL.Image.BICUBIC`` are supported.
    Returns:
        PIL Image or Tensor: Cropped image.
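
    Example (a minimal sketch; crop box values are arbitrary):
        >>> t = torch.rand(3, 32, 32)
        >>> resized_crop(t, top=0, left=0, height=16, width=16, size=[8, 8]).shape
        torch.Size([3, 8, 8])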
    """
    img = crop(img, top, left, height, width)
    img = resize(img, size, interpolation)
    return img


def hflip(img: Tensor) -> Tensor:
    """Horizontally flip the given PIL Image or Tensor.

    Args:
        img (PIL Image or Tensor): Image to be flipped. If img
            is a Tensor, it is expected to be in [..., H, W] format,
            where ... means it can have an arbitrary number of leading
            dimensions.

    Returns:
        PIL Image or Tensor:  Horizontally flipped image.
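
    Example (a minimal sketch):
        >>> t = torch.arange(4.).reshape(1, 2, 2)
        >>> hflip(t)  # flips along the last (width) dimension
        tensor([[[1., 0.],
                 [3., 2.]]])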
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.hflip(img)

    return F_t.hflip(img)


def _get_perspective_coeffs(
        startpoints: List[List[int]], endpoints: List[List[int]]
) -> List[float]:
    """Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms.

    In Perspective Transform each pixel (x, y) in the original image gets transformed as,
     (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) )

    Args:
        startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
        endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.

    Returns:
        octuple (a, b, c, d, e, f, g, h) for transforming each pixel.
    """
    a_matrix = torch.zeros(2 * len(startpoints), 8, dtype=torch.float)

    for i, (p1, p2) in enumerate(zip(endpoints, startpoints)):
        a_matrix[2 * i, :] = torch.tensor([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
        a_matrix[2 * i + 1, :] = torch.tensor([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])

    b_matrix = torch.tensor(startpoints, dtype=torch.float).view(8)
    res = torch.lstsq(b_matrix, a_matrix)[0]

    output: List[float] = res.squeeze(1).tolist()
    return output


def perspective(
        img: Tensor,
        startpoints: List[List[int]],
        endpoints: List[List[int]],
        interpolation: int = 2,
        fill: Optional[int] = None
) -> Tensor:
    """Perform perspective transform of the given image.
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): Image to be transformed.
        startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the original image.
        endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners
            ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image.
        interpolation (int): Interpolation type. If input is Tensor, only ``PIL.Image.NEAREST`` and
            ``PIL.Image.BILINEAR`` are supported. Default, ``PIL.Image.BILINEAR`` for PIL images and Tensors.
        fill (n-tuple or int or float): Pixel fill value for area outside the rotated
            image. If int or float, the value is used for all bands respectively.
            This option is only available for ``pillow>=5.0.0``. This option is not supported for Tensor
            input. Fill value for the area outside the transform in the output image is always 0.

    Returns:
        PIL Image or Tensor: transformed Image.
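
    Example (a minimal sketch; the corner points are arbitrary):
        >>> t = torch.rand(3, 8, 8)
        >>> start = [[0, 0], [7, 0], [7, 7], [0, 7]]
        >>> end = [[1, 0], [7, 1], [6, 7], [0, 6]]
        >>> perspective(t, start, end).shape
        torch.Size([3, 8, 8])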
    """

    coeffs = _get_perspective_coeffs(startpoints, endpoints)

    if not isinstance(img, torch.Tensor):
        return F_pil.perspective(img, coeffs, interpolation=interpolation, fill=fill)

    return F_t.perspective(img, coeffs, interpolation=interpolation, fill=fill)


def vflip(img: Tensor) -> Tensor:
    """Vertically flip the given PIL Image or torch Tensor.

    Args:
        img (PIL Image or Tensor): Image to be flipped. If img
            is a Tensor, it is expected to be in [..., H, W] format,
            where ... means it can have an arbitrary number of leading
            dimensions.

    Returns:
        PIL Image or Tensor: Vertically flipped image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.vflip(img)

    return F_t.vflip(img)


def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
    """Crop the given image into four corners and the central crop.
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).

    Returns:
       tuple: tuple (tl, tr, bl, br, center)
                Corresponding top left, top right, bottom left, bottom right and center crop.
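
    Example (a minimal sketch):
        >>> t = torch.rand(3, 32, 32)
        >>> crops = five_crop(t, [16, 16])
        >>> len(crops), crops[0].shape
        (5, torch.Size([3, 16, 16]))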
    """
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and len(size) == 1:
        size = (size[0], size[0])

    if len(size) != 2:
        raise ValueError("Please provide only two dimensions (h, w) for size.")

    image_width, image_height = _get_image_size(img)
    crop_height, crop_width = size
    if crop_width > image_width or crop_height > image_height:
        msg = "Requested crop size {} is bigger than input size {}"
        raise ValueError(msg.format(size, (image_height, image_width)))

    tl = crop(img, 0, 0, crop_height, crop_width)
    tr = crop(img, 0, image_width - crop_width, crop_height, crop_width)
    bl = crop(img, image_height - crop_height, 0, crop_height, crop_width)
    br = crop(img, image_height - crop_height, image_width - crop_width, crop_height, crop_width)

    center = center_crop(img, [crop_height, crop_width])

    return tl, tr, bl, br, center


def ten_crop(img: Tensor, size: List[int], vertical_flip: bool = False) -> List[Tensor]:
    """Generate ten cropped images from the given image.
    Crop the given image into four corners and the central crop plus the
    flipped version of these (horizontal flipping is used by default).
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image or Tensor): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).
        vertical_flip (bool): Use vertical flipping instead of horizontal.

    Returns:
        tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
            Corresponding top left, top right, bottom left, bottom right and
            center crop and same for the flipped image.
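
    Example (a minimal sketch):
        >>> t = torch.rand(3, 32, 32)
        >>> len(ten_crop(t, [16, 16]))
        10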
    """
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and len(size) == 1:
        size = (size[0], size[0])

    if len(size) != 2:
        raise ValueError("Please provide only two dimensions (h, w) for size.")

    first_five = five_crop(img, size)

    if vertical_flip:
        img = vflip(img)
    else:
        img = hflip(img)

    second_five = five_crop(img, size)
    return first_five + second_five


def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor:
    """Adjust brightness of an Image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
        brightness_factor (float):  How much to adjust the brightness. Can be
            any non negative number. 0 gives a black image, 1 gives the
            original image while 2 increases the brightness by a factor of 2.

    Returns:
        PIL Image or Tensor: Brightness adjusted image.
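
    Example (a minimal sketch; assumes the tensor path clamps float images to [0, 1]):
        >>> t = torch.full((3, 2, 2), 0.5)
        >>> adjust_brightness(t, 2.0).max().item()  # doubled, then clamped
        1.0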
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_brightness(img, brightness_factor)

    return F_t.adjust_brightness(img, brightness_factor)


def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor:
    """Adjust contrast of an Image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.

    Returns:
        PIL Image or Tensor: Contrast adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_contrast(img, contrast_factor)

    return F_t.adjust_contrast(img, contrast_factor)


def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor:
    """Adjust color saturation of an image.

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
        saturation_factor (float):  How much to adjust the saturation. 0 will
            give a black and white image, 1 will give the original image while
            2 will enhance the saturation by a factor of 2.

    Returns:
        PIL Image or Tensor: Saturation adjusted image.
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_saturation(img, saturation_factor)

    return F_t.adjust_saturation(img, saturation_factor)


def adjust_hue(img: Tensor, hue_factor: float) -> Tensor:
    """Adjust hue of an image.

    The image hue is adjusted by converting the image to HSV and
    cyclically shifting the intensities in the hue channel (H).
    The image is then converted back to original image mode.

    `hue_factor` is the amount of shift in H channel and must be in the
    interval `[-0.5, 0.5]`.

    See `Hue`_ for more details.

    .. _Hue: https://en.wikipedia.org/wiki/Hue

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
        hue_factor (float):  How much to shift the hue channel. Should be in
            [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
            HSV space in positive and negative direction respectively.
            0 means no shift. Therefore, both -0.5 and 0.5 will give an image
            with complementary colors while 0 gives the original image.

    Returns:
        PIL Image or Tensor: Hue adjusted image.
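
    Example (a minimal sketch; the shift value is arbitrary):
        >>> t = torch.rand(3, 8, 8)
        >>> adjust_hue(t, 0.2).shape
        torch.Size([3, 8, 8])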
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_hue(img, hue_factor)

    return F_t.adjust_hue(img, hue_factor)


def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:
    r"""Perform gamma correction on an image.

    Also known as Power Law Transform. Intensities in RGB mode are adjusted
    based on the following equation:

    .. math::
        I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}

    See `Gamma Correction`_ for more details.

    .. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction

    Args:
        img (PIL Image or Tensor): Image to be adjusted.
        gamma (float): Non negative real number, same as :math:`\gamma` in the equation.
            gamma larger than 1 makes the shadows darker,
            while gamma smaller than 1 makes dark regions lighter.
        gain (float): The constant multiplier.
    Returns:
        PIL Image or Tensor: Gamma correction adjusted image.
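
    Example (a minimal sketch; assumes the tensor path applies ``gain * img ** gamma``
    to float images in [0, 1]):
        >>> t = torch.full((3, 2, 2), 0.5)
        >>> adjust_gamma(t, gamma=2.0)[0, 0, 0].item()  # 0.5 ** 2
        0.25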
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.adjust_gamma(img, gamma, gain)

    return F_t.adjust_gamma(img, gamma, gain)


def _get_inverse_affine_matrix(
        center: List[float], angle: float, translate: List[float], scale: float, shear: List[float]
) -> List[float]:
    # Helper method to compute inverse matrix for affine transformation

    # As it is explained in PIL.Image.rotate
    # We need to compute the INVERSE of the affine transformation matrix: M = T * C * RSS * C^-1
    # where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1]
    #       C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1]
    #       RSS is rotation with scale and shear matrix
    #       RSS(a, s, (sx, sy)) =
    #       = R(a) * S(s) * SHy(sy) * SHx(sx)
    #       = [ s*cos(a - sy)/cos(sy), s*(-cos(a - sy)*tan(sx)/cos(sy) - sin(a)), 0 ]
    #         [ s*sin(a - sy)/cos(sy), s*(-sin(a - sy)*tan(sx)/cos(sy) + cos(a)), 0 ]
    #         [ 0                    , 0                                        , 1 ]
    #
    # where R is a rotation matrix, S is a scaling matrix, and SHx and SHy are the shears:
    # SHx(s) = [1, -tan(s)] and SHy(s) = [1      , 0]
    #          [0, 1      ]              [-tan(s), 1]
    #
    # Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1

    rot = math.radians(angle)
    sx, sy = [math.radians(s) for s in shear]

    cx, cy = center
    tx, ty = translate

    # RSS without scaling
    a = math.cos(rot - sy) / math.cos(sy)
    b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot)
    c = math.sin(rot - sy) / math.cos(sy)
    d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot)

    # Inverted rotation matrix with scale and shear
    # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
    matrix = [d, -b, 0.0, -c, a, 0.0]
    matrix = [x / scale for x in matrix]

    # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
    matrix[2] += matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty)
    matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty)

    # Apply center translation: C * RSS^-1 * C^-1 * T^-1
    matrix[2] += cx
    matrix[5] += cy

    return matrix


def rotate(
        img: Tensor, angle: float, resample: int = 0, expand: bool = False,
        center: Optional[List[int]] = None, fill: Optional[int] = None
) -> Tensor:
    """Rotate the image by angle.
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): image to be rotated.
        angle (float or int): rotation angle value in degrees, counter-clockwise.
        resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):
            An optional resampling filter. See `filters`_ for more information.
            If omitted, or if the image has mode "1" or "P", it is set to ``PIL.Image.NEAREST``.
        expand (bool, optional): Optional expansion flag.
            If true, expands the output image to make it large enough to hold the entire rotated image.
            If false or omitted, make the output image the same size as the input image.
            Note that the expand flag assumes rotation around the center and no translation.
        center (list or tuple, optional): Optional center of rotation. Origin is the upper left corner.
            Default is the center of the image.
        fill (n-tuple or int or float): Pixel fill value for area outside the rotated
            image. If int or float, the value is used for all bands respectively.
            Defaults to 0 for all bands. This option is only available for ``pillow>=5.2.0``.
            This option is not supported for Tensor input. Fill value for the area outside the transform in the output
            image is always 0.

    Returns:
        PIL Image or Tensor: Rotated image.

    .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters
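
    Example (a minimal sketch):
        >>> t = torch.rand(3, 16, 16)
        >>> rotate(t, angle=45.0).shape  # without expand, the size is kept
        torch.Size([3, 16, 16])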

    """
    if not isinstance(angle, (int, float)):
        raise TypeError("Argument angle should be int or float")

    if center is not None and not isinstance(center, (list, tuple)):
        raise TypeError("Argument center should be a sequence")

    if not isinstance(img, torch.Tensor):
        return F_pil.rotate(img, angle=angle, resample=resample, expand=expand, center=center, fill=fill)

    center_f = [0.0, 0.0]
    if center is not None:
        img_size = _get_image_size(img)
        # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center.
        center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, img_size)]

    # due to current incoherence of rotation angle direction between affine and rotate implementations
    # we need to set -angle.
    matrix = _get_inverse_affine_matrix(center_f, -angle, [0.0, 0.0], 1.0, [0.0, 0.0])
    return F_t.rotate(img, matrix=matrix, resample=resample, expand=expand, fill=fill)


def affine(
        img: Tensor, angle: float, translate: List[int], scale: float, shear: List[float],
        resample: int = 0, fillcolor: Optional[int] = None
) -> Tensor:
    """Apply affine transformation on the image keeping image center invariant.
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        img (PIL Image or Tensor): image to transform.
        angle (float or int): rotation angle in degrees between -180 and 180, clockwise direction.
        translate (list or tuple of integers): horizontal and vertical translations (post-rotation translation)
        scale (float): overall scale
        shear (float or tuple or list): shear angle value in degrees between -180 to 180, clockwise direction.
            If a tuple or list is specified, the first value corresponds to a shear parallel to the x axis, while
            the second value corresponds to a shear parallel to the y axis.
        resample (``PIL.Image.NEAREST`` or ``PIL.Image.BILINEAR`` or ``PIL.Image.BICUBIC``, optional):
            An optional resampling filter. See `filters`_ for more information.
            If omitted, or if the image is PIL Image and has mode "1" or "P", it is set to ``PIL.Image.NEAREST``.
            If input is Tensor, only ``PIL.Image.NEAREST`` and ``PIL.Image.BILINEAR`` are supported.
        fillcolor (int): Optional fill color for the area outside the transform in the output image (Pillow>=5.0.0).
            This option is not supported for Tensor input. Fill value for the area outside the transform in the output
            image is always 0.

    Returns:
        PIL Image or Tensor: Transformed image.
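
    Example (a minimal sketch; parameter values are arbitrary):
        >>> t = torch.rand(3, 16, 16)
        >>> affine(t, angle=15.0, translate=[2, 0], scale=1.0, shear=[0.0, 0.0]).shape
        torch.Size([3, 16, 16])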
    """
    if not isinstance(angle, (int, float)):
        raise TypeError("Argument angle should be int or float")

    if not isinstance(translate, (list, tuple)):
        raise TypeError("Argument translate should be a sequence")

    if len(translate) != 2:
        raise ValueError("Argument translate should be a sequence of length 2")

    if scale <= 0.0:
        raise ValueError("Argument scale should be positive")

    if not isinstance(shear, (numbers.Number, list, tuple)):
        raise TypeError("Shear should be either a single value or a sequence of two values")

    if isinstance(angle, int):
        angle = float(angle)

    if isinstance(translate, tuple):
        translate = list(translate)

    if isinstance(shear, numbers.Number):
        shear = [shear, 0.0]

    if isinstance(shear, tuple):
        shear = list(shear)

    if len(shear) == 1:
        shear = [shear[0], shear[0]]

    if len(shear) != 2:
        raise ValueError("Shear should be a sequence containing two values. Got {}".format(shear))

    img_size = _get_image_size(img)
    if not isinstance(img, torch.Tensor):
        # center = (img_size[0] * 0.5 + 0.5, img_size[1] * 0.5 + 0.5)
        # it is visually better to estimate the center without 0.5 offset
        # otherwise image rotated by 90 degrees is shifted vs output image of torch.rot90 or F_t.affine
        center = [img_size[0] * 0.5, img_size[1] * 0.5]
        matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear)

        return F_pil.affine(img, matrix=matrix, resample=resample, fillcolor=fillcolor)

    translate_f = [1.0 * t for t in translate]
    matrix = _get_inverse_affine_matrix([0.0, 0.0], angle, translate_f, scale, shear)
    return F_t.affine(img, matrix=matrix, resample=resample, fillcolor=fillcolor)


@torch.jit.unused
def to_grayscale(img, num_output_channels=1):
    """Convert PIL image of any mode (RGB, HSV, LAB, etc) to grayscale version of image.

    Args:
        img (PIL Image): PIL Image to be converted to grayscale.
        num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default, 1.

    Returns:
        PIL Image: Grayscale version of the image.
            if num_output_channels = 1 : returned image is single channel

            if num_output_channels = 3 : returned image is 3 channel with r = g = b
    """
    if isinstance(img, Image.Image):
        return F_pil.to_grayscale(img, num_output_channels)

    raise TypeError("Input should be PIL Image")


def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor:
    """Convert RGB image to grayscale version of image.
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Note:
        This method supports only RGB images as input. For inputs in other color spaces,
        consider using :meth:`~torchvision.transforms.functional.to_grayscale` with PIL Image.

    Args:
        img (PIL Image or Tensor): RGB Image to be converted to grayscale.
        num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default, 1.

    Returns:
        PIL Image or Tensor: Grayscale version of the image.
            if num_output_channels = 1 : returned image is single channel

            if num_output_channels = 3 : returned image is 3 channel with r = g = b
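
    Example (a minimal sketch; assumes the tensor path keeps a channel dimension):
        >>> t = torch.rand(3, 8, 8)
        >>> rgb_to_grayscale(t).shape
        torch.Size([1, 8, 8])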
    """
    if not isinstance(img, torch.Tensor):
        return F_pil.to_grayscale(img, num_output_channels)

    return F_t.rgb_to_grayscale(img, num_output_channels)


def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool = False) -> Tensor:
    """ Erase the input Tensor Image with given value.

    Args:
        img (Tensor Image): Tensor image of size (C, H, W) to be erased
        i (int): i in (i,j) i.e coordinates of the upper left corner.
        j (int): j in (i,j) i.e coordinates of the upper left corner.
        h (int): Height of the erased region.
        w (int): Width of the erased region.
        v: Erasing value.
        inplace (bool, optional): For in-place operations. By default, it is set to False.

    Returns:
        Tensor Image: Erased image.
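
    Example (a minimal sketch; region values are arbitrary):
        >>> t = torch.ones(3, 8, 8)
        >>> out = erase(t, i=2, j=2, h=4, w=4, v=torch.tensor(0.))
        >>> out[0, 2, 2].item()
        0.0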
    """
    if not isinstance(img, torch.Tensor):
        raise TypeError('img should be Tensor Image. Got {}'.format(type(img)))

    if not inplace:
        img = img.clone()

    img[..., i:i + h, j:j + w] = v
    return img


def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Tensor:
    """Performs Gaussian blurring on the img by given kernel.
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Args:
        img (PIL Image or Tensor): Image to be blurred
        kernel_size (sequence of ints or int): Gaussian kernel size. Can be a sequence of integers
            like ``(kx, ky)`` or a single integer for square kernels.
            In torchscript mode kernel_size as single int is not supported, use a tuple or
            list of length 1: ``[ksize, ]``.
        sigma (sequence of floats or float, optional): Gaussian kernel standard deviation. Can be a
            sequence of floats like ``(sigma_x, sigma_y)`` or a single float to define the
            same sigma in both X/Y directions. If None, then it is computed using
            ``kernel_size`` as ``sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8``.
            Default, None. In torchscript mode sigma as single float is
            not supported, use a tuple or list of length 1: ``[sigma, ]``.

    Returns:
        PIL Image or Tensor: Gaussian Blurred version of the image.
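
    Example (a minimal sketch; kernel size and sigma are arbitrary):
        >>> t = torch.rand(3, 16, 16)
        >>> gaussian_blur(t, kernel_size=[5, 5], sigma=[1.0, 1.0]).shape
        torch.Size([3, 16, 16])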
    """
    if not isinstance(kernel_size, (int, list, tuple)):
        raise TypeError('kernel_size should be int or a sequence of integers. Got {}'.format(type(kernel_size)))
    if isinstance(kernel_size, int):
        kernel_size = [kernel_size, kernel_size]
    if len(kernel_size) != 2:
        raise ValueError('If kernel_size is a sequence its length should be 2. Got {}'.format(len(kernel_size)))
    for ksize in kernel_size:
        if ksize % 2 == 0 or ksize < 0:
            raise ValueError('kernel_size should have odd and positive integers. Got {}'.format(kernel_size))

    if sigma is None:
        sigma = [ksize * 0.15 + 0.35 for ksize in kernel_size]

    if sigma is not None and not isinstance(sigma, (int, float, list, tuple)):
        raise TypeError('sigma should be either float or sequence of floats. Got {}'.format(type(sigma)))
    if isinstance(sigma, (int, float)):
        sigma = [float(sigma), float(sigma)]
    if isinstance(sigma, (list, tuple)) and len(sigma) == 1:
        sigma = [sigma[0], sigma[0]]
    if len(sigma) != 2:
        raise ValueError('If sigma is a sequence, its length should be 2. Got {}'.format(len(sigma)))
    for s in sigma:
        if s <= 0.:
            raise ValueError('sigma should have positive values. Got {}'.format(sigma))

    t_img = img
    if not isinstance(img, torch.Tensor):
        if not F_pil._is_pil_image(img):
            raise TypeError('img should be PIL Image or Tensor. Got {}'.format(type(img)))

        t_img = to_tensor(img)

    output = F_t.gaussian_blur(t_img, kernel_size, sigma)

    if not isinstance(img, torch.Tensor):
        output = to_pil_image(output)
    return output