import warnings
from typing import Optional, Tuple

import torch
from torch import Tensor
from torch.nn.functional import grid_sample, conv2d, interpolate, pad as torch_pad
from torch.jit.annotations import List, BroadcastingList2


def _is_tensor_a_torch_image(x: Tensor) -> bool:
    return x.ndim >= 2


def _get_image_size(img: Tensor) -> List[int]:
    """Returns (w, h) of tensor image"""
    if _is_tensor_a_torch_image(img):
        return [img.shape[-1], img.shape[-2]]
    raise TypeError("Unexpected input type")


def _get_image_num_channels(img: Tensor) -> int:
    if img.ndim == 2:
        return 1
    elif img.ndim > 2:
        return img.shape[-3]

    raise TypeError("Input ndim should be 2 or more. Got {}".format(img.ndim))


def _max_value(dtype: torch.dtype) -> float:
    # TODO: replace this method with torch.iinfo when it gets torchscript support.
    # https://github.com/pytorch/pytorch/issues/41492

    a = torch.tensor(2, dtype=dtype)
    signed = 1 if torch.tensor(0, dtype=dtype).is_signed() else 0
    bits = 1
    max_value = torch.tensor(-signed, dtype=torch.long)
    while True:
        next_value = a.pow(bits - signed).sub(1)
        if next_value > max_value:
            max_value = next_value
            bits *= 2
        else:
            return max_value.item()
    return max_value.item()
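
# Expected results, as an illustrative sketch (integer dtypes only; these are the
# values torch.iinfo(dtype).max would give once torchscript supports it):
#
#     _max_value(torch.uint8)   # -> 255
#     _max_value(torch.int8)    # -> 127
#     _max_value(torch.int16)   # -> 32767
#     _max_value(torch.int32)   # -> 2147483647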


def _assert_channels(img: Tensor, permitted: List[int]) -> None:
    c = _get_image_num_channels(img)
    if c not in permitted:
        raise TypeError("Input image tensor permitted channel values are {}, but found {}".format(permitted, c))


def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor:
    """PRIVATE METHOD. Convert a tensor image to the given ``dtype`` and scale the values accordingly

    .. warning::

        Module ``transforms.functional_tensor`` is private and should not be used in user application.
        Please, consider instead using methods from `transforms.functional` module.

    Args:
        image (torch.Tensor): Image to be converted
        dtype (torch.dtype): Desired data type of the output

    Returns:
        (torch.Tensor): Converted image

    .. note::

        When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
        If converted back and forth, this mismatch has no effect.

    Raises:
        RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
            well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
            overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
            of the integer ``dtype``.
    """
    if image.dtype == dtype:
        return image

    # TODO: replace with image.dtype.is_floating_point when torchscript supports it
    if torch.empty(0, dtype=image.dtype).is_floating_point():

        # TODO: replace with dtype.is_floating_point when torchscript supports it
        if torch.tensor(0, dtype=dtype).is_floating_point():
            return image.to(dtype)

        # float to int
        if (image.dtype == torch.float32 and dtype in (torch.int32, torch.int64)) or (
            image.dtype == torch.float64 and dtype == torch.int64
        ):
            msg = f"The cast from {image.dtype} to {dtype} cannot be performed safely."
            raise RuntimeError(msg)

        # https://github.com/pytorch/vision/pull/2078#issuecomment-612045321
        # For data in the range 0-1, (float * 255).to(uint) is only 255
        # when float is exactly 1.0.
        # `max + 1 - epsilon` provides more evenly distributed mapping of
        # ranges of floats to ints.
        eps = 1e-3
        max_val = _max_value(dtype)
        result = image.mul(max_val + 1.0 - eps)
        return result.to(dtype)
    else:
        input_max = _max_value(image.dtype)

        # int to float
        # TODO: replace with dtype.is_floating_point when torchscript supports it
        if torch.tensor(0, dtype=dtype).is_floating_point():
            image = image.to(dtype)
            return image / input_max

        output_max = _max_value(dtype)

        # int to int
        if input_max > output_max:
            # factor should be forced to int for torch jit script
            # otherwise factor is a float and image // factor can produce different results
            factor = int((input_max + 1) // (output_max + 1))
            image = image // factor
            return image.to(dtype)
        else:
            # factor should be forced to int for torch jit script
            # otherwise factor is a float and image * factor can produce different results
            factor = int((output_max + 1) // (input_max + 1))
            image = image.to(dtype)
            return image * factor
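
# A minimal usage sketch (shapes and values are illustrative, not part of the API):
#
#     img_u8 = torch.randint(0, 256, (3, 16, 16), dtype=torch.uint8)
#     img_f = convert_image_dtype(img_u8, torch.float32)   # scaled into [0.0, 1.0]
#     img_b = convert_image_dtype(img_f, torch.uint8)      # scaled back to [0, 255]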


def vflip(img: Tensor) -> Tensor:
    """PRIVATE METHOD. Vertically flip the given Image Tensor.

    .. warning::

        Module ``transforms.functional_tensor`` is private and should not be used in user application.
        Please, consider instead using methods from `transforms.functional` module.

    Args:
        img (Tensor): Image Tensor to be flipped in the form [..., C, H, W].

    Returns:
        Tensor:  Vertically flipped image Tensor.
    """
    if not _is_tensor_a_torch_image(img):
        raise TypeError('tensor is not a torch image.')

    return img.flip(-2)


def hflip(img: Tensor) -> Tensor:
    """PRIVATE METHOD. Horizontally flip the given Image Tensor.

    .. warning::

        Module ``transforms.functional_tensor`` is private and should not be used in user application.
        Please, consider instead using methods from `transforms.functional` module.

    Args:
        img (Tensor): Image Tensor to be flipped in the form [..., C, H, W].

    Returns:
        Tensor:  Horizontally flipped image Tensor.
    """
    if not _is_tensor_a_torch_image(img):
        raise TypeError('tensor is not a torch image.')

    return img.flip(-1)


def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:
    """PRIVATE METHOD. Crop the given Image Tensor.

    .. warning::

        Module ``transforms.functional_tensor`` is private and should not be used in user application.
        Please, consider instead using methods from `transforms.functional` module.

    Args:
        img (Tensor): Image to be cropped in the form [..., H, W]. (0,0) denotes the top left corner of the image.
        top (int): Vertical component of the top left corner of the crop box.
        left (int): Horizontal component of the top left corner of the crop box.
        height (int): Height of the crop box.
        width (int): Width of the crop box.

    Returns:
        Tensor: Cropped image.
    """
    if not _is_tensor_a_torch_image(img):
        raise TypeError("tensor is not a torch image.")

    return img[..., top:top + height, left:left + width]


def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor:
    """PRIVATE METHOD. Convert the given RGB Image Tensor to Grayscale.

    .. warning::

        Module ``transforms.functional_tensor`` is private and should not be used in user application.
        Please, consider instead using methods from `transforms.functional` module.

    For RGB to Grayscale conversion, ITU-R 601-2 luma transform is performed which
    is L = R * 0.2989 + G * 0.5870 + B * 0.1140

    Args:
        img (Tensor): Image to be converted to Grayscale in the form [C, H, W].
        num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default, 1.

    Returns:
        Tensor: Grayscale version of the image.
            if num_output_channels = 1 : returned image is single channel

            if num_output_channels = 3 : returned image is 3 channel with r = g = b

    """
    if img.ndim < 3:
        raise TypeError("Input image tensor should have at least 3 dimensions, but found {}".format(img.ndim))
    _assert_channels(img, [3])

    if num_output_channels not in (1, 3):
        raise ValueError('num_output_channels should be either 1 or 3')

    r, g, b = img.unbind(dim=-3)
    # This implementation closely follows the TF one:
    # https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/python/ops/image_ops_impl.py#L2105-L2138
    l_img = (0.2989 * r + 0.587 * g + 0.114 * b).to(img.dtype)
    l_img = l_img.unsqueeze(dim=-3)

    if num_output_channels == 3:
        return l_img.expand(img.shape)

    return l_img
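
# A minimal usage sketch (the shape is illustrative):
#
#     rgb = torch.rand(3, 8, 8)                              # float RGB image in [0, 1]
#     gray = rgb_to_grayscale(rgb)                           # shape (1, 8, 8)
#     gray3 = rgb_to_grayscale(rgb, num_output_channels=3)   # shape (3, 8, 8), r = g = b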


def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor:
    """PRIVATE METHOD. Adjust brightness of a Grayscale or RGB image.

    .. warning::

        Module ``transforms.functional_tensor`` is private and should not be used in user application.
        Please, consider instead using methods from `transforms.functional` module.

    Args:
        img (Tensor): Image to be adjusted.
        brightness_factor (float):  How much to adjust the brightness. Can be
            any non negative number. 0 gives a black image, 1 gives the
            original image while 2 increases the brightness by a factor of 2.

    Returns:
        Tensor: Brightness adjusted image.
    """
    if brightness_factor < 0:
        raise ValueError('brightness_factor ({}) is not non-negative.'.format(brightness_factor))

    if not _is_tensor_a_torch_image(img):
        raise TypeError('tensor is not a torch image.')

    _assert_channels(img, [1, 3])

    return _blend(img, torch.zeros_like(img), brightness_factor)
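
# A minimal usage sketch (values are illustrative): brightness adjustment is a blend
# with a black image, so a factor of 0.5 halves every pixel value.
#
#     img = torch.full((3, 4, 4), 100, dtype=torch.uint8)
#     out = adjust_brightness(img, 0.5)   # every value becomes 50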


def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor:
    """PRIVATE METHOD. Adjust contrast of an RGB image.

    .. warning::

        Module ``transforms.functional_tensor`` is private and should not be used in user application.
        Please, consider instead using methods from `transforms.functional` module.

    Args:
        img (Tensor): Image to be adjusted.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.

    Returns:
        Tensor: Contrast adjusted image.
    """
    if contrast_factor < 0:
        raise ValueError('contrast_factor ({}) is not non-negative.'.format(contrast_factor))

    if not _is_tensor_a_torch_image(img):
        raise TypeError('tensor is not a torch image.')

    _assert_channels(img, [3])

    dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    mean = torch.mean(rgb_to_grayscale(img).to(dtype), dim=(-3, -2, -1), keepdim=True)

    return _blend(img, mean, contrast_factor)


def adjust_hue(img: Tensor, hue_factor: float) -> Tensor:
    """PRIVATE METHOD. Adjust hue of an RGB image.

    .. warning::

        Module ``transforms.functional_tensor`` is private and should not be used in user application.
        Please, consider instead using methods from `transforms.functional` module.

    The image hue is adjusted by converting the image to HSV and
    cyclically shifting the intensities in the hue channel (H).
    The image is then converted back to original image mode.

    `hue_factor` is the amount of shift in H channel and must be in the
    interval `[-0.5, 0.5]`.

    See `Hue`_ for more details.

    .. _Hue: https://en.wikipedia.org/wiki/Hue

    Args:
        img (Tensor): Image to be adjusted. Image type is either uint8 or float.
        hue_factor (float):  How much to shift the hue channel. Should be in
            [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
            HSV space in positive and negative direction respectively.
            0 means no shift. Therefore, both -0.5 and 0.5 will give an image
            with complementary colors while 0 gives the original image.

    Returns:
         Tensor: Hue adjusted image.
    """
    if not (-0.5 <= hue_factor <= 0.5):
        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))

    if not (isinstance(img, torch.Tensor) and _is_tensor_a_torch_image(img)):
        raise TypeError('Input img should be Tensor image')

    _assert_channels(img, [3])

    orig_dtype = img.dtype
    if img.dtype == torch.uint8:
        img = img.to(dtype=torch.float32) / 255.0

    img = _rgb2hsv(img)
    h, s, v = img.unbind(dim=-3)
    h = (h + hue_factor) % 1.0
    img = torch.stack((h, s, v), dim=-3)
    img_hue_adj = _hsv2rgb(img)

    if orig_dtype == torch.uint8:
        img_hue_adj = (img_hue_adj * 255.0).to(dtype=orig_dtype)

    return img_hue_adj
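
# A minimal usage sketch (values are illustrative): a hue shift of 0.5 maps each
# colour to its complement, e.g. pure red becomes (approximately) pure cyan.
#
#     img = torch.zeros(3, 4, 4, dtype=torch.uint8)
#     img[0] = 255                 # pure red
#     out = adjust_hue(img, 0.5)   # roughly (0, 255, 255)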


def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor:
    """PRIVATE METHOD. Adjust color saturation of an RGB image.

    .. warning::

        Module ``transforms.functional_tensor`` is private and should not be used in user application.
        Please, consider instead using methods from `transforms.functional` module.

    Args:
        img (Tensor): Image to be adjusted.
        saturation_factor (float):  How much to adjust the saturation. Can be any
            non negative number. 0 gives a black and white image, 1 gives the
            original image while 2 enhances the saturation by a factor of 2.

    Returns:
        Tensor: Saturation adjusted image.
    """
    if saturation_factor < 0:
        raise ValueError('saturation_factor ({}) is not non-negative.'.format(saturation_factor))

    if not _is_tensor_a_torch_image(img):
        raise TypeError('tensor is not a torch image.')

    _assert_channels(img, [3])

    return _blend(img, rgb_to_grayscale(img), saturation_factor)


def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:
    r"""PRIVATE METHOD. Adjust gamma of a Grayscale or RGB image.

    .. warning::

        Module ``transforms.functional_tensor`` is private and should not be used in user application.
        Please, consider instead using methods from `transforms.functional` module.

    Also known as Power Law Transform. Intensities in RGB mode are adjusted
    based on the following equation:

    .. math::
        I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma}

    See `Gamma Correction`_ for more details.

    .. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction

    Args:
        img (Tensor): Tensor of RGB values to be adjusted.
        gamma (float): Non negative real number, same as :math:`\gamma` in the equation.
            gamma larger than 1 makes the shadows darker,
            while gamma smaller than 1 makes dark regions lighter.
        gain (float): The constant multiplier.
    """

    if not isinstance(img, torch.Tensor):
        raise TypeError('Input img should be a Tensor.')

    _assert_channels(img, [1, 3])

    if gamma < 0:
        raise ValueError('Gamma should be a non-negative real number')

    result = img
    dtype = img.dtype
    if not torch.is_floating_point(img):
        result = convert_image_dtype(result, torch.float32)

    result = (gain * result ** gamma).clamp(0, 1)

    result = convert_image_dtype(result, dtype)
    result = result.to(dtype)
    return result
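
# A minimal usage sketch (values are illustrative): for float input the transform is
# simply gain * img ** gamma, clamped to [0, 1].
#
#     img = torch.full((3, 4, 4), 0.25)
#     out = adjust_gamma(img, gamma=0.5)   # every value becomes 0.5 (the square root of 0.25)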


def center_crop(img: Tensor, output_size: BroadcastingList2[int]) -> Tensor:
    """DEPRECATED. Crop the given Image Tensor at the center.

    .. warning::

        Module ``transforms.functional_tensor`` is private and should not be used in user application.
        Please, consider instead using methods from `transforms.functional` module.

    .. warning::

        This method is deprecated and will be removed in future releases.
        Please, use ``F.center_crop`` instead.

    Args:
        img (Tensor): Image to be cropped.
        output_size (sequence or int): (height, width) of the crop box. If int,
                it is used for both directions

    Returns:
            Tensor: Cropped image.
    """
    warnings.warn(
        "This method is deprecated and will be removed in future releases. "
        "Please, use ``F.center_crop`` instead."
    )

    if not _is_tensor_a_torch_image(img):
        raise TypeError('tensor is not a torch image.')

    _, image_height, image_width = img.size()
    crop_height, crop_width = output_size
    # crop_top = int(round((image_height - crop_height) / 2.))
    # Result can be different between python func and scripted func
    # Temporary workaround:
    crop_top = int((image_height - crop_height + 1) * 0.5)
    # crop_left = int(round((image_width - crop_width) / 2.))
    # Result can be different between python func and scripted func
    # Temporary workaround:
    crop_left = int((image_width - crop_width + 1) * 0.5)

    return crop(img, crop_top, crop_left, crop_height, crop_width)


def five_crop(img: Tensor, size: BroadcastingList2[int]) -> List[Tensor]:
    """DEPRECATED. Crop the given Image Tensor into four corners and the central crop.

    .. warning::

        Module ``transforms.functional_tensor`` is private and should not be used in user application.
        Please, consider instead using methods from `transforms.functional` module.

    .. warning::

        This method is deprecated and will be removed in future releases.
        Please, use ``F.five_crop`` instead.

    .. Note::

        This transform returns a List of Tensors and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (Tensor): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made.

    Returns:
       List: List (tl, tr, bl, br, center)
                Corresponding top left, top right, bottom left, bottom right and center crop.
    """
    warnings.warn(
        "This method is deprecated and will be removed in future releases. "
        "Please, use ``F.five_crop`` instead."
    )

    if not _is_tensor_a_torch_image(img):
        raise TypeError('tensor is not a torch image.')

    assert len(size) == 2, "Please provide only two dimensions (h, w) for size."

    _, image_height, image_width = img.size()
    crop_height, crop_width = size
    if crop_width > image_width or crop_height > image_height:
        msg = "Requested crop size {} is bigger than input size {}"
        raise ValueError(msg.format(size, (image_height, image_width)))

    tl = crop(img, 0, 0, crop_height, crop_width)
    tr = crop(img, 0, image_width - crop_width, crop_height, crop_width)
    bl = crop(img, image_height - crop_height, 0, crop_height, crop_width)
    br = crop(img, image_height - crop_height, image_width - crop_width, crop_height, crop_width)
    center = center_crop(img, (crop_height, crop_width))

    return [tl, tr, bl, br, center]


def ten_crop(img: Tensor, size: BroadcastingList2[int], vertical_flip: bool = False) -> List[Tensor]:
    """DEPRECATED. Crop the given Image Tensor into four corners and the central crop plus the
        flipped version of these (horizontal flipping is used by default).
    .. warning::

        Module ``transforms.functional_tensor`` is private and should not be used in user application.
        Please, consider instead using methods from `transforms.functional` module.

    .. warning::

        This method is deprecated and will be removed in future releases.
        Please, use ``F.ten_crop`` instead.

    .. Note::

        This transform returns a List of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (Tensor): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made.
        vertical_flip (bool): Use vertical flipping instead of horizontal

    Returns:
       List: List (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip)
                Corresponding top left, top right, bottom left, bottom right and center crop
                and same for the flipped image's tensor.
    """
    warnings.warn(
        "This method is deprecated and will be removed in future releases. "
        "Please, use ``F.ten_crop`` instead."
    )

    if not _is_tensor_a_torch_image(img):
        raise TypeError('tensor is not a torch image.')

    assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
    first_five = five_crop(img, size)

    if vertical_flip:
        img = vflip(img)
    else:
        img = hflip(img)

    second_five = five_crop(img, size)

    return first_five + second_five


def _blend(img1: Tensor, img2: Tensor, ratio: float) -> Tensor:
    bound = 1.0 if img1.is_floating_point() else 255.0
    return (ratio * img1 + (1.0 - ratio) * img2).clamp(0, bound).to(img1.dtype)
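
# _blend computes ratio * img1 + (1 - ratio) * img2, clamped to the valid range of
# img1's dtype (255 for integer images, 1.0 for float images). For example,
# _blend(img, torch.zeros_like(img), 0.5) halves every value of img.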


def _rgb2hsv(img):
    r, g, b = img.unbind(dim=-3)

    # Implementation is based on https://github.com/python-pillow/Pillow/blob/4174d4267616897df3746d315d5a2d0f82c656ee/
    # src/libImaging/Convert.c#L330
    maxc = torch.max(img, dim=-3).values
    minc = torch.min(img, dim=-3).values

    # The algorithm erases S and H channel where `maxc = minc`. This avoids NaN
    # from happening in the results, because
    #   + S channel has division by `maxc`, which is zero only if `maxc = minc`
    #   + H channel has division by `(maxc - minc)`.
    #
    # Instead of overwriting NaN afterwards, we just prevent it from occurring so
    # we don't need to deal with it in case we save the NaN in a buffer in
    # backprop, if it is ever supported, but it doesn't hurt to do so.
    eqc = maxc == minc

    cr = maxc - minc
    # Since `eqc => cr = 0`, replacing the denominator with 1 when `eqc` holds is fine.
    ones = torch.ones_like(maxc)
    s = cr / torch.where(eqc, ones, maxc)
    # Note that `eqc => maxc = minc = r = g = b`. So the following calculation
    # of `h` would reduce to `bc - gc + 2 + rc - bc + 4 + rc - bc = 6` so it
    # would not matter what values `rc`, `gc`, and `bc` have here, and thus
    # replacing the denominator with 1 when `eqc` holds is fine.
    cr_divisor = torch.where(eqc, ones, cr)
    rc = (maxc - r) / cr_divisor
    gc = (maxc - g) / cr_divisor
    bc = (maxc - b) / cr_divisor

    hr = (maxc == r) * (bc - gc)
    hg = ((maxc == g) & (maxc != r)) * (2.0 + rc - bc)
    hb = ((maxc != g) & (maxc != r)) * (4.0 + gc - rc)
    h = (hr + hg + hb)
    h = torch.fmod((h / 6.0 + 1.0), 1.0)
    return torch.stack((h, s, maxc), dim=-3)


def _hsv2rgb(img):
    h, s, v = img.unbind(dim=-3)
    i = torch.floor(h * 6.0)
    f = (h * 6.0) - i
    i = i.to(dtype=torch.int32)

    p = torch.clamp((v * (1.0 - s)), 0.0, 1.0)
    q = torch.clamp((v * (1.0 - s * f)), 0.0, 1.0)
    t = torch.clamp((v * (1.0 - s * (1.0 - f))), 0.0, 1.0)
    i = i % 6

    mask = i.unsqueeze(dim=-3) == torch.arange(6, device=i.device).view(-1, 1, 1)

    a1 = torch.stack((v, q, p, p, t, v), dim=-3)
    a2 = torch.stack((t, v, v, q, p, p), dim=-3)
    a3 = torch.stack((p, p, t, v, v, q), dim=-3)
    a4 = torch.stack((a1, a2, a3), dim=-4)

    return torch.einsum("...ijk, ...xijk -> ...xjk", mask.to(dtype=img.dtype), a4)
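
# The two conversions are (approximate) inverses on float images, which adjust_hue
# relies on. An illustrative round trip (the tolerance is an assumption):
#
#     img = torch.rand(3, 8, 8)
#     back = _hsv2rgb(_rgb2hsv(img))
#     torch.allclose(img, back, atol=1e-5)   # expected to hold up to floating point error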


def _pad_symmetric(img: Tensor, padding: List[int]) -> Tensor:
    # padding is left, right, top, bottom

    # crop if needed
    if padding[0] < 0 or padding[1] < 0 or padding[2] < 0 or padding[3] < 0:
        crop_left, crop_right, crop_top, crop_bottom = [-min(x, 0) for x in padding]
        img = img[..., crop_top:img.shape[-2] - crop_bottom, crop_left:img.shape[-1] - crop_right]
        padding = [max(x, 0) for x in padding]

    in_sizes = img.size()

    x_indices = [i for i in range(in_sizes[-1])]  # [0, 1, 2, 3, ...]
    left_indices = [i for i in range(padding[0] - 1, -1, -1)]  # e.g. [3, 2, 1, 0]
    right_indices = [-(i + 1) for i in range(padding[1])]  # e.g. [-1, -2, -3]
    x_indices = torch.tensor(left_indices + x_indices + right_indices)

    y_indices = [i for i in range(in_sizes[-2])]
    top_indices = [i for i in range(padding[2] - 1, -1, -1)]
    bottom_indices = [-(i + 1) for i in range(padding[3])]
    y_indices = torch.tensor(top_indices + y_indices + bottom_indices)

    ndim = img.ndim
    if ndim == 3:
        return img[:, y_indices[:, None], x_indices[None, :]]
    elif ndim == 4:
        return img[:, :, y_indices[:, None], x_indices[None, :]]
    else:
        raise RuntimeError("Symmetric padding of N-D tensors is not supported yet")


def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "constant") -> Tensor:
    r"""PRIVATE METHOD. Pad the given Tensor Image on all sides with specified padding mode and fill value.

    .. warning::

        Module ``transforms.functional_tensor`` is private and should not be used in user application.
        Please, consider instead using methods from `transforms.functional` module.

    Args:
        img (Tensor): Image to be padded.
        padding (int or tuple or list): Padding on each border. If a single int is provided this
            is used to pad all borders. If a tuple or list of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a tuple or list of length 4 is provided
            this is the padding for the left, top, right and bottom borders
            respectively. In torchscript mode padding as single int is not supported, use a tuple or
            list of length 1: ``[padding, ]``.
        fill (int): Pixel fill value for constant fill. Default is 0.
            This value is only used when the padding_mode is constant
        padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
            Default is constant.

            - constant: pads with a constant value, this value is specified with fill

            - edge: pads with the last value on the edge of the image

            - reflect: pads with reflection of image (without repeating the last value on the edge)

                       padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
                       will result in [3, 2, 1, 2, 3, 4, 3, 2]

            - symmetric: pads with reflection of image (repeating the last value on the edge)

                         padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
                         will result in [2, 1, 1, 2, 3, 4, 4, 3]

    Returns:
        Tensor: Padded image.
    """
    if not _is_tensor_a_torch_image(img):
        raise TypeError("tensor is not a torch image.")

    if not isinstance(padding, (int, tuple, list)):
        raise TypeError("Got inappropriate padding arg")
    if not isinstance(fill, (int, float)):
        raise TypeError("Got inappropriate fill arg")
    if not isinstance(padding_mode, str):
        raise TypeError("Got inappropriate padding_mode arg")

    if isinstance(padding, tuple):
        padding = list(padding)

    if isinstance(padding, list) and len(padding) not in [1, 2, 4]:
        raise ValueError("Padding must be an int or a 1, 2, or 4 element tuple, not a " +
                         "{} element tuple".format(len(padding)))

    if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
        raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")

    if isinstance(padding, int):
        if torch.jit.is_scripting():
            # This may be unreachable
            raise ValueError("padding can't be an int while torchscripting, set it as a list [value, ]")
        pad_left = pad_right = pad_top = pad_bottom = padding
    elif len(padding) == 1:
        pad_left = pad_right = pad_top = pad_bottom = padding[0]
    elif len(padding) == 2:
        pad_left = pad_right = padding[0]
        pad_top = pad_bottom = padding[1]
    else:
        pad_left = padding[0]
        pad_top = padding[1]
        pad_right = padding[2]
        pad_bottom = padding[3]

    p = [pad_left, pad_right, pad_top, pad_bottom]

    if padding_mode == "edge":
        # remap padding_mode str
        padding_mode = "replicate"
    elif padding_mode == "symmetric":
        # route to another implementation
        return _pad_symmetric(img, p)

    need_squeeze = False
    if img.ndim < 4:
        img = img.unsqueeze(dim=0)
        need_squeeze = True

    out_dtype = img.dtype
    need_cast = False
    if (padding_mode != "constant") and img.dtype not in (torch.float32, torch.float64):
        # Here we temporarily cast the input tensor to float
        # until pytorch issue is resolved :
        # https://github.com/pytorch/pytorch/issues/40763
        need_cast = True
        img = img.to(torch.float32)

    img = torch_pad(img, p, mode=padding_mode, value=float(fill))

    if need_squeeze:
        img = img.squeeze(dim=0)

    if need_cast:
        img = img.to(out_dtype)

    return img
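
# A minimal usage sketch (shapes are illustrative): a 2-element padding list pads
# left/right with the first value and top/bottom with the second.
#
#     img = torch.ones(3, 4, 4)
#     out = pad(img, [1, 2], fill=0)   # shape becomes (3, 8, 6)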


def resize(img: Tensor, size: List[int], interpolation: str = "bilinear") -> Tensor:
    r"""PRIVATE METHOD. Resize the input Tensor to the given size.

    .. warning::

        Module ``transforms.functional_tensor`` is private and should not be used in user application.
        Please, consider instead using methods from `transforms.functional` module.

    Args:
        img (Tensor): Image to be resized.
        size (int or tuple or list): Desired output size. If size is a sequence like
            (h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number maintaining
            the aspect ratio. i.e, if height > width, then image will be rescaled to
            :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`.
            In torchscript mode size as a single int is not supported, use a tuple or
            list of length 1: ``[size, ]``.
        interpolation (str): Desired interpolation. Default is "bilinear". Other supported values:
            "nearest" and "bicubic".

    Returns:
        Tensor: Resized image.
    """
    if not _is_tensor_a_torch_image(img):
        raise TypeError("tensor is not a torch image.")

    if not isinstance(size, (int, tuple, list)):
        raise TypeError("Got inappropriate size arg")
    if not isinstance(interpolation, str):
        raise TypeError("Got inappropriate interpolation arg")

    if interpolation not in ["nearest", "bilinear", "bicubic"]:
        raise ValueError("This interpolation mode is unsupported with Tensor input")

    if isinstance(size, tuple):
        size = list(size)

    if isinstance(size, list) and len(size) not in [1, 2]:
        raise ValueError("Size must be an int or a 1 or 2 element tuple/list, not a "
                         "{} element tuple/list".format(len(size)))

    w, h = _get_image_size(img)

    if isinstance(size, int):
        size_w, size_h = size, size
    elif len(size) < 2:
        size_w, size_h = size[0], size[0]
    else:
        size_w, size_h = size[1], size[0]  # Convention (h, w)

    if isinstance(size, int) or len(size) < 2:
        if w < h:
            size_h = int(size_w * h / w)
        else:
            size_w = int(size_h * w / h)

        if (w <= h and w == size_w) or (h <= w and h == size_h):
            return img
    img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [torch.float32, torch.float64])

    # Define align_corners to avoid warnings
    align_corners = False if interpolation in ["bilinear", "bicubic"] else None
    img = interpolate(img, size=[size_h, size_w], mode=interpolation, align_corners=align_corners)
    if interpolation == "bicubic" and out_dtype == torch.uint8:
        img = img.clamp(min=0, max=255)

    img = _cast_squeeze_out(img, need_cast=need_cast, need_squeeze=need_squeeze, out_dtype=out_dtype)

    return img
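
# A minimal usage sketch (shapes are illustrative): a single value in `size` matches
# the smaller edge and keeps the aspect ratio, while a (h, w) pair is used as-is.
#
#     img = torch.rand(3, 100, 200)
#     resize(img, [50]).shape       # torch.Size([3, 50, 100])
#     resize(img, [32, 48]).shape   # torch.Size([3, 32, 48])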


def _assert_grid_transform_inputs(
        img: Tensor,
        matrix: Optional[List[float]],
        interpolation: str,
        fill: Optional[List[float]],
        supported_interpolation_modes: List[str],
        coeffs: Optional[List[float]] = None,
):
    if not (isinstance(img, torch.Tensor) and _is_tensor_a_torch_image(img)):
        raise TypeError("Input img should be Tensor Image")
    if matrix is not None and not isinstance(matrix, list):
        raise TypeError("Argument matrix should be a list")

    if matrix is not None and len(matrix) != 6:
        raise ValueError("Argument matrix should have 6 float values")
    if coeffs is not None and len(coeffs) != 8:
        raise ValueError("Argument coeffs should have 8 float values")

    if fill is not None and not isinstance(fill, (int, float, tuple, list)):
        warnings.warn("Argument fill should be either int, float, tuple or list")

    # Check fill
    num_channels = _get_image_num_channels(img)
    if isinstance(fill, (tuple, list)) and (len(fill) > 1 and len(fill) != num_channels):
        msg = ("The number of elements in 'fill' cannot broadcast to match the number of "
               "channels of the image ({} != {})")
        raise ValueError(msg.format(len(fill), num_channels))
    if interpolation not in supported_interpolation_modes:
        raise ValueError("Interpolation mode '{}' is unsupported with Tensor input".format(interpolation))


def _cast_squeeze_in(img: Tensor, req_dtypes: List[torch.dtype]) -> Tuple[Tensor, bool, bool, torch.dtype]:
    need_squeeze = False
    # make image NCHW
    if img.ndim < 4:
        img = img.unsqueeze(dim=0)
        need_squeeze = True

    out_dtype = img.dtype
    need_cast = False
    if out_dtype not in req_dtypes:
        need_cast = True
        req_dtype = req_dtypes[0]
        img = img.to(req_dtype)
    return img, need_cast, need_squeeze, out_dtype


def _cast_squeeze_out(img: Tensor, need_cast: bool, need_squeeze: bool, out_dtype: torch.dtype):
    if need_squeeze:
        img = img.squeeze(dim=0)

    if need_cast:
        if out_dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):
            # it is better to round before cast
            img = torch.round(img)
        img = img.to(out_dtype)

    return img


def _apply_grid_transform(img: Tensor, grid: Tensor, mode: str, fill: Optional[List[float]]) -> Tensor:
    img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [grid.dtype, ])

    if img.shape[0] > 1:
        # Apply same grid to a batch of images
        grid = grid.expand(img.shape[0], grid.shape[1], grid.shape[2], grid.shape[3])

    # Append a dummy mask for customized fill colors, should be faster than grid_sample() twice
    if fill is not None:
        dummy = torch.ones((img.shape[0], 1, img.shape[2], img.shape[3]), dtype=img.dtype, device=img.device)
        img = torch.cat((img, dummy), dim=1)

    img = grid_sample(img, grid, mode=mode, padding_mode="zeros", align_corners=False)

    # Fill with required color
    if fill is not None:
        mask = img[:, -1:, :, :]  # N * 1 * H * W
        img = img[:, :-1, :, :]  # N * C * H * W
        mask = mask.expand_as(img)
        len_fill = len(fill) if isinstance(fill, (tuple, list)) else 1
        fill_img = torch.tensor(fill, dtype=img.dtype, device=img.device).view(1, len_fill, 1, 1).expand_as(img)
        if mode == 'nearest':
            mask = mask < 0.5
            img[mask] = fill_img[mask]
        else:  # 'bilinear'
            img = img * mask + (1.0 - mask) * fill_img

    img = _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype)
    return img


def _gen_affine_grid(
        theta: Tensor, w: int, h: int, ow: int, oh: int,
) -> Tensor:
    # https://github.com/pytorch/pytorch/blob/74b65c32be68b15dc7c9e8bb62459efbfbde33d8/aten/src/ATen/native/
    # AffineGridGenerator.cpp#L18
    # Difference with AffineGridGenerator is that:
    # 1) we normalize grid values after applying theta
    # 2) we can normalize by other image size, such that it covers "extend" option like in PIL.Image.rotate

    d = 0.5
    base_grid = torch.empty(1, oh, ow, 3, dtype=theta.dtype, device=theta.device)
    x_grid = torch.linspace(-ow * 0.5 + d, ow * 0.5 + d - 1, steps=ow, device=theta.device)
    base_grid[..., 0].copy_(x_grid)
    y_grid = torch.linspace(-oh * 0.5 + d, oh * 0.5 + d - 1, steps=oh, device=theta.device).unsqueeze_(-1)
    base_grid[..., 1].copy_(y_grid)
    base_grid[..., 2].fill_(1)

    rescaled_theta = theta.transpose(1, 2) / torch.tensor([0.5 * w, 0.5 * h], dtype=theta.dtype, device=theta.device)
    output_grid = base_grid.view(1, oh * ow, 3).bmm(rescaled_theta)
    return output_grid.view(1, oh, ow, 2)


def affine(
        img: Tensor, matrix: List[float], interpolation: str = "nearest", fill: Optional[List[float]] = None
) -> Tensor:
    """PRIVATE METHOD. Apply affine transformation on the Tensor image keeping image center invariant.

    .. warning::

        Module ``transforms.functional_tensor`` is private and should not be used in user application.
        Please, consider instead using methods from `transforms.functional` module.

    Args:
        img (Tensor): image to be rotated.
        matrix (list of floats): list of 6 float values representing inverse matrix for affine transformation.
        interpolation (str): An optional resampling filter. Default is "nearest". Other supported values: "bilinear".
        fill (sequence or int or float, optional): Optional fill value, default None.
            If None, fill with 0.

    Returns:
        Tensor: Transformed image.
    """
    _assert_grid_transform_inputs(img, matrix, interpolation, fill, ["nearest", "bilinear"])
    dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    theta = torch.tensor(matrix, dtype=dtype, device=img.device).reshape(1, 2, 3)
    shape = img.shape
    # grid will be generated on the same device as theta and img
    grid = _gen_affine_grid(theta, w=shape[-1], h=shape[-2], ow=shape[-1], oh=shape[-2])
    return _apply_grid_transform(img, grid, interpolation, fill=fill)
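
# A minimal usage sketch (the matrix values are illustrative): `matrix` is the
# inverse affine matrix flattened as [a, b, tx, c, d, ty]; the identity matrix
# leaves the image (approximately) unchanged.
#
#     img = torch.rand(1, 3, 32, 32)
#     out = affine(img, matrix=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0], interpolation="bilinear")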


def _compute_output_size(matrix: List[float], w: int, h: int) -> Tuple[int, int]:
    # Inspired by the PIL implementation:
    # https://github.com/python-pillow/Pillow/blob/11de3318867e4398057373ee9f12dcb33db7335c/src/PIL/Image.py#L2054

    # pts are Top-Left, Top-Right, Bottom-Left, Bottom-Right points.
    pts = torch.tensor([
        [-0.5 * w, -0.5 * h, 1.0],
        [-0.5 * w, 0.5 * h, 1.0],
        [0.5 * w, 0.5 * h, 1.0],
        [0.5 * w, -0.5 * h, 1.0],
    ])
    theta = torch.tensor(matrix, dtype=torch.float).reshape(1, 2, 3)
    new_pts = pts.view(1, 4, 3).bmm(theta.transpose(1, 2)).view(4, 2)
    min_vals, _ = new_pts.min(dim=0)
    max_vals, _ = new_pts.max(dim=0)

    # Truncate precision to 1e-4 to avoid ceil of Xe-15 to 1.0
    tol = 1e-4
    cmax = torch.ceil((max_vals / tol).trunc_() * tol)
    cmin = torch.floor((min_vals / tol).trunc_() * tol)
    size = cmax - cmin
    return int(size[0]), int(size[1])


def rotate(
    img: Tensor, matrix: List[float], interpolation: str = "nearest",
    expand: bool = False, fill: Optional[List[float]] = None
) -> Tensor:
    """PRIVATE METHOD. Rotate the Tensor image by angle.

    .. warning::

        Module ``transforms.functional_tensor`` is private and should not be used in user application.
        Please, consider instead using methods from `transforms.functional` module.

    Args:
        img (Tensor): image to be rotated.
        matrix (list of floats): list of 6 float values representing inverse matrix for rotation transformation.
            Translation part (``matrix[2]`` and ``matrix[5]``) should be in pixel coordinates.
        interpolation (str): An optional resampling filter. Default is "nearest". Other supported values: "bilinear".
        expand (bool, optional): Optional expansion flag.
            If true, expands the output image to make it large enough to hold the entire rotated image.
            If false or omitted, make the output image the same size as the input image.
            Note that the expand flag assumes rotation around the center and no translation.
        fill (sequence or int or float, optional): Optional fill value, default None.
            If None, fill with 0.

    Returns:
        Tensor: Rotated image.

    .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters

    """
    _assert_grid_transform_inputs(img, matrix, interpolation, fill, ["nearest", "bilinear"])
    w, h = img.shape[-1], img.shape[-2]
    ow, oh = _compute_output_size(matrix, w, h) if expand else (w, h)
    dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    theta = torch.tensor(matrix, dtype=dtype, device=img.device).reshape(1, 2, 3)
    # grid will be generated on the same device as theta and img
    grid = _gen_affine_grid(theta, w=w, h=h, ow=ow, oh=oh)

    return _apply_grid_transform(img, grid, interpolation, fill=fill)


def _perspective_grid(coeffs: List[float], ow: int, oh: int, dtype: torch.dtype, device: torch.device):
    # https://github.com/python-pillow/Pillow/blob/4634eafe3c695a014267eefdce830b4a825beed7/
    # src/libImaging/Geometry.c#L394

    #
    # x_out = (coeffs[0] * x + coeffs[1] * y + coeffs[2]) / (coeffs[6] * x + coeffs[7] * y + 1)
    # y_out = (coeffs[3] * x + coeffs[4] * y + coeffs[5]) / (coeffs[6] * x + coeffs[7] * y + 1)
    #
    theta1 = torch.tensor([[
        [coeffs[0], coeffs[1], coeffs[2]],
        [coeffs[3], coeffs[4], coeffs[5]]
    ]], dtype=dtype, device=device)
    theta2 = torch.tensor([[
        [coeffs[6], coeffs[7], 1.0],
        [coeffs[6], coeffs[7], 1.0]
    ]], dtype=dtype, device=device)

    d = 0.5
    base_grid = torch.empty(1, oh, ow, 3, dtype=dtype, device=device)
    x_grid = torch.linspace(d, ow * 1.0 + d - 1.0, steps=ow, device=device)
    base_grid[..., 0].copy_(x_grid)
    y_grid = torch.linspace(d, oh * 1.0 + d - 1.0, steps=oh, device=device).unsqueeze_(-1)
    base_grid[..., 1].copy_(y_grid)
    base_grid[..., 2].fill_(1)

    rescaled_theta1 = theta1.transpose(1, 2) / torch.tensor([0.5 * ow, 0.5 * oh], dtype=dtype, device=device)
    output_grid1 = base_grid.view(1, oh * ow, 3).bmm(rescaled_theta1)
    output_grid2 = base_grid.view(1, oh * ow, 3).bmm(theta2.transpose(1, 2))

    output_grid = output_grid1 / output_grid2 - 1.0
    return output_grid.view(1, oh, ow, 2)


def perspective(
    img: Tensor, perspective_coeffs: List[float], interpolation: str = "bilinear", fill: Optional[List[float]] = None
) -> Tensor:
    """PRIVATE METHOD. Perform perspective transform of the given Tensor image.

    .. warning::

        Module ``transforms.functional_tensor`` is private and should not be used in user application.
        Please, consider instead using methods from `transforms.functional` module.

    Args:
        img (Tensor): Image to be transformed.
        perspective_coeffs (list of float): perspective transformation coefficients.
        interpolation (str): Interpolation type. Default, "bilinear".
        fill (sequence or int or float, optional): Optional fill value, default None.
            If None, fill with 0.

    Returns:
        Tensor: transformed image.
    """
    if not (isinstance(img, torch.Tensor) and _is_tensor_a_torch_image(img)):
        raise TypeError('Input img should be Tensor Image')

    _assert_grid_transform_inputs(
        img,
        matrix=None,
        interpolation=interpolation,
        fill=fill,
        supported_interpolation_modes=["nearest", "bilinear"],
        coeffs=perspective_coeffs
    )

    ow, oh = img.shape[-1], img.shape[-2]
    dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    grid = _perspective_grid(perspective_coeffs, ow=ow, oh=oh, dtype=dtype, device=img.device)
    return _apply_grid_transform(img, grid, interpolation, fill=fill)


def _get_gaussian_kernel1d(kernel_size: int, sigma: float) -> Tensor:
    ksize_half = (kernel_size - 1) * 0.5

    x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size)
    pdf = torch.exp(-0.5 * (x / sigma).pow(2))
    kernel1d = pdf / pdf.sum()

    return kernel1d


def _get_gaussian_kernel2d(
        kernel_size: List[int], sigma: List[float], dtype: torch.dtype, device: torch.device
) -> Tensor:
    kernel1d_x = _get_gaussian_kernel1d(kernel_size[0], sigma[0]).to(device, dtype=dtype)
    kernel1d_y = _get_gaussian_kernel1d(kernel_size[1], sigma[1]).to(device, dtype=dtype)
    kernel2d = torch.mm(kernel1d_y[:, None], kernel1d_x[None, :])
    return kernel2d


def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: List[float]) -> Tensor:
    """PRIVATE METHOD. Performs Gaussian blurring on the img by given kernel.

    .. warning::

        Module ``transforms.functional_tensor`` is private and should not be used in user application.
        Please, consider instead using methods from `transforms.functional` module.

    Args:
        img (Tensor): Image to be blurred
        kernel_size (sequence of int or int): Kernel size of the Gaussian kernel ``(kx, ky)``.
        sigma (sequence of float or float, optional): Standard deviation of the Gaussian kernel ``(sx, sy)``.

    Returns:
        Tensor: An image that is blurred using a Gaussian kernel of the given parameters
    """
    if not (isinstance(img, torch.Tensor) and _is_tensor_a_torch_image(img)):
        raise TypeError('img should be Tensor Image. Got {}'.format(type(img)))

    dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    kernel = _get_gaussian_kernel2d(kernel_size, sigma, dtype=dtype, device=img.device)
    kernel = kernel.expand(img.shape[-3], 1, kernel.shape[0], kernel.shape[1])

    img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [kernel.dtype, ])

    # padding = (left, right, top, bottom)
    padding = [kernel_size[0] // 2, kernel_size[0] // 2, kernel_size[1] // 2, kernel_size[1] // 2]
    img = torch_pad(img, padding, mode="reflect")
    img = conv2d(img, kernel, groups=img.shape[-3])

    img = _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype)
    return img
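
# A minimal usage sketch (values are illustrative): kernel_size is (kx, ky) and sigma
# is (sx, sy); blurring a constant image leaves it (approximately) unchanged.
#
#     img = torch.full((3, 32, 32), 0.5)
#     out = gaussian_blur(img, kernel_size=[3, 3], sigma=[1.0, 1.0])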