import warnings

import torch
from torch import Tensor
from torch.nn.functional import grid_sample, conv2d, interpolate, pad as torch_pad
from torch.jit.annotations import BroadcastingList2
from typing import Optional, Tuple, List


def _is_tensor_a_torch_image(x: Tensor) -> bool:
    return x.ndim >= 2


def _assert_image_tensor(img: Tensor) -> None:
    if not _is_tensor_a_torch_image(img):
        raise TypeError("Tensor is not a torch image.")


def _get_image_size(img: Tensor) -> List[int]:
    # Returns (w, h) of tensor image
    _assert_image_tensor(img)
    return [img.shape[-1], img.shape[-2]]


def _get_image_num_channels(img: Tensor) -> int:
    if img.ndim == 2:
        return 1
    elif img.ndim > 2:
        return img.shape[-3]

    raise TypeError("Input ndim should be 2 or more. Got {}".format(img.ndim))


def _max_value(dtype: torch.dtype) -> float:
    # TODO: replace this method with torch.iinfo when it gets torchscript support.
    # https://github.com/pytorch/pytorch/issues/41492

    a = torch.tensor(2, dtype=dtype)
    signed = 1 if torch.tensor(0, dtype=dtype).is_signed() else 0
    bits = 1
    max_value = torch.tensor(-signed, dtype=torch.long)
    while True:
        next_value = a.pow(bits - signed).sub(1)
        if next_value > max_value:
            max_value = next_value
            bits *= 2
        else:
            return max_value.item()
    return max_value.item()  # unreachable in eager mode; left in place so every code path ends in a return

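# Informal sanity check (illustrative values, not executed by the module):
#   _max_value(torch.uint8)  -> 255
#   _max_value(torch.int8)   -> 127
#   _max_value(torch.int32)  -> 2147483647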

def _assert_channels(img: Tensor, permitted: List[int]) -> None:
    c = _get_image_num_channels(img)
    if c not in permitted:
        raise TypeError("Input image tensor should have one of {} channels, but found {}".format(permitted, c))


def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor:
    if image.dtype == dtype:
        return image

    # TODO: replace with image.dtype.is_floating_point when torchscript supports it
    if torch.empty(0, dtype=image.dtype).is_floating_point():

        # TODO: replace with dtype.is_floating_point when torchscript supports it
        if torch.tensor(0, dtype=dtype).is_floating_point():
            return image.to(dtype)

        # float to int
        if (image.dtype == torch.float32 and dtype in (torch.int32, torch.int64)) or (
            image.dtype == torch.float64 and dtype == torch.int64
        ):
            msg = f"The cast from {image.dtype} to {dtype} cannot be performed safely."
            raise RuntimeError(msg)

        # https://github.com/pytorch/vision/pull/2078#issuecomment-612045321
        # For data in the range 0-1, (float * 255).to(uint) is only 255
        # when float is exactly 1.0.
        # `max + 1 - epsilon` provides more evenly distributed mapping of
        # ranges of floats to ints.
        eps = 1e-3
        max_val = _max_value(dtype)
        result = image.mul(max_val + 1.0 - eps)
        return result.to(dtype)
    else:
        input_max = _max_value(image.dtype)

        # int to float
        # TODO: replace with dtype.is_floating_point when torchscript supports it
        if torch.tensor(0, dtype=dtype).is_floating_point():
            image = image.to(dtype)
            return image / input_max

        output_max = _max_value(dtype)

        # int to int
        if input_max > output_max:
            # factor should be forced to int for torch jit script
            # otherwise factor is a float and image // factor can produce different results
            factor = int((input_max + 1) // (output_max + 1))
            image = image // factor
            return image.to(dtype)
        else:
            # factor should be forced to int for torch jit script
            # otherwise factor is a float and image * factor can produce different results
            factor = int((output_max + 1) // (input_max + 1))
            image = image.to(dtype)
            return image * factor

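# Informal round-trip sketch (values are illustrative):
#   x = torch.tensor([0, 128, 255], dtype=torch.uint8)
#   f = convert_image_dtype(x, torch.float32)  # tensor([0.0000, 0.5020, 1.0000])
#   convert_image_dtype(f, torch.uint8)        # tensor([0, 128, 255], dtype=torch.uint8)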

def vflip(img: Tensor) -> Tensor:
    _assert_image_tensor(img)

    return img.flip(-2)


def hflip(img: Tensor) -> Tensor:
    _assert_image_tensor(img)

    return img.flip(-1)


def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:
    _assert_image_tensor(img)

    return img[..., top:top + height, left:left + width]


def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor:
    if img.ndim < 3:
        raise TypeError("Input image tensor should have at least 3 dimensions, but found {}".format(img.ndim))
    _assert_channels(img, [3])

    if num_output_channels not in (1, 3):
        raise ValueError('num_output_channels should be either 1 or 3')

    r, g, b = img.unbind(dim=-3)
    # This implementation closely follows the TF one:
    # https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/python/ops/image_ops_impl.py#L2105-L2138
    l_img = (0.2989 * r + 0.587 * g + 0.114 * b).to(img.dtype)
    l_img = l_img.unsqueeze(dim=-3)

    if num_output_channels == 3:
        return l_img.expand(img.shape)

    return l_img

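# Shape sketch (illustrative):
#   rgb = torch.rand(3, 8, 8)
#   rgb_to_grayscale(rgb).shape                         # torch.Size([1, 8, 8])
#   rgb_to_grayscale(rgb, num_output_channels=3).shape  # torch.Size([3, 8, 8])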

def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor:
    if brightness_factor < 0:
        raise ValueError('brightness_factor ({}) must be non-negative.'.format(brightness_factor))

    _assert_image_tensor(img)

    _assert_channels(img, [1, 3])

    return _blend(img, torch.zeros_like(img), brightness_factor)


def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor:
    if contrast_factor < 0:
        raise ValueError('contrast_factor ({}) must be non-negative.'.format(contrast_factor))

    _assert_image_tensor(img)

    _assert_channels(img, [3])

    dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    mean = torch.mean(rgb_to_grayscale(img).to(dtype), dim=(-3, -2, -1), keepdim=True)

    return _blend(img, mean, contrast_factor)


def adjust_hue(img: Tensor, hue_factor: float) -> Tensor:
    if not (-0.5 <= hue_factor <= 0.5):
        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))

    if not isinstance(img, torch.Tensor):
        raise TypeError('Input img should be Tensor image')

    _assert_image_tensor(img)

    _assert_channels(img, [3])

    orig_dtype = img.dtype
    if img.dtype == torch.uint8:
        img = img.to(dtype=torch.float32) / 255.0

    img = _rgb2hsv(img)
    h, s, v = img.unbind(dim=-3)
    h = (h + hue_factor) % 1.0
    img = torch.stack((h, s, v), dim=-3)
    img_hue_adj = _hsv2rgb(img)

    if orig_dtype == torch.uint8:
        img_hue_adj = (img_hue_adj * 255.0).to(dtype=orig_dtype)

    return img_hue_adj


def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor:
    if saturation_factor < 0:
        raise ValueError('saturation_factor ({}) must be non-negative.'.format(saturation_factor))

    _assert_image_tensor(img)

    _assert_channels(img, [3])

    return _blend(img, rgb_to_grayscale(img), saturation_factor)


def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:
    if not isinstance(img, torch.Tensor):
        raise TypeError('Input img should be a Tensor.')

    _assert_channels(img, [1, 3])

    if gamma < 0:
        raise ValueError('Gamma should be a non-negative real number')

    result = img
    dtype = img.dtype
    if not torch.is_floating_point(img):
        result = convert_image_dtype(result, torch.float32)

    result = (gain * result ** gamma).clamp(0, 1)

    result = convert_image_dtype(result, dtype)
    return result


def center_crop(img: Tensor, output_size: BroadcastingList2[int]) -> Tensor:
    """DEPRECATED
    """
    warnings.warn(
        "This method is deprecated and will be removed in future releases. "
        "Please, use ``F.center_crop`` instead."
    )

    _assert_image_tensor(img)

    _, image_height, image_width = img.size()
    crop_height, crop_width = output_size
    # crop_top = int(round((image_height - crop_height) / 2.))
    # Result can be different between python func and scripted func
    # Temporary workaround:
    crop_top = int((image_height - crop_height + 1) * 0.5)
    # crop_left = int(round((image_width - crop_width) / 2.))
    # Result can be different between python func and scripted func
    # Temporary workaround:
    crop_left = int((image_width - crop_width + 1) * 0.5)

    return crop(img, crop_top, crop_left, crop_height, crop_width)


def five_crop(img: Tensor, size: BroadcastingList2[int]) -> List[Tensor]:
    """DEPRECATED
    """
    warnings.warn(
        "This method is deprecated and will be removed in future releases. "
        "Please, use ``F.five_crop`` instead."
    )

    _assert_image_tensor(img)

    assert len(size) == 2, "Please provide only two dimensions (h, w) for size."

    _, image_height, image_width = img.size()
    crop_height, crop_width = size
    if crop_width > image_width or crop_height > image_height:
        msg = "Requested crop size {} is bigger than input size {}"
        raise ValueError(msg.format(size, (image_height, image_width)))

    tl = crop(img, 0, 0, crop_height, crop_width)
    tr = crop(img, 0, image_width - crop_width, crop_height, crop_width)
    bl = crop(img, image_height - crop_height, 0, crop_height, crop_width)
    br = crop(img, image_height - crop_height, image_width - crop_width, crop_height, crop_width)
    center = center_crop(img, (crop_height, crop_width))

    return [tl, tr, bl, br, center]

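# Usage sketch: five (C, crop_h, crop_w) crops, ordered
# [top-left, top-right, bottom-left, bottom-right, center]:
#   img = torch.rand(3, 10, 10)
#   crops = five_crop(img, [4, 4])  # each crop has shape torch.Size([3, 4, 4])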

def ten_crop(img: Tensor, size: BroadcastingList2[int], vertical_flip: bool = False) -> List[Tensor]:
    """DEPRECATED
    """
    warnings.warn(
        "This method is deprecated and will be removed in future releases. "
        "Please, use ``F.ten_crop`` instead."
    )

    _assert_image_tensor(img)

    assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
    first_five = five_crop(img, size)

    if vertical_flip:
        img = vflip(img)
    else:
        img = hflip(img)

    second_five = five_crop(img, size)

    return first_five + second_five


def _blend(img1: Tensor, img2: Tensor, ratio: float) -> Tensor:
    ratio = float(ratio)
    bound = 1.0 if img1.is_floating_point() else 255.0
    return (ratio * img1 + (1.0 - ratio) * img2).clamp(0, bound).to(img1.dtype)

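# Informal check of the blend formula (ratio=1 keeps img1, ratio=0 keeps img2):
#   a = torch.full((1, 2, 2), 0.8)
#   b = torch.zeros(1, 2, 2)
#   _blend(a, b, 0.5)  # every element == 0.4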

def _rgb2hsv(img):
    r, g, b = img.unbind(dim=-3)

    # Implementation is based on https://github.com/python-pillow/Pillow/blob/4174d4267616897df3746d315d5a2d0f82c656ee/
    # src/libImaging/Convert.c#L330
    maxc = torch.max(img, dim=-3).values
    minc = torch.min(img, dim=-3).values

    # The algorithm erases the S and H channels where `maxc = minc`. This avoids NaN
    # in the results, because
    #   + the S channel has a division by `maxc`, which is zero only when `maxc = minc`
    #   + the H channel has a division by `(maxc - minc)`.
    #
    # Instead of overwriting NaN afterwards, we just prevent it from occurring, so
    # no NaN is ever saved in a buffer during backprop (if that is ever supported);
    # either way, it doesn't hurt to do so.
    eqc = maxc == minc

    cr = maxc - minc
    # Since `eqc => cr = 0`, replacing the denominator with 1 when `eqc` is True is fine.
    ones = torch.ones_like(maxc)
    s = cr / torch.where(eqc, ones, maxc)
    # Note that `eqc => maxc = minc = r = g = b`. So the following calculation
    # of `h` would reduce to `bc - gc + 2 + rc - bc + 4 + gc - rc = 6`, so it
    # would not matter what values `rc`, `gc`, and `bc` have here, and thus
    # replacing the denominator with 1 when `eqc` is True is fine.
    cr_divisor = torch.where(eqc, ones, cr)
    rc = (maxc - r) / cr_divisor
    gc = (maxc - g) / cr_divisor
    bc = (maxc - b) / cr_divisor

    hr = (maxc == r) * (bc - gc)
    hg = ((maxc == g) & (maxc != r)) * (2.0 + rc - bc)
    hb = ((maxc != g) & (maxc != r)) * (4.0 + gc - rc)
    h = (hr + hg + hb)
    h = torch.fmod((h / 6.0 + 1.0), 1.0)
    return torch.stack((h, s, maxc), dim=-3)


def _hsv2rgb(img):
    h, s, v = img.unbind(dim=-3)
    i = torch.floor(h * 6.0)
    f = (h * 6.0) - i
    i = i.to(dtype=torch.int32)

    p = torch.clamp((v * (1.0 - s)), 0.0, 1.0)
    q = torch.clamp((v * (1.0 - s * f)), 0.0, 1.0)
    t = torch.clamp((v * (1.0 - s * (1.0 - f))), 0.0, 1.0)
    i = i % 6

    mask = i.unsqueeze(dim=-3) == torch.arange(6, device=i.device).view(-1, 1, 1)

    a1 = torch.stack((v, q, p, p, t, v), dim=-3)
    a2 = torch.stack((t, v, v, q, p, p), dim=-3)
    a3 = torch.stack((p, p, t, v, v, q), dim=-3)
    a4 = torch.stack((a1, a2, a3), dim=-4)

    return torch.einsum("...ijk, ...xijk -> ...xjk", mask.to(dtype=img.dtype), a4)

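# Informal round-trip sketch for float images in [0, 1]:
#   rgb = torch.rand(3, 4, 4)
#   back = _hsv2rgb(_rgb2hsv(rgb))
#   torch.allclose(rgb, back, atol=1e-5)  # expected True, up to numeric noise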

def _pad_symmetric(img: Tensor, padding: List[int]) -> Tensor:
    # padding is left, right, top, bottom

    # crop if needed
    if padding[0] < 0 or padding[1] < 0 or padding[2] < 0 or padding[3] < 0:
        crop_left, crop_right, crop_top, crop_bottom = [-min(x, 0) for x in padding]
        img = img[..., crop_top:img.shape[-2] - crop_bottom, crop_left:img.shape[-1] - crop_right]
        padding = [max(x, 0) for x in padding]

    in_sizes = img.size()

    x_indices = [i for i in range(in_sizes[-1])]  # [0, 1, 2, 3, ...]
    left_indices = [i for i in range(padding[0] - 1, -1, -1)]  # e.g. [3, 2, 1, 0]
    right_indices = [-(i + 1) for i in range(padding[1])]  # e.g. [-1, -2, -3]
    x_indices = torch.tensor(left_indices + x_indices + right_indices)

    y_indices = [i for i in range(in_sizes[-2])]
    top_indices = [i for i in range(padding[2] - 1, -1, -1)]
    bottom_indices = [-(i + 1) for i in range(padding[3])]
    y_indices = torch.tensor(top_indices + y_indices + bottom_indices)

    ndim = img.ndim
    if ndim == 3:
        return img[:, y_indices[:, None], x_indices[None, :]]
    elif ndim == 4:
        return img[:, :, y_indices[:, None], x_indices[None, :]]
    else:
        raise RuntimeError("Symmetric padding of N-D tensors is not supported yet")

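# Illustrative example: one column of symmetric padding on each side.
#   x = torch.arange(3).view(1, 1, 3)
#   _pad_symmetric(x, [1, 1, 0, 0])  # tensor([[[0, 0, 1, 2, 2]]])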

def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "constant") -> Tensor:
    _assert_image_tensor(img)

    if not isinstance(padding, (int, tuple, list)):
        raise TypeError("Got inappropriate padding arg")
    if not isinstance(fill, (int, float)):
        raise TypeError("Got inappropriate fill arg")
    if not isinstance(padding_mode, str):
        raise TypeError("Got inappropriate padding_mode arg")

    if isinstance(padding, tuple):
        padding = list(padding)

    if isinstance(padding, list) and len(padding) not in [1, 2, 4]:
        raise ValueError("Padding must be an int or a 1, 2, or 4 element tuple, not a " +
                         "{} element tuple".format(len(padding)))

    if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
        raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")

    if isinstance(padding, int):
        if torch.jit.is_scripting():
            # This may be unreachable
            raise ValueError("padding can't be an int while torchscripting, set it as a list [value, ]")
        pad_left = pad_right = pad_top = pad_bottom = padding
    elif len(padding) == 1:
        pad_left = pad_right = pad_top = pad_bottom = padding[0]
    elif len(padding) == 2:
        pad_left = pad_right = padding[0]
        pad_top = pad_bottom = padding[1]
    else:
        pad_left = padding[0]
        pad_top = padding[1]
        pad_right = padding[2]
        pad_bottom = padding[3]

    p = [pad_left, pad_right, pad_top, pad_bottom]

    if padding_mode == "edge":
        # remap padding_mode str
        padding_mode = "replicate"
    elif padding_mode == "symmetric":
        # route to another implementation
        return _pad_symmetric(img, p)

    need_squeeze = False
    if img.ndim < 4:
        img = img.unsqueeze(dim=0)
        need_squeeze = True

    out_dtype = img.dtype
    need_cast = False
    if (padding_mode != "constant") and img.dtype not in (torch.float32, torch.float64):
        # Temporarily cast the input tensor to float32 until the pytorch issue is resolved:
        # https://github.com/pytorch/pytorch/issues/40763
        need_cast = True
        img = img.to(torch.float32)

    img = torch_pad(img, p, mode=padding_mode, value=float(fill))

    if need_squeeze:
        img = img.squeeze(dim=0)

    if need_cast:
        img = img.to(out_dtype)

    return img

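# Usage sketch (a 4-element padding is [left, top, right, bottom]):
#   img = torch.zeros(3, 4, 4)
#   pad(img, [1, 2]).shape                                # torch.Size([3, 8, 6])
#   pad(img, [1, 1, 1, 1], padding_mode="reflect").shape  # torch.Size([3, 6, 6])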

def resize(img: Tensor, size: List[int], interpolation: str = "bilinear") -> Tensor:
    _assert_image_tensor(img)

    if not isinstance(size, (int, tuple, list)):
        raise TypeError("Got inappropriate size arg")
    if not isinstance(interpolation, str):
        raise TypeError("Got inappropriate interpolation arg")

    if interpolation not in ["nearest", "bilinear", "bicubic"]:
        raise ValueError("This interpolation mode is unsupported with Tensor input")

    if isinstance(size, tuple):
        size = list(size)

    if isinstance(size, list) and len(size) not in [1, 2]:
        raise ValueError("Size must be an int or a 1 or 2 element tuple/list, not a "
                         "{} element tuple/list".format(len(size)))

    w, h = _get_image_size(img)

    if isinstance(size, int):
        size_w, size_h = size, size
    elif len(size) < 2:
        size_w, size_h = size[0], size[0]
    else:
        size_w, size_h = size[1], size[0]  # Convention (h, w)

    if isinstance(size, int) or len(size) < 2:
        if w < h:
            size_h = int(size_w * h / w)
        else:
            size_w = int(size_h * w / h)

        if (w <= h and w == size_w) or (h <= w and h == size_h):
            return img

    img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [torch.float32, torch.float64])

    # Define align_corners to avoid warnings
    align_corners = False if interpolation in ["bilinear", "bicubic"] else None

    img = interpolate(img, size=[size_h, size_w], mode=interpolation, align_corners=align_corners)

    if interpolation == "bicubic" and out_dtype == torch.uint8:
        img = img.clamp(min=0, max=255)

    img = _cast_squeeze_out(img, need_cast=need_cast, need_squeeze=need_squeeze, out_dtype=out_dtype)

    return img

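# Usage sketch: a 1-element size resizes the smaller edge and keeps the aspect
# ratio; a 2-element size is (h, w):
#   img = torch.rand(3, 100, 200)
#   resize(img, [50]).shape      # torch.Size([3, 50, 100])
#   resize(img, [60, 40]).shape  # torch.Size([3, 60, 40])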

def _assert_grid_transform_inputs(
        img: Tensor,
        matrix: Optional[List[float]],
        interpolation: str,
        fill: Optional[List[float]],
        supported_interpolation_modes: List[str],
        coeffs: Optional[List[float]] = None,
):
    if not isinstance(img, torch.Tensor):
        raise TypeError("Input img should be Tensor")

    _assert_image_tensor(img)

    if matrix is not None and not isinstance(matrix, list):
        raise TypeError("Argument matrix should be a list")

    if matrix is not None and len(matrix) != 6:
        raise ValueError("Argument matrix should have 6 float values")

    if coeffs is not None and len(coeffs) != 8:
        raise ValueError("Argument coeffs should have 8 float values")

    if fill is not None and not isinstance(fill, (int, float, tuple, list)):
        warnings.warn("Argument fill should be either int, float, tuple or list")

    # Check fill
    num_channels = _get_image_num_channels(img)
    if isinstance(fill, (tuple, list)) and (len(fill) > 1 and len(fill) != num_channels):
        msg = ("The number of elements in 'fill' cannot broadcast to match the number of "
               "channels of the image ({} != {})")
        raise ValueError(msg.format(len(fill), num_channels))

    if interpolation not in supported_interpolation_modes:
        raise ValueError("Interpolation mode '{}' is unsupported with Tensor input".format(interpolation))


def _cast_squeeze_in(img: Tensor, req_dtypes: List[torch.dtype]) -> Tuple[Tensor, bool, bool, torch.dtype]:
    need_squeeze = False
    # make image NCHW
    if img.ndim < 4:
        img = img.unsqueeze(dim=0)
        need_squeeze = True

    out_dtype = img.dtype
    need_cast = False
    if out_dtype not in req_dtypes:
        need_cast = True
        req_dtype = req_dtypes[0]
        img = img.to(req_dtype)
    return img, need_cast, need_squeeze, out_dtype


def _cast_squeeze_out(img: Tensor, need_cast: bool, need_squeeze: bool, out_dtype: torch.dtype) -> Tensor:
    if need_squeeze:
        img = img.squeeze(dim=0)

    if need_cast:
        if out_dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):
            # it is better to round before cast
            img = torch.round(img)
        img = img.to(out_dtype)

    return img


def _apply_grid_transform(img: Tensor, grid: Tensor, mode: str, fill: Optional[List[float]]) -> Tensor:

    img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [grid.dtype, ])

    if img.shape[0] > 1:
        # Apply same grid to a batch of images
        grid = grid.expand(img.shape[0], grid.shape[1], grid.shape[2], grid.shape[3])

    # Append a dummy mask for customized fill colors, should be faster than grid_sample() twice
    if fill is not None:
        dummy = torch.ones((img.shape[0], 1, img.shape[2], img.shape[3]), dtype=img.dtype, device=img.device)
        img = torch.cat((img, dummy), dim=1)

    img = grid_sample(img, grid, mode=mode, padding_mode="zeros", align_corners=False)

    # Fill with required color
    if fill is not None:
        mask = img[:, -1:, :, :]  # N * 1 * H * W
        img = img[:, :-1, :, :]  # N * C * H * W
        mask = mask.expand_as(img)
        len_fill = len(fill) if isinstance(fill, (tuple, list)) else 1
        fill_img = torch.tensor(fill, dtype=img.dtype, device=img.device).view(1, len_fill, 1, 1).expand_as(img)
        if mode == 'nearest':
            mask = mask < 0.5
            img[mask] = fill_img[mask]
        else:  # 'bilinear'
            img = img * mask + (1.0 - mask) * fill_img

    img = _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype)
    return img


def _gen_affine_grid(
        theta: Tensor, w: int, h: int, ow: int, oh: int,
) -> Tensor:
    # https://github.com/pytorch/pytorch/blob/74b65c32be68b15dc7c9e8bb62459efbfbde33d8/aten/src/ATen/native/
    # AffineGridGenerator.cpp#L18
    # Difference with AffineGridGenerator is that:
    # 1) we normalize grid values after applying theta
    # 2) we can normalize by other image size, such that it covers "extend" option like in PIL.Image.rotate

    d = 0.5
    base_grid = torch.empty(1, oh, ow, 3, dtype=theta.dtype, device=theta.device)
    x_grid = torch.linspace(-ow * 0.5 + d, ow * 0.5 + d - 1, steps=ow, device=theta.device)
    base_grid[..., 0].copy_(x_grid)
    y_grid = torch.linspace(-oh * 0.5 + d, oh * 0.5 + d - 1, steps=oh, device=theta.device).unsqueeze_(-1)
    base_grid[..., 1].copy_(y_grid)
    base_grid[..., 2].fill_(1)

    rescaled_theta = theta.transpose(1, 2) / torch.tensor([0.5 * w, 0.5 * h], dtype=theta.dtype, device=theta.device)
    output_grid = base_grid.view(1, oh * ow, 3).bmm(rescaled_theta)
    return output_grid.view(1, oh, ow, 2)


def affine(
        img: Tensor, matrix: List[float], interpolation: str = "nearest", fill: Optional[List[float]] = None
) -> Tensor:
    _assert_grid_transform_inputs(img, matrix, interpolation, fill, ["nearest", "bilinear"])

    dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    theta = torch.tensor(matrix, dtype=dtype, device=img.device).reshape(1, 2, 3)
    shape = img.shape
    # grid will be generated on the same device as theta and img
    grid = _gen_affine_grid(theta, w=shape[-1], h=shape[-2], ow=shape[-1], oh=shape[-2])
    return _apply_grid_transform(img, grid, interpolation, fill=fill)

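# Usage sketch: `matrix` is a flattened 2x3 matrix mapping output coordinates
# to input coordinates, so the identity leaves the image unchanged:
#   img = torch.rand(1, 3, 8, 8)
#   out = affine(img, [1.0, 0.0, 0.0, 0.0, 1.0, 0.0], interpolation="bilinear")
#   torch.allclose(out, img, atol=1e-5)  # expected True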

def _compute_output_size(matrix: List[float], w: int, h: int) -> Tuple[int, int]:

    # Inspired by the PIL implementation:
    # https://github.com/python-pillow/Pillow/blob/11de3318867e4398057373ee9f12dcb33db7335c/src/PIL/Image.py#L2054

    # pts are the image corner points: Top-Left, Bottom-Left, Bottom-Right, Top-Right.
    pts = torch.tensor([
        [-0.5 * w, -0.5 * h, 1.0],
        [-0.5 * w, 0.5 * h, 1.0],
        [0.5 * w, 0.5 * h, 1.0],
        [0.5 * w, -0.5 * h, 1.0],
    ])
    theta = torch.tensor(matrix, dtype=torch.float).reshape(1, 2, 3)
    new_pts = pts.view(1, 4, 3).bmm(theta.transpose(1, 2)).view(4, 2)
    min_vals, _ = new_pts.min(dim=0)
    max_vals, _ = new_pts.max(dim=0)

    # Truncate precision to 1e-4 to avoid ceil of Xe-15 to 1.0
    tol = 1e-4
    cmax = torch.ceil((max_vals / tol).trunc_() * tol)
    cmin = torch.floor((min_vals / tol).trunc_() * tol)
    size = cmax - cmin
    return int(size[0]), int(size[1])


def rotate(
    img: Tensor, matrix: List[float], interpolation: str = "nearest",
    expand: bool = False, fill: Optional[List[float]] = None
) -> Tensor:
    _assert_grid_transform_inputs(img, matrix, interpolation, fill, ["nearest", "bilinear"])
    w, h = img.shape[-1], img.shape[-2]
    ow, oh = _compute_output_size(matrix, w, h) if expand else (w, h)
    dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    theta = torch.tensor(matrix, dtype=dtype, device=img.device).reshape(1, 2, 3)
    # grid will be generated on the same device as theta and img
    grid = _gen_affine_grid(theta, w=w, h=h, ow=ow, oh=oh)

    return _apply_grid_transform(img, grid, interpolation, fill=fill)

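# Usage sketch, assuming `matrix` follows the same inverse convention as affine();
# a 45-degree rotation with expand=True grows the canvas to fit the corners:
#   import math
#   a = math.radians(45.0)
#   m = [math.cos(a), math.sin(a), 0.0, -math.sin(a), math.cos(a), 0.0]
#   rotate(torch.rand(3, 32, 32), m, expand=True).shape  # about torch.Size([3, 46, 46])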

def _perspective_grid(coeffs: List[float], ow: int, oh: int, dtype: torch.dtype, device: torch.device):
    # https://github.com/python-pillow/Pillow/blob/4634eafe3c695a014267eefdce830b4a825beed7/
    # src/libImaging/Geometry.c#L394
    #
    # x_out = (coeffs[0] * x + coeffs[1] * y + coeffs[2]) / (coeffs[6] * x + coeffs[7] * y + 1)
    # y_out = (coeffs[3] * x + coeffs[4] * y + coeffs[5]) / (coeffs[6] * x + coeffs[7] * y + 1)
    #
    theta1 = torch.tensor([[
        [coeffs[0], coeffs[1], coeffs[2]],
        [coeffs[3], coeffs[4], coeffs[5]]
    ]], dtype=dtype, device=device)
    theta2 = torch.tensor([[
        [coeffs[6], coeffs[7], 1.0],
        [coeffs[6], coeffs[7], 1.0]
    ]], dtype=dtype, device=device)

    d = 0.5
    base_grid = torch.empty(1, oh, ow, 3, dtype=dtype, device=device)
    x_grid = torch.linspace(d, ow * 1.0 + d - 1.0, steps=ow, device=device)
    base_grid[..., 0].copy_(x_grid)
    y_grid = torch.linspace(d, oh * 1.0 + d - 1.0, steps=oh, device=device).unsqueeze_(-1)
    base_grid[..., 1].copy_(y_grid)
    base_grid[..., 2].fill_(1)

    rescaled_theta1 = theta1.transpose(1, 2) / torch.tensor([0.5 * ow, 0.5 * oh], dtype=dtype, device=device)
    output_grid1 = base_grid.view(1, oh * ow, 3).bmm(rescaled_theta1)
    output_grid2 = base_grid.view(1, oh * ow, 3).bmm(theta2.transpose(1, 2))

    output_grid = output_grid1 / output_grid2 - 1.0
    return output_grid.view(1, oh, ow, 2)


def perspective(
    img: Tensor, perspective_coeffs: List[float], interpolation: str = "bilinear", fill: Optional[List[float]] = None
) -> Tensor:
    if not isinstance(img, torch.Tensor):
        raise TypeError('Input img should be Tensor.')

    _assert_image_tensor(img)

    _assert_grid_transform_inputs(
        img,
        matrix=None,
        interpolation=interpolation,
        fill=fill,
        supported_interpolation_modes=["nearest", "bilinear"],
        coeffs=perspective_coeffs
    )

    ow, oh = img.shape[-1], img.shape[-2]
    dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    grid = _perspective_grid(perspective_coeffs, ow=ow, oh=oh, dtype=dtype, device=img.device)
    return _apply_grid_transform(img, grid, interpolation, fill=fill)


def _get_gaussian_kernel1d(kernel_size: int, sigma: float) -> Tensor:
    ksize_half = (kernel_size - 1) * 0.5

    x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size)
    pdf = torch.exp(-0.5 * (x / sigma).pow(2))
    kernel1d = pdf / pdf.sum()

    return kernel1d


def _get_gaussian_kernel2d(
        kernel_size: List[int], sigma: List[float], dtype: torch.dtype, device: torch.device
) -> Tensor:
    kernel1d_x = _get_gaussian_kernel1d(kernel_size[0], sigma[0]).to(device, dtype=dtype)
    kernel1d_y = _get_gaussian_kernel1d(kernel_size[1], sigma[1]).to(device, dtype=dtype)
    kernel2d = torch.mm(kernel1d_y[:, None], kernel1d_x[None, :])
    return kernel2d


def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: List[float]) -> Tensor:
    if not isinstance(img, torch.Tensor):
        raise TypeError('img should be Tensor. Got {}'.format(type(img)))

    _assert_image_tensor(img)

    dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    kernel = _get_gaussian_kernel2d(kernel_size, sigma, dtype=dtype, device=img.device)
    kernel = kernel.expand(img.shape[-3], 1, kernel.shape[0], kernel.shape[1])

    img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [kernel.dtype, ])

    # padding = (left, right, top, bottom)
    padding = [kernel_size[0] // 2, kernel_size[0] // 2, kernel_size[1] // 2, kernel_size[1] // 2]
    img = torch_pad(img, padding, mode="reflect")
    img = conv2d(img, kernel, groups=img.shape[-3])

    img = _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype)
    return img

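# Usage sketch (odd kernel sizes keep the output shape equal to the input):
#   img = torch.rand(3, 16, 16)
#   gaussian_blur(img, [3, 3], [1.0, 1.0]).shape  # torch.Size([3, 16, 16])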

def invert(img: Tensor) -> Tensor:
    _assert_image_tensor(img)

    if img.ndim < 3:
        raise TypeError("Input image tensor should have at least 3 dimensions, but found {}".format(img.ndim))

    _assert_channels(img, [1, 3])

    bound = torch.tensor(1 if img.is_floating_point() else 255, dtype=img.dtype, device=img.device)
    return bound - img


def posterize(img: Tensor, bits: int) -> Tensor:
    _assert_image_tensor(img)

    if img.ndim < 3:
        raise TypeError("Input image tensor should have at least 3 dimensions, but found {}".format(img.ndim))
    if img.dtype != torch.uint8:
        raise TypeError("Only torch.uint8 image tensors are supported, but found {}".format(img.dtype))

    _assert_channels(img, [1, 3])
    mask = -int(2**(8 - bits))  # JIT-friendly for: ~(2 ** (8 - bits) - 1)
    return img & mask

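# Bit-mask sketch: bits=4 keeps the top four bits of each uint8 value:
#   x = torch.tensor([[[0, 77, 255]]], dtype=torch.uint8)
#   posterize(x, 4)  # tensor([[[0, 64, 240]]], dtype=torch.uint8)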

def solarize(img: Tensor, threshold: float) -> Tensor:
    _assert_image_tensor(img)

    if img.ndim < 3:
        raise TypeError("Input image tensor should have at least 3 dimensions, but found {}".format(img.ndim))

    _assert_channels(img, [1, 3])

    inverted_img = invert(img)
    return torch.where(img >= threshold, inverted_img, img)


def _blurred_degenerate_image(img: Tensor) -> Tensor:
    dtype = img.dtype if torch.is_floating_point(img) else torch.float32

    kernel = torch.ones((3, 3), dtype=dtype, device=img.device)
    kernel[1, 1] = 5.0
    kernel /= kernel.sum()
    kernel = kernel.expand(img.shape[-3], 1, kernel.shape[0], kernel.shape[1])

    result_tmp, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [kernel.dtype, ])
    result_tmp = conv2d(result_tmp, kernel, groups=result_tmp.shape[-3])
    result_tmp = _cast_squeeze_out(result_tmp, need_cast, need_squeeze, out_dtype)

    result = img.clone()
    result[..., 1:-1, 1:-1] = result_tmp

    return result


def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor:
    if sharpness_factor < 0:
        raise ValueError('sharpness_factor ({}) must be non-negative.'.format(sharpness_factor))

    _assert_image_tensor(img)

    _assert_channels(img, [1, 3])

    if img.size(-1) <= 2 or img.size(-2) <= 2:
        return img

    return _blend(img, _blurred_degenerate_image(img), sharpness_factor)


def autocontrast(img: Tensor) -> Tensor:
    _assert_image_tensor(img)

    if img.ndim < 3:
        raise TypeError("Input image tensor should have at least 3 dimensions, but found {}".format(img.ndim))

    _assert_channels(img, [1, 3])

    bound = 1.0 if img.is_floating_point() else 255.0
    dtype = img.dtype if torch.is_floating_point(img) else torch.float32

    minimum = img.amin(dim=(-2, -1), keepdim=True).to(dtype)
    maximum = img.amax(dim=(-2, -1), keepdim=True).to(dtype)
    eq_idxs = torch.where(minimum == maximum)[0]
    minimum[eq_idxs] = 0
    maximum[eq_idxs] = bound
    scale = bound / (maximum - minimum)

    return ((img - minimum) * scale).clamp(0, bound).to(img.dtype)


def _scale_channel(img_chan):
    hist = torch.histc(img_chan.to(torch.float32), bins=256, min=0, max=255)

    nonzero_hist = hist[hist != 0]
    step = nonzero_hist[:-1].sum() // 255
    if step == 0:
        return img_chan

    lut = (torch.cumsum(hist, 0) + (step // 2)) // step
    lut = torch.nn.functional.pad(lut, [1, 0])[:-1].clamp(0, 255)

    return lut[img_chan.to(torch.int64)].to(torch.uint8)


def _equalize_single_image(img: Tensor) -> Tensor:
    return torch.stack([_scale_channel(img[c]) for c in range(img.size(0))])


def equalize(img: Tensor) -> Tensor:
    _assert_image_tensor(img)

    if not (3 <= img.ndim <= 4):
        raise TypeError("Input image tensor should have 3 or 4 dimensions, but found {}".format(img.ndim))
    if img.dtype != torch.uint8:
        raise TypeError("Only torch.uint8 image tensors are supported, but found {}".format(img.dtype))

    _assert_channels(img, [1, 3])

    if img.ndim == 3:
        return _equalize_single_image(img)

    return torch.stack([_equalize_single_image(x) for x in img])
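
# Usage sketch (uint8 only; equalization is applied per channel):
#   img = torch.randint(0, 256, (3, 32, 32), dtype=torch.uint8)
#   equalize(img).shape  # torch.Size([3, 32, 32])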