# functional_tensor.py
import warnings

import torch
from torch import Tensor
from torch.nn.functional import grid_sample, conv2d, interpolate, pad as torch_pad
from torch.jit.annotations import BroadcastingList2
from typing import Optional, Tuple, List


def _is_tensor_a_torch_image(x: Tensor) -> bool:
    return x.ndim >= 2


def _assert_image_tensor(img: Tensor) -> None:
    if not _is_tensor_a_torch_image(img):
        raise TypeError("Tensor is not a torch image.")


def get_image_size(img: Tensor) -> List[int]:
    # Returns (w, h) of tensor image
    _assert_image_tensor(img)
    return [img.shape[-1], img.shape[-2]]


def get_image_num_channels(img: Tensor) -> int:
    if img.ndim == 2:
        return 1
    elif img.ndim > 2:
        return img.shape[-3]

    raise TypeError("Input ndim should be 2 or more. Got {}".format(img.ndim))


def _max_value(dtype: torch.dtype) -> float:
    # TODO: replace this method with torch.iinfo when it gets torchscript support.
    # https://github.com/pytorch/pytorch/issues/41492
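    # The loop below keeps doubling `bits` and recomputing 2 ** (bits - signed) - 1
    # in the target dtype; once the value stops growing (i.e. it has overflowed),
    # the last value reached is the maximum representable value of `dtype`.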

    a = torch.tensor(2, dtype=dtype)
    signed = 1 if torch.tensor(0, dtype=dtype).is_signed() else 0
    bits = 1
    max_value = torch.tensor(-signed, dtype=torch.long)
    while True:
        next_value = a.pow(bits - signed).sub(1)
        if next_value > max_value:
            max_value = next_value
            bits *= 2
        else:
            break
    return max_value.item()


def _assert_channels(img: Tensor, permitted: List[int]) -> None:
    c = get_image_num_channels(img)
    if c not in permitted:
        raise TypeError("Input image tensor permitted channel values are {}, but found {}".format(permitted, c))


def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor:
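    # Convert `image` to `dtype`, rescaling values so that the representable range
    # is preserved (e.g. uint8 [0, 255] <-> float [0.0, 1.0]). A few float-to-int
    # combinations (e.g. float32 -> int32) are rejected below because the cast
    # cannot be performed safely.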
    if image.dtype == dtype:
        return image

    if image.is_floating_point():

        # TODO: replace with dtype.is_floating_point when torchscript supports it
        if torch.tensor(0, dtype=dtype).is_floating_point():
            return image.to(dtype)

        # float to int
        if (image.dtype == torch.float32 and dtype in (torch.int32, torch.int64)) or (
            image.dtype == torch.float64 and dtype == torch.int64
        ):
            msg = f"The cast from {image.dtype} to {dtype} cannot be performed safely."
            raise RuntimeError(msg)

        # https://github.com/pytorch/vision/pull/2078#issuecomment-612045321
        # For data in the range 0-1, (float * 255).to(uint) is only 255
        # when float is exactly 1.0.
        # `max + 1 - epsilon` provides more evenly distributed mapping of
        # ranges of floats to ints.
        eps = 1e-3
        max_val = _max_value(dtype)
        result = image.mul(max_val + 1.0 - eps)
        return result.to(dtype)
    else:
        input_max = _max_value(image.dtype)

        # int to float
        # TODO: replace with dtype.is_floating_point when torchscript supports it
        if torch.tensor(0, dtype=dtype).is_floating_point():
            image = image.to(dtype)
            return image / input_max

        output_max = _max_value(dtype)

        # int to int
        if input_max > output_max:
            # factor should be forced to int for torch jit script
            # otherwise factor is a float and image // factor can produce different results
            factor = int((input_max + 1) // (output_max + 1))
            image = torch.div(image, factor, rounding_mode='floor')
            return image.to(dtype)
        else:
            # factor should be forced to int for torch jit script
            # otherwise factor is a float and image * factor can produce different results
            factor = int((output_max + 1) // (input_max + 1))
            image = image.to(dtype)
            return image * factor


def vflip(img: Tensor) -> Tensor:
    _assert_image_tensor(img)

    return img.flip(-2)


def hflip(img: Tensor) -> Tensor:
    _assert_image_tensor(img)

    return img.flip(-1)


def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor:
    _assert_image_tensor(img)

    w, h = get_image_size(img)
    right = left + width
    bottom = top + height

    if left < 0 or top < 0 or right > w or bottom > h:
        padding_ltrb = [max(-left, 0), max(-top, 0), max(right - w, 0), max(bottom - h, 0)]
        return pad(img[..., max(top, 0):bottom, max(left, 0):right], padding_ltrb, fill=0)
    return img[..., top:bottom, left:right]


def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor:
    if img.ndim < 3:
        raise TypeError("Input image tensor should have at least 3 dimensions, but found {}".format(img.ndim))
    _assert_channels(img, [3])

    if num_output_channels not in (1, 3):
        raise ValueError('num_output_channels should be either 1 or 3')

    r, g, b = img.unbind(dim=-3)
    # This implementation closely follows the TF one:
    # https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/python/ops/image_ops_impl.py#L2105-L2138
    l_img = (0.2989 * r + 0.587 * g + 0.114 * b).to(img.dtype)
    l_img = l_img.unsqueeze(dim=-3)

    if num_output_channels == 3:
        return l_img.expand(img.shape)

    return l_img


def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor:
    if brightness_factor < 0:
        raise ValueError('brightness_factor ({}) is not non-negative.'.format(brightness_factor))

    _assert_image_tensor(img)

    _assert_channels(img, [1, 3])

    return _blend(img, torch.zeros_like(img), brightness_factor)


def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor:
    if contrast_factor < 0:
        raise ValueError('contrast_factor ({}) is not non-negative.'.format(contrast_factor))

    _assert_image_tensor(img)

    _assert_channels(img, [3, 1])
    c = get_image_num_channels(img)
    dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    if c == 3:
        mean = torch.mean(rgb_to_grayscale(img).to(dtype), dim=(-3, -2, -1), keepdim=True)
    else:
        mean = torch.mean(img.to(dtype), dim=(-3, -2, -1), keepdim=True)

    return _blend(img, mean, contrast_factor)


def adjust_hue(img: Tensor, hue_factor: float) -> Tensor:
    if not (-0.5 <= hue_factor <= 0.5):
        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))

    if not (isinstance(img, torch.Tensor)):
        raise TypeError('Input img should be Tensor image')

    _assert_image_tensor(img)

    _assert_channels(img, [1, 3])
    if get_image_num_channels(img) == 1:  # Match PIL behaviour
        return img

    orig_dtype = img.dtype
    if img.dtype == torch.uint8:
        img = img.to(dtype=torch.float32) / 255.0

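    # Adjust hue in HSV space: shift the H channel by hue_factor and wrap it back
    # into [0, 1), leaving S and V untouched, then convert back to RGB.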
    img = _rgb2hsv(img)
    h, s, v = img.unbind(dim=-3)
    h = (h + hue_factor) % 1.0
    img = torch.stack((h, s, v), dim=-3)
    img_hue_adj = _hsv2rgb(img)

    if orig_dtype == torch.uint8:
        img_hue_adj = (img_hue_adj * 255.0).to(dtype=orig_dtype)

    return img_hue_adj


def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor:
    if saturation_factor < 0:
        raise ValueError('saturation_factor ({}) is not non-negative.'.format(saturation_factor))

    _assert_image_tensor(img)

    _assert_channels(img, [1, 3])

    if get_image_num_channels(img) == 1:  # Match PIL behaviour
        return img

    return _blend(img, rgb_to_grayscale(img), saturation_factor)


def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor:
    if not isinstance(img, torch.Tensor):
        raise TypeError('Input img should be a Tensor.')

    _assert_channels(img, [1, 3])

    if gamma < 0:
        raise ValueError('Gamma should be a non-negative real number')

    result = img
    dtype = img.dtype
    if not torch.is_floating_point(img):
        result = convert_image_dtype(result, torch.float32)

    result = (gain * result ** gamma).clamp(0, 1)

    result = convert_image_dtype(result, dtype)
    return result


def center_crop(img: Tensor, output_size: BroadcastingList2[int]) -> Tensor:
    """DEPRECATED
    """
    warnings.warn(
        "This method is deprecated and will be removed in future releases. "
        "Please, use ``F.center_crop`` instead."
    )

    _assert_image_tensor(img)

    _, image_width, image_height = img.size()
    crop_height, crop_width = output_size
    # crop_top = int(round((image_height - crop_height) / 2.))
    # Result can be different between python func and scripted func
    # Temporary workaround:
    crop_top = int((image_height - crop_height + 1) * 0.5)
    # crop_left = int(round((image_width - crop_width) / 2.))
    # Result can be different between python func and scripted func
    # Temporary workaround:
    crop_left = int((image_width - crop_width + 1) * 0.5)

    return crop(img, crop_top, crop_left, crop_height, crop_width)


def five_crop(img: Tensor, size: BroadcastingList2[int]) -> List[Tensor]:
    """DEPRECATED
    """
    warnings.warn(
        "This method is deprecated and will be removed in future releases. "
        "Please, use ``F.five_crop`` instead."
    )

    _assert_image_tensor(img)

    assert len(size) == 2, "Please provide only two dimensions (h, w) for size."

    _, image_width, image_height = img.size()
    crop_height, crop_width = size
    if crop_width > image_width or crop_height > image_height:
        msg = "Requested crop size {} is bigger than input size {}"
        raise ValueError(msg.format(size, (image_height, image_width)))

    tl = crop(img, 0, 0, crop_width, crop_height)
    tr = crop(img, image_width - crop_width, 0, image_width, crop_height)
    bl = crop(img, 0, image_height - crop_height, crop_width, image_height)
    br = crop(img, image_width - crop_width, image_height - crop_height, image_width, image_height)
    center = center_crop(img, (crop_height, crop_width))

    return [tl, tr, bl, br, center]


def ten_crop(img: Tensor, size: BroadcastingList2[int], vertical_flip: bool = False) -> List[Tensor]:
    """DEPRECATED
    """
    warnings.warn(
        "This method is deprecated and will be removed in future releases. "
        "Please, use ``F.ten_crop`` instead."
    )

    _assert_image_tensor(img)

    assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
    first_five = five_crop(img, size)

    if vertical_flip:
        img = vflip(img)
    else:
        img = hflip(img)

    second_five = five_crop(img, size)

    return first_five + second_five


def _blend(img1: Tensor, img2: Tensor, ratio: float) -> Tensor:
    ratio = float(ratio)
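    # Linear interpolation between the two images, clamped to the valid range of
    # img1 (1.0 for floating point images, 255 otherwise).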
    bound = 1.0 if img1.is_floating_point() else 255.0
    return (ratio * img1 + (1.0 - ratio) * img2).clamp(0, bound).to(img1.dtype)


def _rgb2hsv(img: Tensor) -> Tensor:
    r, g, b = img.unbind(dim=-3)

    # Implementation is based on https://github.com/python-pillow/Pillow/blob/4174d4267616897df3746d315d5a2d0f82c656ee/
    # src/libImaging/Convert.c#L330
    maxc = torch.max(img, dim=-3).values
    minc = torch.min(img, dim=-3).values

    # The algorithm erases S and H channel where `maxc = minc`. This avoids NaN
    # from happening in the results, because
    #   + S channel has division by `maxc`, which is zero only if `maxc = minc`
    #   + H channel has division by `(maxc - minc)`.
    #
    # Instead of overwriting NaN afterwards, we just prevent it from occurring so
    # we don't need to deal with it in case we save the NaN in a buffer in
    # backprop, if it is ever supported, but it doesn't hurt to do so.
    eqc = maxc == minc

    cr = maxc - minc
    # Since `eqc => cr = 0`, replacing denominator with 1 when `eqc` is fine.
    ones = torch.ones_like(maxc)
    s = cr / torch.where(eqc, ones, maxc)
    # Note that `eqc => maxc = minc = r = g = b`. So the following calculation
    # of `h` would reduce to `bc - gc + 2 + rc - bc + 4 + rc - bc = 6` so it
    # would not matter what values `rc`, `gc`, and `bc` have here, and thus
    # replacing denominator with 1 when `eqc` is fine.
    cr_divisor = torch.where(eqc, ones, cr)
    rc = (maxc - r) / cr_divisor
    gc = (maxc - g) / cr_divisor
    bc = (maxc - b) / cr_divisor

    hr = (maxc == r) * (bc - gc)
    hg = ((maxc == g) & (maxc != r)) * (2.0 + rc - bc)
    hb = ((maxc != g) & (maxc != r)) * (4.0 + gc - rc)
    h = (hr + hg + hb)
    h = torch.fmod((h / 6.0 + 1.0), 1.0)
    return torch.stack((h, s, maxc), dim=-3)


def _hsv2rgb(img: Tensor) -> Tensor:
    h, s, v = img.unbind(dim=-3)
    i = torch.floor(h * 6.0)
    f = (h * 6.0) - i
    i = i.to(dtype=torch.int32)

    p = torch.clamp((v * (1.0 - s)), 0.0, 1.0)
    q = torch.clamp((v * (1.0 - s * f)), 0.0, 1.0)
    t = torch.clamp((v * (1.0 - s * (1.0 - f))), 0.0, 1.0)
    i = i % 6

    mask = i.unsqueeze(dim=-3) == torch.arange(6, device=i.device).view(-1, 1, 1)

    a1 = torch.stack((v, q, p, p, t, v), dim=-3)
    a2 = torch.stack((t, v, v, q, p, p), dim=-3)
    a3 = torch.stack((p, p, t, v, v, q), dim=-3)
    a4 = torch.stack((a1, a2, a3), dim=-4)
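    # a1, a2 and a3 hold the candidate R, G and B values for each of the six hue
    # sextants; the one-hot `mask` selects the matching candidate per pixel below.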

    return torch.einsum("...ijk, ...xijk -> ...xjk", mask.to(dtype=img.dtype), a4)


def _pad_symmetric(img: Tensor, padding: List[int]) -> Tensor:
    # padding is left, right, top, bottom
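    # Symmetric padding mirrors the image across its borders (edge pixels included)
    # by gathering rows/columns with reflected index lists; negative padding values
    # crop the image instead.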

    # crop if needed
    if padding[0] < 0 or padding[1] < 0 or padding[2] < 0 or padding[3] < 0:
        crop_left, crop_right, crop_top, crop_bottom = [-min(x, 0) for x in padding]
        img = img[..., crop_top:img.shape[-2] - crop_bottom, crop_left:img.shape[-1] - crop_right]
        padding = [max(x, 0) for x in padding]

    in_sizes = img.size()

    _x_indices = [i for i in range(in_sizes[-1])]  # [0, 1, 2, 3, ...]
    left_indices = [i for i in range(padding[0] - 1, -1, -1)]  # e.g. [3, 2, 1, 0]
    right_indices = [-(i + 1) for i in range(padding[1])]  # e.g. [-1, -2, -3]
    x_indices = torch.tensor(left_indices + _x_indices + right_indices, device=img.device)

    _y_indices = [i for i in range(in_sizes[-2])]
    top_indices = [i for i in range(padding[2] - 1, -1, -1)]
    bottom_indices = [-(i + 1) for i in range(padding[3])]
    y_indices = torch.tensor(top_indices + _y_indices + bottom_indices, device=img.device)

    ndim = img.ndim
    if ndim == 3:
        return img[:, y_indices[:, None], x_indices[None, :]]
    elif ndim == 4:
        return img[:, :, y_indices[:, None], x_indices[None, :]]
    else:
        raise RuntimeError("Symmetric padding of N-D tensors is not supported yet")


def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "constant") -> Tensor:
    _assert_image_tensor(img)

    if not isinstance(padding, (int, tuple, list)):
        raise TypeError("Got inappropriate padding arg")
    if not isinstance(fill, (int, float)):
        raise TypeError("Got inappropriate fill arg")
    if not isinstance(padding_mode, str):
        raise TypeError("Got inappropriate padding_mode arg")

    if isinstance(padding, tuple):
        padding = list(padding)

    if isinstance(padding, list) and len(padding) not in [1, 2, 4]:
        raise ValueError("Padding must be an int or a 1, 2, or 4 element tuple, not a " +
                         "{} element tuple".format(len(padding)))

    if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
        raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")

    if isinstance(padding, int):
        if torch.jit.is_scripting():
            # This may be unreachable
            raise ValueError("padding can't be an int while torchscripting, set it as a list [value, ]")
        pad_left = pad_right = pad_top = pad_bottom = padding
    elif len(padding) == 1:
        pad_left = pad_right = pad_top = pad_bottom = padding[0]
    elif len(padding) == 2:
        pad_left = pad_right = padding[0]
        pad_top = pad_bottom = padding[1]
    else:
        pad_left = padding[0]
        pad_top = padding[1]
        pad_right = padding[2]
        pad_bottom = padding[3]

    p = [pad_left, pad_right, pad_top, pad_bottom]
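    # torch.nn.functional.pad takes the padding of the last two dimensions in
    # (left, right, top, bottom) order, which is what `p` holds.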

    if padding_mode == "edge":
        # remap padding_mode str
        padding_mode = "replicate"
    elif padding_mode == "symmetric":
        # route to another implementation
        return _pad_symmetric(img, p)

    need_squeeze = False
    if img.ndim < 4:
        img = img.unsqueeze(dim=0)
        need_squeeze = True

    out_dtype = img.dtype
    need_cast = False
    if (padding_mode != "constant") and img.dtype not in (torch.float32, torch.float64):
        # Here we temporarily cast the input tensor to float
        # until pytorch issue is resolved :
        # https://github.com/pytorch/pytorch/issues/40763
        need_cast = True
        img = img.to(torch.float32)

    img = torch_pad(img, p, mode=padding_mode, value=float(fill))

    if need_squeeze:
        img = img.squeeze(dim=0)

    if need_cast:
        img = img.to(out_dtype)

    return img


def resize(
    img: Tensor,
    size: List[int],
    interpolation: str = "bilinear",
    max_size: Optional[int] = None,
    antialias: Optional[bool] = None
) -> Tensor:
    _assert_image_tensor(img)

    if not isinstance(size, (int, tuple, list)):
        raise TypeError("Got inappropriate size arg")
    if not isinstance(interpolation, str):
        raise TypeError("Got inappropriate interpolation arg")

    if interpolation not in ["nearest", "bilinear", "bicubic"]:
        raise ValueError("This interpolation mode is unsupported with Tensor input")

    if isinstance(size, tuple):
        size = list(size)

    if isinstance(size, list):
        if len(size) not in [1, 2]:
            raise ValueError("Size must be an int or a 1 or 2 element tuple/list, not a "
                             "{} element tuple/list".format(len(size)))
        if max_size is not None and len(size) != 1:
            raise ValueError(
                "max_size should only be passed if size specifies the length of the smaller edge, "
                "i.e. size should be an int or a sequence of length 1 in torchscript mode."
            )

    if antialias is None:
        antialias = False

    if antialias and interpolation not in ["bilinear", "bicubic"]:
        raise ValueError("Antialias option is supported for bilinear and bicubic interpolation modes only")

    w, h = get_image_size(img)

    if isinstance(size, int) or len(size) == 1:  # specified size only for the smallest edge
        short, long = (w, h) if w <= h else (h, w)
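        # `size` gives the target length of the smaller edge; the longer edge is
        # scaled to preserve the aspect ratio and optionally capped by `max_size`.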
        requested_new_short = size if isinstance(size, int) else size[0]

        if short == requested_new_short:
            return img

        new_short, new_long = requested_new_short, int(requested_new_short * long / short)

        if max_size is not None:
            if max_size <= requested_new_short:
                raise ValueError(
                    f"max_size = {max_size} must be strictly greater than the requested "
                    f"size for the smaller edge size = {size}"
                )
            if new_long > max_size:
                new_short, new_long = int(max_size * new_short / new_long), max_size

        new_w, new_h = (new_short, new_long) if w <= h else (new_long, new_short)

    else:  # specified both h and w
        new_w, new_h = size[1], size[0]

    img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [torch.float32, torch.float64])

    # Define align_corners to avoid warnings
    align_corners = False if interpolation in ["bilinear", "bicubic"] else None

    if antialias:
        if interpolation == "bilinear":
            img = torch.ops.torchvision._interpolate_bilinear2d_aa(img, [new_h, new_w], align_corners=False)
        elif interpolation == "bicubic":
            img = torch.ops.torchvision._interpolate_bicubic2d_aa(img, [new_h, new_w], align_corners=False)
    else:
        img = interpolate(img, size=[new_h, new_w], mode=interpolation, align_corners=align_corners)

    if interpolation == "bicubic" and out_dtype == torch.uint8:
        img = img.clamp(min=0, max=255)

    img = _cast_squeeze_out(img, need_cast=need_cast, need_squeeze=need_squeeze, out_dtype=out_dtype)

    return img


def _assert_grid_transform_inputs(
    img: Tensor,
    matrix: Optional[List[float]],
    interpolation: str,
    fill: Optional[List[float]],
    supported_interpolation_modes: List[str],
    coeffs: Optional[List[float]] = None,
) -> None:

    if not (isinstance(img, torch.Tensor)):
        raise TypeError("Input img should be Tensor")

    _assert_image_tensor(img)

    if matrix is not None and not isinstance(matrix, list):
        raise TypeError("Argument matrix should be a list")

    if matrix is not None and len(matrix) != 6:
        raise ValueError("Argument matrix should have 6 float values")

    if coeffs is not None and len(coeffs) != 8:
        raise ValueError("Argument coeffs should have 8 float values")

    if fill is not None and not isinstance(fill, (int, float, tuple, list)):
        warnings.warn("Argument fill should be either int, float, tuple or list")

    # Check fill
    num_channels = get_image_num_channels(img)
    if isinstance(fill, (tuple, list)) and (len(fill) > 1 and len(fill) != num_channels):
        msg = ("The number of elements in 'fill' cannot broadcast to match the number of "
               "channels of the image ({} != {})")
        raise ValueError(msg.format(len(fill), num_channels))

    if interpolation not in supported_interpolation_modes:
        raise ValueError("Interpolation mode '{}' is unsupported with Tensor input".format(interpolation))


def _cast_squeeze_in(img: Tensor, req_dtypes: List[torch.dtype]) -> Tuple[Tensor, bool, bool, torch.dtype]:
    need_squeeze = False
    # make image NCHW
    if img.ndim < 4:
        img = img.unsqueeze(dim=0)
        need_squeeze = True

    out_dtype = img.dtype
    need_cast = False
    if out_dtype not in req_dtypes:
        need_cast = True
        req_dtype = req_dtypes[0]
        img = img.to(req_dtype)
    return img, need_cast, need_squeeze, out_dtype


def _cast_squeeze_out(img: Tensor, need_cast: bool, need_squeeze: bool, out_dtype: torch.dtype) -> Tensor:
    if need_squeeze:
        img = img.squeeze(dim=0)

    if need_cast:
        if out_dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64):
            # it is better to round before cast
            img = torch.round(img)
        img = img.to(out_dtype)

    return img


def _apply_grid_transform(img: Tensor, grid: Tensor, mode: str, fill: Optional[List[float]]) -> Tensor:

    img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [grid.dtype, ])

    if img.shape[0] > 1:
        # Apply same grid to a batch of images
        grid = grid.expand(img.shape[0], grid.shape[1], grid.shape[2], grid.shape[3])

    # Append a dummy mask for customized fill colors, should be faster than grid_sample() twice
    if fill is not None:
        dummy = torch.ones((img.shape[0], 1, img.shape[2], img.shape[3]), dtype=img.dtype, device=img.device)
        img = torch.cat((img, dummy), dim=1)

    img = grid_sample(img, grid, mode=mode, padding_mode="zeros", align_corners=False)

    # Fill with required color
    if fill is not None:
        mask = img[:, -1:, :, :]  # N * 1 * H * W
        img = img[:, :-1, :, :]  # N * C * H * W
        mask = mask.expand_as(img)
        len_fill = len(fill) if isinstance(fill, (tuple, list)) else 1
        fill_img = torch.tensor(fill, dtype=img.dtype, device=img.device).view(1, len_fill, 1, 1).expand_as(img)
        if mode == 'nearest':
            mask = mask < 0.5
            img[mask] = fill_img[mask]
        else:  # 'bilinear'
            img = img * mask + (1.0 - mask) * fill_img

    img = _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype)
    return img


def _gen_affine_grid(
        theta: Tensor, w: int, h: int, ow: int, oh: int,
) -> Tensor:
    # https://github.com/pytorch/pytorch/blob/74b65c32be68b15dc7c9e8bb62459efbfbde33d8/aten/src/ATen/native/
    # AffineGridGenerator.cpp#L18
    # Difference with AffineGridGenerator is that:
    # 1) we normalize grid values after applying theta
    # 2) we can normalize by other image size, such that it covers "extend" option like in PIL.Image.rotate

    d = 0.5
    base_grid = torch.empty(1, oh, ow, 3, dtype=theta.dtype, device=theta.device)
    x_grid = torch.linspace(-ow * 0.5 + d, ow * 0.5 + d - 1, steps=ow, device=theta.device)
    base_grid[..., 0].copy_(x_grid)
    y_grid = torch.linspace(-oh * 0.5 + d, oh * 0.5 + d - 1, steps=oh, device=theta.device).unsqueeze_(-1)
    base_grid[..., 1].copy_(y_grid)
    base_grid[..., 2].fill_(1)

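    # Normalize theta by half of the *input* size (w, h) so the grid is expressed
    # in grid_sample's normalized [-1, 1] coordinates, while the grid itself spans
    # the (possibly larger) output size (ow, oh).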
    rescaled_theta = theta.transpose(1, 2) / torch.tensor([0.5 * w, 0.5 * h], dtype=theta.dtype, device=theta.device)
    output_grid = base_grid.view(1, oh * ow, 3).bmm(rescaled_theta)
    return output_grid.view(1, oh, ow, 2)


def affine(
        img: Tensor, matrix: List[float], interpolation: str = "nearest", fill: Optional[List[float]] = None
) -> Tensor:
    _assert_grid_transform_inputs(img, matrix, interpolation, fill, ["nearest", "bilinear"])

    dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    theta = torch.tensor(matrix, dtype=dtype, device=img.device).reshape(1, 2, 3)
    shape = img.shape
    # grid will be generated on the same device as theta and img
    grid = _gen_affine_grid(theta, w=shape[-1], h=shape[-2], ow=shape[-1], oh=shape[-2])
    return _apply_grid_transform(img, grid, interpolation, fill=fill)


def _compute_output_size(matrix: List[float], w: int, h: int) -> Tuple[int, int]:

    # Inspired of PIL implementation:
    # https://github.com/python-pillow/Pillow/blob/11de3318867e4398057373ee9f12dcb33db7335c/src/PIL/Image.py#L2054

    # pts are Top-Left, Top-Right, Bottom-Left, Bottom-Right points.
    pts = torch.tensor([
        [-0.5 * w, -0.5 * h, 1.0],
        [-0.5 * w, 0.5 * h, 1.0],
        [0.5 * w, 0.5 * h, 1.0],
        [0.5 * w, -0.5 * h, 1.0],
    ])
    theta = torch.tensor(matrix, dtype=torch.float).reshape(1, 2, 3)
    new_pts = pts.view(1, 4, 3).bmm(theta.transpose(1, 2)).view(4, 2)
    min_vals, _ = new_pts.min(dim=0)
    max_vals, _ = new_pts.max(dim=0)

    # Truncate precision to 1e-4 to avoid ceil of Xe-15 to 1.0
    tol = 1e-4
    cmax = torch.ceil((max_vals / tol).trunc_() * tol)
    cmin = torch.floor((min_vals / tol).trunc_() * tol)
    size = cmax - cmin
    return int(size[0]), int(size[1])


def rotate(
    img: Tensor, matrix: List[float], interpolation: str = "nearest",
    expand: bool = False, fill: Optional[List[float]] = None
) -> Tensor:
    _assert_grid_transform_inputs(img, matrix, interpolation, fill, ["nearest", "bilinear"])
    w, h = img.shape[-1], img.shape[-2]
    ow, oh = _compute_output_size(matrix, w, h) if expand else (w, h)
    dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    theta = torch.tensor(matrix, dtype=dtype, device=img.device).reshape(1, 2, 3)
    # grid will be generated on the same device as theta and img
    grid = _gen_affine_grid(theta, w=w, h=h, ow=ow, oh=oh)

    return _apply_grid_transform(img, grid, interpolation, fill=fill)


def _perspective_grid(coeffs: List[float], ow: int, oh: int, dtype: torch.dtype, device: torch.device) -> Tensor:
    # https://github.com/python-pillow/Pillow/blob/4634eafe3c695a014267eefdce830b4a825beed7/
    # src/libImaging/Geometry.c#L394

    #
    # x_out = (coeffs[0] * x + coeffs[1] * y + coeffs[2]) / (coeffs[6] * x + coeffs[7] * y + 1)
    # y_out = (coeffs[3] * x + coeffs[4] * y + coeffs[5]) / (coeffs[6] * x + coeffs[7] * y + 1)
    #
    theta1 = torch.tensor([[
        [coeffs[0], coeffs[1], coeffs[2]],
        [coeffs[3], coeffs[4], coeffs[5]]
    ]], dtype=dtype, device=device)
    theta2 = torch.tensor([[
        [coeffs[6], coeffs[7], 1.0],
        [coeffs[6], coeffs[7], 1.0]
    ]], dtype=dtype, device=device)

    d = 0.5
    base_grid = torch.empty(1, oh, ow, 3, dtype=dtype, device=device)
    x_grid = torch.linspace(d, ow * 1.0 + d - 1.0, steps=ow, device=device)
    base_grid[..., 0].copy_(x_grid)
    y_grid = torch.linspace(d, oh * 1.0 + d - 1.0, steps=oh, device=device).unsqueeze_(-1)
    base_grid[..., 1].copy_(y_grid)
    base_grid[..., 2].fill_(1)

    rescaled_theta1 = theta1.transpose(1, 2) / torch.tensor([0.5 * ow, 0.5 * oh], dtype=dtype, device=device)
    output_grid1 = base_grid.view(1, oh * ow, 3).bmm(rescaled_theta1)
    output_grid2 = base_grid.view(1, oh * ow, 3).bmm(theta2.transpose(1, 2))

    output_grid = output_grid1 / output_grid2 - 1.0
    return output_grid.view(1, oh, ow, 2)


def perspective(
    img: Tensor, perspective_coeffs: List[float], interpolation: str = "bilinear", fill: Optional[List[float]] = None
) -> Tensor:
    if not (isinstance(img, torch.Tensor)):
        raise TypeError('Input img should be Tensor.')

    _assert_image_tensor(img)

    _assert_grid_transform_inputs(
        img,
        matrix=None,
        interpolation=interpolation,
        fill=fill,
        supported_interpolation_modes=["nearest", "bilinear"],
        coeffs=perspective_coeffs
    )

    ow, oh = img.shape[-1], img.shape[-2]
    dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    grid = _perspective_grid(perspective_coeffs, ow=ow, oh=oh, dtype=dtype, device=img.device)
    return _apply_grid_transform(img, grid, interpolation, fill=fill)


def _get_gaussian_kernel1d(kernel_size: int, sigma: float) -> Tensor:
    ksize_half = (kernel_size - 1) * 0.5

    x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size)
    pdf = torch.exp(-0.5 * (x / sigma).pow(2))
    kernel1d = pdf / pdf.sum()

    return kernel1d


def _get_gaussian_kernel2d(
        kernel_size: List[int], sigma: List[float], dtype: torch.dtype, device: torch.device
) -> Tensor:
    kernel1d_x = _get_gaussian_kernel1d(kernel_size[0], sigma[0]).to(device, dtype=dtype)
    kernel1d_y = _get_gaussian_kernel1d(kernel_size[1], sigma[1]).to(device, dtype=dtype)
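    # The 2D Gaussian kernel is separable, so it is built as the outer product of
    # the two 1D kernels.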
    kernel2d = torch.mm(kernel1d_y[:, None], kernel1d_x[None, :])
    return kernel2d


def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: List[float]) -> Tensor:
    if not (isinstance(img, torch.Tensor)):
        raise TypeError('img should be Tensor. Got {}'.format(type(img)))

    _assert_image_tensor(img)

    dtype = img.dtype if torch.is_floating_point(img) else torch.float32
    kernel = _get_gaussian_kernel2d(kernel_size, sigma, dtype=dtype, device=img.device)
    kernel = kernel.expand(img.shape[-3], 1, kernel.shape[0], kernel.shape[1])

    img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [kernel.dtype, ])

    # padding = (left, right, top, bottom)
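    # Reflect-pad by half the kernel on each side; for odd kernel sizes this keeps
    # the output of the depthwise conv2d (groups = channels) the same size as the
    # input.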
    padding = [kernel_size[0] // 2, kernel_size[0] // 2, kernel_size[1] // 2, kernel_size[1] // 2]
    img = torch_pad(img, padding, mode="reflect")
    img = conv2d(img, kernel, groups=img.shape[-3])

    img = _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype)
    return img


def invert(img: Tensor) -> Tensor:

    _assert_image_tensor(img)

    if img.ndim < 3:
        raise TypeError("Input image tensor should have at least 3 dimensions, but found {}".format(img.ndim))

    _assert_channels(img, [1, 3])

    bound = torch.tensor(1 if img.is_floating_point() else 255, dtype=img.dtype, device=img.device)
    return bound - img


def posterize(img: Tensor, bits: int) -> Tensor:

    _assert_image_tensor(img)

    if img.ndim < 3:
        raise TypeError("Input image tensor should have at least 3 dimensions, but found {}".format(img.ndim))
    if img.dtype != torch.uint8:
        raise TypeError("Only torch.uint8 image tensors are supported, but found {}".format(img.dtype))

    _assert_channels(img, [1, 3])
    mask = -int(2**(8 - bits))  # JIT-friendly for: ~(2 ** (8 - bits) - 1)
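    # In two's complement, -2 ** (8 - bits) has ones in the top `bits` bits and
    # zeros below, so the AND keeps only the `bits` most significant bits.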
    return img & mask


def solarize(img: Tensor, threshold: float) -> Tensor:

    _assert_image_tensor(img)

    if img.ndim < 3:
        raise TypeError("Input image tensor should have at least 3 dimensions, but found {}".format(img.ndim))

    _assert_channels(img, [1, 3])

    inverted_img = invert(img)
    return torch.where(img >= threshold, inverted_img, img)


def _blurred_degenerate_image(img: Tensor) -> Tensor:
    dtype = img.dtype if torch.is_floating_point(img) else torch.float32

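    # Smoothing kernel: 3x3 ones with a centre weight of 5, normalized to sum to 1
    # and applied depthwise; the 1-pixel border left untouched by the valid
    # convolution is copied back from the original image below.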
    kernel = torch.ones((3, 3), dtype=dtype, device=img.device)
    kernel[1, 1] = 5.0
    kernel /= kernel.sum()
    kernel = kernel.expand(img.shape[-3], 1, kernel.shape[0], kernel.shape[1])

    result_tmp, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [kernel.dtype, ])
    result_tmp = conv2d(result_tmp, kernel, groups=result_tmp.shape[-3])
    result_tmp = _cast_squeeze_out(result_tmp, need_cast, need_squeeze, out_dtype)

    result = img.clone()
    result[..., 1:-1, 1:-1] = result_tmp

    return result


def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor:
    if sharpness_factor < 0:
        raise ValueError('sharpness_factor ({}) is not non-negative.'.format(sharpness_factor))

    _assert_image_tensor(img)

    _assert_channels(img, [1, 3])

    if img.size(-1) <= 2 or img.size(-2) <= 2:
        return img

    return _blend(img, _blurred_degenerate_image(img), sharpness_factor)


def autocontrast(img: Tensor) -> Tensor:

    _assert_image_tensor(img)

    if img.ndim < 3:
        raise TypeError("Input image tensor should have at least 3 dimensions, but found {}".format(img.ndim))

    _assert_channels(img, [1, 3])

    bound = 1.0 if img.is_floating_point() else 255.0
    dtype = img.dtype if torch.is_floating_point(img) else torch.float32

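    # Entries where min == max have a degenerate range; remapping them to
    # (0, bound) makes the scale 1 so those values pass through unchanged.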
    minimum = img.amin(dim=(-2, -1), keepdim=True).to(dtype)
    maximum = img.amax(dim=(-2, -1), keepdim=True).to(dtype)
    eq_idxs = torch.where(minimum == maximum)[0]
    minimum[eq_idxs] = 0
    maximum[eq_idxs] = bound
    scale = bound / (maximum - minimum)

    return ((img - minimum) * scale).clamp(0, bound).to(img.dtype)


def _scale_channel(img_chan: Tensor) -> Tensor:
    # TODO: we should expect bincount to always be faster than histc, but this
    # isn't always the case. Once
    # https://github.com/pytorch/pytorch/issues/53194 is fixed, remove the if
    # block and only use bincount.
    if img_chan.is_cuda:
        hist = torch.histc(img_chan.to(torch.float32), bins=256, min=0, max=255)
    else:
        hist = torch.bincount(img_chan.view(-1), minlength=256)

    nonzero_hist = hist[hist != 0]
    step = torch.div(nonzero_hist[:-1].sum(), 255, rounding_mode='floor')
    if step == 0:
        return img_chan

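    # Build the equalization lookup table from the cumulative histogram: each
    # intensity maps to floor((cumsum(hist) + step / 2) / step), shifted right by
    # one entry via the zero padding below and clamped to [0, 255].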
    lut = torch.div(
        torch.cumsum(hist, 0) + torch.div(step, 2, rounding_mode='floor'),
        step, rounding_mode='floor')
    lut = torch.nn.functional.pad(lut, [1, 0])[:-1].clamp(0, 255)

    return lut[img_chan.to(torch.int64)].to(torch.uint8)


def _equalize_single_image(img: Tensor) -> Tensor:
    return torch.stack([_scale_channel(img[c]) for c in range(img.size(0))])


def equalize(img: Tensor) -> Tensor:

    _assert_image_tensor(img)

    if not (3 <= img.ndim <= 4):
        raise TypeError("Input image tensor should have 3 or 4 dimensions, but found {}".format(img.ndim))
    if img.dtype != torch.uint8:
        raise TypeError("Only torch.uint8 image tensors are supported, but found {}".format(img.dtype))

    _assert_channels(img, [1, 3])

    if img.ndim == 3:
        return _equalize_single_image(img)

    return torch.stack([_equalize_single_image(x) for x in img])