import math
import numbers
import random
import warnings
from collections.abc import Sequence
from typing import Tuple, List, Optional

import torch
from torch import Tensor

try:
    import accimage
except ImportError:
    accimage = None

from ..utils import _log_api_usage_once
from . import functional as F
from .functional import InterpolationMode, _interpolation_modes_from_int


__all__ = [
    "Compose",
    "ToTensor",
    "PILToTensor",
    "ConvertImageDtype",
    "ToPILImage",
    "Normalize",
    "Resize",
    "CenterCrop",
    "Pad",
    "Lambda",
    "RandomApply",
    "RandomChoice",
    "RandomOrder",
    "RandomCrop",
    "RandomHorizontalFlip",
    "RandomVerticalFlip",
    "RandomResizedCrop",
    "FiveCrop",
    "TenCrop",
    "LinearTransformation",
    "ColorJitter",
    "RandomRotation",
    "RandomAffine",
    "Grayscale",
    "RandomGrayscale",
    "RandomPerspective",
    "RandomErasing",
    "GaussianBlur",
    "InterpolationMode",
    "RandomInvert",
    "RandomPosterize",
    "RandomSolarize",
    "RandomAdjustSharpness",
    "RandomAutocontrast",
    "RandomEqualize",
]


class Compose:
    """Composes several transforms together. This transform does not support torchscript.
    Please, see the note below.

    Args:
        transforms (list of ``Transform`` objects): list of transforms to compose.

    Example:
        >>> transforms.Compose([
        >>>     transforms.CenterCrop(10),
        >>>     transforms.PILToTensor(),
        >>>     transforms.ConvertImageDtype(torch.float),
        >>> ])

    .. note::
        In order to script the transformations, please use ``torch.nn.Sequential`` as below.

        >>> transforms = torch.nn.Sequential(
        >>>     transforms.CenterCrop(10),
        >>>     transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        >>> )
        >>> scripted_transforms = torch.jit.script(transforms)

        Make sure to use only scriptable transformations, i.e. ones that work with ``torch.Tensor`` and do not
        require `lambda` functions or ``PIL.Image``.

    """

    def __init__(self, transforms):
        if not torch.jit.is_scripting() and not torch.jit.is_tracing():
            _log_api_usage_once(self)
        self.transforms = transforms

    def __call__(self, img):
        for t in self.transforms:
            img = t(img)
        return img

    def __repr__(self) -> str:
        format_string = self.__class__.__name__ + "("
        for t in self.transforms:
            format_string += "\n"
            format_string += f"    {t}"
        format_string += "\n)"
        return format_string


class ToTensor:
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor. This transform does not support torchscript.

    Converts a PIL Image or numpy.ndarray (H x W x C) in the range
    [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
    if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)
    or if the numpy.ndarray has dtype = np.uint8

    In the other cases, tensors are returned without scaling.

    .. note::
        Because the input image is scaled to [0.0, 1.0], this transformation should not be used when
        transforming target image masks. See the `references`_ for implementing the transforms for image masks.

    .. _references: https://github.com/pytorch/vision/tree/main/references/segmentation
    """

    def __init__(self) -> None:
        _log_api_usage_once(self)

    def __call__(self, pic):
        """
        Args:
            pic (PIL Image or numpy.ndarray): Image to be converted to tensor.

        Returns:
            Tensor: Converted image.
        """
        return F.to_tensor(pic)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}()"


class PILToTensor:
    """Convert a ``PIL Image`` to a tensor of the same type. This transform does not support torchscript.

    Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
    """

    def __init__(self) -> None:
        _log_api_usage_once(self)

    def __call__(self, pic):
        """
        .. note::

            A deep copy of the underlying array is performed.

        Args:
            pic (PIL Image): Image to be converted to tensor.

        Returns:
            Tensor: Converted image.
        """
        return F.pil_to_tensor(pic)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}()"


class ConvertImageDtype(torch.nn.Module):
    """Convert a tensor image to the given ``dtype`` and scale the values accordingly.
    This function does not support PIL Image.

    Args:
        dtype (torch.dtype): Desired data type of the output

    .. note::

        When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
        If converted back and forth, this mismatch has no effect.

    Raises:
        RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
            well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
            overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
            of the integer ``dtype``.
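
    Example (an illustrative sketch; the tensor values are arbitrary):
        >>> to_float = ConvertImageDtype(torch.float32)
        >>> img = torch.tensor([[[0, 255]]], dtype=torch.uint8)  # dummy 1x1x2 image
        >>> to_float(img)  # uint8 [0, 255] is rescaled to float [0.0, 1.0]
        tensor([[[0., 1.]]])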
    """

    def __init__(self, dtype: torch.dtype) -> None:
        super().__init__()
        _log_api_usage_once(self)
        self.dtype = dtype

    def forward(self, image):
        return F.convert_image_dtype(image, self.dtype)


class ToPILImage:
    """Convert a tensor or an ndarray to PIL Image. This transform does not support torchscript.

    Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
    H x W x C to a PIL Image while preserving the value range.

    Args:
        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
            If ``mode`` is ``None`` (default) there are some assumptions made about the input data:

            - If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
            - If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
            - If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
            - If the input has 1 channel, the ``mode`` is determined by the data type (i.e. ``int``, ``float``,
              ``short``).

    .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
    """
    def __init__(self, mode=None):
        _log_api_usage_once(self)
        self.mode = mode

    def __call__(self, pic):
        """
        Args:
            pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.

        Returns:
            PIL Image: Image converted to PIL Image.

        """
        return F.to_pil_image(pic, self.mode)

    def __repr__(self) -> str:
        format_string = self.__class__.__name__ + "("
        if self.mode is not None:
            format_string += f"mode={self.mode}"
        format_string += ")"
        return format_string


class Normalize(torch.nn.Module):
    """Normalize a tensor image with mean and standard deviation.
    This transform does not support PIL Image.
    Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],..,std[n])`` for ``n``
    channels, this transform will normalize each channel of the input
    ``torch.*Tensor`` i.e.,
    ``output[channel] = (input[channel] - mean[channel]) / std[channel]``

    .. note::
        This transform acts out of place, i.e., it does not mutate the input tensor.

    Args:
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace (bool, optional): Bool to make this operation in-place.

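    Example (an illustrative sketch; the statistics and image shape are arbitrary):
        >>> normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        >>> img = torch.rand(3, 224, 224)  # dummy 3-channel image with values in [0, 1]
        >>> out = normalize(img)  # per channel: (input - mean) / std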
    """

    def __init__(self, mean, std, inplace=False):
        super().__init__()
        _log_api_usage_once(self)
        self.mean = mean
        self.std = std
        self.inplace = inplace

    def forward(self, tensor: Tensor) -> Tensor:
        """
        Args:
            tensor (Tensor): Tensor image to be normalized.

        Returns:
            Tensor: Normalized Tensor image.
        """
        return F.normalize(tensor, self.mean, self.std, self.inplace)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(mean={self.mean}, std={self.std})"


class Resize(torch.nn.Module):
    """Resize the input image to the given size.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    .. warning::
        The output image might be different depending on its type: when downsampling, the interpolation of PIL images
        and tensors is slightly different, because PIL applies antialiasing. This may lead to significant differences
        in the performance of a network. Therefore, it is preferable to train and serve a model with the same input
        types. See also below the ``antialias`` parameter, which can help making the output of PIL images and tensors
        closer.

    Args:
        size (sequence or int): Desired output size. If size is a sequence like
            (h, w), output size will be matched to this. If size is an int,
            smaller edge of the image will be matched to this number.
            i.e., if height > width, then image will be rescaled to
            (size * height / width, size).

            .. note::
                In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` and
            ``InterpolationMode.BICUBIC`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        max_size (int, optional): The maximum allowed for the longer edge of
            the resized image: if the longer edge of the image is greater
            than ``max_size`` after being resized according to ``size``, then
            the image is resized again so that the longer edge is equal to
            ``max_size``. As a result, ``size`` might be overruled, i.e. the
            smaller edge may be shorter than ``size``. This is only supported
            if ``size`` is an int (or a sequence of length 1 in torchscript
            mode).
        antialias (bool, optional): antialias flag. If ``img`` is PIL Image, the flag is ignored and anti-alias
            is always used. If ``img`` is Tensor, the flag is False by default and can be set to True for
            ``InterpolationMode.BILINEAR`` only mode. This can help making the output for PIL images and tensors
            closer.

            .. warning::
                There is no autodiff support for ``antialias=True`` option with input ``img`` as Tensor.

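    Example (an illustrative sketch; sizes are arbitrary):
        >>> resize = Resize(32)  # match the smaller edge to 32 and keep the aspect ratio
        >>> img = torch.rand(3, 64, 128)
        >>> resize(img).shape  # the smaller edge (64) -> 32, so 128 -> 64
        torch.Size([3, 32, 64])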
    """

    def __init__(self, size, interpolation=InterpolationMode.BILINEAR, max_size=None, antialias=None):
        super().__init__()
        _log_api_usage_once(self)
        if not isinstance(size, (int, Sequence)):
            raise TypeError(f"Size should be int or sequence. Got {type(size)}")
        if isinstance(size, Sequence) and len(size) not in (1, 2):
            raise ValueError("If size is a sequence, it should have 1 or 2 values")
        self.size = size
        self.max_size = max_size

        # Backward compatibility with integer value
        if isinstance(interpolation, int):
            warnings.warn(
                "Argument interpolation should be of type InterpolationMode instead of int. "
                "Please, use InterpolationMode enum."
            )
            interpolation = _interpolation_modes_from_int(interpolation)

        self.interpolation = interpolation
        self.antialias = antialias

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be scaled.

        Returns:
            PIL Image or Tensor: Rescaled image.
        """
        return F.resize(img, self.size, self.interpolation, self.max_size, self.antialias)

    def __repr__(self) -> str:
        detail = f"(size={self.size}, interpolation={self.interpolation.value}, max_size={self.max_size}, antialias={self.antialias})"
        return f"{self.__class__.__name__}{detail}"


class CenterCrop(torch.nn.Module):
    """Crops the given image at the center.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.
    If image size is smaller than output size along any edge, image is padded with 0 and then center cropped.

    Args:
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
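
    Example (an illustrative sketch; sizes are arbitrary):
        >>> crop = CenterCrop(10)
        >>> img = torch.rand(3, 14, 14)
        >>> crop(img).shape
        torch.Size([3, 10, 10])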
    """

    def __init__(self, size):
        super().__init__()
        _log_api_usage_once(self)
        self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be cropped.

        Returns:
            PIL Image or Tensor: Cropped image.
        """
        return F.center_crop(img, self.size)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(size={self.size})"


class Pad(torch.nn.Module):
    """Pad the given image on all sides with the given "pad" value.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means at most 2 leading dimensions for mode reflect and symmetric,
    at most 3 leading dimensions for mode edge,
    and an arbitrary number of leading dimensions for mode constant

    Args:
        padding (int or sequence): Padding on each border. If a single int is provided this
            is used to pad all borders. If sequence of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a sequence of length 4 is provided
            this is the padding for the left, top, right and bottom borders respectively.

            .. note::
                In torchscript mode padding as single int is not supported, use a sequence of
                length 1: ``[padding, ]``.
        fill (number or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
            length 3, it is used to fill R, G, B channels respectively.
            This value is only used when the padding_mode is constant.
            Only number is supported for torch Tensor.
            Only int or str or tuple value is supported for PIL Image.
        padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
            Default is constant.

            - constant: pads with a constant value, this value is specified with fill

            - edge: pads with the last value at the edge of the image.
              If input a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2

            - reflect: pads with reflection of image without repeating the last value on the edge.
              For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
              will result in [3, 2, 1, 2, 3, 4, 3, 2]

            - symmetric: pads with reflection of image repeating the last value on the edge.
              For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
              will result in [2, 1, 1, 2, 3, 4, 4, 3]
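
    Example (an illustrative sketch; the padding values are arbitrary):
        >>> pad = Pad(padding=[1, 2], padding_mode="symmetric")  # 1 px left/right, 2 px top/bottom
        >>> img = torch.rand(3, 8, 8)
        >>> pad(img).shape
        torch.Size([3, 12, 10])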
    """

    def __init__(self, padding, fill=0, padding_mode="constant"):
        super().__init__()
        _log_api_usage_once(self)
        if not isinstance(padding, (numbers.Number, tuple, list)):
            raise TypeError("Got inappropriate padding arg")

        if not isinstance(fill, (numbers.Number, str, tuple)):
            raise TypeError("Got inappropriate fill arg")

        if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
            raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")

        if isinstance(padding, Sequence) and len(padding) not in [1, 2, 4]:
            raise ValueError(
                f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple"
            )

        self.padding = padding
        self.fill = fill
        self.padding_mode = padding_mode

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be padded.

        Returns:
            PIL Image or Tensor: Padded image.
        """
        return F.pad(img, self.padding, self.fill, self.padding_mode)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(padding={self.padding}, fill={self.fill}, padding_mode={self.padding_mode})"


class Lambda:
    """Apply a user-defined lambda as a transform. This transform does not support torchscript.

    Args:
        lambd (function): Lambda/function to be used for transform.
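
    Example (an illustrative sketch):
        >>> double = Lambda(lambda x: x * 2)
        >>> double(torch.ones(2))
        tensor([2., 2.])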
    """

    def __init__(self, lambd):
        _log_api_usage_once(self)
        if not callable(lambd):
            raise TypeError(f"Argument lambd should be callable, got {repr(type(lambd).__name__)}")
        self.lambd = lambd

    def __call__(self, img):
        return self.lambd(img)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}()"


class RandomTransforms:
    """Base class for a list of transformations with randomness

    Args:
        transforms (sequence): list of transformations
    """

    def __init__(self, transforms):
        _log_api_usage_once(self)
        if not isinstance(transforms, Sequence):
            raise TypeError("Argument transforms should be a sequence")
        self.transforms = transforms

    def __call__(self, *args, **kwargs):
        raise NotImplementedError()

    def __repr__(self) -> str:
        format_string = self.__class__.__name__ + "("
        for t in self.transforms:
            format_string += "\n"
            format_string += f"    {t}"
        format_string += "\n)"
        return format_string


class RandomApply(torch.nn.Module):
    """Apply randomly a list of transformations with a given probability.

    .. note::
        In order to script the transformation, please use ``torch.nn.ModuleList`` as input instead of list/tuple of
        transforms as shown below:

        >>> transforms = transforms.RandomApply(torch.nn.ModuleList([
        >>>     transforms.ColorJitter(),
        >>> ]), p=0.3)
        >>> scripted_transforms = torch.jit.script(transforms)

        Make sure to use only scriptable transformations, i.e. ones that work with ``torch.Tensor`` and do not
        require `lambda` functions or ``PIL.Image``.

    Args:
        transforms (sequence or torch.nn.Module): list of transformations
        p (float): probability
    """

    def __init__(self, transforms, p=0.5):
        super().__init__()
        _log_api_usage_once(self)
        self.transforms = transforms
        self.p = p

    def forward(self, img):
        if self.p < torch.rand(1):
            return img
        for t in self.transforms:
            img = t(img)
        return img

    def __repr__(self) -> str:
        format_string = self.__class__.__name__ + "("
        format_string += f"\n    p={self.p}"
        for t in self.transforms:
            format_string += "\n"
            format_string += f"    {t}"
        format_string += "\n)"
        return format_string


class RandomOrder(RandomTransforms):
    """Apply a list of transformations in a random order. This transform does not support torchscript."""

    def __call__(self, img):
        order = list(range(len(self.transforms)))
        random.shuffle(order)
        for i in order:
            img = self.transforms[i](img)
        return img


class RandomChoice(RandomTransforms):
    """Apply single transformation randomly picked from a list. This transform does not support torchscript."""

    def __init__(self, transforms, p=None):
        super().__init__(transforms)
        if p is not None and not isinstance(p, Sequence):
            raise TypeError("Argument p should be a sequence")
        self.p = p

    def __call__(self, *args):
        t = random.choices(self.transforms, weights=self.p)[0]
        return t(*args)

    def __repr__(self) -> str:
        return f"{super().__repr__()}(p={self.p})"


class RandomCrop(torch.nn.Module):
    """Crop the given image at a random location.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions,
    but if non-constant padding is used, the input is expected to have at most 2 leading dimensions

    Args:
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
        padding (int or sequence, optional): Optional padding on each border
            of the image. Default is None. If a single int is provided this
            is used to pad all borders. If sequence of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a sequence of length 4 is provided
            this is the padding for the left, top, right and bottom borders respectively.

            .. note::
                In torchscript mode padding as single int is not supported, use a sequence of
                length 1: ``[padding, ]``.
        pad_if_needed (boolean): It will pad the image if smaller than the
            desired size to avoid raising an exception. Since cropping is done
            after padding, the padding seems to be done at a random offset.
        fill (number or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
            length 3, it is used to fill R, G, B channels respectively.
            This value is only used when the padding_mode is constant.
            Only number is supported for torch Tensor.
            Only int or str or tuple value is supported for PIL Image.
        padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
            Default is constant.

            - constant: pads with a constant value, this value is specified with fill

            - edge: pads with the last value at the edge of the image.
              If input a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2

            - reflect: pads with reflection of image without repeating the last value on the edge.
              For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
              will result in [3, 2, 1, 2, 3, 4, 3, 2]

            - symmetric: pads with reflection of image repeating the last value on the edge.
              For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
              will result in [2, 1, 1, 2, 3, 4, 4, 3]
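
    Example (an illustrative sketch; sizes are arbitrary):
        >>> crop = RandomCrop(16, padding=2)  # pad 2 px on every border, then crop a random 16x16 patch
        >>> img = torch.rand(3, 16, 16)
        >>> crop(img).shape  # padded to 3x20x20, then cropped back to 16x16
        torch.Size([3, 16, 16])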
    """

    @staticmethod
    def get_params(img: Tensor, output_size: Tuple[int, int]) -> Tuple[int, int, int, int]:
        """Get parameters for ``crop`` for a random crop.

        Args:
            img (PIL Image or Tensor): Image to be cropped.
            output_size (tuple): Expected output size of the crop.

        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
        """
        _, h, w = F.get_dimensions(img)
        th, tw = output_size

        if h + 1 < th or w + 1 < tw:
            raise ValueError(f"Required crop size {(th, tw)} is larger than input image size {(h, w)}")

        if w == tw and h == th:
            return 0, 0, h, w

        i = torch.randint(0, h - th + 1, size=(1,)).item()
        j = torch.randint(0, w - tw + 1, size=(1,)).item()
        return i, j, th, tw

    def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode="constant"):
        super().__init__()
        _log_api_usage_once(self)

        self.size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))

        self.padding = padding
        self.pad_if_needed = pad_if_needed
        self.fill = fill
        self.padding_mode = padding_mode

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be cropped.

        Returns:
            PIL Image or Tensor: Cropped image.
        """
        if self.padding is not None:
            img = F.pad(img, self.padding, self.fill, self.padding_mode)

        _, height, width = F.get_dimensions(img)
        # pad the width if needed
        if self.pad_if_needed and width < self.size[1]:
            padding = [self.size[1] - width, 0]
            img = F.pad(img, padding, self.fill, self.padding_mode)
        # pad the height if needed
        if self.pad_if_needed and height < self.size[0]:
            padding = [0, self.size[0] - height]
            img = F.pad(img, padding, self.fill, self.padding_mode)

        i, j, h, w = self.get_params(img, self.size)

        return F.crop(img, i, j, h, w)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(size={self.size}, padding={self.padding})"


class RandomHorizontalFlip(torch.nn.Module):
    """Horizontally flip the given image randomly with a given probability.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading
    dimensions

    Args:
        p (float): probability of the image being flipped. Default value is 0.5
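
    Example (an illustrative sketch; ``p=1.0`` forces a flip on every call):
        >>> flip = RandomHorizontalFlip(p=1.0)
        >>> img = torch.rand(3, 4, 4)
        >>> torch.equal(flip(img), img.flip(-1))
        True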
    """

    def __init__(self, p=0.5):
        super().__init__()
        _log_api_usage_once(self)
        self.p = p

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be flipped.

        Returns:
            PIL Image or Tensor: Randomly flipped image.
        """
        if torch.rand(1) < self.p:
            return F.hflip(img)
        return img

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(p={self.p})"


class RandomVerticalFlip(torch.nn.Module):
    """Vertically flip the given image randomly with a given probability.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading
    dimensions

    Args:
        p (float): probability of the image being flipped. Default value is 0.5
    """

    def __init__(self, p=0.5):
        super().__init__()
        _log_api_usage_once(self)
        self.p = p

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be flipped.

        Returns:
            PIL Image or Tensor: Randomly flipped image.
        """
        if torch.rand(1) < self.p:
            return F.vflip(img)
        return img

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(p={self.p})"


class RandomPerspective(torch.nn.Module):
    """Performs a random perspective transformation of the given image with a given probability.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        distortion_scale (float): argument to control the degree of distortion and ranges from 0 to 1.
            Default is 0.5.
        p (float): probability of the image being transformed. Default is 0.5.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        fill (sequence or number): Pixel fill value for the area outside the transformed
            image. Default is ``0``. If given a number, the value is used for all bands respectively.
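
    Example (an illustrative sketch; ``p=1.0`` forces the transform on every call):
        >>> perspective = RandomPerspective(distortion_scale=0.6, p=1.0)
        >>> img = torch.rand(3, 32, 32)
        >>> out = perspective(img)  # corners are displaced at random; output size equals input size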
    """

    def __init__(self, distortion_scale=0.5, p=0.5, interpolation=InterpolationMode.BILINEAR, fill=0):
        super().__init__()
        _log_api_usage_once(self)
        self.p = p

        # Backward compatibility with integer value
        if isinstance(interpolation, int):
            warnings.warn(
                "Argument interpolation should be of type InterpolationMode instead of int. "
                "Please, use InterpolationMode enum."
            )
            interpolation = _interpolation_modes_from_int(interpolation)

        self.interpolation = interpolation
        self.distortion_scale = distortion_scale

        if fill is None:
            fill = 0
        elif not isinstance(fill, (Sequence, numbers.Number)):
            raise TypeError("Fill should be either a sequence or a number.")

        self.fill = fill

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be Perspectively transformed.

        Returns:
            PIL Image or Tensor: Randomly transformed image.
        """

        fill = self.fill
        channels, height, width = F.get_dimensions(img)
        if isinstance(img, Tensor):
            if isinstance(fill, (int, float)):
                fill = [float(fill)] * channels
            else:
                fill = [float(f) for f in fill]

        if torch.rand(1) < self.p:
            startpoints, endpoints = self.get_params(width, height, self.distortion_scale)
            return F.perspective(img, startpoints, endpoints, self.interpolation, fill)
        return img

    @staticmethod
    def get_params(width: int, height: int, distortion_scale: float) -> Tuple[List[List[int]], List[List[int]]]:
        """Get parameters for ``perspective`` for a random perspective transform.

        Args:
            width (int): width of the image.
            height (int): height of the image.
            distortion_scale (float): argument to control the degree of distortion and ranges from 0 to 1.

        Returns:
            List containing [top-left, top-right, bottom-right, bottom-left] of the original image,
            List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.
        """
        half_height = height // 2
        half_width = width // 2
        topleft = [
            int(torch.randint(0, int(distortion_scale * half_width) + 1, size=(1,)).item()),
            int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1,)).item()),
        ]
        topright = [
            int(torch.randint(width - int(distortion_scale * half_width) - 1, width, size=(1,)).item()),
            int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1,)).item()),
        ]
        botright = [
            int(torch.randint(width - int(distortion_scale * half_width) - 1, width, size=(1,)).item()),
            int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1,)).item()),
        ]
        botleft = [
            int(torch.randint(0, int(distortion_scale * half_width) + 1, size=(1,)).item()),
            int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1,)).item()),
        ]
        startpoints = [[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]]
        endpoints = [topleft, topright, botright, botleft]
        return startpoints, endpoints

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(p={self.p})"


class RandomResizedCrop(torch.nn.Module):
    """Crop a random portion of image and resize it to a given size.

    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    A crop of the original image is made: the crop has a random area (H * W)
    and a random aspect ratio. This crop is finally resized to the given
    size. This is popularly used to train the Inception networks.

    Args:
        size (int or sequence): expected output size of the crop, for each edge. If size is an
            int instead of sequence like (h, w), a square output size ``(size, size)`` is
            made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).

            .. note::
                In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``.
        scale (tuple of float): Specifies the lower and upper bounds for the random area of the crop,
            before resizing. The scale is defined with respect to the area of the original image.
        ratio (tuple of float): lower and upper bounds for the random aspect ratio of the crop, before
            resizing.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` and
            ``InterpolationMode.BICUBIC`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
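
    Example (an illustrative sketch; the output size is arbitrary):
        >>> rrc = RandomResizedCrop(24)
        >>> img = torch.rand(3, 64, 48)
        >>> rrc(img).shape  # a random-area, random-aspect-ratio crop, resized to 24x24
        torch.Size([3, 24, 24])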
    """

    def __init__(self, size, scale=(0.08, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0), interpolation=InterpolationMode.BILINEAR):
        super().__init__()
        _log_api_usage_once(self)
        self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")

        if not isinstance(scale, Sequence):
            raise TypeError("Scale should be a sequence")
        if not isinstance(ratio, Sequence):
            raise TypeError("Ratio should be a sequence")
        if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
            warnings.warn("Scale and ratio should be of kind (min, max)")

        # Backward compatibility with integer value
        if isinstance(interpolation, int):
            warnings.warn(
                "Argument interpolation should be of type InterpolationMode instead of int. "
                "Please, use InterpolationMode enum."
            )
            interpolation = _interpolation_modes_from_int(interpolation)

        self.interpolation = interpolation
        self.scale = scale
        self.ratio = ratio

    @staticmethod
    def get_params(img: Tensor, scale: List[float], ratio: List[float]) -> Tuple[int, int, int, int]:
        """Get parameters for ``crop`` for a random sized crop.

        Args:
            img (PIL Image or Tensor): Input image.
            scale (list): range of scale of the origin size cropped
            ratio (list): range of aspect ratio of the origin aspect ratio cropped

        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for a random
            sized crop.
        """
        _, height, width = F.get_dimensions(img)
        area = height * width

        log_ratio = torch.log(torch.tensor(ratio))
        for _ in range(10):
            target_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()
            aspect_ratio = torch.exp(torch.empty(1).uniform_(log_ratio[0], log_ratio[1])).item()

            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))

            if 0 < w <= width and 0 < h <= height:
                i = torch.randint(0, height - h + 1, size=(1,)).item()
                j = torch.randint(0, width - w + 1, size=(1,)).item()
                return i, j, h, w

        # Fallback to central crop
        in_ratio = float(width) / float(height)
        if in_ratio < min(ratio):
            w = width
            h = int(round(w / min(ratio)))
        elif in_ratio > max(ratio):
            h = height
            w = int(round(h * max(ratio)))
        else:  # whole image
            w = width
            h = height
        i = (height - h) // 2
        j = (width - w) // 2
        return i, j, h, w

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be cropped and resized.

        Returns:
            PIL Image or Tensor: Randomly cropped and resized image.
        """
        i, j, h, w = self.get_params(img, self.scale, self.ratio)
        return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)

    def __repr__(self) -> str:
        interpolate_str = self.interpolation.value
        format_string = self.__class__.__name__ + f"(size={self.size}"
        format_string += f", scale={tuple(round(s, 4) for s in self.scale)}"
        format_string += f", ratio={tuple(round(r, 4) for r in self.ratio)}"
        format_string += f", interpolation={interpolate_str})"
        return format_string


class FiveCrop(torch.nn.Module):
    """Crop the given image into four corners and the central crop.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading
    dimensions

    .. Note::
         This transform returns a tuple of images and there may be a mismatch in the number of
         inputs and targets your Dataset returns. See below for an example of how to deal with
         this.

    Args:
         size (sequence or int): Desired output size of the crop. If size is an ``int``
            instead of sequence like (h, w), a square crop of size (size, size) is made.
            If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).

    Example:
         >>> transform = Compose([
         >>>    FiveCrop(size), # this is a list of PIL Images
         >>>    Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
         >>> ])
         >>> #In your test loop you can do the following:
         >>> input, target = batch # input is a 5d tensor, target is 2d
         >>> bs, ncrops, c, h, w = input.size()
         >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
         >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
    """

    def __init__(self, size):
        super().__init__()
        _log_api_usage_once(self)
        self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be cropped.

        Returns:
            tuple of 5 images. Image can be PIL Image or Tensor
        """
        return F.five_crop(img, self.size)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(size={self.size})"


class TenCrop(torch.nn.Module):
    """Crop the given image into four corners and the central crop plus the flipped version of
    these (horizontal flipping is used by default).
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading
    dimensions

    .. Note::
         This transform returns a tuple of images and there may be a mismatch in the number of
         inputs and targets your Dataset returns. See below for an example of how to deal with
         this.

    Args:
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]).
        vertical_flip (bool): Use vertical flipping instead of horizontal

    Example:
         >>> transform = Compose([
         >>>    TenCrop(size), # this is a list of PIL Images
         >>>    Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
         >>> ])
         >>> #In your test loop you can do the following:
         >>> input, target = batch # input is a 5d tensor, target is 2d
         >>> bs, ncrops, c, h, w = input.size()
         >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
         >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
    """

    def __init__(self, size, vertical_flip=False):
        super().__init__()
        _log_api_usage_once(self)
        self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.")
        self.vertical_flip = vertical_flip

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be cropped.

        Returns:
            tuple of 10 images. Image can be PIL Image or Tensor
        """
        return F.ten_crop(img, self.size, self.vertical_flip)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(size={self.size}, vertical_flip={self.vertical_flip})"


class LinearTransformation(torch.nn.Module):
    """Transform a tensor image with a square transformation matrix and a mean_vector computed
    offline.
    This transform does not support PIL Image.
    Given transformation_matrix and mean_vector, will flatten the torch.*Tensor and
    subtract mean_vector from it which is then followed by computing the dot
    product with the transformation matrix and then reshaping the tensor to its
    original shape.

    Applications:
        whitening transformation: Suppose X is a column vector zero-centered data.
        Then compute the data covariance matrix [D x D] with torch.mm(X.t(), X),
        perform SVD on this matrix and pass it as transformation_matrix.

    Args:
        transformation_matrix (Tensor): tensor [D x D], D = C x H x W
        mean_vector (Tensor): tensor [D], D = C x H x W
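
    Example (an illustrative sketch; an identity matrix and zero mean leave the input unchanged):
        >>> d = 3 * 4 * 4  # D = C x H x W
        >>> t = LinearTransformation(torch.eye(d), torch.zeros(d))
        >>> img = torch.rand(3, 4, 4)
        >>> out = t(img)  # flatten, subtract mean_vector, apply the matrix, reshape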
    """

    def __init__(self, transformation_matrix, mean_vector):
        super().__init__()
        _log_api_usage_once(self)
        if transformation_matrix.size(0) != transformation_matrix.size(1):
            raise ValueError(
                "transformation_matrix should be square. Got "
                f"{tuple(transformation_matrix.size())} rectangular matrix."
            )

        if mean_vector.size(0) != transformation_matrix.size(0):
            raise ValueError(
                f"mean_vector should have the same length {mean_vector.size(0)}"
                f" as any one of the dimensions of the transformation_matrix [{tuple(transformation_matrix.size())}]"
            )

        if transformation_matrix.device != mean_vector.device:
            raise ValueError(
                f"Input tensors should be on the same device. Got {transformation_matrix.device} and {mean_vector.device}"
            )

        self.transformation_matrix = transformation_matrix
        self.mean_vector = mean_vector

    def forward(self, tensor: Tensor) -> Tensor:
        """
        Args:
            tensor (Tensor): Tensor image to be whitened.

        Returns:
            Tensor: Transformed image.
        """
        shape = tensor.shape
        n = shape[-3] * shape[-2] * shape[-1]
        if n != self.transformation_matrix.shape[0]:
            raise ValueError(
                "Input tensor and transformation matrix have incompatible shape."
                + f"[{shape[-3]} x {shape[-2]} x {shape[-1]}] != "
                + f"{self.transformation_matrix.shape[0]}"
            )

        if tensor.device.type != self.mean_vector.device.type:
            raise ValueError(
                "Input tensor should be on the same device as transformation matrix and mean vector. "
                f"Got {tensor.device} vs {self.mean_vector.device}"
            )

        flat_tensor = tensor.view(-1, n) - self.mean_vector
        transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)
        tensor = transformed_tensor.view(shape)
        return tensor

    def __repr__(self) -> str:
        s = (
            f"{self.__class__.__name__}(transformation_matrix="
            f"{self.transformation_matrix.tolist()}"
            f", mean_vector={self.mean_vector.tolist()})"
        )
        return s


class ColorJitter(torch.nn.Module):
    """Randomly change the brightness, contrast, saturation and hue of an image.
    If the image is torch Tensor, it is expected
    to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
    If img is PIL Image, mode "1", "I", "F" and modes with transparency (alpha channel) are not supported.

    Args:
        brightness (float or tuple of float (min, max)): How much to jitter brightness.
            brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
            or the given [min, max]. Should be non negative numbers.
        contrast (float or tuple of float (min, max)): How much to jitter contrast.
            contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
            or the given [min, max]. Should be non negative numbers.
        saturation (float or tuple of float (min, max)): How much to jitter saturation.
            saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
            or the given [min, max]. Should be non negative numbers.
        hue (float or tuple of float (min, max)): How much to jitter hue.
            hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].
            Should have 0 <= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
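
    Example (an illustrative sketch; the jitter strengths are arbitrary):
        >>> jitter = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
        >>> img = torch.rand(3, 32, 32)  # dummy 3-channel image
        >>> out = jitter(img)  # the four factors are re-sampled on every call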
    """

    def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
        super().__init__()
        _log_api_usage_once(self)
        self.brightness = self._check_input(brightness, "brightness")
        self.contrast = self._check_input(contrast, "contrast")
        self.saturation = self._check_input(saturation, "saturation")
        self.hue = self._check_input(hue, "hue", center=0, bound=(-0.5, 0.5), clip_first_on_zero=False)

    @torch.jit.unused
    def _check_input(self, value, name, center=1, bound=(0, float("inf")), clip_first_on_zero=True):
        if isinstance(value, numbers.Number):
            if value < 0:
                raise ValueError(f"If {name} is a single number, it must be non negative.")
            value = [center - float(value), center + float(value)]
            if clip_first_on_zero:
                value[0] = max(value[0], 0.0)
        elif isinstance(value, (tuple, list)) and len(value) == 2:
            if not bound[0] <= value[0] <= value[1] <= bound[1]:
                raise ValueError(f"{name} values should be between {bound}")
        else:
            raise TypeError(f"{name} should be a single number or a list/tuple with length 2.")

        # if value is 0 or (1., 1.) for brightness/contrast/saturation
        # or (0., 0.) for hue, do nothing
        if value[0] == value[1] == center:
            value = None
        return value

    @staticmethod
    def get_params(
        brightness: Optional[List[float]],
        contrast: Optional[List[float]],
        saturation: Optional[List[float]],
        hue: Optional[List[float]],
    ) -> Tuple[Tensor, Optional[float], Optional[float], Optional[float], Optional[float]]:
        """Get the parameters for the randomized transform to be applied on image.

        Args:
            brightness (tuple of float (min, max), optional): The range from which the brightness_factor is chosen
                uniformly. Pass None to turn off the transformation.
            contrast (tuple of float (min, max), optional): The range from which the contrast_factor is chosen
                uniformly. Pass None to turn off the transformation.
            saturation (tuple of float (min, max), optional): The range from which the saturation_factor is chosen
                uniformly. Pass None to turn off the transformation.
            hue (tuple of float (min, max), optional): The range from which the hue_factor is chosen uniformly.
                Pass None to turn off the transformation.

        Returns:
            tuple: The parameters used to apply the randomized transform
            along with their random order.
        """
        fn_idx = torch.randperm(4)

        b = None if brightness is None else float(torch.empty(1).uniform_(brightness[0], brightness[1]))
        c = None if contrast is None else float(torch.empty(1).uniform_(contrast[0], contrast[1]))
        s = None if saturation is None else float(torch.empty(1).uniform_(saturation[0], saturation[1]))
        h = None if hue is None else float(torch.empty(1).uniform_(hue[0], hue[1]))
1218

1219
        return fn_idx, b, c, s, h
1220

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Input image.

        Returns:
            PIL Image or Tensor: Color jittered image.
        """
        fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor = self.get_params(
            self.brightness, self.contrast, self.saturation, self.hue
        )

        for fn_id in fn_idx:
            if fn_id == 0 and brightness_factor is not None:
                img = F.adjust_brightness(img, brightness_factor)
            elif fn_id == 1 and contrast_factor is not None:
                img = F.adjust_contrast(img, contrast_factor)
            elif fn_id == 2 and saturation_factor is not None:
                img = F.adjust_saturation(img, saturation_factor)
            elif fn_id == 3 and hue_factor is not None:
                img = F.adjust_hue(img, hue_factor)

        return img

    def __repr__(self) -> str:
        s = (
            f"{self.__class__.__name__}("
            f"brightness={self.brightness}"
            f", contrast={self.contrast}"
            f", saturation={self.saturation}"
            f", hue={self.hue})"
        )
        return s
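
# Illustrative usage sketch (not part of the module; assumes a float image tensor
# in [0, 1], though a PIL Image works the same way). The four factors are
# re-sampled, and applied in a fresh random order, on every call:
#
#     >>> img = torch.rand(3, 224, 224)
#     >>> jitter = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
#     >>> out = jitter(img)  # same shape, randomly jittered colors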


class RandomRotation(torch.nn.Module):
    """Rotate the image by angle.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        degrees (sequence or number): Range of degrees to select from.
            If degrees is a number instead of sequence like (min, max), the range of degrees
            will be (-degrees, +degrees).
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        expand (bool, optional): Optional expansion flag.
            If true, expands the output to make it large enough to hold the entire rotated image.
            If false or omitted, make the output image the same size as the input image.
            Note that the expand flag assumes rotation around the center and no translation.
        center (sequence, optional): Optional center of rotation, (x, y). Origin is the upper left corner.
            Default is the center of the image.
        fill (sequence or number): Pixel fill value for the area outside the rotated
            image. Default is ``0``. If given a number, the value is used for all bands respectively.
        resample (int, optional):
            .. warning::
                This parameter was deprecated in ``0.12`` and will be removed in ``0.14``. Please use ``interpolation``
                instead.

    .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters

    """

    def __init__(
        self, degrees, interpolation=InterpolationMode.NEAREST, expand=False, center=None, fill=0, resample=None
    ):
        super().__init__()
        _log_api_usage_once(self)
        if resample is not None:
            warnings.warn(
                "The parameter 'resample' is deprecated since 0.12 and will be removed in 0.14. "
                "Please use 'interpolation' instead."
            )
            interpolation = _interpolation_modes_from_int(resample)

        # Backward compatibility with integer value
        if isinstance(interpolation, int):
            warnings.warn(
                "Argument interpolation should be of type InterpolationMode instead of int. "
                "Please, use InterpolationMode enum."
            )
            interpolation = _interpolation_modes_from_int(interpolation)

        self.degrees = _setup_angle(degrees, name="degrees", req_sizes=(2,))

        if center is not None:
            _check_sequence_input(center, "center", req_sizes=(2,))

        self.center = center

        self.resample = self.interpolation = interpolation
        self.expand = expand

        if fill is None:
            fill = 0
        elif not isinstance(fill, (Sequence, numbers.Number)):
            raise TypeError("Fill should be either a sequence or a number.")

        self.fill = fill

    @staticmethod
    def get_params(degrees: List[float]) -> float:
        """Get parameters for ``rotate`` for a random rotation.

        Returns:
            float: angle parameter to be passed to ``rotate`` for random rotation.
        """
        angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item())
        return angle

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be rotated.

        Returns:
            PIL Image or Tensor: Rotated image.
        """
        fill = self.fill
        channels, _, _ = F.get_dimensions(img)
        if isinstance(img, Tensor):
            if isinstance(fill, (int, float)):
                fill = [float(fill)] * channels
            else:
                fill = [float(f) for f in fill]
        angle = self.get_params(self.degrees)

        return F.rotate(img, angle, self.resample, self.expand, self.center, fill)

    def __repr__(self) -> str:
        interpolate_str = self.interpolation.value
        format_string = self.__class__.__name__ + f"(degrees={self.degrees}"
        format_string += f", interpolation={interpolate_str}"
        format_string += f", expand={self.expand}"
        if self.center is not None:
            format_string += f", center={self.center}"
        if self.fill is not None:
            format_string += f", fill={self.fill}"
        format_string += ")"
        return format_string
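
# Illustrative usage sketch (not part of the module). With ``expand=True`` the
# output grows to contain the whole rotated image, so H and W may change:
#
#     >>> rotate = RandomRotation(degrees=(-30, 30), expand=True, fill=0)
#     >>> out = rotate(torch.rand(3, 224, 224))  # spatial size may exceed 224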


class RandomAffine(torch.nn.Module):
    """Random affine transformation of the image keeping center invariant.
    If the image is torch Tensor, it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        degrees (sequence or number): Range of degrees to select from.
            If degrees is a number instead of sequence like (min, max), the range of degrees
            will be (-degrees, +degrees). Set to 0 to deactivate rotations.
        translate (tuple, optional): tuple of maximum absolute fraction for horizontal
            and vertical translations. For example translate=(a, b), then horizontal shift
            is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is
            randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.
        scale (tuple, optional): scaling factor interval, e.g. (a, b), then scale is
            randomly sampled from the range a <= scale <= b. Will keep original scale by default.
        shear (sequence or number, optional): Range of degrees to select from.
            If shear is a number, a shear parallel to the x axis in the range (-shear, +shear)
            will be applied. Else if shear is a sequence of 2 values a shear parallel to the x axis in the
            range (shear[0], shear[1]) will be applied. Else if shear is a sequence of 4 values,
            an x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied.
            Will not apply shear by default.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
            For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable.
        fill (sequence or number): Pixel fill value for the area outside the transformed
            image. Default is ``0``. If given a number, the value is used for all bands respectively.
        fillcolor (sequence or number, optional):
            .. warning::
                This parameter was deprecated in ``0.12`` and will be removed in ``0.14``. Please use ``fill`` instead.
        resample (int, optional):
            .. warning::
                This parameter was deprecated in ``0.12`` and will be removed in ``0.14``. Please use ``interpolation``
                instead.
        center (sequence, optional): Optional center of rotation, (x, y). Origin is the upper left corner.
            Default is the center of the image.

    .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters

    """

    def __init__(
        self,
        degrees,
        translate=None,
        scale=None,
        shear=None,
        interpolation=InterpolationMode.NEAREST,
        fill=0,
        fillcolor=None,
        resample=None,
        center=None,
    ):
        super().__init__()
        _log_api_usage_once(self)
        if resample is not None:
            warnings.warn(
                "The parameter 'resample' is deprecated since 0.12 and will be removed in 0.14. "
                "Please use 'interpolation' instead."
            )
            interpolation = _interpolation_modes_from_int(resample)

        # Backward compatibility with integer value
        if isinstance(interpolation, int):
            warnings.warn(
                "Argument interpolation should be of type InterpolationMode instead of int. "
                "Please, use InterpolationMode enum."
            )
            interpolation = _interpolation_modes_from_int(interpolation)

        if fillcolor is not None:
            warnings.warn(
                "The parameter 'fillcolor' is deprecated since 0.12 and will be removed in 0.14. "
                "Please use 'fill' instead."
            )
            fill = fillcolor

        self.degrees = _setup_angle(degrees, name="degrees", req_sizes=(2,))

        if translate is not None:
            _check_sequence_input(translate, "translate", req_sizes=(2,))
            for t in translate:
                if not (0.0 <= t <= 1.0):
                    raise ValueError("translation values should be between 0 and 1")
        self.translate = translate

        if scale is not None:
            _check_sequence_input(scale, "scale", req_sizes=(2,))
            for s in scale:
                if s <= 0:
                    raise ValueError("scale values should be positive")
        self.scale = scale

        if shear is not None:
            self.shear = _setup_angle(shear, name="shear", req_sizes=(2, 4))
        else:
            self.shear = shear

        self.resample = self.interpolation = interpolation

        if fill is None:
            fill = 0
        elif not isinstance(fill, (Sequence, numbers.Number)):
            raise TypeError("Fill should be either a sequence or a number.")

        self.fillcolor = self.fill = fill

        if center is not None:
            _check_sequence_input(center, "center", req_sizes=(2,))

        self.center = center

    @staticmethod
    def get_params(
        degrees: List[float],
        translate: Optional[List[float]],
        scale_ranges: Optional[List[float]],
        shears: Optional[List[float]],
        img_size: List[int],
    ) -> Tuple[float, Tuple[int, int], float, Tuple[float, float]]:
        """Get parameters for affine transformation

        Returns:
            params to be passed to the affine transformation
        """
        angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item())
        if translate is not None:
            max_dx = float(translate[0] * img_size[0])
            max_dy = float(translate[1] * img_size[1])
            tx = int(round(torch.empty(1).uniform_(-max_dx, max_dx).item()))
            ty = int(round(torch.empty(1).uniform_(-max_dy, max_dy).item()))
            translations = (tx, ty)
        else:
            translations = (0, 0)

        if scale_ranges is not None:
            scale = float(torch.empty(1).uniform_(scale_ranges[0], scale_ranges[1]).item())
        else:
            scale = 1.0

        shear_x = shear_y = 0.0
        if shears is not None:
            shear_x = float(torch.empty(1).uniform_(shears[0], shears[1]).item())
            if len(shears) == 4:
                shear_y = float(torch.empty(1).uniform_(shears[2], shears[3]).item())

        shear = (shear_x, shear_y)

        return angle, translations, scale, shear

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be transformed.

        Returns:
            PIL Image or Tensor: Affine transformed image.
        """
        fill = self.fill
        channels, height, width = F.get_dimensions(img)
        if isinstance(img, Tensor):
            if isinstance(fill, (int, float)):
                fill = [float(fill)] * channels
            else:
                fill = [float(f) for f in fill]

        img_size = [width, height]  # flip for keeping BC on get_params call

        ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img_size)

        return F.affine(img, *ret, interpolation=self.interpolation, fill=fill, center=self.center)

    def __repr__(self) -> str:
        s = f"{self.__class__.__name__}(degrees={self.degrees}"
        s += f", translate={self.translate}" if self.translate is not None else ""
        s += f", scale={self.scale}" if self.scale is not None else ""
        s += f", shear={self.shear}" if self.shear is not None else ""
        s += f", interpolation={self.interpolation.value}" if self.interpolation != InterpolationMode.NEAREST else ""
        s += f", fill={self.fill}" if self.fill != 0 else ""
        s += f", center={self.center}" if self.center is not None else ""
        s += ")"

        return s
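
# Illustrative usage sketch (not part of the module). A single call samples
# angle, translation, scale and shear jointly and applies one affine warp:
#
#     >>> affine = RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=10)
#     >>> out = affine(torch.rand(3, 224, 224))  # same shape, warped content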


class Grayscale(torch.nn.Module):
    """Convert image to grayscale.
    If the image is torch Tensor, it is expected
    to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        num_output_channels (int): (1 or 3) number of channels desired for output image

    Returns:
        PIL Image: Grayscale version of the input.

        - If ``num_output_channels == 1`` : returned image is single channel
        - If ``num_output_channels == 3`` : returned image is 3 channel with r == g == b

    """

    def __init__(self, num_output_channels=1):
        super().__init__()
        _log_api_usage_once(self)
        self.num_output_channels = num_output_channels

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be converted to grayscale.

        Returns:
            PIL Image or Tensor: Grayscaled image.
        """
        return F.rgb_to_grayscale(img, num_output_channels=self.num_output_channels)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(num_output_channels={self.num_output_channels})"
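
# Illustrative usage sketch (not part of the module). ``num_output_channels=3``
# keeps the output 3-channel (r == g == b) for models that expect RGB input:
#
#     >>> gray = Grayscale(num_output_channels=3)
#     >>> out = gray(torch.rand(3, 224, 224))  # shape preserved, channels equal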


class RandomGrayscale(torch.nn.Module):
    """Randomly convert image to grayscale with a probability of p (default 0.1).
    If the image is torch Tensor, it is expected
    to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        p (float): probability that image should be converted to grayscale.

    Returns:
        PIL Image or Tensor: Grayscale version of the input image with probability p and unchanged
        with probability (1-p).
        - If input image is 1 channel: grayscale version is 1 channel
        - If input image is 3 channel: grayscale version is 3 channel with r == g == b

    """

    def __init__(self, p=0.1):
        super().__init__()
        _log_api_usage_once(self)
        self.p = p

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be converted to grayscale.

        Returns:
            PIL Image or Tensor: Randomly grayscaled image.
        """
        num_output_channels, _, _ = F.get_dimensions(img)
        if torch.rand(1) < self.p:
            return F.rgb_to_grayscale(img, num_output_channels=num_output_channels)
        return img

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(p={self.p})"
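
# Illustrative usage sketch (not part of the module). Unlike Grayscale, the
# input's channel count is always preserved; only the conversion is random:
#
#     >>> maybe_gray = RandomGrayscale(p=0.2)
#     >>> out = maybe_gray(torch.rand(3, 224, 224))  # grayscale ~20% of the time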


class RandomErasing(torch.nn.Module):
    """Randomly selects a rectangle region in a torch Tensor image and erases its pixels.
    This transform does not support PIL Image.
    'Random Erasing Data Augmentation' by Zhong et al. See https://arxiv.org/abs/1708.04896

    Args:
         p: probability that the random erasing operation will be performed.
         scale: range of proportion of erased area against input image.
         ratio: range of aspect ratio of erased area.
         value: erasing value. Default is 0. If a single int, it is used to
            erase all pixels. If a tuple of length 3, it is used to erase
            R, G, B channels respectively.
            If a str of 'random', erasing each pixel with random values.
         inplace: boolean to make this transform inplace. Default set to False.

    Returns:
        Erased Image.

    Example:
        >>> transform = transforms.Compose([
        >>>   transforms.RandomHorizontalFlip(),
        >>>   transforms.PILToTensor(),
        >>>   transforms.ConvertImageDtype(torch.float),
        >>>   transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        >>>   transforms.RandomErasing(),
        >>> ])
    """

    def __init__(self, p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False):
        super().__init__()
        _log_api_usage_once(self)
        if not isinstance(value, (numbers.Number, str, tuple, list)):
            raise TypeError("Argument value should be either a number or str or a sequence")
        if isinstance(value, str) and value != "random":
            raise ValueError("If value is str, it should be 'random'")
        if not isinstance(scale, (tuple, list)):
            raise TypeError("Scale should be a sequence")
        if not isinstance(ratio, (tuple, list)):
            raise TypeError("Ratio should be a sequence")
        if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
            warnings.warn("Scale and ratio should be of kind (min, max)")
        if scale[0] < 0 or scale[1] > 1:
            raise ValueError("Scale should be between 0 and 1")
        if p < 0 or p > 1:
            raise ValueError("Random erasing probability should be between 0 and 1")

        self.p = p
        self.scale = scale
        self.ratio = ratio
        self.value = value
        self.inplace = inplace

    @staticmethod
    def get_params(
        img: Tensor, scale: Tuple[float, float], ratio: Tuple[float, float], value: Optional[List[float]] = None
    ) -> Tuple[int, int, int, int, Tensor]:
        """Get parameters for ``erase`` for a random erasing.

        Args:
            img (Tensor): Tensor image to be erased.
            scale (sequence): range of proportion of erased area against input image.
            ratio (sequence): range of aspect ratio of erased area.
            value (list, optional): erasing value. If None, it is interpreted as "random"
                (erasing each pixel with random values). If ``len(value)`` is 1, it is interpreted as a number,
                i.e. ``value[0]``.

        Returns:
            tuple: params (i, j, h, w, v) to be passed to ``erase`` for random erasing.
        """
        img_c, img_h, img_w = img.shape[-3], img.shape[-2], img.shape[-1]
        area = img_h * img_w

        log_ratio = torch.log(torch.tensor(ratio))
        for _ in range(10):
            erase_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()
            aspect_ratio = torch.exp(torch.empty(1).uniform_(log_ratio[0], log_ratio[1])).item()

            h = int(round(math.sqrt(erase_area * aspect_ratio)))
            w = int(round(math.sqrt(erase_area / aspect_ratio)))
            if not (h < img_h and w < img_w):
                continue

            if value is None:
                v = torch.empty([img_c, h, w], dtype=torch.float32).normal_()
            else:
                v = torch.tensor(value)[:, None, None]

            i = torch.randint(0, img_h - h + 1, size=(1,)).item()
            j = torch.randint(0, img_w - w + 1, size=(1,)).item()
            return i, j, h, w, v

        # Return original image
        return 0, 0, img_h, img_w, img

    def forward(self, img):
        """
        Args:
            img (Tensor): Tensor image to be erased.

        Returns:
            img (Tensor): Erased Tensor image.
        """
        if torch.rand(1) < self.p:

            # cast self.value to script acceptable type
            if isinstance(self.value, (int, float)):
                value = [
                    self.value,
                ]
            elif isinstance(self.value, str):
                value = None
            elif isinstance(self.value, tuple):
                value = list(self.value)
            else:
                value = self.value

            if value is not None and not (len(value) in (1, img.shape[-3])):
                raise ValueError(
                    "If value is a sequence, it should have either a single value or "
                    f"{img.shape[-3]} (number of input channels)"
                )

            x, y, h, w, v = self.get_params(img, scale=self.scale, ratio=self.ratio, value=value)
            return F.erase(img, x, y, h, w, v, self.inplace)
        return img

    def __repr__(self) -> str:
        s = (
            f"{self.__class__.__name__}"
            f"(p={self.p}, "
            f"scale={self.scale}, "
            f"ratio={self.ratio}, "
            f"value={self.value}, "
            f"inplace={self.inplace})"
        )
        return s
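
# Illustrative usage sketch (not part of the module). Tensor-only; typically
# placed after conversion to tensor, as in the Example above. ``p=1.0`` forces
# the erasure for demonstration:
#
#     >>> eraser = RandomErasing(p=1.0, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0)
#     >>> out = eraser(torch.rand(3, 224, 224))  # one rectangle filled with 0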


class GaussianBlur(torch.nn.Module):
    """Blurs image with randomly chosen Gaussian blur.
    If the image is torch Tensor, it is expected
    to have [..., C, H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        kernel_size (int or sequence): Size of the Gaussian kernel.
        sigma (float or tuple of float (min, max)): Standard deviation to be used for
            creating kernel to perform blurring. If float, sigma is fixed. If it is tuple
            of float (min, max), sigma is chosen uniformly at random to lie in the
            given range.

    Returns:
        PIL Image or Tensor: Gaussian blurred version of the input image.

    """

    def __init__(self, kernel_size, sigma=(0.1, 2.0)):
        super().__init__()
        _log_api_usage_once(self)
        self.kernel_size = _setup_size(kernel_size, "Kernel size should be a tuple/list of two integers")
        for ks in self.kernel_size:
            if ks <= 0 or ks % 2 == 0:
                raise ValueError("Kernel size value should be an odd and positive number.")

        if isinstance(sigma, numbers.Number):
            if sigma <= 0:
                raise ValueError("If sigma is a single number, it must be positive.")
            sigma = (sigma, sigma)
        elif isinstance(sigma, Sequence) and len(sigma) == 2:
            if not 0.0 < sigma[0] <= sigma[1]:
                raise ValueError("sigma values should be positive and of the form (min, max).")
        else:
            raise ValueError("sigma should be a single number or a list/tuple with length 2.")

        self.sigma = sigma

    @staticmethod
    def get_params(sigma_min: float, sigma_max: float) -> float:
        """Choose sigma for random gaussian blurring.

        Args:
            sigma_min (float): Minimum standard deviation that can be chosen for blurring kernel.
            sigma_max (float): Maximum standard deviation that can be chosen for blurring kernel.

        Returns:
            float: Standard deviation to be passed to calculate kernel for gaussian blurring.
        """
        return torch.empty(1).uniform_(sigma_min, sigma_max).item()

    def forward(self, img: Tensor) -> Tensor:
        """
        Args:
            img (PIL Image or Tensor): image to be blurred.

        Returns:
            PIL Image or Tensor: Gaussian blurred image
        """
        sigma = self.get_params(self.sigma[0], self.sigma[1])
        return F.gaussian_blur(img, self.kernel_size, [sigma, sigma])

    def __repr__(self) -> str:
        s = f"{self.__class__.__name__}(kernel_size={self.kernel_size}, sigma={self.sigma})"
        return s
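
# Illustrative usage sketch (not part of the module). The kernel size is fixed
# and must be odd; sigma is re-sampled from the given range on every call:
#
#     >>> blur = GaussianBlur(kernel_size=5, sigma=(0.1, 2.0))
#     >>> out = blur(torch.rand(3, 224, 224))  # same shape, smoothed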


def _setup_size(size, error_msg):
    if isinstance(size, numbers.Number):
        return int(size), int(size)

    if isinstance(size, Sequence) and len(size) == 1:
        return size[0], size[0]

    if len(size) != 2:
        raise ValueError(error_msg)

    return size


def _check_sequence_input(x, name, req_sizes):
    msg = req_sizes[0] if len(req_sizes) < 2 else " or ".join([str(s) for s in req_sizes])
    if not isinstance(x, Sequence):
        raise TypeError(f"{name} should be a sequence of length {msg}.")
    if len(x) not in req_sizes:
        raise ValueError(f"{name} should be a sequence of length {msg}.")


def _setup_angle(x, name, req_sizes=(2,)):
    if isinstance(x, numbers.Number):
        if x < 0:
            raise ValueError(f"If {name} is a single number, it must be positive.")
        x = [-x, x]
    else:
        _check_sequence_input(x, name, req_sizes)

    return [float(d) for d in x]


class RandomInvert(torch.nn.Module):
    """Inverts the colors of the given image randomly with a given probability.
    If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format,
    where ... means it can have an arbitrary number of leading dimensions.
    If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Args:
        p (float): probability of the image being color inverted. Default value is 0.5
    """

    def __init__(self, p=0.5):
        super().__init__()
        _log_api_usage_once(self)
        self.p = p

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be inverted.

        Returns:
            PIL Image or Tensor: Randomly color inverted image.
        """
        if torch.rand(1).item() < self.p:
            return F.invert(img)
        return img

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(p={self.p})"
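
# Illustrative usage sketch (not part of the module). For float tensors the
# values are assumed to lie in [0, 1], so inversion maps v to 1 - v:
#
#     >>> invert = RandomInvert(p=0.5)
#     >>> out = invert(torch.rand(3, 224, 224))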


class RandomPosterize(torch.nn.Module):
    """Posterize the image randomly with a given probability by reducing the
    number of bits for each color channel. If the image is torch Tensor, it should be of type torch.uint8,
    and it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
    If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Args:
        bits (int): number of bits to keep for each channel (0-8)
        p (float): probability of the image being posterized. Default value is 0.5
    """

    def __init__(self, bits, p=0.5):
        super().__init__()
        _log_api_usage_once(self)
        self.bits = bits
        self.p = p

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be posterized.

        Returns:
            PIL Image or Tensor: Randomly posterized image.
        """
        if torch.rand(1).item() < self.p:
            return F.posterize(img, self.bits)
        return img

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(bits={self.bits}, p={self.p})"
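
# Illustrative usage sketch (not part of the module). Tensor inputs must be
# uint8, since posterizing masks out the low-order bits of each channel:
#
#     >>> poster = RandomPosterize(bits=2, p=1.0)
#     >>> img_u8 = torch.randint(0, 256, (3, 224, 224), dtype=torch.uint8)
#     >>> out = poster(img_u8)  # only the top 2 bits of each value survive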


class RandomSolarize(torch.nn.Module):
    """Solarize the image randomly with a given probability by inverting all pixel
    values above a threshold. If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format,
    where ... means it can have an arbitrary number of leading dimensions.
    If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Args:
        threshold (float): all pixels equal or above this value are inverted.
        p (float): probability of the image being solarized. Default value is 0.5
    """

    def __init__(self, threshold, p=0.5):
        super().__init__()
        _log_api_usage_once(self)
        self.threshold = threshold
        self.p = p

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be solarized.

        Returns:
            PIL Image or Tensor: Randomly solarized image.
        """
        if torch.rand(1).item() < self.p:
            return F.solarize(img, self.threshold)
        return img

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(threshold={self.threshold}, p={self.p})"
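
# Illustrative usage sketch (not part of the module). The threshold lives on the
# image's value scale: 0-255 for uint8 tensors and PIL images, 0.0-1.0 for
# float tensors:
#
#     >>> solarize = RandomSolarize(threshold=192, p=1.0)
#     >>> img_u8 = torch.randint(0, 256, (3, 224, 224), dtype=torch.uint8)
#     >>> out = solarize(img_u8)  # pixels >= 192 are inverted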


class RandomAdjustSharpness(torch.nn.Module):
    """Adjust the sharpness of the image randomly with a given probability. If the image is torch Tensor,
    it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        sharpness_factor (float): How much to adjust the sharpness. Can be
            any non-negative number. 0 gives a blurred image, 1 gives the
            original image while 2 increases the sharpness by a factor of 2.
        p (float): probability of the image being sharpened. Default value is 0.5
    """

    def __init__(self, sharpness_factor, p=0.5):
        super().__init__()
        _log_api_usage_once(self)
        self.sharpness_factor = sharpness_factor
        self.p = p

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be sharpened.

        Returns:
            PIL Image or Tensor: Randomly sharpened image.
        """
        if torch.rand(1).item() < self.p:
            return F.adjust_sharpness(img, self.sharpness_factor)
        return img

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(sharpness_factor={self.sharpness_factor}, p={self.p})"
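
# Illustrative usage sketch (not part of the module). A factor of 0 blurs,
# 1 is the identity, and 2 doubles the sharpness:
#
#     >>> sharpen = RandomAdjustSharpness(sharpness_factor=2, p=0.5)
#     >>> out = sharpen(torch.rand(3, 224, 224))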


class RandomAutocontrast(torch.nn.Module):
    """Autocontrast the pixels of the given image randomly with a given probability.
    If the image is torch Tensor, it is expected
    to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
    If img is PIL Image, it is expected to be in mode "L" or "RGB".

    Args:
        p (float): probability of the image being autocontrasted. Default value is 0.5
    """

    def __init__(self, p=0.5):
        super().__init__()
        _log_api_usage_once(self)
        self.p = p

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be autocontrasted.

        Returns:
            PIL Image or Tensor: Randomly autocontrasted image.
        """
        if torch.rand(1).item() < self.p:
            return F.autocontrast(img)
        return img

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(p={self.p})"
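
# Illustrative usage sketch (not part of the module). Autocontrast stretches
# each channel so its darkest pixel maps to the minimum and its brightest to
# the maximum of the value range:
#
#     >>> autocontrast = RandomAutocontrast(p=0.5)
#     >>> out = autocontrast(torch.rand(3, 224, 224))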


class RandomEqualize(torch.nn.Module):
    """Equalize the histogram of the given image randomly with a given probability.
    If the image is torch Tensor, it is expected
    to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
    If img is PIL Image, it is expected to be in mode "P", "L" or "RGB".

    Args:
        p (float): probability of the image being equalized. Default value is 0.5
    """

    def __init__(self, p=0.5):
        super().__init__()
        _log_api_usage_once(self)
        self.p = p

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be equalized.

        Returns:
            PIL Image or Tensor: Randomly equalized image.
        """
        if torch.rand(1).item() < self.p:
            return F.equalize(img)
        return img

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(p={self.p})"
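
# Illustrative usage sketch (not part of the module). Tensor inputs must be
# uint8 for histogram equalization:
#
#     >>> equalize = RandomEqualize(p=1.0)
#     >>> img_u8 = torch.randint(0, 256, (3, 224, 224), dtype=torch.uint8)
#     >>> out = equalize(img_u8)  # per-channel histogram flattened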