import math
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union

import PIL.Image
import torch

from torch.utils._pytree import tree_flatten, tree_unflatten, TreeSpec
from torchvision import transforms as _transforms, tv_tensors
from torchvision.transforms import _functional_tensor as _FT
from torchvision.transforms.v2 import AutoAugmentPolicy, functional as F, InterpolationMode, Transform
from torchvision.transforms.v2.functional._geometry import _check_interpolation
from torchvision.transforms.v2.functional._meta import get_size
from torchvision.transforms.v2.functional._utils import _FillType, _FillTypeJIT

from ._utils import _get_fill, _setup_fill_arg, check_type, is_pure_tensor

ImageOrVideo = Union[torch.Tensor, PIL.Image.Image, tv_tensors.Image, tv_tensors.Video]


class _AutoAugmentBase(Transform):
    def __init__(
        self,
        *,
        interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
        fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = None,
    ) -> None:
        super().__init__()
        self.interpolation = _check_interpolation(interpolation)
        self.fill = fill
        self._fill = _setup_fill_arg(fill)

    def _extract_params_for_v1_transform(self) -> Dict[str, Any]:
        params = super()._extract_params_for_v1_transform()

        if isinstance(params["fill"], dict):
            raise ValueError(f"{type(self).__name__}() cannot be scripted when `fill` is a dictionary.")

        return params

    def _get_random_item(self, dct: Dict[str, Tuple[Callable, bool]]) -> Tuple[str, Tuple[Callable, bool]]:
        keys = tuple(dct.keys())
        key = keys[int(torch.randint(len(keys), ()))]
        return key, dct[key]

    def _flatten_and_extract_image_or_video(
        self,
        inputs: Any,
        unsupported_types: Tuple[Type, ...] = (tv_tensors.BoundingBoxes, tv_tensors.Mask),
    ) -> Tuple[Tuple[List[Any], TreeSpec, int], ImageOrVideo]:
        flat_inputs, spec = tree_flatten(inputs if len(inputs) > 1 else inputs[0])
        needs_transform_list = self._needs_transform_list(flat_inputs)

        image_or_videos = []
        for idx, (inpt, needs_transform) in enumerate(zip(flat_inputs, needs_transform_list)):
            if needs_transform and check_type(
                inpt,
                (
                    tv_tensors.Image,
                    PIL.Image.Image,
                    is_pure_tensor,
                    tv_tensors.Video,
                ),
            ):
                image_or_videos.append((idx, inpt))
            elif isinstance(inpt, unsupported_types):
                raise TypeError(f"Inputs of type {type(inpt).__name__} are not supported by {type(self).__name__}()")

        if not image_or_videos:
            raise TypeError("Found no image in the sample.")
        if len(image_or_videos) > 1:
            raise TypeError(
                f"Auto augment transformations are only properly defined for a single image or video, "
                f"but found {len(image_or_videos)}."
            )

        idx, image_or_video = image_or_videos[0]
        return (flat_inputs, spec, idx), image_or_video

    def _unflatten_and_insert_image_or_video(
        self,
        flat_inputs_with_spec: Tuple[List[Any], TreeSpec, int],
        image_or_video: ImageOrVideo,
    ) -> Any:
        flat_inputs, spec, idx = flat_inputs_with_spec
        flat_inputs[idx] = image_or_video
        return tree_unflatten(flat_inputs, spec)

    def _apply_image_or_video_transform(
        self,
        image: ImageOrVideo,
        transform_id: str,
        magnitude: float,
        interpolation: Union[InterpolationMode, int],
        fill: Dict[Union[Type, str], _FillTypeJIT],
    ) -> ImageOrVideo:
        fill_ = _get_fill(fill, type(image))

        if transform_id == "Identity":
            return image
        elif transform_id == "ShearX":
            # magnitude should be arctan(magnitude)
            # official autoaug: (1, level, 0, 0, 1, 0)
            # https://github.com/tensorflow/models/blob/dd02069717128186b88afa8d857ce57d17957f03/research/autoaugment/augmentation_transforms.py#L290
            # compared to
            # torchvision:      (1, tan(level), 0, 0, 1, 0)
            # https://github.com/pytorch/vision/blob/0c2373d0bba3499e95776e7936e207d8a1676e65/torchvision/transforms/functional.py#L976
            return F.affine(
                image,
                angle=0.0,
                translate=[0, 0],
                scale=1.0,
                shear=[math.degrees(math.atan(magnitude)), 0.0],
                interpolation=interpolation,
                fill=fill_,
                center=[0, 0],
            )
        elif transform_id == "ShearY":
            # magnitude should be arctan(magnitude)
            # See above
            return F.affine(
                image,
                angle=0.0,
                translate=[0, 0],
                scale=1.0,
                shear=[0.0, math.degrees(math.atan(magnitude))],
                interpolation=interpolation,
                fill=fill_,
                center=[0, 0],
            )
        elif transform_id == "TranslateX":
            return F.affine(
                image,
                angle=0.0,
                translate=[int(magnitude), 0],
                scale=1.0,
                interpolation=interpolation,
                shear=[0.0, 0.0],
                fill=fill_,
            )
        elif transform_id == "TranslateY":
            return F.affine(
                image,
                angle=0.0,
                translate=[0, int(magnitude)],
                scale=1.0,
                interpolation=interpolation,
                shear=[0.0, 0.0],
                fill=fill_,
            )
        elif transform_id == "Rotate":
            return F.rotate(image, angle=magnitude, interpolation=interpolation, fill=fill_)
        elif transform_id == "Brightness":
            return F.adjust_brightness(image, brightness_factor=1.0 + magnitude)
        elif transform_id == "Color":
            return F.adjust_saturation(image, saturation_factor=1.0 + magnitude)
        elif transform_id == "Contrast":
            return F.adjust_contrast(image, contrast_factor=1.0 + magnitude)
        elif transform_id == "Sharpness":
            return F.adjust_sharpness(image, sharpness_factor=1.0 + magnitude)
        elif transform_id == "Posterize":
            return F.posterize(image, bits=int(magnitude))
        elif transform_id == "Solarize":
            bound = _FT._max_value(image.dtype) if isinstance(image, torch.Tensor) else 255.0
            return F.solarize(image, threshold=bound * magnitude)
        elif transform_id == "AutoContrast":
            return F.autocontrast(image)
        elif transform_id == "Equalize":
            return F.equalize(image)
        elif transform_id == "Invert":
            return F.invert(image)
        else:
            raise ValueError(f"No transform available for {transform_id}")


class AutoAugment(_AutoAugmentBase):
    r"""[BETA] AutoAugment data augmentation method based on
    `"AutoAugment: Learning Augmentation Strategies from Data" <https://arxiv.org/pdf/1805.09501.pdf>`_.

    .. v2betastatus:: AutoAugment transform

    This transformation works on images and videos only.

    If the input is a :class:`torch.Tensor`, it should be of type ``torch.uint8``, and it is expected
    to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
    If the input is a PIL Image, it is expected to be in mode "L" or "RGB".

    Args:
        policy (AutoAugmentPolicy, optional): Desired policy enum defined by
            :class:`torchvision.transforms.autoaugment.AutoAugmentPolicy`. Default is ``AutoAugmentPolicy.IMAGENET``.
        interpolation (InterpolationMode, optional): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If the input is a Tensor, only ``InterpolationMode.NEAREST`` and ``InterpolationMode.BILINEAR`` are supported.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands.
    """
    _v1_transform_cls = _transforms.AutoAugment

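    # Each entry maps a transform id to a pair ``(magnitudes_fn, signed)``: ``magnitudes_fn(num_bins, height, width)``
    # returns the tensor of candidate magnitudes (or ``None`` for magnitude-free ops), and ``signed`` indicates
    # whether the sampled magnitude is negated with probability 0.5 in forward().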
    _AUGMENTATION_SPACE = {
        "ShearX": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),
        "ShearY": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),
        "TranslateX": (
            lambda num_bins, height, width: torch.linspace(0.0, 150.0 / 331.0 * width, num_bins),
            True,
        ),
        "TranslateY": (
            lambda num_bins, height, width: torch.linspace(0.0, 150.0 / 331.0 * height, num_bins),
            True,
        ),
        "Rotate": (lambda num_bins, height, width: torch.linspace(0.0, 30.0, num_bins), True),
        "Brightness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
        "Color": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
        "Contrast": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
        "Sharpness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
        "Posterize": (
            lambda num_bins, height, width: (8 - (torch.arange(num_bins) / ((num_bins - 1) / 4))).round().int(),
            False,
        ),
        "Solarize": (lambda num_bins, height, width: torch.linspace(1.0, 0.0, num_bins), False),
        "AutoContrast": (lambda num_bins, height, width: None, False),
        "Equalize": (lambda num_bins, height, width: None, False),
        "Invert": (lambda num_bins, height, width: None, False),
    }

    def __init__(
        self,
        policy: AutoAugmentPolicy = AutoAugmentPolicy.IMAGENET,
        interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
        fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = None,
    ) -> None:
        super().__init__(interpolation=interpolation, fill=fill)
        self.policy = policy
        self._policies = self._get_policies(policy)

    def _get_policies(
        self, policy: AutoAugmentPolicy
    ) -> List[Tuple[Tuple[str, float, Optional[int]], Tuple[str, float, Optional[int]]]]:
        if policy == AutoAugmentPolicy.IMAGENET:
            return [
                (("Posterize", 0.4, 8), ("Rotate", 0.6, 9)),
                (("Solarize", 0.6, 5), ("AutoContrast", 0.6, None)),
                (("Equalize", 0.8, None), ("Equalize", 0.6, None)),
                (("Posterize", 0.6, 7), ("Posterize", 0.6, 6)),
                (("Equalize", 0.4, None), ("Solarize", 0.2, 4)),
                (("Equalize", 0.4, None), ("Rotate", 0.8, 8)),
                (("Solarize", 0.6, 3), ("Equalize", 0.6, None)),
                (("Posterize", 0.8, 5), ("Equalize", 1.0, None)),
                (("Rotate", 0.2, 3), ("Solarize", 0.6, 8)),
                (("Equalize", 0.6, None), ("Posterize", 0.4, 6)),
                (("Rotate", 0.8, 8), ("Color", 0.4, 0)),
                (("Rotate", 0.4, 9), ("Equalize", 0.6, None)),
                (("Equalize", 0.0, None), ("Equalize", 0.8, None)),
                (("Invert", 0.6, None), ("Equalize", 1.0, None)),
                (("Color", 0.6, 4), ("Contrast", 1.0, 8)),
                (("Rotate", 0.8, 8), ("Color", 1.0, 2)),
                (("Color", 0.8, 8), ("Solarize", 0.8, 7)),
                (("Sharpness", 0.4, 7), ("Invert", 0.6, None)),
                (("ShearX", 0.6, 5), ("Equalize", 1.0, None)),
                (("Color", 0.4, 0), ("Equalize", 0.6, None)),
                (("Equalize", 0.4, None), ("Solarize", 0.2, 4)),
                (("Solarize", 0.6, 5), ("AutoContrast", 0.6, None)),
                (("Invert", 0.6, None), ("Equalize", 1.0, None)),
                (("Color", 0.6, 4), ("Contrast", 1.0, 8)),
                (("Equalize", 0.8, None), ("Equalize", 0.6, None)),
            ]
        elif policy == AutoAugmentPolicy.CIFAR10:
            return [
                (("Invert", 0.1, None), ("Contrast", 0.2, 6)),
                (("Rotate", 0.7, 2), ("TranslateX", 0.3, 9)),
                (("Sharpness", 0.8, 1), ("Sharpness", 0.9, 3)),
                (("ShearY", 0.5, 8), ("TranslateY", 0.7, 9)),
                (("AutoContrast", 0.5, None), ("Equalize", 0.9, None)),
                (("ShearY", 0.2, 7), ("Posterize", 0.3, 7)),
                (("Color", 0.4, 3), ("Brightness", 0.6, 7)),
                (("Sharpness", 0.3, 9), ("Brightness", 0.7, 9)),
                (("Equalize", 0.6, None), ("Equalize", 0.5, None)),
                (("Contrast", 0.6, 7), ("Sharpness", 0.6, 5)),
                (("Color", 0.7, 7), ("TranslateX", 0.5, 8)),
                (("Equalize", 0.3, None), ("AutoContrast", 0.4, None)),
                (("TranslateY", 0.4, 3), ("Sharpness", 0.2, 6)),
                (("Brightness", 0.9, 6), ("Color", 0.2, 8)),
                (("Solarize", 0.5, 2), ("Invert", 0.0, None)),
                (("Equalize", 0.2, None), ("AutoContrast", 0.6, None)),
                (("Equalize", 0.2, None), ("Equalize", 0.6, None)),
                (("Color", 0.9, 9), ("Equalize", 0.6, None)),
                (("AutoContrast", 0.8, None), ("Solarize", 0.2, 8)),
                (("Brightness", 0.1, 3), ("Color", 0.7, 0)),
                (("Solarize", 0.4, 5), ("AutoContrast", 0.9, None)),
                (("TranslateY", 0.9, 9), ("TranslateY", 0.7, 9)),
                (("AutoContrast", 0.9, None), ("Solarize", 0.8, 3)),
                (("Equalize", 0.8, None), ("Invert", 0.1, None)),
                (("TranslateY", 0.7, 9), ("AutoContrast", 0.9, None)),
            ]
        elif policy == AutoAugmentPolicy.SVHN:
            return [
                (("ShearX", 0.9, 4), ("Invert", 0.2, None)),
                (("ShearY", 0.9, 8), ("Invert", 0.7, None)),
                (("Equalize", 0.6, None), ("Solarize", 0.6, 6)),
                (("Invert", 0.9, None), ("Equalize", 0.6, None)),
                (("Equalize", 0.6, None), ("Rotate", 0.9, 3)),
                (("ShearX", 0.9, 4), ("AutoContrast", 0.8, None)),
                (("ShearY", 0.9, 8), ("Invert", 0.4, None)),
                (("ShearY", 0.9, 5), ("Solarize", 0.2, 6)),
                (("Invert", 0.9, None), ("AutoContrast", 0.8, None)),
                (("Equalize", 0.6, None), ("Rotate", 0.9, 3)),
                (("ShearX", 0.9, 4), ("Solarize", 0.3, 3)),
                (("ShearY", 0.8, 8), ("Invert", 0.7, None)),
                (("Equalize", 0.9, None), ("TranslateY", 0.6, 6)),
                (("Invert", 0.9, None), ("Equalize", 0.6, None)),
                (("Contrast", 0.3, 3), ("Rotate", 0.8, 4)),
                (("Invert", 0.8, None), ("TranslateY", 0.0, 2)),
                (("ShearY", 0.7, 6), ("Solarize", 0.4, 8)),
                (("Invert", 0.6, None), ("Rotate", 0.8, 4)),
                (("ShearY", 0.3, 7), ("TranslateX", 0.9, 3)),
                (("ShearX", 0.1, 6), ("Invert", 0.6, None)),
                (("Solarize", 0.7, 2), ("TranslateY", 0.6, 7)),
                (("ShearY", 0.8, 4), ("Invert", 0.8, None)),
                (("ShearX", 0.7, 9), ("TranslateY", 0.8, 3)),
                (("ShearY", 0.8, 5), ("AutoContrast", 0.7, None)),
                (("ShearX", 0.7, 2), ("Invert", 0.1, None)),
            ]
        else:
            raise ValueError(f"The provided policy {policy} is not recognized.")

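    # Each sub-policy pair above is consumed in forward(): every (transform_id, probability, magnitude_idx)
    # entry fires independently with its probability, and magnitude_idx indexes the 10-bin magnitude range
    # from _AUGMENTATION_SPACE (None for magnitude-free ops such as Equalize or Invert).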
    def forward(self, *inputs: Any) -> Any:
        flat_inputs_with_spec, image_or_video = self._flatten_and_extract_image_or_video(inputs)
        height, width = get_size(image_or_video)

        policy = self._policies[int(torch.randint(len(self._policies), ()))]

        for transform_id, probability, magnitude_idx in policy:
            if not torch.rand(()) <= probability:
                continue

            magnitudes_fn, signed = self._AUGMENTATION_SPACE[transform_id]

            magnitudes = magnitudes_fn(10, height, width)
            if magnitudes is not None:
                magnitude = float(magnitudes[magnitude_idx])
                if signed and torch.rand(()) <= 0.5:
                    magnitude *= -1
            else:
                magnitude = 0.0

            image_or_video = self._apply_image_or_video_transform(
                image_or_video, transform_id, magnitude, interpolation=self.interpolation, fill=self._fill
            )

        return self._unflatten_and_insert_image_or_video(flat_inputs_with_spec, image_or_video)


class RandAugment(_AutoAugmentBase):
    r"""[BETA] RandAugment data augmentation method based on
    `"RandAugment: Practical automated data augmentation with a reduced search space"
    <https://arxiv.org/abs/1909.13719>`_.

    .. v2betastatus:: RandAugment transform

    This transformation works on images and videos only.

    If the input is a :class:`torch.Tensor`, it should be of type ``torch.uint8``, and it is expected
    to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
    If the input is a PIL Image, it is expected to be in mode "L" or "RGB".

    Args:
        num_ops (int, optional): Number of augmentation transformations to apply sequentially. Default is ``2``.
        magnitude (int, optional): Magnitude for all the transformations. Default is ``9``.
        num_magnitude_bins (int, optional): The number of different magnitude values. Default is ``31``.
        interpolation (InterpolationMode, optional): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If the input is a Tensor, only ``InterpolationMode.NEAREST`` and ``InterpolationMode.BILINEAR`` are supported.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands.
    """

    _v1_transform_cls = _transforms.RandAugment
    _AUGMENTATION_SPACE = {
        "Identity": (lambda num_bins, height, width: None, False),
        "ShearX": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),
        "ShearY": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),
        "TranslateX": (
            lambda num_bins, height, width: torch.linspace(0.0, 150.0 / 331.0 * width, num_bins),
            True,
        ),
        "TranslateY": (
            lambda num_bins, height, width: torch.linspace(0.0, 150.0 / 331.0 * height, num_bins),
            True,
        ),
        "Rotate": (lambda num_bins, height, width: torch.linspace(0.0, 30.0, num_bins), True),
        "Brightness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
        "Color": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
        "Contrast": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
        "Sharpness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
        "Posterize": (
            lambda num_bins, height, width: (8 - (torch.arange(num_bins) / ((num_bins - 1) / 4))).round().int(),
            False,
        ),
        "Solarize": (lambda num_bins, height, width: torch.linspace(1.0, 0.0, num_bins), False),
        "AutoContrast": (lambda num_bins, height, width: None, False),
        "Equalize": (lambda num_bins, height, width: None, False),
    }

    def __init__(
        self,
        num_ops: int = 2,
        magnitude: int = 9,
        num_magnitude_bins: int = 31,
        interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
        fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = None,
    ) -> None:
        super().__init__(interpolation=interpolation, fill=fill)
        self.num_ops = num_ops
        self.magnitude = magnitude
        self.num_magnitude_bins = num_magnitude_bins

    def forward(self, *inputs: Any) -> Any:
        flat_inputs_with_spec, image_or_video = self._flatten_and_extract_image_or_video(inputs)
        height, width = get_size(image_or_video)

        for _ in range(self.num_ops):
            transform_id, (magnitudes_fn, signed) = self._get_random_item(self._AUGMENTATION_SPACE)
            magnitudes = magnitudes_fn(self.num_magnitude_bins, height, width)
            if magnitudes is not None:
                magnitude = float(magnitudes[self.magnitude])
                if signed and torch.rand(()) <= 0.5:
                    magnitude *= -1
            else:
                magnitude = 0.0
            image_or_video = self._apply_image_or_video_transform(
                image_or_video, transform_id, magnitude, interpolation=self.interpolation, fill=self._fill
            )

        return self._unflatten_and_insert_image_or_video(flat_inputs_with_spec, image_or_video)


class TrivialAugmentWide(_AutoAugmentBase):
    r"""[BETA] Dataset-independent data-augmentation with TrivialAugment Wide, as described in
    `"TrivialAugment: Tuning-free Yet State-of-the-Art Data Augmentation" <https://arxiv.org/abs/2103.10158>`_.

    .. v2betastatus:: TrivialAugmentWide transform

    This transformation works on images and videos only.

    If the input is a :class:`torch.Tensor`, it should be of type ``torch.uint8``, and it is expected
    to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
    If the input is a PIL Image, it is expected to be in mode "L" or "RGB".

    Args:
        num_magnitude_bins (int, optional): The number of different magnitude values. Default is ``31``.
        interpolation (InterpolationMode, optional): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If the input is a Tensor, only ``InterpolationMode.NEAREST`` and ``InterpolationMode.BILINEAR`` are supported.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands.
    """

    _v1_transform_cls = _transforms.TrivialAugmentWide
    _AUGMENTATION_SPACE = {
        "Identity": (lambda num_bins, height, width: None, False),
        "ShearX": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),
        "ShearY": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),
        "TranslateX": (lambda num_bins, height, width: torch.linspace(0.0, 32.0, num_bins), True),
        "TranslateY": (lambda num_bins, height, width: torch.linspace(0.0, 32.0, num_bins), True),
        "Rotate": (lambda num_bins, height, width: torch.linspace(0.0, 135.0, num_bins), True),
        "Brightness": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),
        "Color": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),
        "Contrast": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),
        "Sharpness": (lambda num_bins, height, width: torch.linspace(0.0, 0.99, num_bins), True),
        "Posterize": (
            lambda num_bins, height, width: (8 - (torch.arange(num_bins) / ((num_bins - 1) / 6))).round().int(),
            False,
        ),
        "Solarize": (lambda num_bins, height, width: torch.linspace(1.0, 0.0, num_bins), False),
        "AutoContrast": (lambda num_bins, height, width: None, False),
        "Equalize": (lambda num_bins, height, width: None, False),
    }

    def __init__(
        self,
        num_magnitude_bins: int = 31,
        interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
        fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = None,
    ):
        super().__init__(interpolation=interpolation, fill=fill)
        self.num_magnitude_bins = num_magnitude_bins

    def forward(self, *inputs: Any) -> Any:
        flat_inputs_with_spec, image_or_video = self._flatten_and_extract_image_or_video(inputs)
        height, width = get_size(image_or_video)

        transform_id, (magnitudes_fn, signed) = self._get_random_item(self._AUGMENTATION_SPACE)

        magnitudes = magnitudes_fn(self.num_magnitude_bins, height, width)
        if magnitudes is not None:
            magnitude = float(magnitudes[int(torch.randint(self.num_magnitude_bins, ()))])
            if signed and torch.rand(()) <= 0.5:
                magnitude *= -1
        else:
            magnitude = 0.0

        image_or_video = self._apply_image_or_video_transform(
            image_or_video, transform_id, magnitude, interpolation=self.interpolation, fill=self._fill
        )
        return self._unflatten_and_insert_image_or_video(flat_inputs_with_spec, image_or_video)


class AugMix(_AutoAugmentBase):
    r"""[BETA] AugMix data augmentation method based on
    `"AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty" <https://arxiv.org/abs/1912.02781>`_.

    .. v2betastatus:: AugMix transform

    This transformation works on images and videos only.

    If the input is a :class:`torch.Tensor`, it should be of type ``torch.uint8``, and it is expected
    to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
    If the input is a PIL Image, it is expected to be in mode "L" or "RGB".

    Args:
        severity (int, optional): The severity of base augmentation operators. Default is ``3``.
        mixture_width (int, optional): The number of augmentation chains. Default is ``3``.
        chain_depth (int, optional): The depth of augmentation chains. A negative value denotes stochastic depth sampled from the interval [1, 3].
            Default is ``-1``.
        alpha (float, optional): The hyperparameter for the probability distributions. Default is ``1.0``.
        all_ops (bool, optional): Use all operations (including brightness, contrast, color and sharpness). Default is ``True``.
        interpolation (InterpolationMode, optional): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``.
            If the input is a Tensor, only ``InterpolationMode.NEAREST`` and ``InterpolationMode.BILINEAR`` are supported.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands.
    """

    _v1_transform_cls = _transforms.AugMix

    _PARTIAL_AUGMENTATION_SPACE = {
        "ShearX": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),
        "ShearY": (lambda num_bins, height, width: torch.linspace(0.0, 0.3, num_bins), True),
        "TranslateX": (lambda num_bins, height, width: torch.linspace(0.0, width / 3.0, num_bins), True),
        "TranslateY": (lambda num_bins, height, width: torch.linspace(0.0, height / 3.0, num_bins), True),
        "Rotate": (lambda num_bins, height, width: torch.linspace(0.0, 30.0, num_bins), True),
        "Posterize": (
            lambda num_bins, height, width: (4 - (torch.arange(num_bins) / ((num_bins - 1) / 4))).round().int(),
            False,
        ),
        "Solarize": (lambda num_bins, height, width: torch.linspace(1.0, 0.0, num_bins), False),
        "AutoContrast": (lambda num_bins, height, width: None, False),
        "Equalize": (lambda num_bins, height, width: None, False),
    }
    _AUGMENTATION_SPACE: Dict[str, Tuple[Callable[[int, int, int], Optional[torch.Tensor]], bool]] = {
        **_PARTIAL_AUGMENTATION_SPACE,
        "Brightness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
        "Color": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
        "Contrast": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
        "Sharpness": (lambda num_bins, height, width: torch.linspace(0.0, 0.9, num_bins), True),
    }

    def __init__(
        self,
        severity: int = 3,
        mixture_width: int = 3,
        chain_depth: int = -1,
        alpha: float = 1.0,
        all_ops: bool = True,
        interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
        fill: Union[_FillType, Dict[Union[Type, str], _FillType]] = None,
    ) -> None:
        super().__init__(interpolation=interpolation, fill=fill)
        self._PARAMETER_MAX = 10
        if not (1 <= severity <= self._PARAMETER_MAX):
            raise ValueError(f"The severity must be between [1, {self._PARAMETER_MAX}]. Got {severity} instead.")
        self.severity = severity
        self.mixture_width = mixture_width
        self.chain_depth = chain_depth
        self.alpha = alpha
        self.all_ops = all_ops

    def _sample_dirichlet(self, params: torch.Tensor) -> torch.Tensor:
        # Must be on a separate method so that we can overwrite it in tests.
        return torch._sample_dirichlet(params)

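    # Rough shape of the mixing performed in forward() (notation only, not executable):
    #   mix = m[:, 0] * x + sum_i (m[:, 1] * w_i) * chain_i(x)
    # where m ~ Dirichlet(alpha, alpha) splits weight between the original and the augmented part, and
    # w ~ Dirichlet(alpha, ..., alpha) distributes the augmented share across the `mixture_width` chains.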
    def forward(self, *inputs: Any) -> Any:
        flat_inputs_with_spec, orig_image_or_video = self._flatten_and_extract_image_or_video(inputs)
        height, width = get_size(orig_image_or_video)

        if isinstance(orig_image_or_video, torch.Tensor):
            image_or_video = orig_image_or_video
        else:  # isinstance(inpt, PIL.Image.Image):
            image_or_video = F.pil_to_tensor(orig_image_or_video)

        augmentation_space = self._AUGMENTATION_SPACE if self.all_ops else self._PARTIAL_AUGMENTATION_SPACE

        orig_dims = list(image_or_video.shape)
        expected_ndim = 5 if isinstance(orig_image_or_video, tv_tensors.Video) else 4
        batch = image_or_video.reshape([1] * max(expected_ndim - image_or_video.ndim, 0) + orig_dims)
        batch_dims = [batch.size(0)] + [1] * (batch.ndim - 1)

        # Sample the beta weights for combining the original and augmented image or video. To get Beta, we use a
        # Dirichlet with 2 parameters. The 1st column stores the weights of the original and the 2nd the ones of
        # augmented image or video.
        m = self._sample_dirichlet(
            torch.tensor([self.alpha, self.alpha], device=batch.device).expand(batch_dims[0], -1)
        )

        # Sample the mixing weights and combine them with the ones sampled from Beta for the augmented images or videos.
        combined_weights = self._sample_dirichlet(
            torch.tensor([self.alpha] * self.mixture_width, device=batch.device).expand(batch_dims[0], -1)
        ) * m[:, 1].reshape([batch_dims[0], -1])

        mix = m[:, 0].reshape(batch_dims) * batch
        for i in range(self.mixture_width):
            aug = batch
            depth = self.chain_depth if self.chain_depth > 0 else int(torch.randint(low=1, high=4, size=(1,)).item())
            for _ in range(depth):
                transform_id, (magnitudes_fn, signed) = self._get_random_item(augmentation_space)

                magnitudes = magnitudes_fn(self._PARAMETER_MAX, height, width)
                if magnitudes is not None:
                    magnitude = float(magnitudes[int(torch.randint(self.severity, ()))])
                    if signed and torch.rand(()) <= 0.5:
                        magnitude *= -1
                else:
                    magnitude = 0.0

                aug = self._apply_image_or_video_transform(
                    aug, transform_id, magnitude, interpolation=self.interpolation, fill=self._fill
                )
            mix.add_(combined_weights[:, i].reshape(batch_dims) * aug)
        mix = mix.reshape(orig_dims).to(dtype=image_or_video.dtype)

        if isinstance(orig_image_or_video, (tv_tensors.Image, tv_tensors.Video)):
            mix = tv_tensors.wrap(mix, like=orig_image_or_video)
        elif isinstance(orig_image_or_video, PIL.Image.Image):
            mix = F.to_pil_image(mix)

        return self._unflatten_and_insert_image_or_video(flat_inputs_with_spec, mix)